/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
29 */ 30 31 /* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_var.h> 38 #include <netinet/sctp_sysctl.h> 39 #include <netinet/sctp_pcb.h> 40 #include <netinet/sctp_header.h> 41 #include <netinet/sctputil.h> 42 #include <netinet/sctp_output.h> 43 #include <netinet/sctp_input.h> 44 #include <netinet/sctp_auth.h> 45 #include <netinet/sctp_indata.h> 46 #include <netinet/sctp_asconf.h> 47 #include <netinet/sctp_bsd_addr.h> 48 #include <netinet/sctp_timer.h> 49 50 51 52 static void 53 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb) 54 { 55 struct sctp_nets *net; 56 57 /* 58 * This now not only stops all cookie timers it also stops any INIT 59 * timers as well. This will make sure that the timers are stopped 60 * in all collision cases. 61 */ 62 SCTP_TCB_LOCK_ASSERT(stcb); 63 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 64 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) { 65 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, 66 stcb->sctp_ep, 67 stcb, 68 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1); 69 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) { 70 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, 71 stcb->sctp_ep, 72 stcb, 73 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2); 74 } 75 } 76 } 77 78 /* INIT handler */ 79 static void 80 sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, 81 struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 82 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 83 { 84 struct sctp_init *init; 85 struct mbuf *op_err; 86 uint32_t init_limit; 87 88 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n", 89 stcb); 90 if (stcb == NULL) { 91 SCTP_INP_RLOCK(inp); 92 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 93 goto outnow; 94 } 95 } 96 op_err = NULL; 97 init = &cp->init; 98 /* First are we accepting? 
*/ 99 if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) { 100 SCTPDBG(SCTP_DEBUG_INPUT2, 101 "sctp_handle_init: Abort, so_qlimit:%d\n", 102 inp->sctp_socket->so_qlimit); 103 /* 104 * FIX ME ?? What about TCP model and we have a 105 * match/restart case? 106 */ 107 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 108 vrf_id); 109 if (stcb) 110 *abort_no_unlock = 1; 111 goto outnow; 112 } 113 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) { 114 /* Invalid length */ 115 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 116 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 117 vrf_id); 118 if (stcb) 119 *abort_no_unlock = 1; 120 goto outnow; 121 } 122 /* validate parameters */ 123 if (init->initiate_tag == 0) { 124 /* protocol error... send abort */ 125 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 126 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 127 vrf_id); 128 if (stcb) 129 *abort_no_unlock = 1; 130 goto outnow; 131 } 132 if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) { 133 /* invalid parameter... send abort */ 134 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 135 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 136 vrf_id); 137 if (stcb) 138 *abort_no_unlock = 1; 139 goto outnow; 140 } 141 if (init->num_inbound_streams == 0) { 142 /* protocol error... send abort */ 143 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 144 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 145 vrf_id); 146 if (stcb) 147 *abort_no_unlock = 1; 148 goto outnow; 149 } 150 if (init->num_outbound_streams == 0) { 151 /* protocol error... 
send abort */ 152 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 153 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 154 vrf_id); 155 if (stcb) 156 *abort_no_unlock = 1; 157 goto outnow; 158 } 159 init_limit = offset + ntohs(cp->ch.chunk_length); 160 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp), 161 init_limit)) { 162 /* auth parameter(s) error... send abort */ 163 sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id); 164 if (stcb) 165 *abort_no_unlock = 1; 166 goto outnow; 167 } 168 /* send an INIT-ACK w/cookie */ 169 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n"); 170 sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, 171 ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED)); 172 outnow: 173 if (stcb == NULL) { 174 SCTP_INP_RUNLOCK(inp); 175 } 176 } 177 178 /* 179 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error 180 */ 181 182 int 183 sctp_is_there_unsent_data(struct sctp_tcb *stcb) 184 { 185 int unsent_data = 0; 186 struct sctp_stream_queue_pending *sp; 187 struct sctp_stream_out *strq; 188 struct sctp_association *asoc; 189 190 /* 191 * This function returns the number of streams that have true unsent 192 * data on them. Note that as it looks through it will clean up any 193 * places that have old data that has been sent but left at top of 194 * stream queue. 195 */ 196 asoc = &stcb->asoc; 197 SCTP_TCB_SEND_LOCK(stcb); 198 if (!TAILQ_EMPTY(&asoc->out_wheel)) { 199 /* Check to see if some data queued */ 200 TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) { 201 is_there_another: 202 /* sa_ignore FREED_MEMORY */ 203 sp = TAILQ_FIRST(&strq->outqueue); 204 if (sp == NULL) { 205 continue; 206 } 207 if ((sp->msg_is_complete) && 208 (sp->length == 0) && 209 (sp->sender_all_done)) { 210 /* 211 * We are doing differed cleanup. Last time 212 * through when we took all the data the 213 * sender_all_done was not set. 
214 */ 215 if (sp->put_last_out == 0) { 216 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n"); 217 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n", 218 sp->sender_all_done, 219 sp->length, 220 sp->msg_is_complete, 221 sp->put_last_out); 222 } 223 atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1); 224 TAILQ_REMOVE(&strq->outqueue, sp, next); 225 sctp_free_remote_addr(sp->net); 226 if (sp->data) { 227 sctp_m_freem(sp->data); 228 sp->data = NULL; 229 } 230 sctp_free_a_strmoq(stcb, sp); 231 goto is_there_another; 232 } else { 233 unsent_data++; 234 continue; 235 } 236 } 237 } 238 SCTP_TCB_SEND_UNLOCK(stcb); 239 return (unsent_data); 240 } 241 242 static int 243 sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb, 244 struct sctp_nets *net) 245 { 246 struct sctp_init *init; 247 struct sctp_association *asoc; 248 struct sctp_nets *lnet; 249 unsigned int i; 250 251 init = &cp->init; 252 asoc = &stcb->asoc; 253 /* save off parameters */ 254 asoc->peer_vtag = ntohl(init->initiate_tag); 255 asoc->peers_rwnd = ntohl(init->a_rwnd); 256 if (TAILQ_FIRST(&asoc->nets)) { 257 /* update any ssthresh's that may have a default */ 258 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 259 lnet->ssthresh = asoc->peers_rwnd; 260 261 if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) { 262 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION); 263 } 264 } 265 } 266 SCTP_TCB_SEND_LOCK(stcb); 267 if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) { 268 unsigned int newcnt; 269 struct sctp_stream_out *outs; 270 struct sctp_stream_queue_pending *sp; 271 272 /* cut back on number of streams */ 273 newcnt = ntohs(init->num_inbound_streams); 274 /* This if is probably not needed but I am cautious */ 275 if (asoc->strmout) { 276 /* First make sure no data chunks are trapped */ 277 for (i = newcnt; i < asoc->pre_open_streams; i++) { 278 outs = &asoc->strmout[i]; 279 sp = TAILQ_FIRST(&outs->outqueue); 280 while (sp) { 
281 TAILQ_REMOVE(&outs->outqueue, sp, 282 next); 283 asoc->stream_queue_cnt--; 284 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, 285 stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, 286 sp, SCTP_SO_NOT_LOCKED); 287 if (sp->data) { 288 sctp_m_freem(sp->data); 289 sp->data = NULL; 290 } 291 sctp_free_remote_addr(sp->net); 292 sp->net = NULL; 293 /* Free the chunk */ 294 SCTP_PRINTF("sp:%p tcb:%p weird free case\n", 295 sp, stcb); 296 297 sctp_free_a_strmoq(stcb, sp); 298 /* sa_ignore FREED_MEMORY */ 299 sp = TAILQ_FIRST(&outs->outqueue); 300 } 301 } 302 } 303 /* cut back the count and abandon the upper streams */ 304 asoc->pre_open_streams = newcnt; 305 } 306 SCTP_TCB_SEND_UNLOCK(stcb); 307 asoc->streamoutcnt = asoc->pre_open_streams; 308 /* init tsn's */ 309 asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1; 310 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 311 sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 312 } 313 /* This is the next one we expect */ 314 asoc->str_reset_seq_in = asoc->asconf_seq_in + 1; 315 316 asoc->mapping_array_base_tsn = ntohl(init->initial_tsn); 317 asoc->cumulative_tsn = asoc->asconf_seq_in; 318 asoc->last_echo_tsn = asoc->asconf_seq_in; 319 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 320 /* open the requested streams */ 321 322 if (asoc->strmin != NULL) { 323 /* Free the old ones */ 324 struct sctp_queued_to_read *ctl; 325 326 for (i = 0; i < asoc->streamincnt; i++) { 327 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 328 while (ctl) { 329 TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next); 330 sctp_free_remote_addr(ctl->whoFrom); 331 ctl->whoFrom = NULL; 332 sctp_m_freem(ctl->data); 333 ctl->data = NULL; 334 sctp_free_a_readq(stcb, ctl); 335 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 336 } 337 } 338 SCTP_FREE(asoc->strmin, SCTP_M_STRMI); 339 } 340 asoc->streamincnt = ntohs(init->num_outbound_streams); 341 if (asoc->streamincnt > MAX_SCTP_STREAMS) { 342 asoc->streamincnt = 
MAX_SCTP_STREAMS; 343 } 344 SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt * 345 sizeof(struct sctp_stream_in), SCTP_M_STRMI); 346 if (asoc->strmin == NULL) { 347 /* we didn't get memory for the streams! */ 348 SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n"); 349 return (-1); 350 } 351 for (i = 0; i < asoc->streamincnt; i++) { 352 asoc->strmin[i].stream_no = i; 353 asoc->strmin[i].last_sequence_delivered = 0xffff; 354 /* 355 * U-stream ranges will be set when the cookie is unpacked. 356 * Or for the INIT sender they are un set (if pr-sctp not 357 * supported) when the INIT-ACK arrives. 358 */ 359 TAILQ_INIT(&asoc->strmin[i].inqueue); 360 asoc->strmin[i].delivery_started = 0; 361 } 362 /* 363 * load_address_from_init will put the addresses into the 364 * association when the COOKIE is processed or the INIT-ACK is 365 * processed. Both types of COOKIE's existing and new call this 366 * routine. It will remove addresses that are no longer in the 367 * association (for the restarting case where addresses are 368 * removed). Up front when the INIT arrives we will discard it if it 369 * is a restart and new addresses have been added. 
370 */ 371 /* sa_ignore MEMLEAK */ 372 return (0); 373 } 374 375 /* 376 * INIT-ACK message processing/consumption returns value < 0 on error 377 */ 378 static int 379 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, 380 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 381 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 382 { 383 struct sctp_association *asoc; 384 struct mbuf *op_err; 385 int retval, abort_flag; 386 uint32_t initack_limit; 387 388 /* First verify that we have no illegal param's */ 389 abort_flag = 0; 390 op_err = NULL; 391 392 op_err = sctp_arethere_unrecognized_parameters(m, 393 (offset + sizeof(struct sctp_init_chunk)), 394 &abort_flag, (struct sctp_chunkhdr *)cp); 395 if (abort_flag) { 396 /* Send an abort and notify peer */ 397 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED); 398 *abort_no_unlock = 1; 399 return (-1); 400 } 401 asoc = &stcb->asoc; 402 /* process the peer's parameters in the INIT-ACK */ 403 retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net); 404 if (retval < 0) { 405 return (retval); 406 } 407 initack_limit = offset + ntohs(cp->ch.chunk_length); 408 /* load all addresses */ 409 if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen, 410 (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh, 411 NULL))) { 412 /* Huh, we should abort */ 413 SCTPDBG(SCTP_DEBUG_INPUT1, 414 "Load addresses from INIT causes an abort %d\n", 415 retval); 416 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 417 NULL, 0); 418 *abort_no_unlock = 1; 419 return (-1); 420 } 421 /* if the peer doesn't support asconf, flush the asconf queue */ 422 if (asoc->peer_supports_asconf == 0) { 423 struct sctp_asconf_addr *aparam; 424 425 while (!TAILQ_EMPTY(&asoc->asconf_queue)) { 426 /* sa_ignore FREED_MEMORY */ 427 aparam = TAILQ_FIRST(&asoc->asconf_queue); 428 TAILQ_REMOVE(&asoc->asconf_queue, aparam, next); 429 
SCTP_FREE(aparam, SCTP_M_ASC_ADDR); 430 } 431 } 432 stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs, 433 stcb->asoc.local_hmacs); 434 if (op_err) { 435 sctp_queue_op_err(stcb, op_err); 436 /* queuing will steal away the mbuf chain to the out queue */ 437 op_err = NULL; 438 } 439 /* extract the cookie and queue it to "echo" it back... */ 440 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) { 441 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 442 stcb->asoc.overall_error_count, 443 0, 444 SCTP_FROM_SCTP_INPUT, 445 __LINE__); 446 } 447 stcb->asoc.overall_error_count = 0; 448 net->error_count = 0; 449 450 /* 451 * Cancel the INIT timer, We do this first before queueing the 452 * cookie. We always cancel at the primary to assue that we are 453 * canceling the timer started by the INIT which always goes to the 454 * primary. 455 */ 456 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb, 457 asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4); 458 459 /* calculate the RTO */ 460 net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy); 461 462 retval = sctp_send_cookie_echo(m, offset, stcb, net); 463 if (retval < 0) { 464 /* 465 * No cookie, we probably should send a op error. But in any 466 * case if there is no cookie in the INIT-ACK, we can 467 * abandon the peer, its broke. 
468 */ 469 if (retval == -3) { 470 /* We abort with an error of missing mandatory param */ 471 op_err = 472 sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM); 473 if (op_err) { 474 /* 475 * Expand beyond to include the mandatory 476 * param cookie 477 */ 478 struct sctp_inv_mandatory_param *mp; 479 480 SCTP_BUF_LEN(op_err) = 481 sizeof(struct sctp_inv_mandatory_param); 482 mp = mtod(op_err, 483 struct sctp_inv_mandatory_param *); 484 /* Subtract the reserved param */ 485 mp->length = 486 htons(sizeof(struct sctp_inv_mandatory_param) - 2); 487 mp->num_param = htonl(1); 488 mp->param = htons(SCTP_STATE_COOKIE); 489 mp->resv = 0; 490 } 491 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 492 sh, op_err, 0); 493 *abort_no_unlock = 1; 494 } 495 return (retval); 496 } 497 return (0); 498 } 499 500 static void 501 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp, 502 struct sctp_tcb *stcb, struct sctp_nets *net) 503 { 504 struct sockaddr_storage store; 505 struct sockaddr_in *sin; 506 struct sockaddr_in6 *sin6; 507 struct sctp_nets *r_net; 508 struct timeval tv; 509 int req_prim = 0; 510 511 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) { 512 /* Invalid length */ 513 return; 514 } 515 sin = (struct sockaddr_in *)&store; 516 sin6 = (struct sockaddr_in6 *)&store; 517 518 memset(&store, 0, sizeof(store)); 519 if (cp->heartbeat.hb_info.addr_family == AF_INET && 520 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) { 521 sin->sin_family = cp->heartbeat.hb_info.addr_family; 522 sin->sin_len = cp->heartbeat.hb_info.addr_len; 523 sin->sin_port = stcb->rport; 524 memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address, 525 sizeof(sin->sin_addr)); 526 } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 && 527 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) { 528 sin6->sin6_family = cp->heartbeat.hb_info.addr_family; 529 sin6->sin6_len = cp->heartbeat.hb_info.addr_len; 530 sin6->sin6_port = stcb->rport; 531 
memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address, 532 sizeof(sin6->sin6_addr)); 533 } else { 534 return; 535 } 536 r_net = sctp_findnet(stcb, (struct sockaddr *)sin); 537 if (r_net == NULL) { 538 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n"); 539 return; 540 } 541 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) && 542 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) && 543 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) { 544 /* 545 * If the its a HB and it's random value is correct when can 546 * confirm the destination. 547 */ 548 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 549 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) { 550 stcb->asoc.primary_destination = r_net; 551 r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY; 552 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 553 r_net = TAILQ_FIRST(&stcb->asoc.nets); 554 if (r_net != stcb->asoc.primary_destination) { 555 /* 556 * first one on the list is NOT the primary 557 * sctp_cmpaddr() is much more efficent if 558 * the primary is the first on the list, 559 * make it so. 560 */ 561 TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 562 TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 563 } 564 req_prim = 1; 565 } 566 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 567 stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED); 568 } 569 r_net->error_count = 0; 570 r_net->hb_responded = 1; 571 tv.tv_sec = cp->heartbeat.hb_info.time_value_1; 572 tv.tv_usec = cp->heartbeat.hb_info.time_value_2; 573 if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 574 r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 575 r_net->dest_state |= SCTP_ADDR_REACHABLE; 576 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 577 SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED); 578 /* now was it the primary? 
if so restore */ 579 if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 580 (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net); 581 } 582 } 583 /* 584 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state, 585 * set the destination to active state and set the cwnd to one or 586 * two MTU's based on whether PF1 or PF2 is being used. If a T3 587 * timer is running, for the destination, stop the timer because a 588 * PF-heartbeat was received. 589 */ 590 if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) == 591 SCTP_ADDR_PF) { 592 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 593 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 594 stcb, net, 595 SCTP_FROM_SCTP_INPUT + SCTP_LOC_5); 596 } 597 net->dest_state &= ~SCTP_ADDR_PF; 598 net->cwnd = net->mtu * sctp_cmt_pf; 599 SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n", 600 net, net->cwnd); 601 } 602 /* Now lets do a RTO with this */ 603 r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy); 604 /* Mobility adaptation */ 605 if (req_prim) { 606 if ((sctp_is_mobility_feature_on(stcb->sctp_ep, 607 SCTP_MOBILITY_BASE) || 608 sctp_is_mobility_feature_on(stcb->sctp_ep, 609 SCTP_MOBILITY_FASTHANDOFF)) && 610 sctp_is_mobility_feature_on(stcb->sctp_ep, 611 SCTP_MOBILITY_PRIM_DELETED)) { 612 613 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7); 614 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 615 SCTP_MOBILITY_FASTHANDOFF)) { 616 sctp_assoc_immediate_retrans(stcb, 617 stcb->asoc.primary_destination); 618 } 619 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 620 SCTP_MOBILITY_BASE)) { 621 sctp_move_chunks_from_deleted_prim(stcb, 622 stcb->asoc.primary_destination); 623 } 624 sctp_delete_prim_timer(stcb->sctp_ep, stcb, 625 stcb->asoc.deleted_primary); 626 } 627 } 628 } 629 630 static void 631 sctp_handle_abort(struct sctp_abort_chunk *cp, 632 
struct sctp_tcb *stcb, struct sctp_nets *net) 633 { 634 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 635 struct socket *so; 636 637 #endif 638 639 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n"); 640 if (stcb == NULL) 641 return; 642 643 /* stop any receive timers */ 644 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 645 /* notify user of the abort and clean up... */ 646 sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED); 647 /* free the tcb */ 648 #if defined(SCTP_PANIC_ON_ABORT) 649 printf("stcb:%p state:%d rport:%d net:%p\n", 650 stcb, stcb->asoc.state, stcb->rport, net); 651 if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 652 panic("Received an ABORT"); 653 } else { 654 printf("No panic its in state %x closed\n", stcb->asoc.state); 655 } 656 #endif 657 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 658 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 659 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 660 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 661 } 662 #ifdef SCTP_ASOCLOG_OF_TSNS 663 sctp_print_out_track_log(stcb); 664 #endif 665 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 666 so = SCTP_INP_SO(stcb->sctp_ep); 667 atomic_add_int(&stcb->asoc.refcnt, 1); 668 SCTP_TCB_UNLOCK(stcb); 669 SCTP_SOCKET_LOCK(so, 1); 670 SCTP_TCB_LOCK(stcb); 671 atomic_subtract_int(&stcb->asoc.refcnt, 1); 672 #endif 673 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 674 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 675 SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 676 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 677 SCTP_SOCKET_UNLOCK(so, 1); 678 #endif 679 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n"); 680 } 681 682 static void 683 sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, 684 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag) 685 { 686 struct sctp_association *asoc; 687 int some_on_streamwheel; 688 689 #if defined 
(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 690 struct socket *so; 691 692 #endif 693 694 SCTPDBG(SCTP_DEBUG_INPUT2, 695 "sctp_handle_shutdown: handling SHUTDOWN\n"); 696 if (stcb == NULL) 697 return; 698 asoc = &stcb->asoc; 699 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 700 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 701 return; 702 } 703 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) { 704 /* Shutdown NOT the expected size */ 705 return; 706 } else { 707 sctp_update_acked(stcb, cp, net, abort_flag); 708 } 709 if (asoc->control_pdapi) { 710 /* 711 * With a normal shutdown we assume the end of last record. 712 */ 713 SCTP_INP_READ_LOCK(stcb->sctp_ep); 714 asoc->control_pdapi->end_added = 1; 715 asoc->control_pdapi->pdapi_aborted = 1; 716 asoc->control_pdapi = NULL; 717 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 718 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 719 so = SCTP_INP_SO(stcb->sctp_ep); 720 atomic_add_int(&stcb->asoc.refcnt, 1); 721 SCTP_TCB_UNLOCK(stcb); 722 SCTP_SOCKET_LOCK(so, 1); 723 SCTP_TCB_LOCK(stcb); 724 atomic_subtract_int(&stcb->asoc.refcnt, 1); 725 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 726 /* assoc was freed while we were unlocked */ 727 SCTP_SOCKET_UNLOCK(so, 1); 728 return; 729 } 730 #endif 731 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 732 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 733 SCTP_SOCKET_UNLOCK(so, 1); 734 #endif 735 } 736 /* goto SHUTDOWN_RECEIVED state to block new requests */ 737 if (stcb->sctp_socket) { 738 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 739 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) && 740 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 741 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED); 742 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 743 /* 744 * notify upper layer that peer has initiated a 745 * shutdown 746 */ 747 sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, 
SCTP_SO_NOT_LOCKED); 748 749 /* reset time */ 750 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 751 } 752 } 753 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 754 /* 755 * stop the shutdown timer, since we WILL move to 756 * SHUTDOWN-ACK-SENT. 757 */ 758 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8); 759 } 760 /* Now is there unsent data on a stream somewhere? */ 761 some_on_streamwheel = sctp_is_there_unsent_data(stcb); 762 763 if (!TAILQ_EMPTY(&asoc->send_queue) || 764 !TAILQ_EMPTY(&asoc->sent_queue) || 765 some_on_streamwheel) { 766 /* By returning we will push more data out */ 767 return; 768 } else { 769 /* no outstanding data to send, so move on... */ 770 /* send SHUTDOWN-ACK */ 771 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 772 /* move to SHUTDOWN-ACK-SENT state */ 773 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 774 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 775 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 776 } 777 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 778 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 779 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, 780 SCTP_FROM_SCTP_INPUT + SCTP_LOC_7); 781 /* start SHUTDOWN timer */ 782 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, 783 stcb, net); 784 } 785 } 786 787 static void 788 sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp, 789 struct sctp_tcb *stcb, struct sctp_nets *net) 790 { 791 struct sctp_association *asoc; 792 793 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 794 struct socket *so; 795 796 so = SCTP_INP_SO(stcb->sctp_ep); 797 #endif 798 SCTPDBG(SCTP_DEBUG_INPUT2, 799 "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n"); 800 if (stcb == NULL) 801 return; 802 803 asoc = &stcb->asoc; 804 /* process according to association state */ 805 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 806 (SCTP_GET_STATE(asoc) != 
SCTP_STATE_SHUTDOWN_ACK_SENT)) { 807 /* unexpected SHUTDOWN-ACK... so ignore... */ 808 SCTP_TCB_UNLOCK(stcb); 809 return; 810 } 811 if (asoc->control_pdapi) { 812 /* 813 * With a normal shutdown we assume the end of last record. 814 */ 815 SCTP_INP_READ_LOCK(stcb->sctp_ep); 816 asoc->control_pdapi->end_added = 1; 817 asoc->control_pdapi->pdapi_aborted = 1; 818 asoc->control_pdapi = NULL; 819 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 820 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 821 atomic_add_int(&stcb->asoc.refcnt, 1); 822 SCTP_TCB_UNLOCK(stcb); 823 SCTP_SOCKET_LOCK(so, 1); 824 SCTP_TCB_LOCK(stcb); 825 atomic_subtract_int(&stcb->asoc.refcnt, 1); 826 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 827 /* assoc was freed while we were unlocked */ 828 SCTP_SOCKET_UNLOCK(so, 1); 829 return; 830 } 831 #endif 832 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 833 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 834 SCTP_SOCKET_UNLOCK(so, 1); 835 #endif 836 } 837 /* are the queues empty? 
*/ 838 if (!TAILQ_EMPTY(&asoc->send_queue) || 839 !TAILQ_EMPTY(&asoc->sent_queue) || 840 !TAILQ_EMPTY(&asoc->out_wheel)) { 841 sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED); 842 } 843 /* stop the timer */ 844 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9); 845 /* send SHUTDOWN-COMPLETE */ 846 sctp_send_shutdown_complete(stcb, net); 847 /* notify upper layer protocol */ 848 if (stcb->sctp_socket) { 849 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 850 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 851 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 852 /* Set the connected flag to disconnected */ 853 stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0; 854 } 855 } 856 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 857 /* free the TCB but first save off the ep */ 858 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 859 atomic_add_int(&stcb->asoc.refcnt, 1); 860 SCTP_TCB_UNLOCK(stcb); 861 SCTP_SOCKET_LOCK(so, 1); 862 SCTP_TCB_LOCK(stcb); 863 atomic_subtract_int(&stcb->asoc.refcnt, 1); 864 #endif 865 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 866 SCTP_FROM_SCTP_INPUT + SCTP_LOC_10); 867 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 868 SCTP_SOCKET_UNLOCK(so, 1); 869 #endif 870 } 871 872 /* 873 * Skip past the param header and then we will find the chunk that caused the 874 * problem. There are two possiblities ASCONF or FWD-TSN other than that and 875 * our peer must be broken. 
876 */ 877 static void 878 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr, 879 struct sctp_nets *net) 880 { 881 struct sctp_chunkhdr *chk; 882 883 chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr)); 884 switch (chk->chunk_type) { 885 case SCTP_ASCONF_ACK: 886 case SCTP_ASCONF: 887 sctp_asconf_cleanup(stcb, net); 888 break; 889 case SCTP_FORWARD_CUM_TSN: 890 stcb->asoc.peer_supports_prsctp = 0; 891 break; 892 default: 893 SCTPDBG(SCTP_DEBUG_INPUT2, 894 "Peer does not support chunk type %d(%x)??\n", 895 chk->chunk_type, (uint32_t) chk->chunk_type); 896 break; 897 } 898 } 899 900 /* 901 * Skip past the param header and then we will find the param that caused the 902 * problem. There are a number of param's in a ASCONF OR the prsctp param 903 * these will turn of specific features. 904 */ 905 static void 906 sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr) 907 { 908 struct sctp_paramhdr *pbad; 909 910 pbad = phdr + 1; 911 switch (ntohs(pbad->param_type)) { 912 /* pr-sctp draft */ 913 case SCTP_PRSCTP_SUPPORTED: 914 stcb->asoc.peer_supports_prsctp = 0; 915 break; 916 case SCTP_SUPPORTED_CHUNK_EXT: 917 break; 918 /* draft-ietf-tsvwg-addip-sctp */ 919 case SCTP_ECN_NONCE_SUPPORTED: 920 stcb->asoc.peer_supports_ecn_nonce = 0; 921 stcb->asoc.ecn_nonce_allowed = 0; 922 stcb->asoc.ecn_allowed = 0; 923 break; 924 case SCTP_ADD_IP_ADDRESS: 925 case SCTP_DEL_IP_ADDRESS: 926 case SCTP_SET_PRIM_ADDR: 927 stcb->asoc.peer_supports_asconf = 0; 928 break; 929 case SCTP_SUCCESS_REPORT: 930 case SCTP_ERROR_CAUSE_IND: 931 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? 
or error cause?\n"); 932 SCTPDBG(SCTP_DEBUG_INPUT2, 933 "Turning off ASCONF to this strange peer\n"); 934 stcb->asoc.peer_supports_asconf = 0; 935 break; 936 default: 937 SCTPDBG(SCTP_DEBUG_INPUT2, 938 "Peer does not support param type %d(%x)??\n", 939 pbad->param_type, (uint32_t) pbad->param_type); 940 break; 941 } 942 } 943 944 static int 945 sctp_handle_error(struct sctp_chunkhdr *ch, 946 struct sctp_tcb *stcb, struct sctp_nets *net) 947 { 948 int chklen; 949 struct sctp_paramhdr *phdr; 950 uint16_t error_type; 951 uint16_t error_len; 952 struct sctp_association *asoc; 953 int adjust; 954 955 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 956 struct socket *so; 957 958 #endif 959 960 /* parse through all of the errors and process */ 961 asoc = &stcb->asoc; 962 phdr = (struct sctp_paramhdr *)((caddr_t)ch + 963 sizeof(struct sctp_chunkhdr)); 964 chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr); 965 while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) { 966 /* Process an Error Cause */ 967 error_type = ntohs(phdr->param_type); 968 error_len = ntohs(phdr->param_length); 969 if ((error_len > chklen) || (error_len == 0)) { 970 /* invalid param length for this param */ 971 SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n", 972 chklen, error_len); 973 return (0); 974 } 975 switch (error_type) { 976 case SCTP_CAUSE_INVALID_STREAM: 977 case SCTP_CAUSE_MISSING_PARAM: 978 case SCTP_CAUSE_INVALID_PARAM: 979 case SCTP_CAUSE_NO_USER_DATA: 980 SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n", 981 error_type); 982 break; 983 case SCTP_CAUSE_STALE_COOKIE: 984 /* 985 * We only act if we have echoed a cookie and are 986 * waiting. 
987 */ 988 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 989 int *p; 990 991 p = (int *)((caddr_t)phdr + sizeof(*phdr)); 992 /* Save the time doubled */ 993 asoc->cookie_preserve_req = ntohl(*p) << 1; 994 asoc->stale_cookie_count++; 995 if (asoc->stale_cookie_count > 996 asoc->max_init_times) { 997 sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED); 998 /* now free the asoc */ 999 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1000 so = SCTP_INP_SO(stcb->sctp_ep); 1001 atomic_add_int(&stcb->asoc.refcnt, 1); 1002 SCTP_TCB_UNLOCK(stcb); 1003 SCTP_SOCKET_LOCK(so, 1); 1004 SCTP_TCB_LOCK(stcb); 1005 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1006 #endif 1007 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1008 SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); 1009 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1010 SCTP_SOCKET_UNLOCK(so, 1); 1011 #endif 1012 return (-1); 1013 } 1014 /* blast back to INIT state */ 1015 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED; 1016 asoc->state |= SCTP_STATE_COOKIE_WAIT; 1017 1018 sctp_stop_all_cookie_timers(stcb); 1019 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1020 } 1021 break; 1022 case SCTP_CAUSE_UNRESOLVABLE_ADDR: 1023 /* 1024 * Nothing we can do here, we don't do hostname 1025 * addresses so if the peer does not like my IPv6 1026 * (or IPv4 for that matter) it does not matter. If 1027 * they don't support that type of address, they can 1028 * NOT possibly get that packet type... i.e. with no 1029 * IPv6 you can't recieve a IPv6 packet. so we can 1030 * safely ignore this one. If we ever added support 1031 * for HOSTNAME Addresses, then we would need to do 1032 * something here. 
1033 */ 1034 break; 1035 case SCTP_CAUSE_UNRECOG_CHUNK: 1036 sctp_process_unrecog_chunk(stcb, phdr, net); 1037 break; 1038 case SCTP_CAUSE_UNRECOG_PARAM: 1039 sctp_process_unrecog_param(stcb, phdr); 1040 break; 1041 case SCTP_CAUSE_COOKIE_IN_SHUTDOWN: 1042 /* 1043 * We ignore this since the timer will drive out a 1044 * new cookie anyway and there timer will drive us 1045 * to send a SHUTDOWN_COMPLETE. We can't send one 1046 * here since we don't have their tag. 1047 */ 1048 break; 1049 case SCTP_CAUSE_DELETING_LAST_ADDR: 1050 case SCTP_CAUSE_RESOURCE_SHORTAGE: 1051 case SCTP_CAUSE_DELETING_SRC_ADDR: 1052 /* 1053 * We should NOT get these here, but in a 1054 * ASCONF-ACK. 1055 */ 1056 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n", 1057 error_type); 1058 break; 1059 case SCTP_CAUSE_OUT_OF_RESC: 1060 /* 1061 * And what, pray tell do we do with the fact that 1062 * the peer is out of resources? Not really sure we 1063 * could do anything but abort. I suspect this 1064 * should have came WITH an abort instead of in a 1065 * OP-ERROR. 
1066 */ 1067 break; 1068 default: 1069 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n", 1070 error_type); 1071 break; 1072 } 1073 adjust = SCTP_SIZE32(error_len); 1074 chklen -= adjust; 1075 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust); 1076 } 1077 return (0); 1078 } 1079 1080 static int 1081 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, 1082 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 1083 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 1084 { 1085 struct sctp_init_ack *init_ack; 1086 struct mbuf *op_err; 1087 1088 SCTPDBG(SCTP_DEBUG_INPUT2, 1089 "sctp_handle_init_ack: handling INIT-ACK\n"); 1090 1091 if (stcb == NULL) { 1092 SCTPDBG(SCTP_DEBUG_INPUT2, 1093 "sctp_handle_init_ack: TCB is null\n"); 1094 return (-1); 1095 } 1096 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) { 1097 /* Invalid length */ 1098 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1099 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1100 op_err, 0); 1101 *abort_no_unlock = 1; 1102 return (-1); 1103 } 1104 init_ack = &cp->init; 1105 /* validate parameters */ 1106 if (init_ack->initiate_tag == 0) { 1107 /* protocol error... send an abort */ 1108 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1109 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1110 op_err, 0); 1111 *abort_no_unlock = 1; 1112 return (-1); 1113 } 1114 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) { 1115 /* protocol error... send an abort */ 1116 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1117 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1118 op_err, 0); 1119 *abort_no_unlock = 1; 1120 return (-1); 1121 } 1122 if (init_ack->num_inbound_streams == 0) { 1123 /* protocol error... 
send an abort */ 1124 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1125 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1126 op_err, 0); 1127 *abort_no_unlock = 1; 1128 return (-1); 1129 } 1130 if (init_ack->num_outbound_streams == 0) { 1131 /* protocol error... send an abort */ 1132 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1133 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1134 op_err, 0); 1135 *abort_no_unlock = 1; 1136 return (-1); 1137 } 1138 /* process according to association state... */ 1139 switch (stcb->asoc.state & SCTP_STATE_MASK) { 1140 case SCTP_STATE_COOKIE_WAIT: 1141 /* this is the expected state for this chunk */ 1142 /* process the INIT-ACK parameters */ 1143 if (stcb->asoc.primary_destination->dest_state & 1144 SCTP_ADDR_UNCONFIRMED) { 1145 /* 1146 * The primary is where we sent the INIT, we can 1147 * always consider it confirmed when the INIT-ACK is 1148 * returned. Do this before we load addresses 1149 * though. 
1150 */ 1151 stcb->asoc.primary_destination->dest_state &= 1152 ~SCTP_ADDR_UNCONFIRMED; 1153 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 1154 stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED); 1155 } 1156 if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb, 1157 net, abort_no_unlock, vrf_id) < 0) { 1158 /* error in parsing parameters */ 1159 return (-1); 1160 } 1161 /* update our state */ 1162 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n"); 1163 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED); 1164 1165 /* reset the RTO calc */ 1166 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) { 1167 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 1168 stcb->asoc.overall_error_count, 1169 0, 1170 SCTP_FROM_SCTP_INPUT, 1171 __LINE__); 1172 } 1173 stcb->asoc.overall_error_count = 0; 1174 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 1175 /* 1176 * collapse the init timer back in case of a exponential 1177 * backoff 1178 */ 1179 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep, 1180 stcb, net); 1181 /* 1182 * the send at the end of the inbound data processing will 1183 * cause the cookie to be sent 1184 */ 1185 break; 1186 case SCTP_STATE_SHUTDOWN_SENT: 1187 /* incorrect state... discard */ 1188 break; 1189 case SCTP_STATE_COOKIE_ECHOED: 1190 /* incorrect state... discard */ 1191 break; 1192 case SCTP_STATE_OPEN: 1193 /* incorrect state... discard */ 1194 break; 1195 case SCTP_STATE_EMPTY: 1196 case SCTP_STATE_INUSE: 1197 default: 1198 /* incorrect state... 
discard */ 1199 return (-1); 1200 break; 1201 } 1202 SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n"); 1203 return (0); 1204 } 1205 1206 1207 /* 1208 * handle a state cookie for an existing association m: input packet mbuf 1209 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a 1210 * "split" mbuf and the cookie signature does not exist offset: offset into 1211 * mbuf to the cookie-echo chunk 1212 */ 1213 static struct sctp_tcb * 1214 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, 1215 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1216 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, 1217 struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id, 1218 uint32_t vrf_id) 1219 { 1220 struct sctp_association *asoc; 1221 struct sctp_init_chunk *init_cp, init_buf; 1222 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1223 int chk_length; 1224 int init_offset, initack_offset, i; 1225 int retval; 1226 int spec_flag = 0; 1227 uint32_t how_indx; 1228 1229 /* I know that the TCB is non-NULL from the caller */ 1230 asoc = &stcb->asoc; 1231 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) { 1232 if (asoc->cookie_how[how_indx] == 0) 1233 break; 1234 } 1235 if (how_indx < sizeof(asoc->cookie_how)) { 1236 asoc->cookie_how[how_indx] = 1; 1237 } 1238 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 1239 /* SHUTDOWN came in after sending INIT-ACK */ 1240 struct mbuf *op_err; 1241 struct sctp_paramhdr *ph; 1242 1243 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 1244 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1245 0, M_DONTWAIT, 1, MT_DATA); 1246 if (op_err == NULL) { 1247 /* FOOBAR */ 1248 return (NULL); 1249 } 1250 /* pre-reserve some space */ 1251 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1252 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1253 SCTP_BUF_RESV_UF(op_err, sizeof(struct 
/*
 * Handle a state cookie for an EXISTING association, i.e. a COOKIE ECHO that
 * collided with a TCB we already have (RFC 4960 Section 5.2.4, Table 2).
 *
 * m:      input packet mbuf chain -- assumes a pullup on the
 *         IP/SCTP/COOKIE-ECHO chunk; note this is a "split" mbuf and the
 *         cookie signature does not exist in it any more.
 * offset: offset into the mbuf to the cookie-echo chunk.
 *
 * Returns the stcb when the echo was accepted (a COOKIE-ACK has been sent),
 * or NULL when it was discarded or the parameters were bad.
 *
 * asoc->cookie_how[] is a small debug trace: each exit path stamps a
 * distinct code (1..17) so a post-mortem can tell which collision branch
 * handled each echo.
 */
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	int chk_length;
	int init_offset, initack_offset, i;
	int retval;
	int spec_flag = 0;
	uint32_t how_indx;

	/* I know that the TCB is non-NULL from the caller */
	asoc = &stcb->asoc;
	/* find the first unused slot in the cookie_how debug trace */
	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
		if (asoc->cookie_how[how_indx] == 0)
			break;
	}
	if (how_indx < sizeof(asoc->cookie_how)) {
		asoc->cookie_how[how_indx] = 1;
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* SHUTDOWN came in after sending INIT-ACK */
		struct mbuf *op_err;
		struct sctp_paramhdr *ph;

		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (op_err == NULL) {
			/* FOOBAR: no mbuf, just drop silently */
			return (NULL);
		}
		/* pre-reserve some space for the lower-layer headers */
		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
		/* Set the len */
		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
		ph = mtod(op_err, struct sctp_paramhdr *);
		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
		ph->param_length = htons(sizeof(struct sctp_paramhdr));
		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
		    vrf_id);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 2;
		return (NULL);
	}
	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);

	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		return (NULL);
	}
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
		/*
		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
		 * to get into the OPEN state
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*-
			 * Opps, this means that we somehow generated two vtag's
			 * the same. I.e. we did:
			 *  Us               Peer
			 *   <---INIT(tag=a)------
			 *   ----INIT-ACK(tag=t)-->
			 *   ----INIT(tag=t)------> *1
			 *   <---INIT-ACK(tag=a)---
			 *   <----CE(tag=t)------------- *2
			 *
			 * At point *1 we should be generating a different
			 * tag t'. Which means we would throw away the CE and send
			 * ours instead. Basically this is case C (throw away side).
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 17;
			return (NULL);

		}
		switch SCTP_GET_STATE
			(asoc) {
		case SCTP_STATE_COOKIE_WAIT:
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * INIT was sent but got a COOKIE_ECHO with the
			 * correct tags... just accept it...but we must
			 * process the init so that we can make sure we have
			 * the right seq no's.
			 */
			/* First we must process the INIT !! */
			retval = sctp_process_init(init_cp, stcb, net);
			if (retval < 0) {
				if (how_indx < sizeof(asoc->cookie_how))
					asoc->cookie_how[how_indx] = 3;
				return (NULL);
			}
			/* we have already processed the INIT so no problem */
			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
			/* update current state */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);

			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
			}
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
			sctp_stop_all_cookie_timers(stcb);
			/* only 1-to-1 style sockets with no pending accepts get here */
			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)
			    ) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				struct socket *so;

#endif
				/*
				 * Here is where collision would go if we
				 * did a connect() and instead got a
				 * init/init-ack/cookie done before the
				 * init-ack came back..
				 */
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				/*
				 * Hold a ref and drop the TCB lock while
				 * taking the socket lock (lock order), then
				 * re-check that the socket did not close
				 * underneath us.
				 */
				so = SCTP_INP_SO(stcb->sctp_ep);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return (NULL);
				}
#endif
				soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
			}
			/* notify upper layer */
			*notification = SCTP_NOTIFY_ASSOC_UP;
			/*
			 * since we did not send a HB make sure we don't
			 * double things
			 */
			net->hb_responded = 1;
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &cookie->time_entered, sctp_align_unsafe_makecopy);

			if (stcb->asoc.sctp_autoclose_ticks &&
			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
				    inp, stcb, NULL);
			}
			break;
		default:
			/*
			 * we're in the OPEN state (or beyond), so peer must
			 * have simply lost the COOKIE-ACK
			 */
			break;
		}		/* end switch */
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * We ignore the return code here.. not sure if we should
		 * somehow abort.. but we do have an existing asoc. This
		 * really should not fail.
		 */
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 4;
			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 5;
		return (stcb);
	}
	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
	    cookie->tie_tag_my_vtag == 0 &&
	    cookie->tie_tag_peer_vtag == 0) {
		/*
		 * case C in Section 5.2.4 Table 2: XMOO silently discard
		 */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 6;
		return (NULL);
	}
	if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
	    (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
	    init_cp->init.initiate_tag == 0)) {
		/*
		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
		 * should be ok, re-accept peer info
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*
			 * Extension of case C. If we hit this, then the
			 * random number generator returned the same vtag
			 * when we first sent our INIT-ACK and when we later
			 * sent our INIT. The side with the seq numbers that
			 * are different will be the one that normnally
			 * would have hit case C. This in effect "extends"
			 * our vtags in this collision case to be 64 bits.
			 * The same collision could occur aka you get both
			 * vtag and seq number the same twice in a row.. but
			 * is much less likely. If it did happen then we
			 * would proceed through and bring up the assoc.. we
			 * may end up with the wrong stream setup however..
			 * which would be bad.. but there is no way to
			 * tell.. until we send on a stream that does not
			 * exist :-)
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 7;

			return (NULL);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 8;
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;
		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    NULL);
		}
		/* re-accept the peer's window and stream count */
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);

		/* Note last_cwr_tsn? where is this used? */
		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
			/*
			 * Ok the peer probably discarded our data (if we
			 * echoed a cookie+data). So anything on the
			 * sent_queue should be marked for retransmit, we
			 * may not get something to kick us so it COULD
			 * still take a timeout to move these.. but it can't
			 * hurt to mark them.
			 */
			struct sctp_tmit_chunk *chk;

			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if (chk->sent < SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_flight_size_decrease(chk);
					sctp_total_flight_decrease(stcb, chk);
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
					spec_flag++;
				}
			}

		}
		/* process the INIT info (peer's info) */
		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 9;
			return (NULL);
		}
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 10;
			return (NULL);
		}
		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
			*notification = SCTP_NOTIFY_ASSOC_UP;

			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				struct socket *so;

#endif
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				/* same ref/lock-order dance as in case D above */
				so = SCTP_INP_SO(stcb->sctp_ep);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return (NULL);
				}
#endif
				soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
			}
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
		} else {
			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);
		}
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (spec_flag) {
			/*
			 * only if we have retrans set do we do this. What
			 * this call does is get only the COOKIE-ACK out and
			 * then when we return the normal call to
			 * sctp_chunk_output will get the retrans out behind
			 * this.
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 11;

		return (stcb);
	}
	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
	    cookie->tie_tag_peer_vtag != 0) {
		struct sctpasochead *head;

		/*
		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
		 */
		/* temp code */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 12;
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);

		*sac_assoc_id = sctp_get_associd(stcb);
		/* notify upper layer */
		*notification = SCTP_NOTIFY_ASSOC_RESTART;
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		}
		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
		}
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
			/* move to OPEN state, if not in SHUTDOWN_SENT */
			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		}
		/* re-seed all of our sequence-number state from the cookie */
		asoc->pre_open_streams =
		    ntohs(initack_cp->init.num_outbound_streams);
		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;

		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;

		asoc->str_reset_seq_in = asoc->init_seq_number;

		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
		if (asoc->mapping_array) {
			memset(asoc->mapping_array, 0,
			    asoc->mapping_array_size);
		}
		/*
		 * Re-hashing below needs the INFO and INP write locks;
		 * drop the TCB lock first to respect lock ordering, holding
		 * a refcount so the assoc cannot go away meanwhile.
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_INFO_WLOCK();
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* send up all the data */
		SCTP_TCB_SEND_LOCK(stcb);

		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].stream_no = i;
			stcb->asoc.strmout[i].next_sequence_sent = 0;
			stcb->asoc.strmout[i].last_msg_incomplete = 0;
		}
		/* process the INIT-ACK info (my info) */
		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);

		/* pull from vtag hash */
		LIST_REMOVE(stcb, sctp_asocs);
		/* re-insert to new vtag position */
		head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
		    sctppcbinfo.hashasocmark)];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);

		/* Is this the first restart? */
		if (stcb->asoc.in_restart_hash == 0) {
			/* Ok add it to assoc_id vtag hash */
			head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
			    sctppcbinfo.hashrestartmark)];
			LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
			stcb->asoc.in_restart_hash = 1;
		}
		/* process the INIT info (peer's info) */
		SCTP_TCB_SEND_UNLOCK(stcb);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		SCTP_INP_INFO_WUNLOCK();

		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 13;

			return (NULL);
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 14;

			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 15;

		return (stcb);
	}
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 16;
	/* all other cases... */
	return (NULL);
}
pull INIT chunk hdr\n"); 1776 return (NULL); 1777 } 1778 chk_length = ntohs(init_cp->ch.chunk_length); 1779 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1780 SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n"); 1781 return (NULL); 1782 } 1783 initack_offset = init_offset + SCTP_SIZE32(chk_length); 1784 /* 1785 * find and validate the INIT-ACK chunk in the cookie (my info) the 1786 * INIT-ACK follows the INIT chunk 1787 */ 1788 initack_cp = (struct sctp_init_ack_chunk *) 1789 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1790 (uint8_t *) & initack_buf); 1791 if (initack_cp == NULL) { 1792 /* could not pull INIT-ACK chunk in cookie */ 1793 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n"); 1794 return (NULL); 1795 } 1796 chk_length = ntohs(initack_cp->ch.chunk_length); 1797 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1798 return (NULL); 1799 } 1800 /* 1801 * NOTE: We can't use the INIT_ACK's chk_length to determine the 1802 * "initack_limit" value. This is because the chk_length field 1803 * includes the length of the cookie, but the cookie is omitted when 1804 * the INIT and INIT_ACK are tacked onto the cookie... 1805 */ 1806 initack_limit = offset + cookie_len; 1807 1808 /* 1809 * now that we know the INIT/INIT-ACK are in place, create a new TCB 1810 * and popluate 1811 */ 1812 1813 /* 1814 * Here we do a trick, we set in NULL for the proc/thread argument. 1815 * We do this since in effect we only use the p argument when the 1816 * socket is unbound and we must do an implicit bind. Since we are 1817 * getting a cookie, we cannot be unbound. 1818 */ 1819 stcb = sctp_aloc_assoc(inp, init_src, 0, &error, 1820 ntohl(initack_cp->init.initiate_tag), vrf_id, 1821 (struct thread *)NULL 1822 ); 1823 if (stcb == NULL) { 1824 struct mbuf *op_err; 1825 1826 /* memory problem? 
*/ 1827 SCTPDBG(SCTP_DEBUG_INPUT1, 1828 "process_cookie_new: no room for another TCB!\n"); 1829 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1830 1831 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 1832 sh, op_err, vrf_id); 1833 return (NULL); 1834 } 1835 /* get the correct sctp_nets */ 1836 if (netp) 1837 *netp = sctp_findnet(stcb, init_src); 1838 1839 asoc = &stcb->asoc; 1840 /* get scope variables out of cookie */ 1841 asoc->ipv4_local_scope = cookie->ipv4_scope; 1842 asoc->site_scope = cookie->site_scope; 1843 asoc->local_scope = cookie->local_scope; 1844 asoc->loopback_scope = cookie->loopback_scope; 1845 1846 if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) || 1847 (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) { 1848 struct mbuf *op_err; 1849 1850 /* 1851 * Houston we have a problem. The EP changed while the 1852 * cookie was in flight. Only recourse is to abort the 1853 * association. 1854 */ 1855 atomic_add_int(&stcb->asoc.refcnt, 1); 1856 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1857 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 1858 sh, op_err, vrf_id); 1859 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1860 SCTP_TCB_UNLOCK(stcb); 1861 SCTP_SOCKET_LOCK(so, 1); 1862 SCTP_TCB_LOCK(stcb); 1863 #endif 1864 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 1865 SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1866 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1867 SCTP_SOCKET_UNLOCK(so, 1); 1868 #endif 1869 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1870 return (NULL); 1871 } 1872 /* process the INIT-ACK info (my info) */ 1873 old_tag = asoc->my_vtag; 1874 asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1875 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1876 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1877 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1878 asoc->sending_seq = asoc->asconf_seq_out = 
asoc->str_reset_seq_out = asoc->init_seq_number; 1879 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1880 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1881 asoc->str_reset_seq_in = asoc->init_seq_number; 1882 1883 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1884 1885 /* process the INIT info (peer's info) */ 1886 if (netp) 1887 retval = sctp_process_init(init_cp, stcb, *netp); 1888 else 1889 retval = 0; 1890 if (retval < 0) { 1891 atomic_add_int(&stcb->asoc.refcnt, 1); 1892 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1893 SCTP_TCB_UNLOCK(stcb); 1894 SCTP_SOCKET_LOCK(so, 1); 1895 SCTP_TCB_LOCK(stcb); 1896 #endif 1897 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1898 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1899 SCTP_SOCKET_UNLOCK(so, 1); 1900 #endif 1901 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1902 return (NULL); 1903 } 1904 /* load all addresses */ 1905 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1906 init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh, 1907 init_src)) { 1908 atomic_add_int(&stcb->asoc.refcnt, 1); 1909 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1910 SCTP_TCB_UNLOCK(stcb); 1911 SCTP_SOCKET_LOCK(so, 1); 1912 SCTP_TCB_LOCK(stcb); 1913 #endif 1914 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17); 1915 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1916 SCTP_SOCKET_UNLOCK(so, 1); 1917 #endif 1918 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1919 return (NULL); 1920 } 1921 /* 1922 * verify any preceding AUTH chunk that was skipped 1923 */ 1924 /* pull the local authentication parameters from the cookie/init-ack */ 1925 sctp_auth_get_cookie_params(stcb, m, 1926 initack_offset + sizeof(struct sctp_init_ack_chunk), 1927 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk))); 1928 if (auth_skipped) { 1929 struct sctp_auth_chunk *auth; 1930 1931 auth = 
(struct sctp_auth_chunk *) 1932 sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf); 1933 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) { 1934 /* auth HMAC failed, dump the assoc and packet */ 1935 SCTPDBG(SCTP_DEBUG_AUTH1, 1936 "COOKIE-ECHO: AUTH failed\n"); 1937 atomic_add_int(&stcb->asoc.refcnt, 1); 1938 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1939 SCTP_TCB_UNLOCK(stcb); 1940 SCTP_SOCKET_LOCK(so, 1); 1941 SCTP_TCB_LOCK(stcb); 1942 #endif 1943 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18); 1944 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1945 SCTP_SOCKET_UNLOCK(so, 1); 1946 #endif 1947 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1948 return (NULL); 1949 } else { 1950 /* remaining chunks checked... good to go */ 1951 stcb->asoc.authenticated = 1; 1952 } 1953 } 1954 /* update current state */ 1955 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 1956 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1957 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1958 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1959 stcb->sctp_ep, stcb, asoc->primary_destination); 1960 } 1961 sctp_stop_all_cookie_timers(stcb); 1962 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab); 1963 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1964 1965 /* 1966 * if we're doing ASCONFs, check to see if we have any new local 1967 * addresses that need to get added to the peer (eg. addresses 1968 * changed while cookie echo in flight). This needs to be done 1969 * after we go to the OPEN state to do the correct asconf 1970 * processing. else, make sure we have the correct addresses in our 1971 * lists 1972 */ 1973 1974 /* warning, we re-use sin, sin6, sa_store here! 
*/ 1975 /* pull in local_address (our "from" address) */ 1976 if (cookie->laddr_type == SCTP_IPV4_ADDRESS) { 1977 /* source addr is IPv4 */ 1978 sin = (struct sockaddr_in *)initack_src; 1979 memset(sin, 0, sizeof(*sin)); 1980 sin->sin_family = AF_INET; 1981 sin->sin_len = sizeof(struct sockaddr_in); 1982 sin->sin_addr.s_addr = cookie->laddress[0]; 1983 } else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) { 1984 /* source addr is IPv6 */ 1985 sin6 = (struct sockaddr_in6 *)initack_src; 1986 memset(sin6, 0, sizeof(*sin6)); 1987 sin6->sin6_family = AF_INET6; 1988 sin6->sin6_len = sizeof(struct sockaddr_in6); 1989 sin6->sin6_scope_id = cookie->scope_id; 1990 memcpy(&sin6->sin6_addr, cookie->laddress, 1991 sizeof(sin6->sin6_addr)); 1992 } else { 1993 atomic_add_int(&stcb->asoc.refcnt, 1); 1994 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1995 SCTP_TCB_UNLOCK(stcb); 1996 SCTP_SOCKET_LOCK(so, 1); 1997 SCTP_TCB_LOCK(stcb); 1998 #endif 1999 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19); 2000 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2001 SCTP_SOCKET_UNLOCK(so, 1); 2002 #endif 2003 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2004 return (NULL); 2005 } 2006 2007 /* set up to notify upper layer */ 2008 *notification = SCTP_NOTIFY_ASSOC_UP; 2009 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2010 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2011 (inp->sctp_socket->so_qlimit == 0)) { 2012 /* 2013 * This is an endpoint that called connect() how it got a 2014 * cookie that is NEW is a bit of a mystery. It must be that 2015 * the INIT was sent, but before it got there.. a complete 2016 * INIT/INIT-ACK/COOKIE arrived. But of course then it 2017 * should have went to the other code.. not here.. oh well.. 2018 * a bit of protection is worth having.. 
2019 */ 2020 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 2021 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2022 atomic_add_int(&stcb->asoc.refcnt, 1); 2023 SCTP_TCB_UNLOCK(stcb); 2024 SCTP_SOCKET_LOCK(so, 1); 2025 SCTP_TCB_LOCK(stcb); 2026 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2027 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2028 SCTP_SOCKET_UNLOCK(so, 1); 2029 return (NULL); 2030 } 2031 #endif 2032 soisconnected(stcb->sctp_socket); 2033 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2034 SCTP_SOCKET_UNLOCK(so, 1); 2035 #endif 2036 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 2037 (inp->sctp_socket->so_qlimit)) { 2038 /* 2039 * We don't want to do anything with this one. Since it is 2040 * the listening guy. The timer will get started for 2041 * accepted connections in the caller. 2042 */ 2043 ; 2044 } 2045 /* since we did not send a HB make sure we don't double things */ 2046 if ((netp) && (*netp)) 2047 (*netp)->hb_responded = 1; 2048 2049 if (stcb->asoc.sctp_autoclose_ticks && 2050 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2051 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); 2052 } 2053 /* calculate the RTT */ 2054 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 2055 if ((netp) && (*netp)) { 2056 (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp, 2057 &cookie->time_entered, sctp_align_unsafe_makecopy); 2058 } 2059 /* respond with a COOKIE-ACK */ 2060 sctp_send_cookie_ack(stcb); 2061 2062 /* 2063 * check the address lists for any ASCONFs that need to be sent 2064 * AFTER the cookie-ack is sent 2065 */ 2066 sctp_check_address_list(stcb, m, 2067 initack_offset + sizeof(struct sctp_init_ack_chunk), 2068 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)), 2069 initack_src, cookie->local_scope, cookie->site_scope, 2070 cookie->ipv4_scope, cookie->loopback_scope); 2071 2072 2073 return (stcb); 2074 } 2075 2076 2077 /* 2078 * handles a COOKIE-ECHO 
message stcb: modified to either a new or left as 2079 * existing (non-NULL) TCB 2080 */ 2081 static struct mbuf * 2082 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2083 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2084 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2085 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2086 struct sctp_tcb **locked_tcb, uint32_t vrf_id) 2087 { 2088 struct sctp_state_cookie *cookie; 2089 struct sockaddr_in6 sin6; 2090 struct sockaddr_in sin; 2091 struct sctp_tcb *l_stcb = *stcb; 2092 struct sctp_inpcb *l_inp; 2093 struct sockaddr *to; 2094 sctp_assoc_t sac_restart_id; 2095 struct sctp_pcb *ep; 2096 struct mbuf *m_sig; 2097 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2098 uint8_t *sig; 2099 uint8_t cookie_ok = 0; 2100 unsigned int size_of_pkt, sig_offset, cookie_offset; 2101 unsigned int cookie_len; 2102 struct timeval now; 2103 struct timeval time_expires; 2104 struct sockaddr_storage dest_store; 2105 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store; 2106 struct ip *iph; 2107 int notification = 0; 2108 struct sctp_nets *netl; 2109 int had_a_existing_tcb = 0; 2110 2111 SCTPDBG(SCTP_DEBUG_INPUT2, 2112 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 2113 2114 if (inp_p == NULL) { 2115 return (NULL); 2116 } 2117 /* First get the destination address setup too. 
*/ 2118 iph = mtod(m, struct ip *); 2119 if (iph->ip_v == IPVERSION) { 2120 /* its IPv4 */ 2121 struct sockaddr_in *lsin; 2122 2123 lsin = (struct sockaddr_in *)(localep_sa); 2124 memset(lsin, 0, sizeof(*lsin)); 2125 lsin->sin_family = AF_INET; 2126 lsin->sin_len = sizeof(*lsin); 2127 lsin->sin_port = sh->dest_port; 2128 lsin->sin_addr.s_addr = iph->ip_dst.s_addr; 2129 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph); 2130 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 2131 /* its IPv6 */ 2132 struct ip6_hdr *ip6; 2133 struct sockaddr_in6 *lsin6; 2134 2135 lsin6 = (struct sockaddr_in6 *)(localep_sa); 2136 memset(lsin6, 0, sizeof(*lsin6)); 2137 lsin6->sin6_family = AF_INET6; 2138 lsin6->sin6_len = sizeof(struct sockaddr_in6); 2139 ip6 = mtod(m, struct ip6_hdr *); 2140 lsin6->sin6_port = sh->dest_port; 2141 lsin6->sin6_addr = ip6->ip6_dst; 2142 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen; 2143 } else { 2144 return (NULL); 2145 } 2146 2147 cookie = &cp->cookie; 2148 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2149 cookie_len = ntohs(cp->ch.chunk_length); 2150 2151 if ((cookie->peerport != sh->src_port) && 2152 (cookie->myport != sh->dest_port) && 2153 (cookie->my_vtag != sh->v_tag)) { 2154 /* 2155 * invalid ports or bad tag. Note that we always leave the 2156 * v_tag in the header in network order and when we stored 2157 * it in the my_vtag slot we also left it in network order. 2158 * This maintains the match even though it may be in the 2159 * opposite byte order of the machine :-> 2160 */ 2161 return (NULL); 2162 } 2163 if (cookie_len > size_of_pkt || 2164 cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2165 sizeof(struct sctp_init_chunk) + 2166 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 2167 /* cookie too long! or too small */ 2168 return (NULL); 2169 } 2170 /* 2171 * split off the signature into its own mbuf (since it should not be 2172 * calculated in the sctp_hmac_m() call). 
2173 */ 2174 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2175 if (sig_offset > size_of_pkt) { 2176 /* packet not correct size! */ 2177 /* XXX this may already be accounted for earlier... */ 2178 return (NULL); 2179 } 2180 m_sig = m_split(m, sig_offset, M_DONTWAIT); 2181 if (m_sig == NULL) { 2182 /* out of memory or ?? */ 2183 return (NULL); 2184 } 2185 /* 2186 * compute the signature/digest for the cookie 2187 */ 2188 ep = &(*inp_p)->sctp_ep; 2189 l_inp = *inp_p; 2190 if (l_stcb) { 2191 SCTP_TCB_UNLOCK(l_stcb); 2192 } 2193 SCTP_INP_RLOCK(l_inp); 2194 if (l_stcb) { 2195 SCTP_TCB_LOCK(l_stcb); 2196 } 2197 /* which cookie is it? */ 2198 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 2199 (ep->current_secret_number != ep->last_secret_number)) { 2200 /* it's the old cookie */ 2201 (void)sctp_hmac_m(SCTP_HMAC, 2202 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2203 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2204 } else { 2205 /* it's the current cookie */ 2206 (void)sctp_hmac_m(SCTP_HMAC, 2207 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 2208 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2209 } 2210 /* get the signature */ 2211 SCTP_INP_RUNLOCK(l_inp); 2212 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 2213 if (sig == NULL) { 2214 /* couldn't find signature */ 2215 sctp_m_freem(m_sig); 2216 return (NULL); 2217 } 2218 /* compare the received digest with the computed digest */ 2219 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2220 /* try the old cookie? 
*/ 2221 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 2222 (ep->current_secret_number != ep->last_secret_number)) { 2223 /* compute digest with old */ 2224 (void)sctp_hmac_m(SCTP_HMAC, 2225 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2226 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2227 /* compare */ 2228 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2229 cookie_ok = 1; 2230 } 2231 } else { 2232 cookie_ok = 1; 2233 } 2234 2235 /* 2236 * Now before we continue we must reconstruct our mbuf so that 2237 * normal processing of any other chunks will work. 2238 */ 2239 { 2240 struct mbuf *m_at; 2241 2242 m_at = m; 2243 while (SCTP_BUF_NEXT(m_at) != NULL) { 2244 m_at = SCTP_BUF_NEXT(m_at); 2245 } 2246 SCTP_BUF_NEXT(m_at) = m_sig; 2247 } 2248 2249 if (cookie_ok == 0) { 2250 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2251 SCTPDBG(SCTP_DEBUG_INPUT2, 2252 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2253 (uint32_t) offset, cookie_offset, sig_offset); 2254 return (NULL); 2255 } 2256 /* 2257 * check the cookie timestamps to be sure it's not stale 2258 */ 2259 (void)SCTP_GETTIME_TIMEVAL(&now); 2260 /* Expire time is in Ticks, so we convert to seconds */ 2261 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 2262 time_expires.tv_usec = cookie->time_entered.tv_usec; 2263 if (timevalcmp(&now, &time_expires, >)) { 2264 /* cookie is stale! 
*/ 2265 struct mbuf *op_err; 2266 struct sctp_stale_cookie_msg *scm; 2267 uint32_t tim; 2268 2269 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 2270 0, M_DONTWAIT, 1, MT_DATA); 2271 if (op_err == NULL) { 2272 /* FOOBAR */ 2273 return (NULL); 2274 } 2275 /* pre-reserve some space */ 2276 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 2277 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 2278 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 2279 2280 /* Set the len */ 2281 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 2282 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 2283 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 2284 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 2285 (sizeof(uint32_t)))); 2286 /* seconds to usec */ 2287 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 2288 /* add in usec */ 2289 if (tim == 0) 2290 tim = now.tv_usec - cookie->time_entered.tv_usec; 2291 scm->time_usec = htonl(tim); 2292 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 2293 vrf_id); 2294 return (NULL); 2295 } 2296 /* 2297 * Now we must see with the lookup address if we have an existing 2298 * asoc. This will only happen if we were in the COOKIE-WAIT state 2299 * and a INIT collided with us and somewhere the peer sent the 2300 * cookie on another address besides the single address our assoc 2301 * had for him. In this case we will have one of the tie-tags set at 2302 * least AND the address field in the cookie can be used to look it 2303 * up. 
2304 */ 2305 to = NULL; 2306 if (cookie->addr_type == SCTP_IPV6_ADDRESS) { 2307 memset(&sin6, 0, sizeof(sin6)); 2308 sin6.sin6_family = AF_INET6; 2309 sin6.sin6_len = sizeof(sin6); 2310 sin6.sin6_port = sh->src_port; 2311 sin6.sin6_scope_id = cookie->scope_id; 2312 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2313 sizeof(sin6.sin6_addr.s6_addr)); 2314 to = (struct sockaddr *)&sin6; 2315 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) { 2316 memset(&sin, 0, sizeof(sin)); 2317 sin.sin_family = AF_INET; 2318 sin.sin_len = sizeof(sin); 2319 sin.sin_port = sh->src_port; 2320 sin.sin_addr.s_addr = cookie->address[0]; 2321 to = (struct sockaddr *)&sin; 2322 } else { 2323 /* This should not happen */ 2324 return (NULL); 2325 } 2326 if ((*stcb == NULL) && to) { 2327 /* Yep, lets check */ 2328 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL); 2329 if (*stcb == NULL) { 2330 /* 2331 * We should have only got back the same inp. If we 2332 * got back a different ep we have a problem. The 2333 * original findep got back l_inp and now 2334 */ 2335 if (l_inp != *inp_p) { 2336 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2337 } 2338 } else { 2339 if (*locked_tcb == NULL) { 2340 /* 2341 * In this case we found the assoc only 2342 * after we locked the create lock. This 2343 * means we are in a colliding case and we 2344 * must make sure that we unlock the tcb if 2345 * its one of the cases where we throw away 2346 * the incoming packets. 2347 */ 2348 *locked_tcb = *stcb; 2349 2350 /* 2351 * We must also increment the inp ref count 2352 * since the ref_count flags was set when we 2353 * did not find the TCB, now we found it 2354 * which reduces the refcount.. we must 2355 * raise it back out to balance it all :-) 2356 */ 2357 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2358 if ((*stcb)->sctp_ep != l_inp) { 2359 SCTP_PRINTF("Huh? 
ep:%p diff then l_inp:%p?\n", 2360 (*stcb)->sctp_ep, l_inp); 2361 } 2362 } 2363 } 2364 } 2365 if (to == NULL) 2366 return (NULL); 2367 2368 cookie_len -= SCTP_SIGNATURE_SIZE; 2369 if (*stcb == NULL) { 2370 /* this is the "normal" case... get a new TCB */ 2371 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie, 2372 cookie_len, *inp_p, netp, to, ¬ification, 2373 auth_skipped, auth_offset, auth_len, vrf_id); 2374 } else { 2375 /* this is abnormal... cookie-echo on existing TCB */ 2376 had_a_existing_tcb = 1; 2377 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh, 2378 cookie, cookie_len, *inp_p, *stcb, *netp, to, 2379 ¬ification, &sac_restart_id, vrf_id); 2380 } 2381 2382 if (*stcb == NULL) { 2383 /* still no TCB... must be bad cookie-echo */ 2384 return (NULL); 2385 } 2386 /* 2387 * Ok, we built an association so confirm the address we sent the 2388 * INIT-ACK to. 2389 */ 2390 netl = sctp_findnet(*stcb, to); 2391 /* 2392 * This code should in theory NOT run but 2393 */ 2394 if (netl == NULL) { 2395 /* TSNH! Huh, why do I need to add this address here? */ 2396 int ret; 2397 2398 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE, 2399 SCTP_IN_COOKIE_PROC); 2400 netl = sctp_findnet(*stcb, to); 2401 } 2402 if (netl) { 2403 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2404 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2405 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2406 netl); 2407 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2408 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2409 } 2410 } 2411 if (*stcb) { 2412 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p, 2413 *stcb, NULL); 2414 } 2415 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2416 if (!had_a_existing_tcb || 2417 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2418 /* 2419 * If we have a NEW cookie or the connect never 2420 * reached the connected state during collision we 2421 * must do the TCP accept thing. 
2422 */ 2423 struct socket *so, *oso; 2424 struct sctp_inpcb *inp; 2425 2426 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2427 /* 2428 * For a restart we will keep the same 2429 * socket, no need to do anything. I THINK!! 2430 */ 2431 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED); 2432 return (m); 2433 } 2434 oso = (*inp_p)->sctp_socket; 2435 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2436 SCTP_TCB_UNLOCK((*stcb)); 2437 so = sonewconn(oso, 0 2438 ); 2439 SCTP_TCB_LOCK((*stcb)); 2440 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2441 2442 if (so == NULL) { 2443 struct mbuf *op_err; 2444 2445 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2446 struct socket *pcb_so; 2447 2448 #endif 2449 /* Too many sockets */ 2450 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2451 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2452 sctp_abort_association(*inp_p, NULL, m, iphlen, 2453 sh, op_err, vrf_id); 2454 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2455 pcb_so = SCTP_INP_SO(*inp_p); 2456 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2457 SCTP_TCB_UNLOCK((*stcb)); 2458 SCTP_SOCKET_LOCK(pcb_so, 1); 2459 SCTP_TCB_LOCK((*stcb)); 2460 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2461 #endif 2462 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2463 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2464 SCTP_SOCKET_UNLOCK(pcb_so, 1); 2465 #endif 2466 return (NULL); 2467 } 2468 inp = (struct sctp_inpcb *)so->so_pcb; 2469 SCTP_INP_INCR_REF(inp); 2470 /* 2471 * We add the unbound flag here so that if we get an 2472 * soabort() before we get the move_pcb done, we 2473 * will properly cleanup. 
2474 */ 2475 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2476 SCTP_PCB_FLAGS_CONNECTED | 2477 SCTP_PCB_FLAGS_IN_TCPPOOL | 2478 SCTP_PCB_FLAGS_UNBOUND | 2479 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2480 SCTP_PCB_FLAGS_DONT_WAKE); 2481 inp->sctp_features = (*inp_p)->sctp_features; 2482 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; 2483 inp->sctp_socket = so; 2484 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2485 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2486 inp->sctp_context = (*inp_p)->sctp_context; 2487 inp->inp_starting_point_for_iterator = NULL; 2488 /* 2489 * copy in the authentication parameters from the 2490 * original endpoint 2491 */ 2492 if (inp->sctp_ep.local_hmacs) 2493 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2494 inp->sctp_ep.local_hmacs = 2495 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2496 if (inp->sctp_ep.local_auth_chunks) 2497 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2498 inp->sctp_ep.local_auth_chunks = 2499 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2500 (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys, 2501 &inp->sctp_ep.shared_keys); 2502 2503 /* 2504 * Now we must move it from one hash table to 2505 * another and get the tcb in the right place. 2506 */ 2507 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2508 2509 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2510 SCTP_TCB_UNLOCK((*stcb)); 2511 2512 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT); 2513 SCTP_TCB_LOCK((*stcb)); 2514 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2515 2516 2517 /* 2518 * now we must check to see if we were aborted while 2519 * the move was going on and the lock/unlock 2520 * happened. 2521 */ 2522 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2523 /* 2524 * yep it was, we leave the assoc attached 2525 * to the socket since the sctp_inpcb_free() 2526 * call will send an abort for us. 
2527 */ 2528 SCTP_INP_DECR_REF(inp); 2529 return (NULL); 2530 } 2531 SCTP_INP_DECR_REF(inp); 2532 /* Switch over to the new guy */ 2533 *inp_p = inp; 2534 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2535 2536 /* 2537 * Pull it from the incomplete queue and wake the 2538 * guy 2539 */ 2540 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2541 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2542 SCTP_TCB_UNLOCK((*stcb)); 2543 SCTP_SOCKET_LOCK(so, 1); 2544 #endif 2545 soisconnected(so); 2546 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2547 SCTP_TCB_LOCK((*stcb)); 2548 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2549 SCTP_SOCKET_UNLOCK(so, 1); 2550 #endif 2551 return (m); 2552 } 2553 } 2554 if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2555 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2556 } 2557 return (m); 2558 } 2559 2560 static void 2561 sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp, 2562 struct sctp_tcb *stcb, struct sctp_nets *net) 2563 { 2564 /* cp must not be used, others call this without a c-ack :-) */ 2565 struct sctp_association *asoc; 2566 2567 SCTPDBG(SCTP_DEBUG_INPUT2, 2568 "sctp_handle_cookie_ack: handling COOKIE-ACK\n"); 2569 if (stcb == NULL) 2570 return; 2571 2572 asoc = &stcb->asoc; 2573 2574 sctp_stop_all_cookie_timers(stcb); 2575 /* process according to association state */ 2576 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 2577 /* state change only needed when I am in right state */ 2578 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 2579 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 2580 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 2581 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 2582 stcb->sctp_ep, stcb, asoc->primary_destination); 2583 2584 } 2585 /* update RTO */ 2586 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 2587 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 2588 if (asoc->overall_error_count == 0) { 2589 net->RTO = 
			/* finish RTT sample started at COOKIE-ECHO send time */
			    sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Drop the TCB lock to take the socket lock (lock
			 * order); hold a refcount so the assoc cannot be
			 * freed while unlocked, then re-check for a closed
			 * socket before touching it.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
#endif
			soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		if (chk) {
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, chk->whoTo);
		}
	}
}

/* Handle a received ECN-Echo chunk: resync the ECN nonce state and apply a
 * cwnd reduction (at most once per RTT) for the network the echoed TSN was
 * sent on. */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	uint32_t tsn;

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
		return;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
	/* Also we make sure we disable the nonce_wait */
	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (lchk == NULL) {
		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
	} else {
		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
	}
	stcb->asoc.nonce_wait_for_ecne = 0;
	stcb->asoc.nonce_sum_check = 0;

	/* Find where it was sent, if possible */
	net = NULL;
	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	while (lchk) {
		if (lchk->rec.data.TSN_seq == tsn) {
			net = lchk->whoTo;
			break;
		}
		/*
		 * NOTE(review): this early-exit comparison uses MAX_SEQ
		 * while every other TSN comparison nearby uses MAX_TSN;
		 * looks inconsistent for a 32-bit TSN — confirm against
		 * upstream before changing.
		 */
		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
			break;
		lchk = TAILQ_NEXT(lchk, sctp_next);
	}
	if (net == NULL)
		/* default is we use the primary */
		net = stcb->asoc.primary_destination;

	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net);
		/*
		 * we reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the resync_tsn.
		 */
		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer.
	 */
	sctp_send_cwr(stcb, net, tsn);
}

/*
 * Handle a received CWR chunk: the peer has reacted to our ECN-Echo, so
 * remove the (single) outstanding ECNE it covers from the control queue.
 */
static void
sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
{
	/*
	 * Here we get a CWR from the peer. We must look in the outqueue and
	 * make sure that we have a covered ECNE in teh control chunk part.
	 * If so remove it.
	 */
	struct sctp_tmit_chunk *chk;
	struct sctp_ecne_chunk *ecne;

	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
			continue;
		}
		/*
		 * Look for and remove if it is the right TSN. Since there
		 * is only ONE ECNE on the control queue at any one time we
		 * don't need to worry about more than one!
		 */
		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
			/* this covers this ECNE, we can remove it */
			stcb->asoc.ecn_echo_cnt_onq--;
			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
			    sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			stcb->asoc.ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk);
			break;
		}
	}
}

/*
 * Handle a received SHUTDOWN-COMPLETE: only valid in SHUTDOWN-ACK-SENT
 * state; notifies the ULP, stops the shutdown timer and frees the
 * association.  NOTE: frees (or unlocks) stcb — caller must not touch it
 * afterwards.
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty?
they should be */ 2790 if (!TAILQ_EMPTY(&asoc->send_queue) || 2791 !TAILQ_EMPTY(&asoc->sent_queue) || 2792 !TAILQ_EMPTY(&asoc->out_wheel)) { 2793 sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED); 2794 } 2795 } 2796 /* stop the timer */ 2797 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22); 2798 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 2799 /* free the TCB */ 2800 SCTPDBG(SCTP_DEBUG_INPUT2, 2801 "sctp_handle_shutdown_complete: calls free-asoc\n"); 2802 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2803 so = SCTP_INP_SO(stcb->sctp_ep); 2804 atomic_add_int(&stcb->asoc.refcnt, 1); 2805 SCTP_TCB_UNLOCK(stcb); 2806 SCTP_SOCKET_LOCK(so, 1); 2807 SCTP_TCB_LOCK(stcb); 2808 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2809 #endif 2810 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23); 2811 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2812 SCTP_SOCKET_UNLOCK(so, 1); 2813 #endif 2814 return; 2815 } 2816 2817 static int 2818 process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc, 2819 struct sctp_nets *net, uint8_t flg) 2820 { 2821 switch (desc->chunk_type) { 2822 case SCTP_DATA: 2823 /* find the tsn to resend (possibly */ 2824 { 2825 uint32_t tsn; 2826 struct sctp_tmit_chunk *tp1; 2827 2828 tsn = ntohl(desc->tsn_ifany); 2829 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2830 while (tp1) { 2831 if (tp1->rec.data.TSN_seq == tsn) { 2832 /* found it */ 2833 break; 2834 } 2835 if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn, 2836 MAX_TSN)) { 2837 /* not found */ 2838 tp1 = NULL; 2839 break; 2840 } 2841 tp1 = TAILQ_NEXT(tp1, sctp_next); 2842 } 2843 if (tp1 == NULL) { 2844 /* 2845 * Do it the other way , aka without paying 2846 * attention to queue seq order. 
2847 */ 2848 SCTP_STAT_INCR(sctps_pdrpdnfnd); 2849 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2850 while (tp1) { 2851 if (tp1->rec.data.TSN_seq == tsn) { 2852 /* found it */ 2853 break; 2854 } 2855 tp1 = TAILQ_NEXT(tp1, sctp_next); 2856 } 2857 } 2858 if (tp1 == NULL) { 2859 SCTP_STAT_INCR(sctps_pdrptsnnf); 2860 } 2861 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) { 2862 uint8_t *ddp; 2863 2864 if ((stcb->asoc.peers_rwnd == 0) && 2865 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { 2866 SCTP_STAT_INCR(sctps_pdrpdiwnp); 2867 return (0); 2868 } 2869 if (stcb->asoc.peers_rwnd == 0 && 2870 (flg & SCTP_FROM_MIDDLE_BOX)) { 2871 SCTP_STAT_INCR(sctps_pdrpdizrw); 2872 return (0); 2873 } 2874 ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+ 2875 sizeof(struct sctp_data_chunk)); 2876 { 2877 unsigned int iii; 2878 2879 for (iii = 0; iii < sizeof(desc->data_bytes); 2880 iii++) { 2881 if (ddp[iii] != desc->data_bytes[iii]) { 2882 SCTP_STAT_INCR(sctps_pdrpbadd); 2883 return (-1); 2884 } 2885 } 2886 } 2887 /* 2888 * We zero out the nonce so resync not 2889 * needed 2890 */ 2891 tp1->rec.data.ect_nonce = 0; 2892 2893 if (tp1->do_rtt) { 2894 /* 2895 * this guy had a RTO calculation 2896 * pending on it, cancel it 2897 */ 2898 tp1->do_rtt = 0; 2899 } 2900 SCTP_STAT_INCR(sctps_pdrpmark); 2901 if (tp1->sent != SCTP_DATAGRAM_RESEND) 2902 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2903 tp1->sent = SCTP_DATAGRAM_RESEND; 2904 /* 2905 * mark it as if we were doing a FR, since 2906 * we will be getting gap ack reports behind 2907 * the info from the router. 2908 */ 2909 tp1->rec.data.doing_fast_retransmit = 1; 2910 /* 2911 * mark the tsn with what sequences can 2912 * cause a new FR. 
2913 */ 2914 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 2915 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 2916 } else { 2917 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 2918 } 2919 2920 /* restart the timer */ 2921 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2922 stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24); 2923 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2924 stcb, tp1->whoTo); 2925 2926 /* fix counts and things */ 2927 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 2928 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP, 2929 tp1->whoTo->flight_size, 2930 tp1->book_size, 2931 (uintptr_t) stcb, 2932 tp1->rec.data.TSN_seq); 2933 } 2934 sctp_flight_size_decrease(tp1); 2935 sctp_total_flight_decrease(stcb, tp1); 2936 } { 2937 /* audit code */ 2938 unsigned int audit; 2939 2940 audit = 0; 2941 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 2942 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2943 audit++; 2944 } 2945 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue, 2946 sctp_next) { 2947 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2948 audit++; 2949 } 2950 if (audit != stcb->asoc.sent_queue_retran_cnt) { 2951 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n", 2952 audit, stcb->asoc.sent_queue_retran_cnt); 2953 #ifndef SCTP_AUDITING_ENABLED 2954 stcb->asoc.sent_queue_retran_cnt = audit; 2955 #endif 2956 } 2957 } 2958 } 2959 break; 2960 case SCTP_ASCONF: 2961 { 2962 struct sctp_tmit_chunk *asconf; 2963 2964 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue, 2965 sctp_next) { 2966 if (asconf->rec.chunk_id.id == SCTP_ASCONF) { 2967 break; 2968 } 2969 } 2970 if (asconf) { 2971 if (asconf->sent != SCTP_DATAGRAM_RESEND) 2972 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2973 asconf->sent = SCTP_DATAGRAM_RESEND; 2974 asconf->snd_count--; 2975 } 2976 } 2977 break; 2978 case SCTP_INITIATION: 2979 /* resend the INIT */ 2980 stcb->asoc.dropped_special_cnt++; 2981 if 
(stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) { 2982 /* 2983 * If we can get it in, in a few attempts we do 2984 * this, otherwise we let the timer fire. 2985 */ 2986 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, 2987 stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25); 2988 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 2989 } 2990 break; 2991 case SCTP_SELECTIVE_ACK: 2992 /* resend the sack */ 2993 sctp_send_sack(stcb); 2994 break; 2995 case SCTP_HEARTBEAT_REQUEST: 2996 /* resend a demand HB */ 2997 if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) { 2998 /* 2999 * Only retransmit if we KNOW we wont destroy the 3000 * tcb 3001 */ 3002 (void)sctp_send_hb(stcb, 1, net); 3003 } 3004 break; 3005 case SCTP_SHUTDOWN: 3006 sctp_send_shutdown(stcb, net); 3007 break; 3008 case SCTP_SHUTDOWN_ACK: 3009 sctp_send_shutdown_ack(stcb, net); 3010 break; 3011 case SCTP_COOKIE_ECHO: 3012 { 3013 struct sctp_tmit_chunk *cookie; 3014 3015 cookie = NULL; 3016 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, 3017 sctp_next) { 3018 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 3019 break; 3020 } 3021 } 3022 if (cookie) { 3023 if (cookie->sent != SCTP_DATAGRAM_RESEND) 3024 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3025 cookie->sent = SCTP_DATAGRAM_RESEND; 3026 sctp_stop_all_cookie_timers(stcb); 3027 } 3028 } 3029 break; 3030 case SCTP_COOKIE_ACK: 3031 sctp_send_cookie_ack(stcb); 3032 break; 3033 case SCTP_ASCONF_ACK: 3034 /* resend last asconf ack */ 3035 sctp_send_asconf_ack(stcb); 3036 break; 3037 case SCTP_FORWARD_CUM_TSN: 3038 send_forward_tsn(stcb, &stcb->asoc); 3039 break; 3040 /* can't do anything with these */ 3041 case SCTP_PACKET_DROPPED: 3042 case SCTP_INITIATION_ACK: /* this should not happen */ 3043 case SCTP_HEARTBEAT_ACK: 3044 case SCTP_ABORT_ASSOCIATION: 3045 case SCTP_OPERATION_ERROR: 3046 case SCTP_SHUTDOWN_COMPLETE: 3047 case SCTP_ECN_ECHO: 3048 case SCTP_ECN_CWR: 3049 default: 3050 break; 3051 } 
3052 return (0); 3053 } 3054 3055 void 3056 sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 3057 { 3058 int i; 3059 uint16_t temp; 3060 3061 /* 3062 * We set things to 0xffff since this is the last delivered sequence 3063 * and we will be sending in 0 after the reset. 3064 */ 3065 3066 if (number_entries) { 3067 for (i = 0; i < number_entries; i++) { 3068 temp = ntohs(list[i]); 3069 if (temp >= stcb->asoc.streamincnt) { 3070 continue; 3071 } 3072 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff; 3073 } 3074 } else { 3075 list = NULL; 3076 for (i = 0; i < stcb->asoc.streamincnt; i++) { 3077 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff; 3078 } 3079 } 3080 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3081 } 3082 3083 static void 3084 sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 3085 { 3086 int i; 3087 3088 if (number_entries == 0) { 3089 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3090 stcb->asoc.strmout[i].next_sequence_sent = 0; 3091 } 3092 } else if (number_entries) { 3093 for (i = 0; i < number_entries; i++) { 3094 uint16_t temp; 3095 3096 temp = ntohs(list[i]); 3097 if (temp >= stcb->asoc.streamoutcnt) { 3098 /* no such stream */ 3099 continue; 3100 } 3101 stcb->asoc.strmout[temp].next_sequence_sent = 0; 3102 } 3103 } 3104 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3105 } 3106 3107 3108 struct sctp_stream_reset_out_request * 3109 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk) 3110 { 3111 struct sctp_association *asoc; 3112 struct sctp_stream_reset_out_req *req; 3113 struct sctp_stream_reset_out_request *r; 3114 struct sctp_tmit_chunk *chk; 3115 int len, clen; 3116 3117 asoc = &stcb->asoc; 3118 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 3119 asoc->stream_reset_outstanding = 0; 3120 return (NULL); 3121 } 3122 if 
(stcb->asoc.str_reset == NULL) { 3123 asoc->stream_reset_outstanding = 0; 3124 return (NULL); 3125 } 3126 chk = stcb->asoc.str_reset; 3127 if (chk->data == NULL) { 3128 return (NULL); 3129 } 3130 if (bchk) { 3131 /* he wants a copy of the chk pointer */ 3132 *bchk = chk; 3133 } 3134 clen = chk->send_size; 3135 req = mtod(chk->data, struct sctp_stream_reset_out_req *); 3136 r = &req->sr_req; 3137 if (ntohl(r->request_seq) == seq) { 3138 /* found it */ 3139 return (r); 3140 } 3141 len = SCTP_SIZE32(ntohs(r->ph.param_length)); 3142 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) { 3143 /* move to the next one, there can only be a max of two */ 3144 r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len); 3145 if (ntohl(r->request_seq) == seq) { 3146 return (r); 3147 } 3148 } 3149 /* that seq is not here */ 3150 return (NULL); 3151 } 3152 3153 static void 3154 sctp_clean_up_stream_reset(struct sctp_tcb *stcb) 3155 { 3156 struct sctp_association *asoc; 3157 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset; 3158 3159 if (stcb->asoc.str_reset == NULL) { 3160 return; 3161 } 3162 asoc = &stcb->asoc; 3163 3164 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26); 3165 TAILQ_REMOVE(&asoc->control_send_queue, 3166 chk, 3167 sctp_next); 3168 if (chk->data) { 3169 sctp_m_freem(chk->data); 3170 chk->data = NULL; 3171 } 3172 asoc->ctrl_queue_cnt--; 3173 sctp_free_a_chunk(stcb, chk); 3174 /* sa_ignore NO_NULL_CHK */ 3175 stcb->asoc.str_reset = NULL; 3176 } 3177 3178 3179 static int 3180 sctp_handle_stream_reset_response(struct sctp_tcb *stcb, 3181 uint32_t seq, uint32_t action, 3182 struct sctp_stream_reset_response *respin) 3183 { 3184 uint16_t type; 3185 int lparm_len; 3186 struct sctp_association *asoc = &stcb->asoc; 3187 struct sctp_tmit_chunk *chk; 3188 struct sctp_stream_reset_out_request *srparam; 3189 int number_entries; 3190 3191 if (asoc->stream_reset_outstanding == 0) { 3192 /* duplicate */ 3193 
return (0); 3194 } 3195 if (seq == stcb->asoc.str_reset_seq_out) { 3196 srparam = sctp_find_stream_reset(stcb, seq, &chk); 3197 if (srparam) { 3198 stcb->asoc.str_reset_seq_out++; 3199 type = ntohs(srparam->ph.param_type); 3200 lparm_len = ntohs(srparam->ph.param_length); 3201 if (type == SCTP_STR_RESET_OUT_REQUEST) { 3202 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t); 3203 asoc->stream_reset_out_is_outstanding = 0; 3204 if (asoc->stream_reset_outstanding) 3205 asoc->stream_reset_outstanding--; 3206 if (action == SCTP_STREAM_RESET_PERFORMED) { 3207 /* do it */ 3208 sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams); 3209 } else { 3210 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3211 } 3212 } else if (type == SCTP_STR_RESET_IN_REQUEST) { 3213 /* Answered my request */ 3214 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t); 3215 if (asoc->stream_reset_outstanding) 3216 asoc->stream_reset_outstanding--; 3217 if (action != SCTP_STREAM_RESET_PERFORMED) { 3218 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3219 } 3220 } else if (type == SCTP_STR_RESET_TSN_REQUEST) { 3221 /** 3222 * a) Adopt the new in tsn. 3223 * b) reset the map 3224 * c) Adopt the new out-tsn 3225 */ 3226 struct sctp_stream_reset_response_tsn *resp; 3227 struct sctp_forward_tsn_chunk fwdtsn; 3228 int abort_flag = 0; 3229 3230 if (respin == NULL) { 3231 /* huh ? 
*/ 3232 return (0); 3233 } 3234 if (action == SCTP_STREAM_RESET_PERFORMED) { 3235 resp = (struct sctp_stream_reset_response_tsn *)respin; 3236 asoc->stream_reset_outstanding--; 3237 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3238 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3239 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1); 3240 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3241 if (abort_flag) { 3242 return (1); 3243 } 3244 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1); 3245 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3246 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn); 3247 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3248 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn); 3249 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn; 3250 3251 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3252 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3253 3254 } 3255 } 3256 /* get rid of the request and get the request flags */ 3257 if (asoc->stream_reset_outstanding == 0) { 3258 sctp_clean_up_stream_reset(stcb); 3259 } 3260 } 3261 } 3262 return (0); 3263 } 3264 3265 static void 3266 sctp_handle_str_reset_request_in(struct sctp_tcb *stcb, 3267 struct sctp_tmit_chunk *chk, 3268 struct sctp_stream_reset_in_request *req, int trunc) 3269 { 3270 uint32_t seq; 3271 int len, i; 3272 int number_entries; 3273 uint16_t temp; 3274 3275 /* 3276 * peer wants me to send a str-reset to him for my outgoing seq's if 3277 * seq_in is right. 
3278 */ 3279 struct sctp_association *asoc = &stcb->asoc; 3280 3281 seq = ntohl(req->request_seq); 3282 if (asoc->str_reset_seq_in == seq) { 3283 if (trunc) { 3284 /* Can't do it, since they exceeded our buffer size */ 3285 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3286 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3287 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3288 } else if (stcb->asoc.stream_reset_out_is_outstanding == 0) { 3289 len = ntohs(req->ph.param_length); 3290 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t)); 3291 for (i = 0; i < number_entries; i++) { 3292 temp = ntohs(req->list_of_streams[i]); 3293 req->list_of_streams[i] = temp; 3294 } 3295 /* move the reset action back one */ 3296 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3297 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3298 sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams, 3299 asoc->str_reset_seq_out, 3300 seq, (asoc->sending_seq - 1)); 3301 asoc->stream_reset_out_is_outstanding = 1; 3302 asoc->str_reset = chk; 3303 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 3304 stcb->asoc.stream_reset_outstanding++; 3305 } else { 3306 /* Can't do it, since we have sent one out */ 3307 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3308 asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER; 3309 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3310 } 3311 asoc->str_reset_seq_in++; 3312 } else if (asoc->str_reset_seq_in - 1 == seq) { 3313 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3314 } else if (asoc->str_reset_seq_in - 2 == seq) { 3315 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3316 } else { 3317 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3318 } 3319 } 3320 3321 static int 3322 sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb, 
3323 struct sctp_tmit_chunk *chk, 3324 struct sctp_stream_reset_tsn_request *req) 3325 { 3326 /* reset all in and out and update the tsn */ 3327 /* 3328 * A) reset my str-seq's on in and out. B) Select a receive next, 3329 * and set cum-ack to it. Also process this selected number as a 3330 * fwd-tsn as well. C) set in the response my next sending seq. 3331 */ 3332 struct sctp_forward_tsn_chunk fwdtsn; 3333 struct sctp_association *asoc = &stcb->asoc; 3334 int abort_flag = 0; 3335 uint32_t seq; 3336 3337 seq = ntohl(req->request_seq); 3338 if (asoc->str_reset_seq_in == seq) { 3339 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3340 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3341 fwdtsn.ch.chunk_flags = 0; 3342 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1); 3343 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3344 if (abort_flag) { 3345 return (1); 3346 } 3347 stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA; 3348 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3349 stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1; 3350 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3351 atomic_add_int(&stcb->asoc.sending_seq, 1); 3352 /* save off historical data for retrans */ 3353 stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0]; 3354 stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq; 3355 stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0]; 3356 stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn; 3357 3358 sctp_add_stream_reset_result_tsn(chk, 3359 ntohl(req->request_seq), 3360 SCTP_STREAM_RESET_PERFORMED, 3361 stcb->asoc.sending_seq, 3362 stcb->asoc.mapping_array_base_tsn); 3363 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3364 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3365 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0]; 3366 
stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3367 3368 asoc->str_reset_seq_in++; 3369 } else if (asoc->str_reset_seq_in - 1 == seq) { 3370 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0], 3371 stcb->asoc.last_sending_seq[0], 3372 stcb->asoc.last_base_tsnsent[0] 3373 ); 3374 } else if (asoc->str_reset_seq_in - 2 == seq) { 3375 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1], 3376 stcb->asoc.last_sending_seq[1], 3377 stcb->asoc.last_base_tsnsent[1] 3378 ); 3379 } else { 3380 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3381 } 3382 return (0); 3383 } 3384 3385 static void 3386 sctp_handle_str_reset_request_out(struct sctp_tcb *stcb, 3387 struct sctp_tmit_chunk *chk, 3388 struct sctp_stream_reset_out_request *req, int trunc) 3389 { 3390 uint32_t seq, tsn; 3391 int number_entries, len; 3392 struct sctp_association *asoc = &stcb->asoc; 3393 3394 seq = ntohl(req->request_seq); 3395 3396 /* now if its not a duplicate we process it */ 3397 if (asoc->str_reset_seq_in == seq) { 3398 len = ntohs(req->ph.param_length); 3399 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t)); 3400 /* 3401 * the sender is resetting, handle the list issue.. we must 3402 * a) verify if we can do the reset, if so no problem b) If 3403 * we can't do the reset we must copy the request. c) queue 3404 * it, and setup the data in processor to trigger it off 3405 * when needed and dequeue all the queued data. 
3406 */ 3407 tsn = ntohl(req->send_reset_at_tsn); 3408 3409 /* move the reset action back one */ 3410 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3411 if (trunc) { 3412 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3413 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3414 } else if ((tsn == asoc->cumulative_tsn) || 3415 (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) { 3416 /* we can do it now */ 3417 sctp_reset_in_stream(stcb, number_entries, req->list_of_streams); 3418 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3419 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3420 } else { 3421 /* 3422 * we must queue it up and thus wait for the TSN's 3423 * to arrive that are at or before tsn 3424 */ 3425 struct sctp_stream_reset_list *liste; 3426 int siz; 3427 3428 siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t)); 3429 SCTP_MALLOC(liste, struct sctp_stream_reset_list *, 3430 siz, SCTP_M_STRESET); 3431 if (liste == NULL) { 3432 /* gak out of memory */ 3433 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3434 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3435 return; 3436 } 3437 liste->tsn = tsn; 3438 liste->number_entries = number_entries; 3439 memcpy(&liste->req, req, 3440 (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t)))); 3441 TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp); 3442 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3443 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3444 } 3445 asoc->str_reset_seq_in++; 3446 } else if ((asoc->str_reset_seq_in - 1) == seq) { 3447 /* 3448 * one seq back, just echo back last action since my 3449 * response was lost. 
3450 */ 3451 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3452 } else if ((asoc->str_reset_seq_in - 2) == seq) { 3453 /* 3454 * two seq back, just echo back last action since my 3455 * response was lost. 3456 */ 3457 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3458 } else { 3459 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3460 } 3461 } 3462 3463 #ifdef __GNUC__ 3464 __attribute__((noinline)) 3465 #endif 3466 static int 3467 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset, 3468 struct sctp_stream_reset_out_req *sr_req) 3469 { 3470 int chk_length, param_len, ptype; 3471 struct sctp_paramhdr pstore; 3472 uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE]; 3473 3474 uint32_t seq; 3475 int num_req = 0; 3476 int trunc = 0; 3477 struct sctp_tmit_chunk *chk; 3478 struct sctp_chunkhdr *ch; 3479 struct sctp_paramhdr *ph; 3480 int ret_code = 0; 3481 int num_param = 0; 3482 3483 /* now it may be a reset or a reset-response */ 3484 chk_length = ntohs(sr_req->ch.chunk_length); 3485 3486 /* setup for adding the response */ 3487 sctp_alloc_a_chunk(stcb, chk); 3488 if (chk == NULL) { 3489 return (ret_code); 3490 } 3491 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 3492 chk->rec.chunk_id.can_take_data = 0; 3493 chk->asoc = &stcb->asoc; 3494 chk->no_fr_allowed = 0; 3495 chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr); 3496 chk->book_size_scale = 0; 3497 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3498 if (chk->data == NULL) { 3499 strres_nochunk: 3500 if (chk->data) { 3501 sctp_m_freem(chk->data); 3502 chk->data = NULL; 3503 } 3504 sctp_free_a_chunk(stcb, chk); 3505 return (ret_code); 3506 } 3507 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 3508 3509 /* setup chunk parameters */ 3510 chk->sent = SCTP_DATAGRAM_UNSENT; 3511 chk->snd_count = 0; 3512 chk->whoTo = stcb->asoc.primary_destination; 3513 atomic_add_int(&chk->whoTo->ref_count, 1); 3514 3515 ch = 
mtod(chk->data, struct sctp_chunkhdr *); 3516 ch->chunk_type = SCTP_STREAM_RESET; 3517 ch->chunk_flags = 0; 3518 ch->chunk_length = htons(chk->send_size); 3519 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 3520 offset += sizeof(struct sctp_chunkhdr); 3521 while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) { 3522 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore); 3523 if (ph == NULL) 3524 break; 3525 param_len = ntohs(ph->param_length); 3526 if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) { 3527 /* bad param */ 3528 break; 3529 } 3530 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)), 3531 (uint8_t *) & cstore); 3532 ptype = ntohs(ph->param_type); 3533 num_param++; 3534 if (param_len > (int)sizeof(cstore)) { 3535 trunc = 1; 3536 } else { 3537 trunc = 0; 3538 } 3539 3540 if (num_param > SCTP_MAX_RESET_PARAMS) { 3541 /* hit the max of parameters already sorry.. 
*/ 3542 break; 3543 } 3544 if (ptype == SCTP_STR_RESET_OUT_REQUEST) { 3545 struct sctp_stream_reset_out_request *req_out; 3546 3547 req_out = (struct sctp_stream_reset_out_request *)ph; 3548 num_req++; 3549 if (stcb->asoc.stream_reset_outstanding) { 3550 seq = ntohl(req_out->response_seq); 3551 if (seq == stcb->asoc.str_reset_seq_out) { 3552 /* implicit ack */ 3553 (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL); 3554 } 3555 } 3556 sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc); 3557 } else if (ptype == SCTP_STR_RESET_IN_REQUEST) { 3558 struct sctp_stream_reset_in_request *req_in; 3559 3560 num_req++; 3561 3562 req_in = (struct sctp_stream_reset_in_request *)ph; 3563 3564 sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc); 3565 } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) { 3566 struct sctp_stream_reset_tsn_request *req_tsn; 3567 3568 num_req++; 3569 req_tsn = (struct sctp_stream_reset_tsn_request *)ph; 3570 3571 if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) { 3572 ret_code = 1; 3573 goto strres_nochunk; 3574 } 3575 /* no more */ 3576 break; 3577 } else if (ptype == SCTP_STR_RESET_RESPONSE) { 3578 struct sctp_stream_reset_response *resp; 3579 uint32_t result; 3580 3581 resp = (struct sctp_stream_reset_response *)ph; 3582 seq = ntohl(resp->response_seq); 3583 result = ntohl(resp->result); 3584 if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) { 3585 ret_code = 1; 3586 goto strres_nochunk; 3587 } 3588 } else { 3589 break; 3590 } 3591 offset += SCTP_SIZE32(param_len); 3592 chk_length -= SCTP_SIZE32(param_len); 3593 } 3594 if (num_req == 0) { 3595 /* we have no response free the stuff */ 3596 goto strres_nochunk; 3597 } 3598 /* ok we have a chunk to link in */ 3599 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, 3600 chk, 3601 sctp_next); 3602 stcb->asoc.ctrl_queue_cnt++; 3603 return (ret_code); 3604 } 3605 3606 /* 3607 * Handle a router or endpoints report of a packet loss, 
there are two ways
 * to handle this, either we get the whole packet and must disect it
 * ourselves (possibly with truncation and or corruption) or it is a summary
 * from a middle box that did the disectting for us.
 */
static void
sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
{
	uint32_t bottle_bw, on_queue;
	uint16_t trunc_len;
	unsigned int chlen;
	unsigned int at;
	struct sctp_chunk_desc desc;
	struct sctp_chunkhdr *ch;

	chlen = ntohs(cp->ch.chunk_length);
	if (chlen < sizeof(struct sctp_pktdrop_chunk)) {
		/*
		 * BUGFIX (was "XXX possible chlen underflow"): a
		 * chunk_length smaller than the fixed header would wrap the
		 * unsigned subtraction below and make us walk wild memory.
		 */
		return;
	}
	chlen -= sizeof(struct sctp_pktdrop_chunk);
	if (chlen == 0) {
		ch = NULL;
		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
			SCTP_STAT_INCR(sctps_pdrpbwrpt);
	} else if (chlen < sizeof(struct sctphdr)) {
		/*
		 * BUGFIX (was "XXX possible chlen underflow"): not enough
		 * reflected data to even hold the dropped packet's common
		 * header; treat as a bandwidth-only report.
		 */
		ch = NULL;
		chlen = 0;
	} else {
		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
		chlen -= sizeof(struct sctphdr);
		memset(&desc, 0, sizeof(desc));
	}
	trunc_len = (uint16_t) ntohs(cp->trunc_len);
	if (trunc_len > limit) {
		trunc_len = limit;
	}
	/* now the chunks themselves */
	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
		desc.chunk_type = ch->chunk_type;
		/* get amount we need to move */
		at = ntohs(ch->chunk_length);
		if (at < sizeof(struct sctp_chunkhdr)) {
			/* corrupt chunk, maybe at the end? */
			SCTP_STAT_INCR(sctps_pdrpcrupt);
			break;
		}
		if (trunc_len == 0) {
			/* we are supposed to have all of it */
			if (at > chlen) {
				/* corrupt skip it */
				SCTP_STAT_INCR(sctps_pdrpcrupt);
				break;
			}
		} else {
			/* is there enough of it left? */
			if (desc.chunk_type == SCTP_DATA) {
				if (chlen < (sizeof(struct sctp_data_chunk) +
				    sizeof(desc.data_bytes))) {
					break;
				}
			} else {
				if (chlen < sizeof(struct sctp_chunkhdr)) {
					break;
				}
			}
		}
		if (desc.chunk_type == SCTP_DATA) {
			/* can we get out the tsn? */
			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
				SCTP_STAT_INCR(sctps_pdrpmbda);

			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
				/* yep */
				struct sctp_data_chunk *dcp;
				uint8_t *ddp;
				unsigned int iii;

				dcp = (struct sctp_data_chunk *)ch;
				ddp = (uint8_t *) (dcp + 1);
				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
					desc.data_bytes[iii] = ddp[iii];
				}
				desc.tsn_ifany = dcp->dp.tsn;
			} else {
				/* nope we are done. */
				SCTP_STAT_INCR(sctps_pdrpnedat);
				break;
			}
		} else {
			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
				SCTP_STAT_INCR(sctps_pdrpmbct);
		}

		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
			SCTP_STAT_INCR(sctps_pdrppdbrk);
			break;
		}
		if (SCTP_SIZE32(at) > chlen) {
			break;
		}
		chlen -= SCTP_SIZE32(at);
		if (chlen < sizeof(struct sctp_chunkhdr)) {
			/* done, none left */
			break;
		}
		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
	}
	/* Now update any rwnd --- possibly */
	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
		/* From a peer, we get a rwnd report */
		uint32_t a_rwnd;

		SCTP_STAT_INCR(sctps_pdrpfehos);

		bottle_bw = ntohl(cp->bottle_bw);
		on_queue = ntohl(cp->current_onq);
		if (bottle_bw && on_queue) {
			/* a rwnd report is in here */
			if (bottle_bw > on_queue)
				a_rwnd = bottle_bw - on_queue;
			else
				a_rwnd = 0;

			if (a_rwnd == 0)
				stcb->asoc.peers_rwnd = 0;
			else {
				if (a_rwnd > stcb->asoc.total_flight) {
					stcb->asoc.peers_rwnd =
					    a_rwnd - stcb->asoc.total_flight;
				} else {
					stcb->asoc.peers_rwnd = 0;
				}
				if (stcb->asoc.peers_rwnd <
				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
					/* SWS sender side engages */
					stcb->asoc.peers_rwnd = 0;
				}
			}
		}
	} else {
		SCTP_STAT_INCR(sctps_pdrpfmbox);
	}

	/* now middle boxes in sat networks get a cwnd bump */
	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
	    (stcb->asoc.sat_network)) {
		/*
		 * This is debateable but for sat networks it makes sense
		 * Note if a T3 timer has went off, we will prohibit any
		 * changes to cwnd until we exit the t3 loss recovery.
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
		    net, cp, &bottle_bw, &on_queue);
	}
}

/*
 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
 * offset: offset into the mbuf chain to first chunkhdr - length: is the
 * length of the complete packet outputs: - length: modified to remaining
 * length after control processing - netp: modified to new sctp_nets after
 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
 * bad packet,...
 * otherwise return the tcb for this packet
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static struct sctp_tcb *
sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	uint32_t vtag_in;
	int num_chunks = 0;	/* number of control chunks processed */
	uint32_t chk_length;
	int ret;
	int abort_no_unlock = 0;

	/*
	 * How big should this be, and should it be alloc'd? Lets try the
	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
	 * until we get into jumbo grams and such..
	 */
	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
	/*
	 * locked_tcb tracks the TCB whose lock we currently hold; every
	 * early-return path below must unlock it before returning NULL.
	 */
	struct sctp_tcb *locked_tcb = stcb;
	int got_auth = 0;
	uint32_t auth_offset = 0, auth_len = 0;
	int auth_skipped = 0;
	int asconf_cnt = 0;	/* ASCONFs handled; ack them once at the end */

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
	    iphlen, *offset, length, stcb);

	/* validate chunk header length... */
	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
		    ntohs(ch->chunk_length));
		if (locked_tcb) {
			SCTP_TCB_UNLOCK(locked_tcb);
		}
		return (NULL);
	}
	/*
	 * validate the verification tag
	 */
	vtag_in = ntohl(sh->v_tag);

	if (locked_tcb) {
		SCTP_TCB_LOCK_ASSERT(locked_tcb);
	}
	if (ch->chunk_type == SCTP_INITIATION) {
		/* An INIT must carry a zero verification tag (RFC 4960). */
		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
		    ntohs(ch->chunk_length), vtag_in);
		if (vtag_in != 0) {
			/* protocol error- silently discard... */
			SCTP_STAT_INCR(sctps_badvtag);
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
		/*
		 * If there is no stcb, skip the AUTH chunk and process
		 * later after a stcb is found (to validate the lookup was
		 * valid.
		 */
		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
		    (stcb == NULL) && !sctp_auth_disable) {
			/* save this chunk for later processing */
			auth_skipped = 1;
			auth_offset = *offset;
			auth_len = ntohs(ch->chunk_length);

			/* (temporarily) move past this chunk */
			*offset += SCTP_SIZE32(auth_len);
			if (*offset >= length) {
				/* no more data left in the mbuf chain */
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
			    sizeof(struct sctp_chunkhdr), chunk_buf);
		}
		if (ch == NULL) {
			/* Help */
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
			goto process_control_chunks;
		}
		/*
		 * first check if it's an ASCONF with an unknown src addr we
		 * need to look inside to find the association
		 */
		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
			struct sctp_chunkhdr *asconf_ch = ch;
			uint32_t asconf_offset = 0, asconf_len = 0;

			/* inp's refcount may be reduced */
			SCTP_INP_INCR_REF(inp);

			asconf_offset = *offset;
			do {
				asconf_len = ntohs(asconf_ch->chunk_length);
				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
					break;
				stcb = sctp_findassociation_ep_asconf(m, iphlen,
				    *offset, sh, &inp, netp);
				if (stcb != NULL)
					break;
				asconf_offset += SCTP_SIZE32(asconf_len);
				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
				    sizeof(struct sctp_chunkhdr), chunk_buf);
			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
			if (stcb == NULL) {
				/*
				 * reduce inp's refcount if not reduced in
				 * sctp_findassociation_ep_asconf().
				 */
				SCTP_INP_DECR_REF(inp);
			} else {
				locked_tcb = stcb;
			}

			/* now go back and verify any auth chunk to be sure */
			if (auth_skipped && (stcb != NULL)) {
				struct sctp_auth_chunk *auth;

				auth = (struct sctp_auth_chunk *)
				    sctp_m_getptr(m, auth_offset,
				    auth_len, chunk_buf);
				got_auth = 1;
				auth_skipped = 0;
				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
				    auth_offset)) {
					/* auth HMAC failed so dump it */
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				} else {
					/* remaining chunks are HMAC checked */
					stcb->asoc.authenticated = 1;
				}
			}
		}
		if (stcb == NULL) {
			/* no association, so it's out of the blue... */
			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
			    vrf_id);
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		asoc = &stcb->asoc;
		/* ABORT and SHUTDOWN can use either v_tag... */
		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
			if ((vtag_in == asoc->my_vtag) ||
			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
			    (vtag_in == asoc->peer_vtag))) {
				/* this is valid */
			} else {
				/* drop this packet... */
				SCTP_STAT_INCR(sctps_badvtag);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			if (vtag_in != asoc->my_vtag) {
				/*
				 * this could be a stale SHUTDOWN-ACK or the
				 * peer never got the SHUTDOWN-COMPLETE and
				 * is still hung; we have started a new asoc
				 * but it won't complete until the shutdown
				 * is completed
				 */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
				    NULL, vrf_id);
				return (NULL);
			}
		} else {
			/* for all other chunks, vtag must match */
			if (vtag_in != asoc->my_vtag) {
				/* invalid vtag... */
				SCTPDBG(SCTP_DEBUG_INPUT3,
				    "invalid vtag: %xh, expect %xh\n",
				    vtag_in, asoc->my_vtag);
				SCTP_STAT_INCR(sctps_badvtag);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
		}
	}			/* end if !SCTP_COOKIE_ECHO */
	/*
	 * process all control chunks...
	 */
	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* implied cookie-ack.. we must have lost the ack */
		if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
		    *netp);
	}
process_control_chunks:
	/* Walk every control chunk in the packet. */
	while (IS_SCTP_CONTROL(ch)) {
		/* validate chunk length */
		chk_length = ntohs(ch->chunk_length);
		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
		    ch->chunk_type, chk_length);
		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
		if (chk_length < sizeof(*ch) ||
		    (*offset + (int)chk_length) > length) {
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
		/*
		 * INIT-ACK only gets the init ack "header" portion only
		 * because we don't have to process the peer's COOKIE. All
		 * others get a complete chunk.
		 */
		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
		    (ch->chunk_type == SCTP_INITIATION)) {
			/* get an init-ack chunk */
			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
			if (ch == NULL) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
		} else {
			/* For cookies and all other chunks. */
			if (chk_length > sizeof(chunk_buf)) {
				/*
				 * use just the size of the chunk buffer so
				 * the front part of our chunks fit in
				 * contiguous space up to the chunk buffer
				 * size (508 bytes). For chunks that need to
				 * get more than that they must use the
				 * sctp_m_getptr() function or other means
				 * (e.g. know how to parse mbuf chains).
				 * Cookies do this already.
				 */
				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
				    (sizeof(chunk_buf) - 4),
				    chunk_buf);
				if (ch == NULL) {
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
			} else {
				/* We can fit it all */
				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
				    chk_length, chunk_buf);
				if (ch == NULL) {
					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
			}
		}
		num_chunks++;
		/* Save off the last place we got a control from */
		if (stcb != NULL) {
			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
				/*
				 * allow last_control to be NULL if
				 * ASCONF... ASCONF processing will find the
				 * right net later
				 */
				if ((netp != NULL) && (*netp != NULL))
					stcb->asoc.last_control_chunk_from = *netp;
			}
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xB0, ch->chunk_type);
#endif

		/* check to see if this chunk required auth, but isn't */
		if ((stcb != NULL) && !sctp_auth_disable &&
		    sctp_auth_is_required_chunk(ch->chunk_type,
		    stcb->asoc.local_auth_chunks) &&
		    !stcb->asoc.authenticated) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			goto next_chunk;
		}
		/* Dispatch on chunk type. */
		switch (ch->chunk_type) {
		case SCTP_INITIATION:
			/* must be first and only chunk */
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore? */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					/*
					 * collision case where we are
					 * sending to them too
					 */
					;
				} else {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					*offset = length;
					return (NULL);
				}
			}
			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
			    (num_chunks > 1) ||
			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((stcb != NULL) &&
			    (SCTP_GET_STATE(&stcb->asoc) ==
			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
				sctp_send_shutdown_ack(stcb,
				    stcb->asoc.primary_destination);
				*offset = length;
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (netp) {
				sctp_handle_init(m, iphlen, *offset, sh,
				    (struct sctp_init_chunk *)ch, inp,
				    stcb, *netp, &abort_no_unlock, vrf_id);
			}
			if (abort_no_unlock)
				return (NULL);

			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
			break;
		case SCTP_PAD_CHUNK:
			break;
		case SCTP_INITIATION_ACK:
			/* must be first and only chunk */
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					;
				} else {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					*offset = length;
					if (stcb) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
						so = SCTP_INP_SO(inp);
						atomic_add_int(&stcb->asoc.refcnt, 1);
						SCTP_TCB_UNLOCK(stcb);
						SCTP_SOCKET_LOCK(so, 1);
						SCTP_TCB_LOCK(stcb);
						atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
						SCTP_SOCKET_UNLOCK(so, 1);
#endif
					}
					return (NULL);
				}
			}
			if ((num_chunks > 1) ||
			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((netp) && (*netp)) {
				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
			} else {
				ret = -1;
			}
			/*
			 * Special case, I must call the output routine to
			 * get the cookie echoed
			 */
			if (abort_no_unlock)
				return (NULL);

			if ((stcb) && ret == 0)
				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
			break;
		case SCTP_SELECTIVE_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
			SCTP_STAT_INCR(sctps_recvsacks);
			{
				struct sctp_sack_chunk *sack;
				int abort_now = 0;
				uint32_t a_rwnd, cum_ack;
				uint16_t num_seg;
				int nonce_sum_flag;

				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
			ignore_sack:
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
					/*-
					 * If we have sent a shutdown-ack, we will pay no
					 * attention to a sack sent in to us since
					 * we don't care anymore.
					 */
					goto ignore_sack;
				}
				sack = (struct sctp_sack_chunk *)ch;
				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
				cum_ack = ntohl(sack->sack.cum_tsn_ack);
				num_seg = ntohs(sack->sack.num_gap_ack_blks);
				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
				    cum_ack,
				    num_seg,
				    a_rwnd
				    );
				stcb->asoc.seen_a_sack_this_pkt = 1;
				if ((stcb->asoc.pr_sctp_cnt == 0) &&
				    (num_seg == 0) &&
				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
				    (cum_ack == stcb->asoc.last_acked_seq)) &&
				    (stcb->asoc.saw_sack_with_frags == 0) &&
				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
				    ) {
					/*
					 * We have a SIMPLE sack having no
					 * prior segments and data on sent
					 * queue to be acked.. Use the
					 * faster path sack processing. We
					 * also allow window update sacks
					 * with no missing segments to go
					 * this way too.
					 */
					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
					    &abort_now);
				} else {
					if (netp && *netp)
						sctp_handle_sack(m, *offset,
						    sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
				}
				if (abort_now) {
					/* ABORT signal from sack processing */
					*offset = length;
					return (NULL);
				}
			}
			break;
		case SCTP_HEARTBEAT_REQUEST:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
			if ((stcb) && netp && *netp) {
				SCTP_STAT_INCR(sctps_recvheartbeat);
				sctp_send_heartbeat_ack(stcb, m, *offset,
				    chk_length, *netp);

				/* He's alive so give him credit */
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
			}
			break;
		case SCTP_HEARTBEAT_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
				/* Its not ours */
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			/* He's alive so give him credit */
			if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			SCTP_STAT_INCR(sctps_recvheartbeatack);
			if (netp && *netp)
				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
				    stcb, *netp);
			break;
		case SCTP_ABORT_ASSOCIATION:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
			    stcb);
			if ((stcb) && netp && *netp)
				sctp_handle_abort((struct sctp_abort_chunk *)ch,
				    stcb, *netp);
			*offset = length;
			return (NULL);
			break;
		case SCTP_SHUTDOWN:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
			    stcb);
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (netp && *netp) {
				int abort_flag = 0;

				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
				    stcb, *netp, &abort_flag);
				if (abort_flag) {
					*offset = length;
					return (NULL);
				}
			}
			break;
		case SCTP_SHUTDOWN_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
			if ((stcb) && (netp) && (*netp))
				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
			*offset = length;
			return (NULL);
			break;

		case SCTP_OPERATION_ERROR:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {

				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_COOKIE_ECHO:
			SCTPDBG(SCTP_DEBUG_INPUT3,
			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
				;
			} else {
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* We are not interested anymore */
					*offset = length;
					return (NULL);
				}
			}
			/*
			 * First are we accepting? We do this again here
			 * since it is possible that a previous endpoint
			 * WAS listening responded to a INIT-ACK and then
			 * closed. We opened and bound.. and are now no
			 * longer listening.
			 */

			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
				    (sctp_abort_if_one_2_one_hits_limit)) {
					struct mbuf *oper;
					struct sctp_paramhdr *phdr;

					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr);
						phdr = mtod(oper,
						    struct sctp_paramhdr *);
						phdr->param_type =
						    htons(SCTP_CAUSE_OUT_OF_RESC);
						phdr->param_length =
						    htons(sizeof(struct sctp_paramhdr));
					}
					sctp_abort_association(inp, stcb, m,
					    iphlen, sh, oper, vrf_id);
				}
				*offset = length;
				return (NULL);
			} else {
				struct mbuf *ret_buf;
				struct sctp_inpcb *linp;

				if (stcb) {
					linp = NULL;
				} else {
					linp = inp;
				}

				if (linp) {
					SCTP_ASOC_CREATE_LOCK(linp);
				}
				if (netp) {
					ret_buf =
					    sctp_handle_cookie_echo(m, iphlen,
					    *offset, sh,
					    (struct sctp_cookie_echo_chunk *)ch,
					    &inp, &stcb, netp,
					    auth_skipped,
					    auth_offset,
					    auth_len,
					    &locked_tcb,
					    vrf_id);
				} else {
					ret_buf = NULL;
				}
				if (linp) {
					SCTP_ASOC_CREATE_UNLOCK(linp);
				}
				if (ret_buf == NULL) {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					SCTPDBG(SCTP_DEBUG_INPUT3,
					    "GAK, null buffer\n");
					auth_skipped = 0;
					*offset = length;
					return (NULL);
				}
				/* if AUTH skipped, see if it verified... */
				if (auth_skipped) {
					got_auth = 1;
					auth_skipped = 0;
				}
				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
					/*
					 * Restart the timer if we have
					 * pending data
					 */
					struct sctp_tmit_chunk *chk;

					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
					if (chk) {
						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
						    stcb->sctp_ep, stcb,
						    chk->whoTo);
					}
				}
			}
			break;
		case SCTP_COOKIE_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					;
				} else if (stcb) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(inp);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					*offset = length;
					return (NULL);
				}
			}
			/* He's alive so give him credit */
			if ((stcb) && netp && *netp) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
			}
			break;
		case SCTP_ECN_ECHO:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
			/* He's alive so give him credit */
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (stcb) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
				    stcb);
			}
			break;
		case SCTP_ECN_CWR:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
			/* He's alive so give him credit */
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (stcb) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
			}
			break;
		case SCTP_SHUTDOWN_COMPLETE:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
			/* must be first and only chunk */
			if ((num_chunks > 1) ||
			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((stcb) && netp && *netp) {
				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
				    stcb, *netp);
			}
			*offset = length;
			return (NULL);
			break;
		case SCTP_ASCONF:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
			/* He's alive so give him credit */
			if (stcb) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_asconf(m, *offset,
				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
				asconf_cnt++;
			}
			break;
		case SCTP_ASCONF_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if ((stcb) && netp && *netp) {
				/* He's alive so give him credit */
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_asconf_ack(m, *offset,
				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
				if (abort_no_unlock)
					return (NULL);
			}
			break;
		case SCTP_FORWARD_CUM_TSN:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			/* He's alive so give him credit */
			if (stcb) {
				int abort_flag = 0;

				stcb->asoc.overall_error_count = 0;
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				*fwd_tsn_seen = 1;
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* We are not interested anymore */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(inp);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					*offset = length;
					return (NULL);
				}
				sctp_handle_forward_tsn(stcb,
				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
				if (abort_flag) {
					*offset = length;
					return (NULL);
				} else {
					if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
						    stcb->asoc.overall_error_count,
						    0,
						    SCTP_FROM_SCTP_INPUT,
						    __LINE__);
					}
					stcb->asoc.overall_error_count = 0;
				}

			}
			break;
		case SCTP_STREAM_RESET:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				so = SCTP_INP_SO(inp);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
				*offset = length;
				return (NULL);
			}
			if (stcb->asoc.peer_supports_strreset == 0) {
				/*
				 * hmm, peer should have announced this, but
				 * we will turn it on since he is sending us
				 * a stream reset.
				 */
				stcb->asoc.peer_supports_strreset = 1;
			}
			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
				/* stop processing */
				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_PACKET_DROPPED:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
			/* re-get it all please */
			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (ch && (stcb) && netp && (*netp)) {
				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
				    stcb, *netp,
				    min(chk_length, (sizeof(chunk_buf) - 4)));

			}
			break;

		case SCTP_AUTHENTICATION:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
			if (sctp_auth_disable)
				goto unknown_chunk;

			if (stcb == NULL) {
				/* save the first AUTH for later processing */
				if (auth_skipped == 0) {
					auth_offset = *offset;
					auth_len = chk_length;
					auth_skipped = 1;
				}
				/* skip this chunk (temporarily) */
				goto next_chunk;
			}
			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
			    (chk_length > (sizeof(struct sctp_auth_chunk) +
			    SCTP_AUTH_DIGEST_LEN_MAX))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (got_auth == 1) {
				/* skip this chunk... it's already auth'd */
				goto next_chunk;
			}
			got_auth = 1;
			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
			    m, *offset)) {
				/* auth HMAC failed so dump the packet */
				*offset = length;
				return (stcb);
			} else {
				/* remaining chunks are HMAC checked */
				stcb->asoc.authenticated = 1;
			}
			break;

		default:
	unknown_chunk:
			/* it's an unknown chunk! */
			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
				/*
				 * Upper bit 0x40 set: report the
				 * unrecognized chunk to the peer in an
				 * operation error.
				 */
				struct mbuf *mm;
				struct sctp_paramhdr *phd;

				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (mm) {
					phd = mtod(mm, struct sctp_paramhdr *);
					/*
					 * We cheat and use param type since
					 * we did not bother to define a
					 * error cause struct. They are the
					 * same basic format with different
					 * names.
					 */
					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
					phd->param_length = htons(chk_length + sizeof(*phd));
					SCTP_BUF_LEN(mm) = sizeof(*phd);
					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
					    M_DONTWAIT);
					if (SCTP_BUF_NEXT(mm)) {
						sctp_queue_op_err(stcb, mm);
					} else {
						sctp_m_freem(mm);
					}
				}
			}
			if ((ch->chunk_type & 0x80) == 0) {
				/* discard this packet */
				*offset = length;
				return (stcb);
			}	/* else skip this bad chunk and continue... */
			break;
		}		/* switch (ch->chunk_type) */


next_chunk:
		/* get the next chunk */
		*offset += SCTP_SIZE32(chk_length);
		if (*offset >= length) {
			/* no more data left in the mbuf chain */
			break;
		}
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_chunkhdr), chunk_buf);
		if (ch == NULL) {
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			*offset = length;
			return (NULL);
		}
	}			/* while */

	/* one ASCONF-ACK covers all the ASCONFs seen in this packet */
	if (asconf_cnt > 0 && stcb != NULL) {
		sctp_send_asconf_ack(stcb);
	}
	return (stcb);
}


/*
 * Process the ECN bits we have something set so we must look to see if it is
 * ECN(0) or ECN(1) or CE
 */
static void
sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		/* CE handled elsewhere (see sctp_process_ecn_marked_b) */
		;
	} else if ((ecn_bits & SCTP_ECT1_BIT) ==
	    SCTP_ECT1_BIT) {
		/*
		 * we only add to the nonce sum for ECT1, ECT0 does not
		 * change the NS bit (that we have yet to find a way to send
		 * it yet).
		 */

		/* ECN Nonce stuff */
		stcb->asoc.receiver_nonce_sum++;
		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;

		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	}
}

/*
 * Second half of ECN processing: on a CE (congestion experienced) mark,
 * queue an ECN-Echo chunk back to the peer if high_tsn advances past the
 * last TSN we echoed for.
 */
static void
sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn, uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		/*
		 * we possibly must notify the sender that a congestion
		 * window reduction is in order. We do this by adding a ECNE
		 * chunk to the output chunk queue. The incoming CWR will
		 * remove this chunk.
4902 */ 4903 if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn, 4904 MAX_TSN)) { 4905 /* Yep, we need to add a ECNE */ 4906 sctp_send_ecn_echo(stcb, net, high_tsn); 4907 stcb->asoc.last_echo_tsn = high_tsn; 4908 } 4909 } 4910 } 4911 4912 #ifdef INVARIANTS 4913 static void 4914 sctp_validate_no_locks(struct sctp_inpcb *inp) 4915 { 4916 struct sctp_tcb *stcb; 4917 4918 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 4919 if (mtx_owned(&stcb->tcb_mtx)) { 4920 panic("Own lock on stcb at return from input"); 4921 } 4922 } 4923 } 4924 4925 #endif 4926 4927 /* 4928 * common input chunk processing (v4 and v6) 4929 */ 4930 void 4931 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, 4932 int length, struct sctphdr *sh, struct sctp_chunkhdr *ch, 4933 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, 4934 uint8_t ecn_bits, uint32_t vrf_id) 4935 { 4936 /* 4937 * Control chunk processing 4938 */ 4939 uint32_t high_tsn; 4940 int fwd_tsn_seen = 0, data_processed = 0; 4941 struct mbuf *m = *mm; 4942 int abort_flag = 0; 4943 int un_sent; 4944 4945 SCTP_STAT_INCR(sctps_recvdatagrams); 4946 #ifdef SCTP_AUDITING_ENABLED 4947 sctp_audit_log(0xE0, 1); 4948 sctp_auditing(0, inp, stcb, net); 4949 #endif 4950 4951 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d stcb:%p\n", 4952 m, iphlen, offset, stcb); 4953 if (stcb) { 4954 /* always clear this before beginning a packet */ 4955 stcb->asoc.authenticated = 0; 4956 stcb->asoc.seen_a_sack_this_pkt = 0; 4957 SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n", 4958 stcb, stcb->asoc.state); 4959 4960 if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) || 4961 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 4962 /*- 4963 * If we hit here, we had a ref count 4964 * up when the assoc was aborted and the 4965 * timer is clearing out the assoc, we should 4966 * NOT respond to any packet.. its OOTB. 
4967 */ 4968 SCTP_TCB_UNLOCK(stcb); 4969 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 4970 vrf_id); 4971 goto out_now; 4972 } 4973 } 4974 if (IS_SCTP_CONTROL(ch)) { 4975 /* process the control portion of the SCTP packet */ 4976 /* sa_ignore NO_NULL_CHK */ 4977 stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch, 4978 inp, stcb, &net, &fwd_tsn_seen, vrf_id); 4979 if (stcb) { 4980 /* 4981 * This covers us if the cookie-echo was there and 4982 * it changes our INP. 4983 */ 4984 inp = stcb->sctp_ep; 4985 } 4986 } else { 4987 /* 4988 * no control chunks, so pre-process DATA chunks (these 4989 * checks are taken care of by control processing) 4990 */ 4991 4992 /* 4993 * if DATA only packet, and auth is required, then punt... 4994 * can't have authenticated without any AUTH (control) 4995 * chunks 4996 */ 4997 if ((stcb != NULL) && !sctp_auth_disable && 4998 sctp_auth_is_required_chunk(SCTP_DATA, 4999 stcb->asoc.local_auth_chunks)) { 5000 /* "silently" ignore */ 5001 SCTP_STAT_INCR(sctps_recvauthmissing); 5002 SCTP_TCB_UNLOCK(stcb); 5003 goto out_now; 5004 } 5005 if (stcb == NULL) { 5006 /* out of the blue DATA chunk */ 5007 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 5008 vrf_id); 5009 goto out_now; 5010 } 5011 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) { 5012 /* v_tag mismatch! */ 5013 SCTP_STAT_INCR(sctps_badvtag); 5014 SCTP_TCB_UNLOCK(stcb); 5015 goto out_now; 5016 } 5017 } 5018 5019 if (stcb == NULL) { 5020 /* 5021 * no valid TCB for this packet, or we found it's a bad 5022 * packet while processing control, or we're done with this 5023 * packet (done or skip rest of data), so we drop it... 5024 */ 5025 goto out_now; 5026 } 5027 /* 5028 * DATA chunk processing 5029 */ 5030 /* plow through the data chunks while length > offset */ 5031 5032 /* 5033 * Rest should be DATA only. Check authentication state if AUTH for 5034 * DATA is required. 
5035 */ 5036 if ((length > offset) && (stcb != NULL) && !sctp_auth_disable && 5037 sctp_auth_is_required_chunk(SCTP_DATA, 5038 stcb->asoc.local_auth_chunks) && 5039 !stcb->asoc.authenticated) { 5040 /* "silently" ignore */ 5041 SCTP_STAT_INCR(sctps_recvauthmissing); 5042 SCTPDBG(SCTP_DEBUG_AUTH1, 5043 "Data chunk requires AUTH, skipped\n"); 5044 goto trigger_send; 5045 } 5046 if (length > offset) { 5047 int retval; 5048 5049 /* 5050 * First check to make sure our state is correct. We would 5051 * not get here unless we really did have a tag, so we don't 5052 * abort if this happens, just dump the chunk silently. 5053 */ 5054 switch (SCTP_GET_STATE(&stcb->asoc)) { 5055 case SCTP_STATE_COOKIE_ECHOED: 5056 /* 5057 * we consider data with valid tags in this state 5058 * shows us the cookie-ack was lost. Imply it was 5059 * there. 5060 */ 5061 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) { 5062 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5063 stcb->asoc.overall_error_count, 5064 0, 5065 SCTP_FROM_SCTP_INPUT, 5066 __LINE__); 5067 } 5068 stcb->asoc.overall_error_count = 0; 5069 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net); 5070 break; 5071 case SCTP_STATE_COOKIE_WAIT: 5072 /* 5073 * We consider OOTB any data sent during asoc setup. 5074 */ 5075 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 5076 vrf_id); 5077 SCTP_TCB_UNLOCK(stcb); 5078 goto out_now; 5079 /* sa_ignore NOTREACHED */ 5080 break; 5081 case SCTP_STATE_EMPTY: /* should not happen */ 5082 case SCTP_STATE_INUSE: /* should not happen */ 5083 case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */ 5084 case SCTP_STATE_SHUTDOWN_ACK_SENT: 5085 default: 5086 SCTP_TCB_UNLOCK(stcb); 5087 goto out_now; 5088 /* sa_ignore NOTREACHED */ 5089 break; 5090 case SCTP_STATE_OPEN: 5091 case SCTP_STATE_SHUTDOWN_SENT: 5092 break; 5093 } 5094 /* take care of ECN, part 1. 
*/ 5095 if (stcb->asoc.ecn_allowed && 5096 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) { 5097 sctp_process_ecn_marked_a(stcb, net, ecn_bits); 5098 } 5099 /* plow through the data chunks while length > offset */ 5100 retval = sctp_process_data(mm, iphlen, &offset, length, sh, 5101 inp, stcb, net, &high_tsn); 5102 if (retval == 2) { 5103 /* 5104 * The association aborted, NO UNLOCK needed since 5105 * the association is destroyed. 5106 */ 5107 goto out_now; 5108 } 5109 data_processed = 1; 5110 if (retval == 0) { 5111 /* take care of ecn part 2. */ 5112 if (stcb->asoc.ecn_allowed && 5113 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) { 5114 sctp_process_ecn_marked_b(stcb, net, high_tsn, 5115 ecn_bits); 5116 } 5117 } 5118 /* 5119 * Anything important needs to have been m_copy'ed in 5120 * process_data 5121 */ 5122 } 5123 if ((data_processed == 0) && (fwd_tsn_seen)) { 5124 int was_a_gap = 0; 5125 5126 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 5127 stcb->asoc.cumulative_tsn, MAX_TSN)) { 5128 /* there was a gap before this data was processed */ 5129 was_a_gap = 1; 5130 } 5131 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 5132 if (abort_flag) { 5133 /* Again, we aborted so NO UNLOCK needed */ 5134 goto out_now; 5135 } 5136 } 5137 /* trigger send of any chunks in queue... 
*/ 5138 trigger_send: 5139 #ifdef SCTP_AUDITING_ENABLED 5140 sctp_audit_log(0xE0, 2); 5141 sctp_auditing(1, inp, stcb, net); 5142 #endif 5143 SCTPDBG(SCTP_DEBUG_INPUT1, 5144 "Check for chunk output prw:%d tqe:%d tf=%d\n", 5145 stcb->asoc.peers_rwnd, 5146 TAILQ_EMPTY(&stcb->asoc.control_send_queue), 5147 stcb->asoc.total_flight); 5148 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 5149 5150 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) || 5151 ((un_sent) && 5152 (stcb->asoc.peers_rwnd > 0 || 5153 (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) { 5154 SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n"); 5155 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 5156 SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n"); 5157 } 5158 #ifdef SCTP_AUDITING_ENABLED 5159 sctp_audit_log(0xE0, 3); 5160 sctp_auditing(2, inp, stcb, net); 5161 #endif 5162 SCTP_TCB_UNLOCK(stcb); 5163 out_now: 5164 #ifdef INVARIANTS 5165 sctp_validate_no_locks(inp); 5166 #endif 5167 return; 5168 } 5169 5170 5171 5172 void 5173 sctp_input(i_pak, off) 5174 struct mbuf *i_pak; 5175 int off; 5176 5177 { 5178 #ifdef SCTP_MBUF_LOGGING 5179 struct mbuf *mat; 5180 5181 #endif 5182 struct mbuf *m; 5183 int iphlen; 5184 uint32_t vrf_id = 0; 5185 uint8_t ecn_bits; 5186 struct ip *ip; 5187 struct sctphdr *sh; 5188 struct sctp_inpcb *inp = NULL; 5189 5190 uint32_t check, calc_check; 5191 struct sctp_nets *net; 5192 struct sctp_tcb *stcb = NULL; 5193 struct sctp_chunkhdr *ch; 5194 int refcount_up = 0; 5195 int length, mlen, offset; 5196 5197 5198 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) { 5199 SCTP_RELEASE_PKT(i_pak); 5200 return; 5201 } 5202 mlen = SCTP_HEADER_LEN(i_pak); 5203 iphlen = off; 5204 m = SCTP_HEADER_TO_CHAIN(i_pak); 5205 5206 net = NULL; 5207 SCTP_STAT_INCR(sctps_recvpackets); 5208 SCTP_STAT_INCR_COUNTER64(sctps_inpackets); 5209 5210 5211 #ifdef SCTP_MBUF_LOGGING 5212 /* Log in any input mbufs */ 5213 if (sctp_logging_level & 
SCTP_MBUF_LOGGING_ENABLE) { 5214 mat = m; 5215 while (mat) { 5216 if (SCTP_BUF_IS_EXTENDED(mat)) { 5217 sctp_log_mb(mat, SCTP_MBUF_INPUT); 5218 } 5219 mat = SCTP_BUF_NEXT(mat); 5220 } 5221 } 5222 #endif 5223 #ifdef SCTP_PACKET_LOGGING 5224 if (sctp_logging_level & SCTP_LAST_PACKET_TRACING) 5225 sctp_packet_log(m, mlen); 5226 #endif 5227 /* 5228 * Must take out the iphlen, since mlen expects this (only effect lb 5229 * case) 5230 */ 5231 mlen -= iphlen; 5232 5233 /* 5234 * Get IP, SCTP, and first chunk header together in first mbuf. 5235 */ 5236 ip = mtod(m, struct ip *); 5237 offset = iphlen + sizeof(*sh) + sizeof(*ch); 5238 if (SCTP_BUF_LEN(m) < offset) { 5239 if ((m = m_pullup(m, offset)) == 0) { 5240 SCTP_STAT_INCR(sctps_hdrops); 5241 return; 5242 } 5243 ip = mtod(m, struct ip *); 5244 } 5245 sh = (struct sctphdr *)((caddr_t)ip + iphlen); 5246 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh)); 5247 SCTPDBG(SCTP_DEBUG_INPUT1, 5248 "sctp_input() length:%d iphlen:%d\n", mlen, iphlen); 5249 5250 /* SCTP does not allow broadcasts or multicasts */ 5251 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { 5252 goto bad; 5253 } 5254 if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) { 5255 /* 5256 * We only look at broadcast if its a front state, All 5257 * others we will not have a tcb for anyway. 
5258 */ 5259 goto bad; 5260 } 5261 /* validate SCTP checksum */ 5262 check = sh->checksum; /* save incoming checksum */ 5263 if ((check == 0) && (sctp_no_csum_on_loopback) && 5264 ((ip->ip_src.s_addr == ip->ip_dst.s_addr) || 5265 (SCTP_IS_IT_LOOPBACK(m))) 5266 ) { 5267 goto sctp_skip_csum_4; 5268 } 5269 sh->checksum = 0; /* prepare for calc */ 5270 calc_check = sctp_calculate_sum(m, &mlen, iphlen); 5271 if (calc_check != check) { 5272 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n", 5273 calc_check, check, m, mlen, iphlen); 5274 5275 stcb = sctp_findassociation_addr(m, iphlen, 5276 offset - sizeof(*ch), 5277 sh, ch, &inp, &net, 5278 vrf_id); 5279 if ((inp) && (stcb)) { 5280 sctp_send_packet_dropped(stcb, net, m, iphlen, 1); 5281 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED); 5282 } else if ((inp != NULL) && (stcb == NULL)) { 5283 refcount_up = 1; 5284 } 5285 SCTP_STAT_INCR(sctps_badsum); 5286 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors); 5287 goto bad; 5288 } 5289 sh->checksum = calc_check; 5290 sctp_skip_csum_4: 5291 /* destination port of 0 is illegal, based on RFC2960. */ 5292 if (sh->dest_port == 0) { 5293 SCTP_STAT_INCR(sctps_hdrops); 5294 goto bad; 5295 } 5296 /* validate mbuf chain length with IP payload length */ 5297 if (mlen < (ip->ip_len - iphlen)) { 5298 SCTP_STAT_INCR(sctps_hdrops); 5299 goto bad; 5300 } 5301 /* 5302 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants 5303 * IP/SCTP/first chunk header... 5304 */ 5305 stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch), 5306 sh, ch, &inp, &net, vrf_id); 5307 /* inp's ref-count increased && stcb locked */ 5308 if (inp == NULL) { 5309 struct sctp_init_chunk *init_chk, chunk_buf; 5310 5311 SCTP_STAT_INCR(sctps_noport); 5312 #ifdef ICMP_BANDLIM 5313 /* 5314 * we use the bandwidth limiting to protect against sending 5315 * too many ABORTS all at once. 
In this case these count the 5316 * same as an ICMP message. 5317 */ 5318 if (badport_bandlim(0) < 0) 5319 goto bad; 5320 #endif /* ICMP_BANDLIM */ 5321 SCTPDBG(SCTP_DEBUG_INPUT1, 5322 "Sending a ABORT from packet entry!\n"); 5323 if (ch->chunk_type == SCTP_INITIATION) { 5324 /* 5325 * we do a trick here to get the INIT tag, dig in 5326 * and get the tag from the INIT and put it in the 5327 * common header. 5328 */ 5329 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 5330 iphlen + sizeof(*sh), sizeof(*init_chk), 5331 (uint8_t *) & chunk_buf); 5332 if (init_chk != NULL) 5333 sh->v_tag = init_chk->init.initiate_tag; 5334 } 5335 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 5336 sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id); 5337 goto bad; 5338 } 5339 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) { 5340 goto bad; 5341 } 5342 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) 5343 sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id); 5344 goto bad; 5345 } else if (stcb == NULL) { 5346 refcount_up = 1; 5347 } 5348 #ifdef IPSEC 5349 /* 5350 * I very much doubt any of the IPSEC stuff will work but I have no 5351 * idea, so I will leave it in place. 
5352 */ 5353 if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) { 5354 ipsec4stat.in_polvio++; 5355 SCTP_STAT_INCR(sctps_hdrops); 5356 goto bad; 5357 } 5358 #endif /* IPSEC */ 5359 5360 /* 5361 * common chunk processing 5362 */ 5363 length = ip->ip_len + iphlen; 5364 offset -= sizeof(struct sctp_chunkhdr); 5365 5366 ecn_bits = ip->ip_tos; 5367 5368 /* sa_ignore NO_NULL_CHK */ 5369 sctp_common_input_processing(&m, iphlen, offset, length, sh, ch, 5370 inp, stcb, net, ecn_bits, vrf_id); 5371 /* inp's ref-count reduced && stcb unlocked */ 5372 if (m) { 5373 sctp_m_freem(m); 5374 } 5375 if ((inp) && (refcount_up)) { 5376 /* reduce ref-count */ 5377 SCTP_INP_DECR_REF(inp); 5378 } 5379 return; 5380 bad: 5381 if (stcb) { 5382 SCTP_TCB_UNLOCK(stcb); 5383 } 5384 if ((inp) && (refcount_up)) { 5385 /* reduce ref-count */ 5386 SCTP_INP_DECR_REF(inp); 5387 } 5388 if (m) { 5389 sctp_m_freem(m); 5390 } 5391 return; 5392 } 5393