/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
29 */ 30 31 /* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_var.h> 38 #include <netinet/sctp_sysctl.h> 39 #include <netinet/sctp_pcb.h> 40 #include <netinet/sctp_header.h> 41 #include <netinet/sctputil.h> 42 #include <netinet/sctp_output.h> 43 #include <netinet/sctp_input.h> 44 #include <netinet/sctp_auth.h> 45 #include <netinet/sctp_indata.h> 46 #include <netinet/sctp_asconf.h> 47 #include <netinet/sctp_bsd_addr.h> 48 #include <netinet/sctp_timer.h> 49 50 51 52 static void 53 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb) 54 { 55 struct sctp_nets *net; 56 57 /* 58 * This now not only stops all cookie timers it also stops any INIT 59 * timers as well. This will make sure that the timers are stopped 60 * in all collision cases. 61 */ 62 SCTP_TCB_LOCK_ASSERT(stcb); 63 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 64 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) { 65 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, 66 stcb->sctp_ep, 67 stcb, 68 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1); 69 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) { 70 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, 71 stcb->sctp_ep, 72 stcb, 73 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2); 74 } 75 } 76 } 77 78 /* INIT handler */ 79 static void 80 sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, 81 struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 82 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 83 { 84 struct sctp_init *init; 85 struct mbuf *op_err; 86 uint32_t init_limit; 87 88 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n", 89 stcb); 90 if (stcb == NULL) { 91 SCTP_INP_RLOCK(inp); 92 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 93 goto outnow; 94 } 95 } 96 op_err = NULL; 97 init = &cp->init; 98 /* First are we accepting? 
*/ 99 if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) { 100 SCTPDBG(SCTP_DEBUG_INPUT2, 101 "sctp_handle_init: Abort, so_qlimit:%d\n", 102 inp->sctp_socket->so_qlimit); 103 /* 104 * FIX ME ?? What about TCP model and we have a 105 * match/restart case? 106 */ 107 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 108 vrf_id); 109 if (stcb) 110 *abort_no_unlock = 1; 111 goto outnow; 112 } 113 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) { 114 /* Invalid length */ 115 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 116 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 117 vrf_id); 118 if (stcb) 119 *abort_no_unlock = 1; 120 goto outnow; 121 } 122 /* validate parameters */ 123 if (init->initiate_tag == 0) { 124 /* protocol error... send abort */ 125 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 126 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 127 vrf_id); 128 if (stcb) 129 *abort_no_unlock = 1; 130 goto outnow; 131 } 132 if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) { 133 /* invalid parameter... send abort */ 134 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 135 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 136 vrf_id); 137 if (stcb) 138 *abort_no_unlock = 1; 139 goto outnow; 140 } 141 if (init->num_inbound_streams == 0) { 142 /* protocol error... send abort */ 143 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 144 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 145 vrf_id); 146 if (stcb) 147 *abort_no_unlock = 1; 148 goto outnow; 149 } 150 if (init->num_outbound_streams == 0) { 151 /* protocol error... 
send abort */ 152 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 153 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 154 vrf_id); 155 if (stcb) 156 *abort_no_unlock = 1; 157 goto outnow; 158 } 159 init_limit = offset + ntohs(cp->ch.chunk_length); 160 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp), 161 init_limit)) { 162 /* auth parameter(s) error... send abort */ 163 sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id); 164 if (stcb) 165 *abort_no_unlock = 1; 166 goto outnow; 167 } 168 /* send an INIT-ACK w/cookie */ 169 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n"); 170 sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, 171 ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED)); 172 outnow: 173 if (stcb == NULL) { 174 SCTP_INP_RUNLOCK(inp); 175 } 176 } 177 178 /* 179 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error 180 */ 181 182 int 183 sctp_is_there_unsent_data(struct sctp_tcb *stcb) 184 { 185 int unsent_data = 0; 186 struct sctp_stream_queue_pending *sp; 187 struct sctp_stream_out *strq; 188 struct sctp_association *asoc; 189 190 /* 191 * This function returns the number of streams that have true unsent 192 * data on them. Note that as it looks through it will clean up any 193 * places that have old data that has been sent but left at top of 194 * stream queue. 195 */ 196 asoc = &stcb->asoc; 197 SCTP_TCB_SEND_LOCK(stcb); 198 if (!TAILQ_EMPTY(&asoc->out_wheel)) { 199 /* Check to see if some data queued */ 200 TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) { 201 is_there_another: 202 /* sa_ignore FREED_MEMORY */ 203 sp = TAILQ_FIRST(&strq->outqueue); 204 if (sp == NULL) { 205 continue; 206 } 207 if ((sp->msg_is_complete) && 208 (sp->length == 0) && 209 (sp->sender_all_done)) { 210 /* 211 * We are doing differed cleanup. Last time 212 * through when we took all the data the 213 * sender_all_done was not set. 
214 */ 215 if (sp->put_last_out == 0) { 216 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n"); 217 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n", 218 sp->sender_all_done, 219 sp->length, 220 sp->msg_is_complete, 221 sp->put_last_out); 222 } 223 atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1); 224 TAILQ_REMOVE(&strq->outqueue, sp, next); 225 sctp_free_remote_addr(sp->net); 226 if (sp->data) { 227 sctp_m_freem(sp->data); 228 sp->data = NULL; 229 } 230 sctp_free_a_strmoq(stcb, sp); 231 goto is_there_another; 232 } else { 233 unsent_data++; 234 continue; 235 } 236 } 237 } 238 SCTP_TCB_SEND_UNLOCK(stcb); 239 return (unsent_data); 240 } 241 242 static int 243 sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb, 244 struct sctp_nets *net) 245 { 246 struct sctp_init *init; 247 struct sctp_association *asoc; 248 struct sctp_nets *lnet; 249 unsigned int i; 250 251 init = &cp->init; 252 asoc = &stcb->asoc; 253 /* save off parameters */ 254 asoc->peer_vtag = ntohl(init->initiate_tag); 255 asoc->peers_rwnd = ntohl(init->a_rwnd); 256 if (TAILQ_FIRST(&asoc->nets)) { 257 /* update any ssthresh's that may have a default */ 258 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 259 lnet->ssthresh = asoc->peers_rwnd; 260 261 if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) { 262 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION); 263 } 264 } 265 } 266 SCTP_TCB_SEND_LOCK(stcb); 267 if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) { 268 unsigned int newcnt; 269 struct sctp_stream_out *outs; 270 struct sctp_stream_queue_pending *sp; 271 272 /* cut back on number of streams */ 273 newcnt = ntohs(init->num_inbound_streams); 274 /* This if is probably not needed but I am cautious */ 275 if (asoc->strmout) { 276 /* First make sure no data chunks are trapped */ 277 for (i = newcnt; i < asoc->pre_open_streams; i++) { 278 outs = &asoc->strmout[i]; 279 sp = TAILQ_FIRST(&outs->outqueue); 280 while (sp) { 
281 TAILQ_REMOVE(&outs->outqueue, sp, 282 next); 283 asoc->stream_queue_cnt--; 284 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, 285 stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, 286 sp, SCTP_SO_NOT_LOCKED); 287 if (sp->data) { 288 sctp_m_freem(sp->data); 289 sp->data = NULL; 290 } 291 sctp_free_remote_addr(sp->net); 292 sp->net = NULL; 293 /* Free the chunk */ 294 SCTP_PRINTF("sp:%p tcb:%p weird free case\n", 295 sp, stcb); 296 297 sctp_free_a_strmoq(stcb, sp); 298 /* sa_ignore FREED_MEMORY */ 299 sp = TAILQ_FIRST(&outs->outqueue); 300 } 301 } 302 } 303 /* cut back the count and abandon the upper streams */ 304 asoc->pre_open_streams = newcnt; 305 } 306 SCTP_TCB_SEND_UNLOCK(stcb); 307 asoc->streamoutcnt = asoc->pre_open_streams; 308 /* init tsn's */ 309 asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1; 310 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 311 sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 312 } 313 /* This is the next one we expect */ 314 asoc->str_reset_seq_in = asoc->asconf_seq_in + 1; 315 316 asoc->mapping_array_base_tsn = ntohl(init->initial_tsn); 317 asoc->cumulative_tsn = asoc->asconf_seq_in; 318 asoc->last_echo_tsn = asoc->asconf_seq_in; 319 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 320 /* open the requested streams */ 321 322 if (asoc->strmin != NULL) { 323 /* Free the old ones */ 324 struct sctp_queued_to_read *ctl; 325 326 for (i = 0; i < asoc->streamincnt; i++) { 327 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 328 while (ctl) { 329 TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next); 330 sctp_free_remote_addr(ctl->whoFrom); 331 ctl->whoFrom = NULL; 332 sctp_m_freem(ctl->data); 333 ctl->data = NULL; 334 sctp_free_a_readq(stcb, ctl); 335 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 336 } 337 } 338 SCTP_FREE(asoc->strmin, SCTP_M_STRMI); 339 } 340 asoc->streamincnt = ntohs(init->num_outbound_streams); 341 if (asoc->streamincnt > MAX_SCTP_STREAMS) { 342 asoc->streamincnt = 
MAX_SCTP_STREAMS; 343 } 344 SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt * 345 sizeof(struct sctp_stream_in), SCTP_M_STRMI); 346 if (asoc->strmin == NULL) { 347 /* we didn't get memory for the streams! */ 348 SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n"); 349 return (-1); 350 } 351 for (i = 0; i < asoc->streamincnt; i++) { 352 asoc->strmin[i].stream_no = i; 353 asoc->strmin[i].last_sequence_delivered = 0xffff; 354 /* 355 * U-stream ranges will be set when the cookie is unpacked. 356 * Or for the INIT sender they are un set (if pr-sctp not 357 * supported) when the INIT-ACK arrives. 358 */ 359 TAILQ_INIT(&asoc->strmin[i].inqueue); 360 asoc->strmin[i].delivery_started = 0; 361 } 362 /* 363 * load_address_from_init will put the addresses into the 364 * association when the COOKIE is processed or the INIT-ACK is 365 * processed. Both types of COOKIE's existing and new call this 366 * routine. It will remove addresses that are no longer in the 367 * association (for the restarting case where addresses are 368 * removed). Up front when the INIT arrives we will discard it if it 369 * is a restart and new addresses have been added. 
370 */ 371 /* sa_ignore MEMLEAK */ 372 return (0); 373 } 374 375 /* 376 * INIT-ACK message processing/consumption returns value < 0 on error 377 */ 378 static int 379 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, 380 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 381 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 382 { 383 struct sctp_association *asoc; 384 struct mbuf *op_err; 385 int retval, abort_flag; 386 uint32_t initack_limit; 387 388 /* First verify that we have no illegal param's */ 389 abort_flag = 0; 390 op_err = NULL; 391 392 op_err = sctp_arethere_unrecognized_parameters(m, 393 (offset + sizeof(struct sctp_init_chunk)), 394 &abort_flag, (struct sctp_chunkhdr *)cp); 395 if (abort_flag) { 396 /* Send an abort and notify peer */ 397 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED); 398 *abort_no_unlock = 1; 399 return (-1); 400 } 401 asoc = &stcb->asoc; 402 /* process the peer's parameters in the INIT-ACK */ 403 retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net); 404 if (retval < 0) { 405 return (retval); 406 } 407 initack_limit = offset + ntohs(cp->ch.chunk_length); 408 /* load all addresses */ 409 if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen, 410 (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh, 411 NULL))) { 412 /* Huh, we should abort */ 413 SCTPDBG(SCTP_DEBUG_INPUT1, 414 "Load addresses from INIT causes an abort %d\n", 415 retval); 416 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 417 NULL, 0); 418 *abort_no_unlock = 1; 419 return (-1); 420 } 421 /* if the peer doesn't support asconf, flush the asconf queue */ 422 if (asoc->peer_supports_asconf == 0) { 423 struct sctp_asconf_addr *aparam; 424 425 while (!TAILQ_EMPTY(&asoc->asconf_queue)) { 426 /* sa_ignore FREED_MEMORY */ 427 aparam = TAILQ_FIRST(&asoc->asconf_queue); 428 TAILQ_REMOVE(&asoc->asconf_queue, aparam, next); 429 
SCTP_FREE(aparam, SCTP_M_ASC_ADDR); 430 } 431 } 432 stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs, 433 stcb->asoc.local_hmacs); 434 if (op_err) { 435 sctp_queue_op_err(stcb, op_err); 436 /* queuing will steal away the mbuf chain to the out queue */ 437 op_err = NULL; 438 } 439 /* extract the cookie and queue it to "echo" it back... */ 440 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) { 441 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 442 stcb->asoc.overall_error_count, 443 0, 444 SCTP_FROM_SCTP_INPUT, 445 __LINE__); 446 } 447 stcb->asoc.overall_error_count = 0; 448 net->error_count = 0; 449 450 /* 451 * Cancel the INIT timer, We do this first before queueing the 452 * cookie. We always cancel at the primary to assue that we are 453 * canceling the timer started by the INIT which always goes to the 454 * primary. 455 */ 456 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb, 457 asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4); 458 459 /* calculate the RTO */ 460 net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy); 461 462 retval = sctp_send_cookie_echo(m, offset, stcb, net); 463 if (retval < 0) { 464 /* 465 * No cookie, we probably should send a op error. But in any 466 * case if there is no cookie in the INIT-ACK, we can 467 * abandon the peer, its broke. 
468 */ 469 if (retval == -3) { 470 /* We abort with an error of missing mandatory param */ 471 op_err = 472 sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM); 473 if (op_err) { 474 /* 475 * Expand beyond to include the mandatory 476 * param cookie 477 */ 478 struct sctp_inv_mandatory_param *mp; 479 480 SCTP_BUF_LEN(op_err) = 481 sizeof(struct sctp_inv_mandatory_param); 482 mp = mtod(op_err, 483 struct sctp_inv_mandatory_param *); 484 /* Subtract the reserved param */ 485 mp->length = 486 htons(sizeof(struct sctp_inv_mandatory_param) - 2); 487 mp->num_param = htonl(1); 488 mp->param = htons(SCTP_STATE_COOKIE); 489 mp->resv = 0; 490 } 491 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 492 sh, op_err, 0); 493 *abort_no_unlock = 1; 494 } 495 return (retval); 496 } 497 return (0); 498 } 499 500 static void 501 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp, 502 struct sctp_tcb *stcb, struct sctp_nets *net) 503 { 504 struct sockaddr_storage store; 505 struct sockaddr_in *sin; 506 struct sockaddr_in6 *sin6; 507 struct sctp_nets *r_net; 508 struct timeval tv; 509 int req_prim = 0; 510 511 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) { 512 /* Invalid length */ 513 return; 514 } 515 sin = (struct sockaddr_in *)&store; 516 sin6 = (struct sockaddr_in6 *)&store; 517 518 memset(&store, 0, sizeof(store)); 519 if (cp->heartbeat.hb_info.addr_family == AF_INET && 520 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) { 521 sin->sin_family = cp->heartbeat.hb_info.addr_family; 522 sin->sin_len = cp->heartbeat.hb_info.addr_len; 523 sin->sin_port = stcb->rport; 524 memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address, 525 sizeof(sin->sin_addr)); 526 } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 && 527 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) { 528 sin6->sin6_family = cp->heartbeat.hb_info.addr_family; 529 sin6->sin6_len = cp->heartbeat.hb_info.addr_len; 530 sin6->sin6_port = stcb->rport; 531 
memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address, 532 sizeof(sin6->sin6_addr)); 533 } else { 534 return; 535 } 536 r_net = sctp_findnet(stcb, (struct sockaddr *)sin); 537 if (r_net == NULL) { 538 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n"); 539 return; 540 } 541 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) && 542 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) && 543 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) { 544 /* 545 * If the its a HB and it's random value is correct when can 546 * confirm the destination. 547 */ 548 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 549 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) { 550 stcb->asoc.primary_destination = r_net; 551 r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY; 552 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 553 r_net = TAILQ_FIRST(&stcb->asoc.nets); 554 if (r_net != stcb->asoc.primary_destination) { 555 /* 556 * first one on the list is NOT the primary 557 * sctp_cmpaddr() is much more efficent if 558 * the primary is the first on the list, 559 * make it so. 560 */ 561 TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 562 TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 563 } 564 req_prim = 1; 565 } 566 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 567 stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED); 568 } 569 r_net->error_count = 0; 570 r_net->hb_responded = 1; 571 tv.tv_sec = cp->heartbeat.hb_info.time_value_1; 572 tv.tv_usec = cp->heartbeat.hb_info.time_value_2; 573 if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 574 r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 575 r_net->dest_state |= SCTP_ADDR_REACHABLE; 576 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 577 SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED); 578 /* now was it the primary? 
if so restore */ 579 if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 580 (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net); 581 } 582 } 583 /* 584 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state, 585 * set the destination to active state and set the cwnd to one or 586 * two MTU's based on whether PF1 or PF2 is being used. If a T3 587 * timer is running, for the destination, stop the timer because a 588 * PF-heartbeat was received. 589 */ 590 if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) == 591 SCTP_ADDR_PF) { 592 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 593 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 594 stcb, net, 595 SCTP_FROM_SCTP_INPUT + SCTP_LOC_5); 596 } 597 net->dest_state &= ~SCTP_ADDR_PF; 598 net->cwnd = net->mtu * sctp_cmt_pf; 599 SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n", 600 net, net->cwnd); 601 } 602 /* Now lets do a RTO with this */ 603 r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy); 604 /* Mobility adaptation */ 605 if (req_prim) { 606 if ((sctp_is_mobility_feature_on(stcb->sctp_ep, 607 SCTP_MOBILITY_BASE) || 608 sctp_is_mobility_feature_on(stcb->sctp_ep, 609 SCTP_MOBILITY_FASTHANDOFF)) && 610 sctp_is_mobility_feature_on(stcb->sctp_ep, 611 SCTP_MOBILITY_PRIM_DELETED)) { 612 613 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7); 614 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 615 SCTP_MOBILITY_FASTHANDOFF)) { 616 sctp_assoc_immediate_retrans(stcb, 617 stcb->asoc.primary_destination); 618 } 619 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 620 SCTP_MOBILITY_BASE)) { 621 sctp_move_chunks_from_deleted_prim(stcb, 622 stcb->asoc.primary_destination); 623 } 624 sctp_delete_prim_timer(stcb->sctp_ep, stcb, 625 stcb->asoc.deleted_primary); 626 } 627 } 628 } 629 630 static void 631 sctp_handle_abort(struct sctp_abort_chunk *cp, 632 
struct sctp_tcb *stcb, struct sctp_nets *net) 633 { 634 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 635 struct socket *so; 636 637 #endif 638 639 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n"); 640 if (stcb == NULL) 641 return; 642 643 /* stop any receive timers */ 644 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 645 /* notify user of the abort and clean up... */ 646 sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED); 647 /* free the tcb */ 648 #if defined(SCTP_PANIC_ON_ABORT) 649 printf("stcb:%p state:%d rport:%d net:%p\n", 650 stcb, stcb->asoc.state, stcb->rport, net); 651 if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 652 panic("Received an ABORT"); 653 } else { 654 printf("No panic its in state %x closed\n", stcb->asoc.state); 655 } 656 #endif 657 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 658 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 659 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 660 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 661 } 662 #ifdef SCTP_ASOCLOG_OF_TSNS 663 sctp_print_out_track_log(stcb); 664 #endif 665 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 666 so = SCTP_INP_SO(stcb->sctp_ep); 667 atomic_add_int(&stcb->asoc.refcnt, 1); 668 SCTP_TCB_UNLOCK(stcb); 669 SCTP_SOCKET_LOCK(so, 1); 670 SCTP_TCB_LOCK(stcb); 671 atomic_subtract_int(&stcb->asoc.refcnt, 1); 672 #endif 673 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 674 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 675 SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 676 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 677 SCTP_SOCKET_UNLOCK(so, 1); 678 #endif 679 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n"); 680 } 681 682 static void 683 sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, 684 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag) 685 { 686 struct sctp_association *asoc; 687 int some_on_streamwheel; 688 689 #if defined 
(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 690 struct socket *so; 691 692 #endif 693 694 SCTPDBG(SCTP_DEBUG_INPUT2, 695 "sctp_handle_shutdown: handling SHUTDOWN\n"); 696 if (stcb == NULL) 697 return; 698 asoc = &stcb->asoc; 699 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 700 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 701 return; 702 } 703 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) { 704 /* Shutdown NOT the expected size */ 705 return; 706 } else { 707 sctp_update_acked(stcb, cp, net, abort_flag); 708 } 709 if (asoc->control_pdapi) { 710 /* 711 * With a normal shutdown we assume the end of last record. 712 */ 713 SCTP_INP_READ_LOCK(stcb->sctp_ep); 714 asoc->control_pdapi->end_added = 1; 715 asoc->control_pdapi->pdapi_aborted = 1; 716 asoc->control_pdapi = NULL; 717 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 718 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 719 so = SCTP_INP_SO(stcb->sctp_ep); 720 atomic_add_int(&stcb->asoc.refcnt, 1); 721 SCTP_TCB_UNLOCK(stcb); 722 SCTP_SOCKET_LOCK(so, 1); 723 SCTP_TCB_LOCK(stcb); 724 atomic_subtract_int(&stcb->asoc.refcnt, 1); 725 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 726 /* assoc was freed while we were unlocked */ 727 SCTP_SOCKET_UNLOCK(so, 1); 728 return; 729 } 730 #endif 731 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 732 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 733 SCTP_SOCKET_UNLOCK(so, 1); 734 #endif 735 } 736 /* goto SHUTDOWN_RECEIVED state to block new requests */ 737 if (stcb->sctp_socket) { 738 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 739 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) && 740 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 741 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED); 742 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 743 /* 744 * notify upper layer that peer has initiated a 745 * shutdown 746 */ 747 sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, 
SCTP_SO_NOT_LOCKED); 748 749 /* reset time */ 750 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 751 } 752 } 753 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 754 /* 755 * stop the shutdown timer, since we WILL move to 756 * SHUTDOWN-ACK-SENT. 757 */ 758 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8); 759 } 760 /* Now is there unsent data on a stream somewhere? */ 761 some_on_streamwheel = sctp_is_there_unsent_data(stcb); 762 763 if (!TAILQ_EMPTY(&asoc->send_queue) || 764 !TAILQ_EMPTY(&asoc->sent_queue) || 765 some_on_streamwheel) { 766 /* By returning we will push more data out */ 767 return; 768 } else { 769 /* no outstanding data to send, so move on... */ 770 /* send SHUTDOWN-ACK */ 771 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 772 /* move to SHUTDOWN-ACK-SENT state */ 773 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 774 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 775 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 776 } 777 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 778 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 779 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, 780 SCTP_FROM_SCTP_INPUT + SCTP_LOC_7); 781 /* start SHUTDOWN timer */ 782 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, 783 stcb, net); 784 } 785 } 786 787 static void 788 sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp, 789 struct sctp_tcb *stcb, struct sctp_nets *net) 790 { 791 struct sctp_association *asoc; 792 793 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 794 struct socket *so; 795 796 so = SCTP_INP_SO(stcb->sctp_ep); 797 #endif 798 SCTPDBG(SCTP_DEBUG_INPUT2, 799 "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n"); 800 if (stcb == NULL) 801 return; 802 803 asoc = &stcb->asoc; 804 /* process according to association state */ 805 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 806 (SCTP_GET_STATE(asoc) != 
SCTP_STATE_SHUTDOWN_ACK_SENT)) { 807 /* unexpected SHUTDOWN-ACK... so ignore... */ 808 SCTP_TCB_UNLOCK(stcb); 809 return; 810 } 811 if (asoc->control_pdapi) { 812 /* 813 * With a normal shutdown we assume the end of last record. 814 */ 815 SCTP_INP_READ_LOCK(stcb->sctp_ep); 816 asoc->control_pdapi->end_added = 1; 817 asoc->control_pdapi->pdapi_aborted = 1; 818 asoc->control_pdapi = NULL; 819 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 820 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 821 atomic_add_int(&stcb->asoc.refcnt, 1); 822 SCTP_TCB_UNLOCK(stcb); 823 SCTP_SOCKET_LOCK(so, 1); 824 SCTP_TCB_LOCK(stcb); 825 atomic_subtract_int(&stcb->asoc.refcnt, 1); 826 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 827 /* assoc was freed while we were unlocked */ 828 SCTP_SOCKET_UNLOCK(so, 1); 829 return; 830 } 831 #endif 832 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 833 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 834 SCTP_SOCKET_UNLOCK(so, 1); 835 #endif 836 } 837 /* are the queues empty? 
*/ 838 if (!TAILQ_EMPTY(&asoc->send_queue) || 839 !TAILQ_EMPTY(&asoc->sent_queue) || 840 !TAILQ_EMPTY(&asoc->out_wheel)) { 841 sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED); 842 } 843 /* stop the timer */ 844 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9); 845 /* send SHUTDOWN-COMPLETE */ 846 sctp_send_shutdown_complete(stcb, net); 847 /* notify upper layer protocol */ 848 if (stcb->sctp_socket) { 849 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 850 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 851 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 852 /* Set the connected flag to disconnected */ 853 stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0; 854 } 855 } 856 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 857 /* free the TCB but first save off the ep */ 858 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 859 atomic_add_int(&stcb->asoc.refcnt, 1); 860 SCTP_TCB_UNLOCK(stcb); 861 SCTP_SOCKET_LOCK(so, 1); 862 SCTP_TCB_LOCK(stcb); 863 atomic_subtract_int(&stcb->asoc.refcnt, 1); 864 #endif 865 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 866 SCTP_FROM_SCTP_INPUT + SCTP_LOC_10); 867 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 868 SCTP_SOCKET_UNLOCK(so, 1); 869 #endif 870 } 871 872 /* 873 * Skip past the param header and then we will find the chunk that caused the 874 * problem. There are two possiblities ASCONF or FWD-TSN other than that and 875 * our peer must be broken. 
876 */ 877 static void 878 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr, 879 struct sctp_nets *net) 880 { 881 struct sctp_chunkhdr *chk; 882 883 chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr)); 884 switch (chk->chunk_type) { 885 case SCTP_ASCONF_ACK: 886 case SCTP_ASCONF: 887 sctp_asconf_cleanup(stcb, net); 888 break; 889 case SCTP_FORWARD_CUM_TSN: 890 stcb->asoc.peer_supports_prsctp = 0; 891 break; 892 default: 893 SCTPDBG(SCTP_DEBUG_INPUT2, 894 "Peer does not support chunk type %d(%x)??\n", 895 chk->chunk_type, (uint32_t) chk->chunk_type); 896 break; 897 } 898 } 899 900 /* 901 * Skip past the param header and then we will find the param that caused the 902 * problem. There are a number of param's in a ASCONF OR the prsctp param 903 * these will turn of specific features. 904 */ 905 static void 906 sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr) 907 { 908 struct sctp_paramhdr *pbad; 909 910 pbad = phdr + 1; 911 switch (ntohs(pbad->param_type)) { 912 /* pr-sctp draft */ 913 case SCTP_PRSCTP_SUPPORTED: 914 stcb->asoc.peer_supports_prsctp = 0; 915 break; 916 case SCTP_SUPPORTED_CHUNK_EXT: 917 break; 918 /* draft-ietf-tsvwg-addip-sctp */ 919 case SCTP_ECN_NONCE_SUPPORTED: 920 stcb->asoc.peer_supports_ecn_nonce = 0; 921 stcb->asoc.ecn_nonce_allowed = 0; 922 stcb->asoc.ecn_allowed = 0; 923 break; 924 case SCTP_ADD_IP_ADDRESS: 925 case SCTP_DEL_IP_ADDRESS: 926 case SCTP_SET_PRIM_ADDR: 927 stcb->asoc.peer_supports_asconf = 0; 928 break; 929 case SCTP_SUCCESS_REPORT: 930 case SCTP_ERROR_CAUSE_IND: 931 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? 
or error cause?\n"); 932 SCTPDBG(SCTP_DEBUG_INPUT2, 933 "Turning off ASCONF to this strange peer\n"); 934 stcb->asoc.peer_supports_asconf = 0; 935 break; 936 default: 937 SCTPDBG(SCTP_DEBUG_INPUT2, 938 "Peer does not support param type %d(%x)??\n", 939 pbad->param_type, (uint32_t) pbad->param_type); 940 break; 941 } 942 } 943 944 static int 945 sctp_handle_error(struct sctp_chunkhdr *ch, 946 struct sctp_tcb *stcb, struct sctp_nets *net) 947 { 948 int chklen; 949 struct sctp_paramhdr *phdr; 950 uint16_t error_type; 951 uint16_t error_len; 952 struct sctp_association *asoc; 953 int adjust; 954 955 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 956 struct socket *so; 957 958 #endif 959 960 /* parse through all of the errors and process */ 961 asoc = &stcb->asoc; 962 phdr = (struct sctp_paramhdr *)((caddr_t)ch + 963 sizeof(struct sctp_chunkhdr)); 964 chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr); 965 while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) { 966 /* Process an Error Cause */ 967 error_type = ntohs(phdr->param_type); 968 error_len = ntohs(phdr->param_length); 969 if ((error_len > chklen) || (error_len == 0)) { 970 /* invalid param length for this param */ 971 SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n", 972 chklen, error_len); 973 return (0); 974 } 975 switch (error_type) { 976 case SCTP_CAUSE_INVALID_STREAM: 977 case SCTP_CAUSE_MISSING_PARAM: 978 case SCTP_CAUSE_INVALID_PARAM: 979 case SCTP_CAUSE_NO_USER_DATA: 980 SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n", 981 error_type); 982 break; 983 case SCTP_CAUSE_STALE_COOKIE: 984 /* 985 * We only act if we have echoed a cookie and are 986 * waiting. 
987 */ 988 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 989 int *p; 990 991 p = (int *)((caddr_t)phdr + sizeof(*phdr)); 992 /* Save the time doubled */ 993 asoc->cookie_preserve_req = ntohl(*p) << 1; 994 asoc->stale_cookie_count++; 995 if (asoc->stale_cookie_count > 996 asoc->max_init_times) { 997 sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED); 998 /* now free the asoc */ 999 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1000 so = SCTP_INP_SO(stcb->sctp_ep); 1001 atomic_add_int(&stcb->asoc.refcnt, 1); 1002 SCTP_TCB_UNLOCK(stcb); 1003 SCTP_SOCKET_LOCK(so, 1); 1004 SCTP_TCB_LOCK(stcb); 1005 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1006 #endif 1007 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1008 SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); 1009 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1010 SCTP_SOCKET_UNLOCK(so, 1); 1011 #endif 1012 return (-1); 1013 } 1014 /* blast back to INIT state */ 1015 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED; 1016 asoc->state |= SCTP_STATE_COOKIE_WAIT; 1017 1018 sctp_stop_all_cookie_timers(stcb); 1019 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1020 } 1021 break; 1022 case SCTP_CAUSE_UNRESOLVABLE_ADDR: 1023 /* 1024 * Nothing we can do here, we don't do hostname 1025 * addresses so if the peer does not like my IPv6 1026 * (or IPv4 for that matter) it does not matter. If 1027 * they don't support that type of address, they can 1028 * NOT possibly get that packet type... i.e. with no 1029 * IPv6 you can't recieve a IPv6 packet. so we can 1030 * safely ignore this one. If we ever added support 1031 * for HOSTNAME Addresses, then we would need to do 1032 * something here. 
1033 */ 1034 break; 1035 case SCTP_CAUSE_UNRECOG_CHUNK: 1036 sctp_process_unrecog_chunk(stcb, phdr, net); 1037 break; 1038 case SCTP_CAUSE_UNRECOG_PARAM: 1039 sctp_process_unrecog_param(stcb, phdr); 1040 break; 1041 case SCTP_CAUSE_COOKIE_IN_SHUTDOWN: 1042 /* 1043 * We ignore this since the timer will drive out a 1044 * new cookie anyway and there timer will drive us 1045 * to send a SHUTDOWN_COMPLETE. We can't send one 1046 * here since we don't have their tag. 1047 */ 1048 break; 1049 case SCTP_CAUSE_DELETING_LAST_ADDR: 1050 case SCTP_CAUSE_RESOURCE_SHORTAGE: 1051 case SCTP_CAUSE_DELETING_SRC_ADDR: 1052 /* 1053 * We should NOT get these here, but in a 1054 * ASCONF-ACK. 1055 */ 1056 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n", 1057 error_type); 1058 break; 1059 case SCTP_CAUSE_OUT_OF_RESC: 1060 /* 1061 * And what, pray tell do we do with the fact that 1062 * the peer is out of resources? Not really sure we 1063 * could do anything but abort. I suspect this 1064 * should have came WITH an abort instead of in a 1065 * OP-ERROR. 
1066 */ 1067 break; 1068 default: 1069 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n", 1070 error_type); 1071 break; 1072 } 1073 adjust = SCTP_SIZE32(error_len); 1074 chklen -= adjust; 1075 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust); 1076 } 1077 return (0); 1078 } 1079 1080 static int 1081 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, 1082 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 1083 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 1084 { 1085 struct sctp_init_ack *init_ack; 1086 struct mbuf *op_err; 1087 1088 SCTPDBG(SCTP_DEBUG_INPUT2, 1089 "sctp_handle_init_ack: handling INIT-ACK\n"); 1090 1091 if (stcb == NULL) { 1092 SCTPDBG(SCTP_DEBUG_INPUT2, 1093 "sctp_handle_init_ack: TCB is null\n"); 1094 return (-1); 1095 } 1096 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) { 1097 /* Invalid length */ 1098 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1099 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1100 op_err, 0); 1101 *abort_no_unlock = 1; 1102 return (-1); 1103 } 1104 init_ack = &cp->init; 1105 /* validate parameters */ 1106 if (init_ack->initiate_tag == 0) { 1107 /* protocol error... send an abort */ 1108 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1109 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1110 op_err, 0); 1111 *abort_no_unlock = 1; 1112 return (-1); 1113 } 1114 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) { 1115 /* protocol error... send an abort */ 1116 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1117 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1118 op_err, 0); 1119 *abort_no_unlock = 1; 1120 return (-1); 1121 } 1122 if (init_ack->num_inbound_streams == 0) { 1123 /* protocol error... 
send an abort */ 1124 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1125 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1126 op_err, 0); 1127 *abort_no_unlock = 1; 1128 return (-1); 1129 } 1130 if (init_ack->num_outbound_streams == 0) { 1131 /* protocol error... send an abort */ 1132 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1133 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1134 op_err, 0); 1135 *abort_no_unlock = 1; 1136 return (-1); 1137 } 1138 /* process according to association state... */ 1139 switch (stcb->asoc.state & SCTP_STATE_MASK) { 1140 case SCTP_STATE_COOKIE_WAIT: 1141 /* this is the expected state for this chunk */ 1142 /* process the INIT-ACK parameters */ 1143 if (stcb->asoc.primary_destination->dest_state & 1144 SCTP_ADDR_UNCONFIRMED) { 1145 /* 1146 * The primary is where we sent the INIT, we can 1147 * always consider it confirmed when the INIT-ACK is 1148 * returned. Do this before we load addresses 1149 * though. 
1150 */ 1151 stcb->asoc.primary_destination->dest_state &= 1152 ~SCTP_ADDR_UNCONFIRMED; 1153 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 1154 stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED); 1155 } 1156 if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb, 1157 net, abort_no_unlock, vrf_id) < 0) { 1158 /* error in parsing parameters */ 1159 return (-1); 1160 } 1161 /* update our state */ 1162 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n"); 1163 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED); 1164 1165 /* reset the RTO calc */ 1166 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) { 1167 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 1168 stcb->asoc.overall_error_count, 1169 0, 1170 SCTP_FROM_SCTP_INPUT, 1171 __LINE__); 1172 } 1173 stcb->asoc.overall_error_count = 0; 1174 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 1175 /* 1176 * collapse the init timer back in case of a exponential 1177 * backoff 1178 */ 1179 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep, 1180 stcb, net); 1181 /* 1182 * the send at the end of the inbound data processing will 1183 * cause the cookie to be sent 1184 */ 1185 break; 1186 case SCTP_STATE_SHUTDOWN_SENT: 1187 /* incorrect state... discard */ 1188 break; 1189 case SCTP_STATE_COOKIE_ECHOED: 1190 /* incorrect state... discard */ 1191 break; 1192 case SCTP_STATE_OPEN: 1193 /* incorrect state... discard */ 1194 break; 1195 case SCTP_STATE_EMPTY: 1196 case SCTP_STATE_INUSE: 1197 default: 1198 /* incorrect state... 
discard */
		return (-1);
		break;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
	return (0);
}


/*
 * Handle a state cookie for an existing association (retransmission or
 * one of the RFC 4960 5.2.4 collision cases).  m: input packet mbuf
 * chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk; note: this is a
 * "split" mbuf and the cookie signature does not exist.  offset: offset
 * into mbuf to the cookie-echo chunk.  Returns stcb when the cookie is
 * accepted, NULL when it is discarded.
 */
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	int chk_length;
	int init_offset, initack_offset, i;
	int retval;
	int spec_flag = 0;
	uint32_t how_indx;

	/* I know that the TCB is non-NULL from the caller */
	asoc = &stcb->asoc;
	/* record which 5.2.4 path we take in the cookie_how debug trace */
	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
		if (asoc->cookie_how[how_indx] == 0)
			break;
	}
	if (how_indx < sizeof(asoc->cookie_how)) {
		asoc->cookie_how[how_indx] = 1;
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* SHUTDOWN came in after sending INIT-ACK */
		struct mbuf *op_err;
		struct sctp_paramhdr *ph;

		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (op_err == NULL) {
			/* FOOBAR */
			return (NULL);
		}
		/* pre-reserve some space for the IP/SCTP/chunk headers */
		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
		/* Set the len */
		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
		ph = mtod(op_err, struct sctp_paramhdr *);
		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
		ph->param_length = htons(sizeof(struct sctp_paramhdr));
		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
		    vrf_id);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 2;
		return (NULL);
	}
	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	/* note: also advances the local copy of 'offset' (pass-by-value) */
	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);

	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		return (NULL);
	}
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
		/*
		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
		 * to get into the OPEN state
		 */
		if (ntohl(initack_cp->init.initial_tsn) !=
		    asoc->init_seq_number) {
			/*-
			 * Opps, this means that we somehow generated two vtag's
			 * the same. I.e. we did:
			 *  Us               Peer
			 *   <---INIT(tag=a)------
			 *   ----INIT-ACK(tag=t)-->
			 *   ----INIT(tag=t)------> *1
			 *   <---INIT-ACK(tag=a)---
			 *   <----CE(tag=t)------------- *2
			 *
			 * At point *1 we should be generating a different
			 * tag t'. Which means we would throw away the CE and send
			 * ours instead. Basically this is case C (throw away side).
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 17;
			return (NULL);

		}
		switch SCTP_GET_STATE(asoc) {
		case SCTP_STATE_COOKIE_WAIT:
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * INIT was sent but got a COOKIE_ECHO with the
			 * correct tags... just accept it...but we must
			 * process the init so that we can make sure we have
			 * the right seq no's.
			 */
			/* First we must process the INIT !! */
			retval = sctp_process_init(init_cp, stcb, net);
			if (retval < 0) {
				if (how_indx < sizeof(asoc->cookie_how))
					asoc->cookie_how[how_indx] = 3;
				return (NULL);
			}
			/* we have already processed the INIT so no problem */
			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
			/* update current state */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);

			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
			}
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
			sctp_stop_all_cookie_timers(stcb);
			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)
			    ) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				struct socket *so;

#endif
				/*
				 * Here is where collision would go if we
				 * did a connect() and instead got a
				 * init/init-ack/cookie done before the
				 * init-ack came back..
				 */
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				/*
				 * take refcnt, drop TCB lock, take socket
				 * lock, retake TCB lock - preserves lock
				 * order for soisconnected()
				 */
				so = SCTP_INP_SO(stcb->sctp_ep);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return (NULL);
				}
#endif
				soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
			}
			/* notify upper layer */
			*notification = SCTP_NOTIFY_ASSOC_UP;
			/*
			 * since we did not send a HB make sure we don't
			 * double things
			 */
			net->hb_responded = 1;
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &cookie->time_entered, sctp_align_unsafe_makecopy);

			if (stcb->asoc.sctp_autoclose_ticks &&
			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
				    inp, stcb, NULL);
			}
			break;
		default:
			/*
			 * we're in the OPEN state (or beyond), so peer must
			 * have simply lost the COOKIE-ACK
			 */
			break;
		}		/* end switch */
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * We ignore the return code here.. not sure if we should
		 * somehow abort.. but we do have an existing asoc. This
		 * really should not fail.
	 */
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 4;
			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 5;
		return (stcb);
	}
	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
	    cookie->tie_tag_my_vtag == 0 &&
	    cookie->tie_tag_peer_vtag == 0) {
		/*
		 * case C in Section 5.2.4 Table 2: XMOO silently discard
		 */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 6;
		return (NULL);
	}
	if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
	    (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
	    init_cp->init.initiate_tag == 0)) {
		/*
		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
		 * should be ok, re-accept peer info
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*
			 * Extension of case C. If we hit this, then the
			 * random number generator returned the same vtag
			 * when we first sent our INIT-ACK and when we later
			 * sent our INIT. The side with the seq numbers that
			 * are different will be the one that normally
			 * would have hit case C. This in effect "extends"
			 * our vtags in this collision case to be 64 bits.
			 * The same collision could occur aka you get both
			 * vtag and seq number the same twice in a row.. but
			 * is much less likely. If it did happen then we
			 * would proceed through and bring up the assoc.. we
			 * may end up with the wrong stream setup however..
			 * which would be bad.. but there is no way to
			 * tell.. until we send on a stream that does not
			 * exist :-)
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 7;

			return (NULL);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 8;
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;
		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    NULL);
		}
		/* re-accept the peer's rwnd and stream counts */
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);

		/* Note last_cwr_tsn? where is this used? */
		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
			/*
			 * Ok the peer probably discarded our data (if we
			 * echoed a cookie+data). So anything on the
			 * sent_queue should be marked for retransmit, we
			 * may not get something to kick us so it COULD
			 * still take a timeout to move these.. but it can't
			 * hurt to mark them.
		 */
			struct sctp_tmit_chunk *chk;

			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if (chk->sent < SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_flight_size_decrease(chk);
					sctp_total_flight_decrease(stcb, chk);
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
					/* remember we marked something */
					spec_flag++;
				}
			}

		}
		/* process the INIT info (peer's info) */
		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 9;
			return (NULL);
		}
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 10;
			return (NULL);
		}
		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
			*notification = SCTP_NOTIFY_ASSOC_UP;

			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				struct socket *so;

#endif
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				/*
				 * refcnt + lock juggle so the socket lock
				 * is taken before the TCB lock for
				 * soisconnected()
				 */
				so = SCTP_INP_SO(stcb->sctp_ep);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return (NULL);
				}
#endif
				soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
			}
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
		} else {
			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);
		}
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (spec_flag) {
			/*
			 * only if we have retrans set do we do this. What
			 * this call does is get only the COOKIE-ACK out and
			 * then when we return the normal call to
			 * sctp_chunk_output will get the retrans out behind
			 * this.
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 11;

		return (stcb);
	}
	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
	    cookie->tie_tag_peer_vtag != 0) {
		struct sctpasochead *head;

		/*
		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
		 */
		/* temp code */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 12;
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);

		*sac_assoc_id = sctp_get_associd(stcb);
		/* notify upper layer */
		*notification = SCTP_NOTIFY_ASSOC_RESTART;
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		}
		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
		}
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
			/* move to OPEN state, if not in SHUTDOWN_SENT */
			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		}
		/* reset sequence state from the (restarted) cookie */
		asoc->pre_open_streams =
		    ntohs(initack_cp->init.num_outbound_streams);
		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;

		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;

		asoc->str_reset_seq_in = asoc->init_seq_number;

		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
		if (asoc->mapping_array) {
			memset(asoc->mapping_array, 0,
			    asoc->mapping_array_size);
		}
		/*
		 * re-hash under INP_INFO/INP write locks; refcnt (taken
		 * above) keeps stcb alive while the TCB lock is dropped
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_INFO_WLOCK();
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* send up all the data */
		SCTP_TCB_SEND_LOCK(stcb);

		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].stream_no = i;
			stcb->asoc.strmout[i].next_sequence_sent = 0;
			stcb->asoc.strmout[i].last_msg_incomplete = 0;
		}
		/* process the
 INIT-ACK info (my info) */
		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);

		/* pull from vtag hash */
		LIST_REMOVE(stcb, sctp_asocs);
		/* re-insert to new vtag position */
		head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
		    sctppcbinfo.hashasocmark)];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);

		/* Is this the first restart? */
		if (stcb->asoc.in_restart_hash == 0) {
			/* Ok add it to assoc_id vtag hash */
			head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
			    sctppcbinfo.hashrestartmark)];
			LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
			stcb->asoc.in_restart_hash = 1;
		}
		/* done re-hashing; drop the write locks taken above */
		SCTP_TCB_SEND_UNLOCK(stcb);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		SCTP_INP_INFO_WUNLOCK();

		/* process the INIT info (peer's info) */
		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 13;

			return (NULL);
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 14;

			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 15;

		return (stcb);
	}
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 16;
	/* all other cases...
*/ 1723 return (NULL); 1724 } 1725 1726 1727 /* 1728 * handle a state cookie for a new association m: input packet mbuf chain-- 1729 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf 1730 * and the cookie signature does not exist offset: offset into mbuf to the 1731 * cookie-echo chunk length: length of the cookie chunk to: where the init 1732 * was from returns a new TCB 1733 */ 1734 static struct sctp_tcb * 1735 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, 1736 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1737 struct sctp_inpcb *inp, struct sctp_nets **netp, 1738 struct sockaddr *init_src, int *notification, 1739 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1740 uint32_t vrf_id) 1741 { 1742 struct sctp_tcb *stcb; 1743 struct sctp_init_chunk *init_cp, init_buf; 1744 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1745 struct sockaddr_storage sa_store; 1746 struct sockaddr *initack_src = (struct sockaddr *)&sa_store; 1747 struct sockaddr_in *sin; 1748 struct sockaddr_in6 *sin6; 1749 struct sctp_association *asoc; 1750 int chk_length; 1751 int init_offset, initack_offset, initack_limit; 1752 int retval; 1753 int error = 0; 1754 uint32_t old_tag; 1755 uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE]; 1756 1757 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1758 struct socket *so; 1759 1760 so = SCTP_INP_SO(inp); 1761 #endif 1762 1763 /* 1764 * find and validate the INIT chunk in the cookie (peer's info) the 1765 * INIT should start after the cookie-echo header struct (chunk 1766 * header, state cookie header struct) 1767 */ 1768 init_offset = offset + sizeof(struct sctp_cookie_echo_chunk); 1769 init_cp = (struct sctp_init_chunk *) 1770 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1771 (uint8_t *) & init_buf); 1772 if (init_cp == NULL) { 1773 /* could not pull a INIT chunk in cookie */ 1774 SCTPDBG(SCTP_DEBUG_INPUT1, 1775 "process_cookie_new: could not 
pull INIT chunk hdr\n"); 1776 return (NULL); 1777 } 1778 chk_length = ntohs(init_cp->ch.chunk_length); 1779 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1780 SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n"); 1781 return (NULL); 1782 } 1783 initack_offset = init_offset + SCTP_SIZE32(chk_length); 1784 /* 1785 * find and validate the INIT-ACK chunk in the cookie (my info) the 1786 * INIT-ACK follows the INIT chunk 1787 */ 1788 initack_cp = (struct sctp_init_ack_chunk *) 1789 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1790 (uint8_t *) & initack_buf); 1791 if (initack_cp == NULL) { 1792 /* could not pull INIT-ACK chunk in cookie */ 1793 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n"); 1794 return (NULL); 1795 } 1796 chk_length = ntohs(initack_cp->ch.chunk_length); 1797 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1798 return (NULL); 1799 } 1800 /* 1801 * NOTE: We can't use the INIT_ACK's chk_length to determine the 1802 * "initack_limit" value. This is because the chk_length field 1803 * includes the length of the cookie, but the cookie is omitted when 1804 * the INIT and INIT_ACK are tacked onto the cookie... 1805 */ 1806 initack_limit = offset + cookie_len; 1807 1808 /* 1809 * now that we know the INIT/INIT-ACK are in place, create a new TCB 1810 * and popluate 1811 */ 1812 1813 /* 1814 * Here we do a trick, we set in NULL for the proc/thread argument. 1815 * We do this since in effect we only use the p argument when the 1816 * socket is unbound and we must do an implicit bind. Since we are 1817 * getting a cookie, we cannot be unbound. 1818 */ 1819 stcb = sctp_aloc_assoc(inp, init_src, 0, &error, 1820 ntohl(initack_cp->init.initiate_tag), vrf_id, 1821 (struct thread *)NULL 1822 ); 1823 if (stcb == NULL) { 1824 struct mbuf *op_err; 1825 1826 /* memory problem? 
*/ 1827 SCTPDBG(SCTP_DEBUG_INPUT1, 1828 "process_cookie_new: no room for another TCB!\n"); 1829 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1830 1831 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 1832 sh, op_err, vrf_id); 1833 return (NULL); 1834 } 1835 /* get the correct sctp_nets */ 1836 if (netp) 1837 *netp = sctp_findnet(stcb, init_src); 1838 1839 asoc = &stcb->asoc; 1840 /* get scope variables out of cookie */ 1841 asoc->ipv4_local_scope = cookie->ipv4_scope; 1842 asoc->site_scope = cookie->site_scope; 1843 asoc->local_scope = cookie->local_scope; 1844 asoc->loopback_scope = cookie->loopback_scope; 1845 1846 if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) || 1847 (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) { 1848 struct mbuf *op_err; 1849 1850 /* 1851 * Houston we have a problem. The EP changed while the 1852 * cookie was in flight. Only recourse is to abort the 1853 * association. 1854 */ 1855 atomic_add_int(&stcb->asoc.refcnt, 1); 1856 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1857 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 1858 sh, op_err, vrf_id); 1859 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1860 SCTP_TCB_UNLOCK(stcb); 1861 SCTP_SOCKET_LOCK(so, 1); 1862 SCTP_TCB_LOCK(stcb); 1863 #endif 1864 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 1865 SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1866 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1867 SCTP_SOCKET_UNLOCK(so, 1); 1868 #endif 1869 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1870 return (NULL); 1871 } 1872 /* process the INIT-ACK info (my info) */ 1873 old_tag = asoc->my_vtag; 1874 asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1875 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1876 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1877 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1878 asoc->sending_seq = asoc->asconf_seq_out = 
asoc->str_reset_seq_out = asoc->init_seq_number; 1879 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1880 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1881 asoc->str_reset_seq_in = asoc->init_seq_number; 1882 1883 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1884 1885 /* process the INIT info (peer's info) */ 1886 if (netp) 1887 retval = sctp_process_init(init_cp, stcb, *netp); 1888 else 1889 retval = 0; 1890 if (retval < 0) { 1891 atomic_add_int(&stcb->asoc.refcnt, 1); 1892 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1893 SCTP_TCB_UNLOCK(stcb); 1894 SCTP_SOCKET_LOCK(so, 1); 1895 SCTP_TCB_LOCK(stcb); 1896 #endif 1897 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1898 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1899 SCTP_SOCKET_UNLOCK(so, 1); 1900 #endif 1901 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1902 return (NULL); 1903 } 1904 /* load all addresses */ 1905 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1906 init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh, 1907 init_src)) { 1908 atomic_add_int(&stcb->asoc.refcnt, 1); 1909 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1910 SCTP_TCB_UNLOCK(stcb); 1911 SCTP_SOCKET_LOCK(so, 1); 1912 SCTP_TCB_LOCK(stcb); 1913 #endif 1914 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17); 1915 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1916 SCTP_SOCKET_UNLOCK(so, 1); 1917 #endif 1918 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1919 return (NULL); 1920 } 1921 /* 1922 * verify any preceding AUTH chunk that was skipped 1923 */ 1924 /* pull the local authentication parameters from the cookie/init-ack */ 1925 sctp_auth_get_cookie_params(stcb, m, 1926 initack_offset + sizeof(struct sctp_init_ack_chunk), 1927 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk))); 1928 if (auth_skipped) { 1929 struct sctp_auth_chunk *auth; 1930 1931 auth = 
(struct sctp_auth_chunk *) 1932 sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf); 1933 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) { 1934 /* auth HMAC failed, dump the assoc and packet */ 1935 SCTPDBG(SCTP_DEBUG_AUTH1, 1936 "COOKIE-ECHO: AUTH failed\n"); 1937 atomic_add_int(&stcb->asoc.refcnt, 1); 1938 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1939 SCTP_TCB_UNLOCK(stcb); 1940 SCTP_SOCKET_LOCK(so, 1); 1941 SCTP_TCB_LOCK(stcb); 1942 #endif 1943 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18); 1944 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1945 SCTP_SOCKET_UNLOCK(so, 1); 1946 #endif 1947 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1948 return (NULL); 1949 } else { 1950 /* remaining chunks checked... good to go */ 1951 stcb->asoc.authenticated = 1; 1952 } 1953 } 1954 /* update current state */ 1955 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 1956 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1957 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1958 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1959 stcb->sctp_ep, stcb, asoc->primary_destination); 1960 } 1961 sctp_stop_all_cookie_timers(stcb); 1962 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab); 1963 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1964 1965 /* 1966 * if we're doing ASCONFs, check to see if we have any new local 1967 * addresses that need to get added to the peer (eg. addresses 1968 * changed while cookie echo in flight). This needs to be done 1969 * after we go to the OPEN state to do the correct asconf 1970 * processing. else, make sure we have the correct addresses in our 1971 * lists 1972 */ 1973 1974 /* warning, we re-use sin, sin6, sa_store here! 
*/ 1975 /* pull in local_address (our "from" address) */ 1976 if (cookie->laddr_type == SCTP_IPV4_ADDRESS) { 1977 /* source addr is IPv4 */ 1978 sin = (struct sockaddr_in *)initack_src; 1979 memset(sin, 0, sizeof(*sin)); 1980 sin->sin_family = AF_INET; 1981 sin->sin_len = sizeof(struct sockaddr_in); 1982 sin->sin_addr.s_addr = cookie->laddress[0]; 1983 } else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) { 1984 /* source addr is IPv6 */ 1985 sin6 = (struct sockaddr_in6 *)initack_src; 1986 memset(sin6, 0, sizeof(*sin6)); 1987 sin6->sin6_family = AF_INET6; 1988 sin6->sin6_len = sizeof(struct sockaddr_in6); 1989 sin6->sin6_scope_id = cookie->scope_id; 1990 memcpy(&sin6->sin6_addr, cookie->laddress, 1991 sizeof(sin6->sin6_addr)); 1992 } else { 1993 atomic_add_int(&stcb->asoc.refcnt, 1); 1994 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1995 SCTP_TCB_UNLOCK(stcb); 1996 SCTP_SOCKET_LOCK(so, 1); 1997 SCTP_TCB_LOCK(stcb); 1998 #endif 1999 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19); 2000 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2001 SCTP_SOCKET_UNLOCK(so, 1); 2002 #endif 2003 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2004 return (NULL); 2005 } 2006 2007 /* set up to notify upper layer */ 2008 *notification = SCTP_NOTIFY_ASSOC_UP; 2009 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2010 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2011 (inp->sctp_socket->so_qlimit == 0)) { 2012 /* 2013 * This is an endpoint that called connect() how it got a 2014 * cookie that is NEW is a bit of a mystery. It must be that 2015 * the INIT was sent, but before it got there.. a complete 2016 * INIT/INIT-ACK/COOKIE arrived. But of course then it 2017 * should have went to the other code.. not here.. oh well.. 2018 * a bit of protection is worth having.. 
2019 */ 2020 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 2021 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2022 atomic_add_int(&stcb->asoc.refcnt, 1); 2023 SCTP_TCB_UNLOCK(stcb); 2024 SCTP_SOCKET_LOCK(so, 1); 2025 SCTP_TCB_LOCK(stcb); 2026 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2027 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2028 SCTP_SOCKET_UNLOCK(so, 1); 2029 return (NULL); 2030 } 2031 #endif 2032 soisconnected(stcb->sctp_socket); 2033 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2034 SCTP_SOCKET_UNLOCK(so, 1); 2035 #endif 2036 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 2037 (inp->sctp_socket->so_qlimit)) { 2038 /* 2039 * We don't want to do anything with this one. Since it is 2040 * the listening guy. The timer will get started for 2041 * accepted connections in the caller. 2042 */ 2043 ; 2044 } 2045 /* since we did not send a HB make sure we don't double things */ 2046 if ((netp) && (*netp)) 2047 (*netp)->hb_responded = 1; 2048 2049 if (stcb->asoc.sctp_autoclose_ticks && 2050 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2051 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); 2052 } 2053 /* calculate the RTT */ 2054 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 2055 if ((netp) && (*netp)) { 2056 (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp, 2057 &cookie->time_entered, sctp_align_unsafe_makecopy); 2058 } 2059 /* respond with a COOKIE-ACK */ 2060 sctp_send_cookie_ack(stcb); 2061 2062 /* 2063 * check the address lists for any ASCONFs that need to be sent 2064 * AFTER the cookie-ack is sent 2065 */ 2066 sctp_check_address_list(stcb, m, 2067 initack_offset + sizeof(struct sctp_init_ack_chunk), 2068 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)), 2069 initack_src, cookie->local_scope, cookie->site_scope, 2070 cookie->ipv4_scope, cookie->loopback_scope); 2071 2072 2073 return (stcb); 2074 } 2075 2076 2077 /* 2078 * handles a COOKIE-ECHO 
message stcb: modified to either a new or left as 2079 * existing (non-NULL) TCB 2080 */ 2081 static struct mbuf * 2082 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2083 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2084 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2085 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2086 struct sctp_tcb **locked_tcb, uint32_t vrf_id) 2087 { 2088 struct sctp_state_cookie *cookie; 2089 struct sockaddr_in6 sin6; 2090 struct sockaddr_in sin; 2091 struct sctp_tcb *l_stcb = *stcb; 2092 struct sctp_inpcb *l_inp; 2093 struct sockaddr *to; 2094 sctp_assoc_t sac_restart_id; 2095 struct sctp_pcb *ep; 2096 struct mbuf *m_sig; 2097 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2098 uint8_t *sig; 2099 uint8_t cookie_ok = 0; 2100 unsigned int size_of_pkt, sig_offset, cookie_offset; 2101 unsigned int cookie_len; 2102 struct timeval now; 2103 struct timeval time_expires; 2104 struct sockaddr_storage dest_store; 2105 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store; 2106 struct ip *iph; 2107 int notification = 0; 2108 struct sctp_nets *netl; 2109 int had_a_existing_tcb = 0; 2110 2111 SCTPDBG(SCTP_DEBUG_INPUT2, 2112 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 2113 2114 if (inp_p == NULL) { 2115 return (NULL); 2116 } 2117 /* First get the destination address setup too. 
*/ 2118 iph = mtod(m, struct ip *); 2119 if (iph->ip_v == IPVERSION) { 2120 /* its IPv4 */ 2121 struct sockaddr_in *lsin; 2122 2123 lsin = (struct sockaddr_in *)(localep_sa); 2124 memset(lsin, 0, sizeof(*lsin)); 2125 lsin->sin_family = AF_INET; 2126 lsin->sin_len = sizeof(*lsin); 2127 lsin->sin_port = sh->dest_port; 2128 lsin->sin_addr.s_addr = iph->ip_dst.s_addr; 2129 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph); 2130 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 2131 /* its IPv6 */ 2132 struct ip6_hdr *ip6; 2133 struct sockaddr_in6 *lsin6; 2134 2135 lsin6 = (struct sockaddr_in6 *)(localep_sa); 2136 memset(lsin6, 0, sizeof(*lsin6)); 2137 lsin6->sin6_family = AF_INET6; 2138 lsin6->sin6_len = sizeof(struct sockaddr_in6); 2139 ip6 = mtod(m, struct ip6_hdr *); 2140 lsin6->sin6_port = sh->dest_port; 2141 lsin6->sin6_addr = ip6->ip6_dst; 2142 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen; 2143 } else { 2144 return (NULL); 2145 } 2146 2147 cookie = &cp->cookie; 2148 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2149 cookie_len = ntohs(cp->ch.chunk_length); 2150 2151 if ((cookie->peerport != sh->src_port) && 2152 (cookie->myport != sh->dest_port) && 2153 (cookie->my_vtag != sh->v_tag)) { 2154 /* 2155 * invalid ports or bad tag. Note that we always leave the 2156 * v_tag in the header in network order and when we stored 2157 * it in the my_vtag slot we also left it in network order. 2158 * This maintains the match even though it may be in the 2159 * opposite byte order of the machine :-> 2160 */ 2161 return (NULL); 2162 } 2163 if (cookie_len > size_of_pkt || 2164 cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2165 sizeof(struct sctp_init_chunk) + 2166 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 2167 /* cookie too long! or too small */ 2168 return (NULL); 2169 } 2170 /* 2171 * split off the signature into its own mbuf (since it should not be 2172 * calculated in the sctp_hmac_m() call). 
2173 */ 2174 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2175 if (sig_offset > size_of_pkt) { 2176 /* packet not correct size! */ 2177 /* XXX this may already be accounted for earlier... */ 2178 return (NULL); 2179 } 2180 m_sig = m_split(m, sig_offset, M_DONTWAIT); 2181 if (m_sig == NULL) { 2182 /* out of memory or ?? */ 2183 return (NULL); 2184 } 2185 /* 2186 * compute the signature/digest for the cookie 2187 */ 2188 ep = &(*inp_p)->sctp_ep; 2189 l_inp = *inp_p; 2190 if (l_stcb) { 2191 SCTP_TCB_UNLOCK(l_stcb); 2192 } 2193 SCTP_INP_RLOCK(l_inp); 2194 if (l_stcb) { 2195 SCTP_TCB_LOCK(l_stcb); 2196 } 2197 /* which cookie is it? */ 2198 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 2199 (ep->current_secret_number != ep->last_secret_number)) { 2200 /* it's the old cookie */ 2201 (void)sctp_hmac_m(SCTP_HMAC, 2202 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2203 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2204 } else { 2205 /* it's the current cookie */ 2206 (void)sctp_hmac_m(SCTP_HMAC, 2207 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 2208 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2209 } 2210 /* get the signature */ 2211 SCTP_INP_RUNLOCK(l_inp); 2212 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 2213 if (sig == NULL) { 2214 /* couldn't find signature */ 2215 sctp_m_freem(m_sig); 2216 return (NULL); 2217 } 2218 /* compare the received digest with the computed digest */ 2219 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2220 /* try the old cookie? 
*/ 2221 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 2222 (ep->current_secret_number != ep->last_secret_number)) { 2223 /* compute digest with old */ 2224 (void)sctp_hmac_m(SCTP_HMAC, 2225 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2226 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2227 /* compare */ 2228 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2229 cookie_ok = 1; 2230 } 2231 } else { 2232 cookie_ok = 1; 2233 } 2234 2235 /* 2236 * Now before we continue we must reconstruct our mbuf so that 2237 * normal processing of any other chunks will work. 2238 */ 2239 { 2240 struct mbuf *m_at; 2241 2242 m_at = m; 2243 while (SCTP_BUF_NEXT(m_at) != NULL) { 2244 m_at = SCTP_BUF_NEXT(m_at); 2245 } 2246 SCTP_BUF_NEXT(m_at) = m_sig; 2247 } 2248 2249 if (cookie_ok == 0) { 2250 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2251 SCTPDBG(SCTP_DEBUG_INPUT2, 2252 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2253 (uint32_t) offset, cookie_offset, sig_offset); 2254 return (NULL); 2255 } 2256 /* 2257 * check the cookie timestamps to be sure it's not stale 2258 */ 2259 (void)SCTP_GETTIME_TIMEVAL(&now); 2260 /* Expire time is in Ticks, so we convert to seconds */ 2261 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 2262 time_expires.tv_usec = cookie->time_entered.tv_usec; 2263 if (timevalcmp(&now, &time_expires, >)) { 2264 /* cookie is stale! 
*/ 2265 struct mbuf *op_err; 2266 struct sctp_stale_cookie_msg *scm; 2267 uint32_t tim; 2268 2269 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 2270 0, M_DONTWAIT, 1, MT_DATA); 2271 if (op_err == NULL) { 2272 /* FOOBAR */ 2273 return (NULL); 2274 } 2275 /* pre-reserve some space */ 2276 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 2277 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 2278 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 2279 2280 /* Set the len */ 2281 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 2282 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 2283 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 2284 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 2285 (sizeof(uint32_t)))); 2286 /* seconds to usec */ 2287 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 2288 /* add in usec */ 2289 if (tim == 0) 2290 tim = now.tv_usec - cookie->time_entered.tv_usec; 2291 scm->time_usec = htonl(tim); 2292 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 2293 vrf_id); 2294 return (NULL); 2295 } 2296 /* 2297 * Now we must see with the lookup address if we have an existing 2298 * asoc. This will only happen if we were in the COOKIE-WAIT state 2299 * and a INIT collided with us and somewhere the peer sent the 2300 * cookie on another address besides the single address our assoc 2301 * had for him. In this case we will have one of the tie-tags set at 2302 * least AND the address field in the cookie can be used to look it 2303 * up. 
2304 */ 2305 to = NULL; 2306 if (cookie->addr_type == SCTP_IPV6_ADDRESS) { 2307 memset(&sin6, 0, sizeof(sin6)); 2308 sin6.sin6_family = AF_INET6; 2309 sin6.sin6_len = sizeof(sin6); 2310 sin6.sin6_port = sh->src_port; 2311 sin6.sin6_scope_id = cookie->scope_id; 2312 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2313 sizeof(sin6.sin6_addr.s6_addr)); 2314 to = (struct sockaddr *)&sin6; 2315 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) { 2316 memset(&sin, 0, sizeof(sin)); 2317 sin.sin_family = AF_INET; 2318 sin.sin_len = sizeof(sin); 2319 sin.sin_port = sh->src_port; 2320 sin.sin_addr.s_addr = cookie->address[0]; 2321 to = (struct sockaddr *)&sin; 2322 } else { 2323 /* This should not happen */ 2324 return (NULL); 2325 } 2326 if ((*stcb == NULL) && to) { 2327 /* Yep, lets check */ 2328 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL); 2329 if (*stcb == NULL) { 2330 /* 2331 * We should have only got back the same inp. If we 2332 * got back a different ep we have a problem. The 2333 * original findep got back l_inp and now 2334 */ 2335 if (l_inp != *inp_p) { 2336 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2337 } 2338 } else { 2339 if (*locked_tcb == NULL) { 2340 /* 2341 * In this case we found the assoc only 2342 * after we locked the create lock. This 2343 * means we are in a colliding case and we 2344 * must make sure that we unlock the tcb if 2345 * its one of the cases where we throw away 2346 * the incoming packets. 2347 */ 2348 *locked_tcb = *stcb; 2349 2350 /* 2351 * We must also increment the inp ref count 2352 * since the ref_count flags was set when we 2353 * did not find the TCB, now we found it 2354 * which reduces the refcount.. we must 2355 * raise it back out to balance it all :-) 2356 */ 2357 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2358 if ((*stcb)->sctp_ep != l_inp) { 2359 SCTP_PRINTF("Huh? 
ep:%p diff then l_inp:%p?\n", 2360 (*stcb)->sctp_ep, l_inp); 2361 } 2362 } 2363 } 2364 } 2365 if (to == NULL) 2366 return (NULL); 2367 2368 cookie_len -= SCTP_SIGNATURE_SIZE; 2369 if (*stcb == NULL) { 2370 /* this is the "normal" case... get a new TCB */ 2371 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie, 2372 cookie_len, *inp_p, netp, to, ¬ification, 2373 auth_skipped, auth_offset, auth_len, vrf_id); 2374 } else { 2375 /* this is abnormal... cookie-echo on existing TCB */ 2376 had_a_existing_tcb = 1; 2377 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh, 2378 cookie, cookie_len, *inp_p, *stcb, *netp, to, 2379 ¬ification, &sac_restart_id, vrf_id); 2380 } 2381 2382 if (*stcb == NULL) { 2383 /* still no TCB... must be bad cookie-echo */ 2384 return (NULL); 2385 } 2386 /* 2387 * Ok, we built an association so confirm the address we sent the 2388 * INIT-ACK to. 2389 */ 2390 netl = sctp_findnet(*stcb, to); 2391 /* 2392 * This code should in theory NOT run but 2393 */ 2394 if (netl == NULL) { 2395 /* TSNH! Huh, why do I need to add this address here? */ 2396 int ret; 2397 2398 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE, 2399 SCTP_IN_COOKIE_PROC); 2400 netl = sctp_findnet(*stcb, to); 2401 } 2402 if (netl) { 2403 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2404 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2405 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2406 netl); 2407 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2408 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2409 } 2410 } 2411 if (*stcb) { 2412 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p, 2413 *stcb, NULL); 2414 } 2415 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2416 if (!had_a_existing_tcb || 2417 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2418 /* 2419 * If we have a NEW cookie or the connect never 2420 * reached the connected state during collision we 2421 * must do the TCP accept thing. 
2422 */ 2423 struct socket *so, *oso; 2424 struct sctp_inpcb *inp; 2425 2426 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2427 /* 2428 * For a restart we will keep the same 2429 * socket, no need to do anything. I THINK!! 2430 */ 2431 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED); 2432 return (m); 2433 } 2434 oso = (*inp_p)->sctp_socket; 2435 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2436 SCTP_TCB_UNLOCK((*stcb)); 2437 so = sonewconn(oso, 0 2438 ); 2439 SCTP_TCB_LOCK((*stcb)); 2440 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2441 2442 if (so == NULL) { 2443 struct mbuf *op_err; 2444 2445 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2446 struct socket *pcb_so; 2447 2448 #endif 2449 /* Too many sockets */ 2450 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2451 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2452 sctp_abort_association(*inp_p, NULL, m, iphlen, 2453 sh, op_err, vrf_id); 2454 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2455 pcb_so = SCTP_INP_SO(*inp_p); 2456 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2457 SCTP_TCB_UNLOCK((*stcb)); 2458 SCTP_SOCKET_LOCK(pcb_so, 1); 2459 SCTP_TCB_LOCK((*stcb)); 2460 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2461 #endif 2462 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2463 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2464 SCTP_SOCKET_UNLOCK(pcb_so, 1); 2465 #endif 2466 return (NULL); 2467 } 2468 inp = (struct sctp_inpcb *)so->so_pcb; 2469 SCTP_INP_INCR_REF(inp); 2470 /* 2471 * We add the unbound flag here so that if we get an 2472 * soabort() before we get the move_pcb done, we 2473 * will properly cleanup. 
2474 */ 2475 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2476 SCTP_PCB_FLAGS_CONNECTED | 2477 SCTP_PCB_FLAGS_IN_TCPPOOL | 2478 SCTP_PCB_FLAGS_UNBOUND | 2479 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2480 SCTP_PCB_FLAGS_DONT_WAKE); 2481 inp->sctp_features = (*inp_p)->sctp_features; 2482 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; 2483 inp->sctp_socket = so; 2484 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2485 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2486 inp->sctp_context = (*inp_p)->sctp_context; 2487 inp->inp_starting_point_for_iterator = NULL; 2488 /* 2489 * copy in the authentication parameters from the 2490 * original endpoint 2491 */ 2492 if (inp->sctp_ep.local_hmacs) 2493 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2494 inp->sctp_ep.local_hmacs = 2495 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2496 if (inp->sctp_ep.local_auth_chunks) 2497 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2498 inp->sctp_ep.local_auth_chunks = 2499 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2500 (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys, 2501 &inp->sctp_ep.shared_keys); 2502 2503 /* 2504 * Now we must move it from one hash table to 2505 * another and get the tcb in the right place. 2506 */ 2507 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2508 2509 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2510 SCTP_TCB_UNLOCK((*stcb)); 2511 2512 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, 2513 0); 2514 SCTP_TCB_LOCK((*stcb)); 2515 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2516 2517 2518 /* 2519 * now we must check to see if we were aborted while 2520 * the move was going on and the lock/unlock 2521 * happened. 2522 */ 2523 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2524 /* 2525 * yep it was, we leave the assoc attached 2526 * to the socket since the sctp_inpcb_free() 2527 * call will send an abort for us. 
2528 */ 2529 SCTP_INP_DECR_REF(inp); 2530 return (NULL); 2531 } 2532 SCTP_INP_DECR_REF(inp); 2533 /* Switch over to the new guy */ 2534 *inp_p = inp; 2535 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2536 2537 /* 2538 * Pull it from the incomplete queue and wake the 2539 * guy 2540 */ 2541 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2542 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2543 SCTP_TCB_UNLOCK((*stcb)); 2544 SCTP_SOCKET_LOCK(so, 1); 2545 #endif 2546 soisconnected(so); 2547 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2548 SCTP_TCB_LOCK((*stcb)); 2549 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2550 SCTP_SOCKET_UNLOCK(so, 1); 2551 #endif 2552 return (m); 2553 } 2554 } 2555 if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2556 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2557 } 2558 return (m); 2559 } 2560 2561 static void 2562 sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp, 2563 struct sctp_tcb *stcb, struct sctp_nets *net) 2564 { 2565 /* cp must not be used, others call this without a c-ack :-) */ 2566 struct sctp_association *asoc; 2567 2568 SCTPDBG(SCTP_DEBUG_INPUT2, 2569 "sctp_handle_cookie_ack: handling COOKIE-ACK\n"); 2570 if (stcb == NULL) 2571 return; 2572 2573 asoc = &stcb->asoc; 2574 2575 sctp_stop_all_cookie_timers(stcb); 2576 /* process according to association state */ 2577 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 2578 /* state change only needed when I am in right state */ 2579 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 2580 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 2581 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 2582 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 2583 stcb->sctp_ep, stcb, asoc->primary_destination); 2584 2585 } 2586 /* update RTO */ 2587 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 2588 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 2589 if (asoc->overall_error_count == 0) { 2590 net->RTO = 
sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Lock-order dance: drop the TCB lock before taking
			 * the socket lock, re-take the TCB lock, then check
			 * the socket did not close while we were unlocked.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
#endif
			soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		if (chk) {
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, chk->whoTo);
		}
	}
}

/*
 * Handle an ECN-Echo (ECNE) chunk: resync the ECN nonce state, locate the
 * net the echoed TSN was sent on, and (at most once per RTT) let the
 * pluggable CC module reduce cwnd; always answer with a CWR.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	uint32_t tsn;

	/* malformed ECNE chunks are silently dropped */
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
		return;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
	/* Also we make sure we disable the nonce_wait */
	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (lchk == NULL) {
		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
	} else {
		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
	}
	stcb->asoc.nonce_wait_for_ecne = 0;
	stcb->asoc.nonce_sum_check = 0;

	/* Find where it was sent, if possible */
	net = NULL;
	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	while (lchk) {
		if (lchk->rec.data.TSN_seq == tsn) {
			net = lchk->whoTo;
			break;
		}
		/*
		 * sent_queue is TSN ordered; once past the echoed TSN it
		 * cannot be found.  NOTE(review): this uses MAX_SEQ while
		 * the other TSN comparisons nearby use MAX_TSN —
		 * presumably the same wrap constant; verify.
		 */
		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
			break;
		lchk = TAILQ_NEXT(lchk, sctp_next);
	}
	if (net == NULL)
		/* default is we use the primary */
		net = stcb->asoc.primary_destination;

	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net);
		/*
		 * we reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the resync_tsn.
		 */
		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer.
	 */
	sctp_send_cwr(stcb, net, tsn);
}

/*
 * Handle a CWR chunk from the peer: find the queued ECNE it covers (if
 * any) on the control send queue and remove it.
 */
static void
sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
{
	/*
	 * Here we get a CWR from the peer. We must look in the outqueue and
	 * make sure that we have a covered ECNE in the control chunk part.
	 * If so remove it.
	 */
	struct sctp_tmit_chunk *chk;
	struct sctp_ecne_chunk *ecne;

	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
			continue;
		}
		/*
		 * Look for and remove if it is the right TSN. Since there
		 * is only ONE ECNE on the control queue at any one time we
		 * don't need to worry about more than one!
		 */
		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
		/*
		 * CWR covers the ECNE when its TSN is >= ours; the equality
		 * case compares the raw network-order fields, which is
		 * byte-order safe for equality.
		 */
		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
			/* this covers this ECNE, we can remove it */
			stcb->asoc.ecn_echo_cnt_onq--;
			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
			    sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			stcb->asoc.ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk);
			break;
		}
	}
}

/*
 * Handle a SHUTDOWN-COMPLETE: only valid in SHUTDOWN-ACK-SENT state.
 * Notifies the ULP, flushes any (unexpected) outbound data, stops the
 * shutdown timer and frees the association.
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty?
they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !TAILQ_EMPTY(&asoc->out_wheel)) {
			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: take a refcount, drop the TCB lock, grab the
	 * socket lock, then re-take the TCB lock before freeing the assoc.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}

/*
 * Handle one chunk descriptor from a packet-drop report: for DATA, locate
 * the dropped TSN on the sent queue, verify the echoed payload bytes and
 * mark the chunk for (fast) retransmission; for control chunks, mark the
 * queued copy for resend.
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			/* first pass assumes the queue is TSN ordered */
			while (tp1) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
				    MAX_TSN)) {
					/* not found */
					tp1 = NULL;
					break;
				}
				tp1 = TAILQ_NEXT(tp1, sctp_next);
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
2848 */ 2849 SCTP_STAT_INCR(sctps_pdrpdnfnd); 2850 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2851 while (tp1) { 2852 if (tp1->rec.data.TSN_seq == tsn) { 2853 /* found it */ 2854 break; 2855 } 2856 tp1 = TAILQ_NEXT(tp1, sctp_next); 2857 } 2858 } 2859 if (tp1 == NULL) { 2860 SCTP_STAT_INCR(sctps_pdrptsnnf); 2861 } 2862 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) { 2863 uint8_t *ddp; 2864 2865 if ((stcb->asoc.peers_rwnd == 0) && 2866 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { 2867 SCTP_STAT_INCR(sctps_pdrpdiwnp); 2868 return (0); 2869 } 2870 if (stcb->asoc.peers_rwnd == 0 && 2871 (flg & SCTP_FROM_MIDDLE_BOX)) { 2872 SCTP_STAT_INCR(sctps_pdrpdizrw); 2873 return (0); 2874 } 2875 ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+ 2876 sizeof(struct sctp_data_chunk)); 2877 { 2878 unsigned int iii; 2879 2880 for (iii = 0; iii < sizeof(desc->data_bytes); 2881 iii++) { 2882 if (ddp[iii] != desc->data_bytes[iii]) { 2883 SCTP_STAT_INCR(sctps_pdrpbadd); 2884 return (-1); 2885 } 2886 } 2887 } 2888 /* 2889 * We zero out the nonce so resync not 2890 * needed 2891 */ 2892 tp1->rec.data.ect_nonce = 0; 2893 2894 if (tp1->do_rtt) { 2895 /* 2896 * this guy had a RTO calculation 2897 * pending on it, cancel it 2898 */ 2899 tp1->do_rtt = 0; 2900 } 2901 SCTP_STAT_INCR(sctps_pdrpmark); 2902 if (tp1->sent != SCTP_DATAGRAM_RESEND) 2903 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2904 tp1->sent = SCTP_DATAGRAM_RESEND; 2905 /* 2906 * mark it as if we were doing a FR, since 2907 * we will be getting gap ack reports behind 2908 * the info from the router. 2909 */ 2910 tp1->rec.data.doing_fast_retransmit = 1; 2911 /* 2912 * mark the tsn with what sequences can 2913 * cause a new FR. 
2914 */ 2915 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 2916 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 2917 } else { 2918 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 2919 } 2920 2921 /* restart the timer */ 2922 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2923 stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24); 2924 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2925 stcb, tp1->whoTo); 2926 2927 /* fix counts and things */ 2928 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 2929 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP, 2930 tp1->whoTo->flight_size, 2931 tp1->book_size, 2932 (uintptr_t) stcb, 2933 tp1->rec.data.TSN_seq); 2934 } 2935 sctp_flight_size_decrease(tp1); 2936 sctp_total_flight_decrease(stcb, tp1); 2937 } { 2938 /* audit code */ 2939 unsigned int audit; 2940 2941 audit = 0; 2942 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 2943 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2944 audit++; 2945 } 2946 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue, 2947 sctp_next) { 2948 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2949 audit++; 2950 } 2951 if (audit != stcb->asoc.sent_queue_retran_cnt) { 2952 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n", 2953 audit, stcb->asoc.sent_queue_retran_cnt); 2954 #ifndef SCTP_AUDITING_ENABLED 2955 stcb->asoc.sent_queue_retran_cnt = audit; 2956 #endif 2957 } 2958 } 2959 } 2960 break; 2961 case SCTP_ASCONF: 2962 { 2963 struct sctp_tmit_chunk *asconf; 2964 2965 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue, 2966 sctp_next) { 2967 if (asconf->rec.chunk_id.id == SCTP_ASCONF) { 2968 break; 2969 } 2970 } 2971 if (asconf) { 2972 if (asconf->sent != SCTP_DATAGRAM_RESEND) 2973 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2974 asconf->sent = SCTP_DATAGRAM_RESEND; 2975 asconf->snd_count--; 2976 } 2977 } 2978 break; 2979 case SCTP_INITIATION: 2980 /* resend the INIT */ 2981 stcb->asoc.dropped_special_cnt++; 2982 if 
(stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) { 2983 /* 2984 * If we can get it in, in a few attempts we do 2985 * this, otherwise we let the timer fire. 2986 */ 2987 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, 2988 stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25); 2989 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 2990 } 2991 break; 2992 case SCTP_SELECTIVE_ACK: 2993 /* resend the sack */ 2994 sctp_send_sack(stcb); 2995 break; 2996 case SCTP_HEARTBEAT_REQUEST: 2997 /* resend a demand HB */ 2998 if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) { 2999 /* 3000 * Only retransmit if we KNOW we wont destroy the 3001 * tcb 3002 */ 3003 (void)sctp_send_hb(stcb, 1, net); 3004 } 3005 break; 3006 case SCTP_SHUTDOWN: 3007 sctp_send_shutdown(stcb, net); 3008 break; 3009 case SCTP_SHUTDOWN_ACK: 3010 sctp_send_shutdown_ack(stcb, net); 3011 break; 3012 case SCTP_COOKIE_ECHO: 3013 { 3014 struct sctp_tmit_chunk *cookie; 3015 3016 cookie = NULL; 3017 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, 3018 sctp_next) { 3019 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 3020 break; 3021 } 3022 } 3023 if (cookie) { 3024 if (cookie->sent != SCTP_DATAGRAM_RESEND) 3025 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3026 cookie->sent = SCTP_DATAGRAM_RESEND; 3027 sctp_stop_all_cookie_timers(stcb); 3028 } 3029 } 3030 break; 3031 case SCTP_COOKIE_ACK: 3032 sctp_send_cookie_ack(stcb); 3033 break; 3034 case SCTP_ASCONF_ACK: 3035 /* resend last asconf ack */ 3036 sctp_send_asconf_ack(stcb); 3037 break; 3038 case SCTP_FORWARD_CUM_TSN: 3039 send_forward_tsn(stcb, &stcb->asoc); 3040 break; 3041 /* can't do anything with these */ 3042 case SCTP_PACKET_DROPPED: 3043 case SCTP_INITIATION_ACK: /* this should not happen */ 3044 case SCTP_HEARTBEAT_ACK: 3045 case SCTP_ABORT_ASSOCIATION: 3046 case SCTP_OPERATION_ERROR: 3047 case SCTP_SHUTDOWN_COMPLETE: 3048 case SCTP_ECN_ECHO: 3049 case SCTP_ECN_CWR: 3050 default: 3051 break; 3052 } 
3053 return (0); 3054 } 3055 3056 void 3057 sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 3058 { 3059 int i; 3060 uint16_t temp; 3061 3062 /* 3063 * We set things to 0xffff since this is the last delivered sequence 3064 * and we will be sending in 0 after the reset. 3065 */ 3066 3067 if (number_entries) { 3068 for (i = 0; i < number_entries; i++) { 3069 temp = ntohs(list[i]); 3070 if (temp >= stcb->asoc.streamincnt) { 3071 continue; 3072 } 3073 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff; 3074 } 3075 } else { 3076 list = NULL; 3077 for (i = 0; i < stcb->asoc.streamincnt; i++) { 3078 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff; 3079 } 3080 } 3081 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3082 } 3083 3084 static void 3085 sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 3086 { 3087 int i; 3088 3089 if (number_entries == 0) { 3090 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3091 stcb->asoc.strmout[i].next_sequence_sent = 0; 3092 } 3093 } else if (number_entries) { 3094 for (i = 0; i < number_entries; i++) { 3095 uint16_t temp; 3096 3097 temp = ntohs(list[i]); 3098 if (temp >= stcb->asoc.streamoutcnt) { 3099 /* no such stream */ 3100 continue; 3101 } 3102 stcb->asoc.strmout[temp].next_sequence_sent = 0; 3103 } 3104 } 3105 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3106 } 3107 3108 3109 struct sctp_stream_reset_out_request * 3110 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk) 3111 { 3112 struct sctp_association *asoc; 3113 struct sctp_stream_reset_out_req *req; 3114 struct sctp_stream_reset_out_request *r; 3115 struct sctp_tmit_chunk *chk; 3116 int len, clen; 3117 3118 asoc = &stcb->asoc; 3119 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 3120 asoc->stream_reset_outstanding = 0; 3121 return (NULL); 3122 } 3123 if 
	    (stcb->asoc.str_reset == NULL) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	chk = stcb->asoc.str_reset;
	if (chk->data == NULL) {
		return (NULL);
	}
	if (bchk) {
		/* he wants a copy of the chk pointer */
		*bchk = chk;
	}
	clen = chk->send_size;
	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
	r = &req->sr_req;
	if (ntohl(r->request_seq) == seq) {
		/* found it */
		return (r);
	}
	len = SCTP_SIZE32(ntohs(r->ph.param_length));
	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
		/* move to the next one, there can only be a max of two */
		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
		if (ntohl(r->request_seq) == seq) {
			return (r);
		}
	}
	/* that seq is not here */
	return (NULL);
}

/*
 * Tear down the pending stream-reset chunk: stop its timer, unlink it
 * from the control send queue, free its mbuf data and the chunk itself,
 * and clear asoc->str_reset.
 */
static void
sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;

	if (stcb->asoc.str_reset == NULL) {
		return;
	}
	asoc = &stcb->asoc;

	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
	TAILQ_REMOVE(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	if (chk->data) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
	}
	asoc->ctrl_queue_cnt--;
	sctp_free_a_chunk(stcb, chk);
	/* sa_ignore NO_NULL_CHK */
	stcb->asoc.str_reset = NULL;
}


/*
 * Handle a stream-reset RESPONSE from the peer for request sequence
 * 'seq' with result 'action'.  Matches the response to our outstanding
 * OUT/IN/TSN request and applies or reports the result.  Returns 1 if
 * the association was aborted while processing, else 0.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_out_request *srparam;
	int number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
return (0); 3195 } 3196 if (seq == stcb->asoc.str_reset_seq_out) { 3197 srparam = sctp_find_stream_reset(stcb, seq, &chk); 3198 if (srparam) { 3199 stcb->asoc.str_reset_seq_out++; 3200 type = ntohs(srparam->ph.param_type); 3201 lparm_len = ntohs(srparam->ph.param_length); 3202 if (type == SCTP_STR_RESET_OUT_REQUEST) { 3203 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t); 3204 asoc->stream_reset_out_is_outstanding = 0; 3205 if (asoc->stream_reset_outstanding) 3206 asoc->stream_reset_outstanding--; 3207 if (action == SCTP_STREAM_RESET_PERFORMED) { 3208 /* do it */ 3209 sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams); 3210 } else { 3211 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3212 } 3213 } else if (type == SCTP_STR_RESET_IN_REQUEST) { 3214 /* Answered my request */ 3215 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t); 3216 if (asoc->stream_reset_outstanding) 3217 asoc->stream_reset_outstanding--; 3218 if (action != SCTP_STREAM_RESET_PERFORMED) { 3219 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3220 } 3221 } else if (type == SCTP_STR_RESET_TSN_REQUEST) { 3222 /** 3223 * a) Adopt the new in tsn. 3224 * b) reset the map 3225 * c) Adopt the new out-tsn 3226 */ 3227 struct sctp_stream_reset_response_tsn *resp; 3228 struct sctp_forward_tsn_chunk fwdtsn; 3229 int abort_flag = 0; 3230 3231 if (respin == NULL) { 3232 /* huh ? 
*/ 3233 return (0); 3234 } 3235 if (action == SCTP_STREAM_RESET_PERFORMED) { 3236 resp = (struct sctp_stream_reset_response_tsn *)respin; 3237 asoc->stream_reset_outstanding--; 3238 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3239 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3240 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1); 3241 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3242 if (abort_flag) { 3243 return (1); 3244 } 3245 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1); 3246 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3247 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn); 3248 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3249 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn); 3250 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn; 3251 3252 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3253 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3254 3255 } 3256 } 3257 /* get rid of the request and get the request flags */ 3258 if (asoc->stream_reset_outstanding == 0) { 3259 sctp_clean_up_stream_reset(stcb); 3260 } 3261 } 3262 } 3263 return (0); 3264 } 3265 3266 static void 3267 sctp_handle_str_reset_request_in(struct sctp_tcb *stcb, 3268 struct sctp_tmit_chunk *chk, 3269 struct sctp_stream_reset_in_request *req, int trunc) 3270 { 3271 uint32_t seq; 3272 int len, i; 3273 int number_entries; 3274 uint16_t temp; 3275 3276 /* 3277 * peer wants me to send a str-reset to him for my outgoing seq's if 3278 * seq_in is right. 
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		if (trunc) {
			/* Can't do it, since they exceeded our buffer size */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			/*
			 * Convert the stream list to host byte order in
			 * place before building our OUT request from it.
			 */
			for (i = 0; i < number_entries; i++) {
				temp = ntohs(req->list_of_streams[i]);
				req->list_of_streams[i] = temp;
			}
			/* move the reset action back one */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
			    asoc->str_reset_seq_out,
			    seq, (asoc->sending_seq - 1));
			asoc->stream_reset_out_is_outstanding = 1;
			asoc->str_reset = chk;
			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
			stcb->asoc.stream_reset_outstanding++;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		}
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmitted request: echo the cached result */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}

/*
 * Handle a stream-reset TSN request: reset every stream in both
 * directions and resynchronize the TSNs.  Returns 1 if the association
 * was aborted while processing the synthetic FWD-TSN, else 0.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* build a synthetic FWD-TSN to flush pending inbound data */
		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
		fwdtsn.ch.chunk_flags = 0;
		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
		if (abort_flag) {
			return (1);
		}
		/* jump the inbound TSN space forward and clear the map */
		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
		stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		atomic_add_int(&stcb->asoc.sending_seq, 1);
		/* save off historical data for retrans */
		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;

		sctp_add_stream_reset_result_tsn(chk,
		    ntohl(req->request_seq),
		    SCTP_STREAM_RESET_PERFORMED,
		    stcb->asoc.sending_seq,
		    stcb->asoc.mapping_array_base_tsn);
		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;

		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmission: replay the saved result and TSN data */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    stcb->asoc.last_sending_seq[0],
		    stcb->asoc.last_base_tsnsent[0]
		    );
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    stcb->asoc.last_sending_seq[1],
		    stcb->asoc.last_base_tsnsent[1]
		    );
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
	return (0);
}

/*
 * Handle a stream-reset OUT request: the peer is resetting (some of) its
 * outgoing -- our incoming -- streams.  If all data up to the indicated
 * TSN has arrived we reset immediately; otherwise the request is queued
 * on asoc->resetHead until the data catches up.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (trunc) {
			/* request exceeded our buffer: refuse it */
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
		} else if ((tsn == asoc->cumulative_tsn) ||
		    (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
				return;
			}
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			memcpy(&liste->req, req,
			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
		}
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}

/*
 * Entry point for a received STREAM-RESET chunk.  Walks every embedded
 * parameter (request or response), dispatches to the per-type handlers
 * above, and builds one response chunk which is queued on the control
 * send queue.  Returns non-zero if the association was aborted.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static int
sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
    struct sctp_stream_reset_out_req *sr_req)
{
	int chk_length, param_len, ptype;
	struct sctp_paramhdr pstore;
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];

	uint32_t seq;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	chk_length = ntohs(sr_req->ch.chunk_length);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		/* shared bail-out path: frees the chunk (and any data) */
strres_nochunk:
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = stcb->asoc.primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch =
mtod(chk->data, struct sctp_chunkhdr *); 3517 ch->chunk_type = SCTP_STREAM_RESET; 3518 ch->chunk_flags = 0; 3519 ch->chunk_length = htons(chk->send_size); 3520 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 3521 offset += sizeof(struct sctp_chunkhdr); 3522 while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) { 3523 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore); 3524 if (ph == NULL) 3525 break; 3526 param_len = ntohs(ph->param_length); 3527 if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) { 3528 /* bad param */ 3529 break; 3530 } 3531 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)), 3532 (uint8_t *) & cstore); 3533 ptype = ntohs(ph->param_type); 3534 num_param++; 3535 if (param_len > (int)sizeof(cstore)) { 3536 trunc = 1; 3537 } else { 3538 trunc = 0; 3539 } 3540 3541 if (num_param > SCTP_MAX_RESET_PARAMS) { 3542 /* hit the max of parameters already sorry.. 
*/ 3543 break; 3544 } 3545 if (ptype == SCTP_STR_RESET_OUT_REQUEST) { 3546 struct sctp_stream_reset_out_request *req_out; 3547 3548 req_out = (struct sctp_stream_reset_out_request *)ph; 3549 num_req++; 3550 if (stcb->asoc.stream_reset_outstanding) { 3551 seq = ntohl(req_out->response_seq); 3552 if (seq == stcb->asoc.str_reset_seq_out) { 3553 /* implicit ack */ 3554 (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL); 3555 } 3556 } 3557 sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc); 3558 } else if (ptype == SCTP_STR_RESET_IN_REQUEST) { 3559 struct sctp_stream_reset_in_request *req_in; 3560 3561 num_req++; 3562 3563 req_in = (struct sctp_stream_reset_in_request *)ph; 3564 3565 sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc); 3566 } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) { 3567 struct sctp_stream_reset_tsn_request *req_tsn; 3568 3569 num_req++; 3570 req_tsn = (struct sctp_stream_reset_tsn_request *)ph; 3571 3572 if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) { 3573 ret_code = 1; 3574 goto strres_nochunk; 3575 } 3576 /* no more */ 3577 break; 3578 } else if (ptype == SCTP_STR_RESET_RESPONSE) { 3579 struct sctp_stream_reset_response *resp; 3580 uint32_t result; 3581 3582 resp = (struct sctp_stream_reset_response *)ph; 3583 seq = ntohl(resp->response_seq); 3584 result = ntohl(resp->result); 3585 if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) { 3586 ret_code = 1; 3587 goto strres_nochunk; 3588 } 3589 } else { 3590 break; 3591 } 3592 offset += SCTP_SIZE32(param_len); 3593 chk_length -= SCTP_SIZE32(param_len); 3594 } 3595 if (num_req == 0) { 3596 /* we have no response free the stuff */ 3597 goto strres_nochunk; 3598 } 3599 /* ok we have a chunk to link in */ 3600 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, 3601 chk, 3602 sctp_next); 3603 stcb->asoc.ctrl_queue_cnt++; 3604 return (ret_code); 3605 } 3606 3607 /* 3608 * Handle a router or endpoints report of a packet loss, 
 * there are two ways
 * to handle this, either we get the whole packet and must disect it
 * ourselves (possibly with truncation and or corruption) or it is a summary
 * from a middle box that did the disectting for us.
 */
static void
sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
{
	uint32_t bottle_bw, on_queue;
	uint16_t trunc_len;
	unsigned int chlen;
	unsigned int at;
	struct sctp_chunk_desc desc;
	struct sctp_chunkhdr *ch;

	/* length of the dropped-packet payload carried in the report */
	chlen = ntohs(cp->ch.chunk_length);
	chlen -= sizeof(struct sctp_pktdrop_chunk);
	/* XXX possible chlen underflow */
	if (chlen == 0) {
		/* bandwidth-only report: no embedded chunks to walk */
		ch = NULL;
		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
			SCTP_STAT_INCR(sctps_pdrpbwrpt);
	} else {
		/* the report embeds the dropped packet after its SCTP header */
		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
		chlen -= sizeof(struct sctphdr);
		/* XXX possible chlen underflow */
		memset(&desc, 0, sizeof(desc));
	}
	trunc_len = (uint16_t) ntohs(cp->trunc_len);
	if (trunc_len > limit) {
		trunc_len = limit;
	}
	/* now the chunks themselves */
	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
		desc.chunk_type = ch->chunk_type;
		/* get amount we need to move */
		at = ntohs(ch->chunk_length);
		if (at < sizeof(struct sctp_chunkhdr)) {
			/* corrupt chunk, maybe at the end? */
			SCTP_STAT_INCR(sctps_pdrpcrupt);
			break;
		}
		if (trunc_len == 0) {
			/* we are supposed to have all of it */
			if (at > chlen) {
				/* corrupt skip it */
				SCTP_STAT_INCR(sctps_pdrpcrupt);
				break;
			}
		} else {
			/* is there enough of it left ? */
			if (desc.chunk_type == SCTP_DATA) {
				if (chlen < (sizeof(struct sctp_data_chunk) +
				    sizeof(desc.data_bytes))) {
					break;
				}
			} else {
				if (chlen < sizeof(struct sctp_chunkhdr)) {
					break;
				}
			}
		}
		if (desc.chunk_type == SCTP_DATA) {
			/* can we get out the tsn? */
			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
				SCTP_STAT_INCR(sctps_pdrpmbda);

			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
				/* yep */
				struct sctp_data_chunk *dcp;
				uint8_t *ddp;
				unsigned int iii;

				/* snapshot the first payload bytes for later verification */
				dcp = (struct sctp_data_chunk *)ch;
				ddp = (uint8_t *) (dcp + 1);
				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
					desc.data_bytes[iii] = ddp[iii];
				}
				desc.tsn_ifany = dcp->dp.tsn;
			} else {
				/* nope we are done. */
				SCTP_STAT_INCR(sctps_pdrpnedat);
				break;
			}
		} else {
			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
				SCTP_STAT_INCR(sctps_pdrpmbct);
		}

		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
			SCTP_STAT_INCR(sctps_pdrppdbrk);
			break;
		}
		if (SCTP_SIZE32(at) > chlen) {
			break;
		}
		chlen -= SCTP_SIZE32(at);
		if (chlen < sizeof(struct sctp_chunkhdr)) {
			/* done, none left */
			break;
		}
		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
	}
	/* Now update any rwnd --- possibly */
	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
		/* From a peer, we get a rwnd report */
		uint32_t a_rwnd;

		SCTP_STAT_INCR(sctps_pdrpfehos);

		bottle_bw = ntohl(cp->bottle_bw);
		on_queue = ntohl(cp->current_onq);
		if (bottle_bw && on_queue) {
			/* a rwnd report is in here */
			if (bottle_bw > on_queue)
				a_rwnd = bottle_bw - on_queue;
			else
				a_rwnd = 0;

			if (a_rwnd == 0)
				stcb->asoc.peers_rwnd = 0;
			else {
				if (a_rwnd > stcb->asoc.total_flight) {
stcb->asoc.peers_rwnd = 3733 a_rwnd - stcb->asoc.total_flight; 3734 } else { 3735 stcb->asoc.peers_rwnd = 0; 3736 } 3737 if (stcb->asoc.peers_rwnd < 3738 stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3739 /* SWS sender side engages */ 3740 stcb->asoc.peers_rwnd = 0; 3741 } 3742 } 3743 } 3744 } else { 3745 SCTP_STAT_INCR(sctps_pdrpfmbox); 3746 } 3747 3748 /* now middle boxes in sat networks get a cwnd bump */ 3749 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) && 3750 (stcb->asoc.sat_t3_loss_recovery == 0) && 3751 (stcb->asoc.sat_network)) { 3752 /* 3753 * This is debateable but for sat networks it makes sense 3754 * Note if a T3 timer has went off, we will prohibit any 3755 * changes to cwnd until we exit the t3 loss recovery. 3756 */ 3757 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb, 3758 net, cp, &bottle_bw, &on_queue); 3759 } 3760 } 3761 3762 /* 3763 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to 3764 * still contain IP/SCTP header - stcb: is the tcb found for this packet - 3765 * offset: offset into the mbuf chain to first chunkhdr - length: is the 3766 * length of the complete packet outputs: - length: modified to remaining 3767 * length after control processing - netp: modified to new sctp_nets after 3768 * cookie-echo processing - return NULL to discard the packet (ie. no asoc, 3769 * bad packet,...) 
otherwise return the tcb for this packet
*/
/*
 * NOTE(review): locking contract — if the caller passes a non-NULL stcb it
 * is passed in LOCKED; locked_tcb tracks which tcb (the original, or one
 * found during ASCONF/COOKIE-ECHO lookup) must be unlocked before any
 * NULL return.  Every early-return path below therefore checks locked_tcb.
 * A non-NULL return hands the (still locked) tcb back to the caller.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static struct sctp_tcb *
sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	uint32_t vtag_in;
	int num_chunks = 0;	/* number of control chunks processed */
	uint32_t chk_length;
	int ret;
	int abort_no_unlock = 0;

	/*
	 * How big should this be, and should it be alloc'd? Lets try the
	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
	 * until we get into jumbo grams and such..
	 */
	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
	struct sctp_tcb *locked_tcb = stcb;	/* tcb we must unlock on a NULL return */
	int got_auth = 0;	/* set once an AUTH chunk has verified this packet */
	uint32_t auth_offset = 0, auth_len = 0;
	int auth_skipped = 0;	/* AUTH seen before a tcb was found; re-check later */
	int asconf_cnt = 0;	/* number of ASCONF chunks handled; ack them at the end */

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
	    iphlen, *offset, length, stcb);

	/* validate chunk header length... */
	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
		    ntohs(ch->chunk_length));
		if (locked_tcb) {
			SCTP_TCB_UNLOCK(locked_tcb);
		}
		return (NULL);
	}
	/*
	 * validate the verification tag
	 */
	vtag_in = ntohl(sh->v_tag);

	if (locked_tcb) {
		SCTP_TCB_LOCK_ASSERT(locked_tcb);
	}
	if (ch->chunk_type == SCTP_INITIATION) {
		/* an INIT must carry a zero verification tag */
		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
		    ntohs(ch->chunk_length), vtag_in);
		if (vtag_in != 0) {
			/* protocol error- silently discard... */
			SCTP_STAT_INCR(sctps_badvtag);
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
		/*
		 * If there is no stcb, skip the AUTH chunk and process
		 * later after a stcb is found (to validate the lookup was
		 * valid).
		 */
		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
		    (stcb == NULL) && !sctp_auth_disable) {
			/* save this chunk for later processing */
			auth_skipped = 1;
			auth_offset = *offset;
			auth_len = ntohs(ch->chunk_length);

			/* (temporarily) move past this chunk */
			*offset += SCTP_SIZE32(auth_len);
			if (*offset >= length) {
				/* no more data left in the mbuf chain */
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
			    sizeof(struct sctp_chunkhdr), chunk_buf);
		}
		if (ch == NULL) {
			/* Help */
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
			goto process_control_chunks;
		}
		/*
		 * first check if it's an ASCONF with an unknown src addr we
		 * need to look inside to find the association
		 */
		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
			struct sctp_chunkhdr *asconf_ch = ch;
			uint32_t asconf_offset = 0, asconf_len = 0;

			/* inp's refcount may be reduced */
			SCTP_INP_INCR_REF(inp);

			/* walk consecutive ASCONF chunks until a lookup succeeds */
			asconf_offset = *offset;
			do {
				asconf_len = ntohs(asconf_ch->chunk_length);
				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
					break;
				stcb = sctp_findassociation_ep_asconf(m, iphlen,
				    *offset, sh, &inp, netp);
				if (stcb != NULL)
					break;
				asconf_offset += SCTP_SIZE32(asconf_len);
				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
				    sizeof(struct sctp_chunkhdr), chunk_buf);
			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
			if (stcb == NULL) {
				/*
				 * reduce inp's refcount if not reduced in
				 * sctp_findassociation_ep_asconf().
				 */
				SCTP_INP_DECR_REF(inp);
			} else {
				/* the lookup returned the tcb locked; we own it now */
				locked_tcb = stcb;
			}

			/* now go back and verify any auth chunk to be sure */
			if (auth_skipped && (stcb != NULL)) {
				struct sctp_auth_chunk *auth;

				auth = (struct sctp_auth_chunk *)
				    sctp_m_getptr(m, auth_offset,
				    auth_len, chunk_buf);
				got_auth = 1;
				auth_skipped = 0;
				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
				    auth_offset)) {
					/* auth HMAC failed so dump it */
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				} else {
					/* remaining chunks are HMAC checked */
					stcb->asoc.authenticated = 1;
				}
			}
		}
		if (stcb == NULL) {
			/* no association, so it's out of the blue... */
			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
			    vrf_id);
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		asoc = &stcb->asoc;
		/* ABORT and SHUTDOWN can use either v_tag... */
		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
			if ((vtag_in == asoc->my_vtag) ||
			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
			    (vtag_in == asoc->peer_vtag))) {
				/* this is valid */
			} else {
				/* drop this packet... */
				SCTP_STAT_INCR(sctps_badvtag);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			if (vtag_in != asoc->my_vtag) {
				/*
				 * this could be a stale SHUTDOWN-ACK or the
				 * peer never got the SHUTDOWN-COMPLETE and
				 * is still hung; we have started a new asoc
				 * but it won't complete until the shutdown
				 * is completed
				 */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
				    NULL, vrf_id);
				return (NULL);
			}
		} else {
			/* for all other chunks, vtag must match */
			if (vtag_in != asoc->my_vtag) {
				/* invalid vtag... */
				SCTPDBG(SCTP_DEBUG_INPUT3,
				    "invalid vtag: %xh, expect %xh\n",
				    vtag_in, asoc->my_vtag);
				SCTP_STAT_INCR(sctps_badvtag);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
		}
	}			/* end if !SCTP_COOKIE_ECHO */
	/*
	 * process all control chunks...
	 */
	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* implied cookie-ack.. we must have lost the ack */
		if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
		    *netp);
	}
process_control_chunks:
	/* main loop: one iteration per control chunk in the packet */
	while (IS_SCTP_CONTROL(ch)) {
		/* validate chunk length */
		chk_length = ntohs(ch->chunk_length);
		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
		    ch->chunk_type, chk_length);
		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
		if (chk_length < sizeof(*ch) ||
		    (*offset + (int)chk_length) > length) {
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
		/*
		 * INIT-ACK only gets the init ack "header" portion only
		 * because we don't have to process the peer's COOKIE. All
		 * others get a complete chunk.
		 */
		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
		    (ch->chunk_type == SCTP_INITIATION)) {
			/* get an init-ack chunk */
			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
			if (ch == NULL) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
		} else {
			/* For cookies and all other chunks. */
			if (chk_length > sizeof(chunk_buf)) {
				/*
				 * use just the size of the chunk buffer so
				 * the front part of our chunks fit in
				 * contiguous space up to the chunk buffer
				 * size (508 bytes). For chunks that need to
				 * get more than that they must use the
				 * sctp_m_getptr() function or other means
				 * (e.g. know how to parse mbuf chains).
				 * Cookies do this already.
				 */
				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
				    (sizeof(chunk_buf) - 4),
				    chunk_buf);
				if (ch == NULL) {
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
			} else {
				/* We can fit it all */
				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
				    chk_length, chunk_buf);
				if (ch == NULL) {
					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
			}
		}
		num_chunks++;
		/* Save off the last place we got a control from */
		if (stcb != NULL) {
			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
				/*
				 * allow last_control to be NULL if
				 * ASCONF... ASCONF processing will find the
				 * right net later
				 */
				if ((netp != NULL) && (*netp != NULL))
					stcb->asoc.last_control_chunk_from = *netp;
			}
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xB0, ch->chunk_type);
#endif

		/* check to see if this chunk required auth, but isn't */
		if ((stcb != NULL) && !sctp_auth_disable &&
		    sctp_auth_is_required_chunk(ch->chunk_type,
		    stcb->asoc.local_auth_chunks) &&
		    !stcb->asoc.authenticated) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			goto next_chunk;
		}
		switch (ch->chunk_type) {
		case SCTP_INITIATION:
			/* must be first and only chunk */
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore? */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					/*
					 * collision case where we are
					 * sending to them too
					 */
					;
				} else {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					*offset = length;
					return (NULL);
				}
			}
			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
			    (num_chunks > 1) ||
			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
				/* INIT that is too big, or not alone in the packet */
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((stcb != NULL) &&
			    (SCTP_GET_STATE(&stcb->asoc) ==
			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
				/* peer restarted while we are shutting down; re-send ack */
				sctp_send_shutdown_ack(stcb,
				    stcb->asoc.primary_destination);
				*offset = length;
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (netp) {
				sctp_handle_init(m, iphlen, *offset, sh,
				    (struct sctp_init_chunk *)ch, inp,
				    stcb, *netp, &abort_no_unlock, vrf_id);
			}
			if (abort_no_unlock)
				return (NULL);

			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
			break;
		case SCTP_PAD_CHUNK:
			break;
		case SCTP_INITIATION_ACK:
			/* must be first and only chunk */
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					;
				} else {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					*offset = length;
					if (stcb) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
						so = SCTP_INP_SO(inp);
						atomic_add_int(&stcb->asoc.refcnt, 1);
						SCTP_TCB_UNLOCK(stcb);
						SCTP_SOCKET_LOCK(so, 1);
						SCTP_TCB_LOCK(stcb);
						atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
						SCTP_SOCKET_UNLOCK(so, 1);
#endif
					}
					return (NULL);
				}
			}
			if ((num_chunks > 1) ||
			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((netp) && (*netp)) {
				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
			} else {
				ret = -1;
			}
			/*
			 * Special case, I must call the output routine to
			 * get the cookie echoed
			 */
			if (abort_no_unlock)
				return (NULL);

			if ((stcb) && ret == 0)
				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
			break;
		case SCTP_SELECTIVE_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
			SCTP_STAT_INCR(sctps_recvsacks);
			{
				struct sctp_sack_chunk *sack;
				int abort_now = 0;
				uint32_t a_rwnd, cum_ack;
				uint16_t num_seg;
				int nonce_sum_flag;

				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
			ignore_sack:
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
					/*-
					 * If we have sent a shutdown-ack, we will pay no
					 * attention to a sack sent in to us since
					 * we don't care anymore.
					 */
					goto ignore_sack;
				}
				sack = (struct sctp_sack_chunk *)ch;
				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
				cum_ack = ntohl(sack->sack.cum_tsn_ack);
				num_seg = ntohs(sack->sack.num_gap_ack_blks);
				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
				    cum_ack,
				    num_seg,
				    a_rwnd
				    );
				stcb->asoc.seen_a_sack_this_pkt = 1;
				if ((stcb->asoc.pr_sctp_cnt == 0) &&
				    (num_seg == 0) &&
				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
				    (cum_ack == stcb->asoc.last_acked_seq)) &&
				    (stcb->asoc.saw_sack_with_frags == 0) &&
				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
				    ) {
					/*
					 * We have a SIMPLE sack having no
					 * prior segments and data on sent
					 * queue to be acked.. Use the
					 * faster path sack processing. We
					 * also allow window update sacks
					 * with no missing segments to go
					 * this way too.
					 */
					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
					    &abort_now);
				} else {
					if (netp && *netp)
						sctp_handle_sack(m, *offset,
						    sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
				}
				if (abort_now) {
					/* ABORT signal from sack processing */
					*offset = length;
					return (NULL);
				}
			}
			break;
		case SCTP_HEARTBEAT_REQUEST:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
			if ((stcb) && netp && *netp) {
				SCTP_STAT_INCR(sctps_recvheartbeat);
				sctp_send_heartbeat_ack(stcb, m, *offset,
				    chk_length, *netp);

				/* He's alive so give him credit */
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
			}
			break;
		case SCTP_HEARTBEAT_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
				/* It's not ours */
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			/* He's alive so give him credit */
			if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			SCTP_STAT_INCR(sctps_recvheartbeatack);
			if (netp && *netp)
				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
				    stcb, *netp);
			break;
		case SCTP_ABORT_ASSOCIATION:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
			    stcb);
			if ((stcb) && netp && *netp)
				sctp_handle_abort((struct sctp_abort_chunk *)ch,
				    stcb, *netp);
			*offset = length;
			return (NULL);
			break;
		case SCTP_SHUTDOWN:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
			    stcb);
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (netp && *netp) {
				int abort_flag = 0;

				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
				    stcb, *netp, &abort_flag);
				if (abort_flag) {
					*offset = length;
					return (NULL);
				}
			}
			break;
		case SCTP_SHUTDOWN_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
			if ((stcb) && (netp) && (*netp))
				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
			*offset = length;
			return (NULL);
			break;

		case SCTP_OPERATION_ERROR:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {

				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_COOKIE_ECHO:
			SCTPDBG(SCTP_DEBUG_INPUT3,
			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
				;
			} else {
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* We are not interested anymore */
					*offset = length;
					return (NULL);
				}
			}
			/*
			 * First are we accepting? We do this again here
			 * since it is possible that a previous endpoint
			 * WAS listening responded to a INIT-ACK and then
			 * closed. We opened and bound.. and are now no
			 * longer listening.
			 */

			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
				/* listen queue is full; optionally abort with OUT_OF_RESC */
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
				    (sctp_abort_if_one_2_one_hits_limit)) {
					struct mbuf *oper;
					struct sctp_paramhdr *phdr;

					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr);
						phdr = mtod(oper,
						    struct sctp_paramhdr *);
						phdr->param_type =
						    htons(SCTP_CAUSE_OUT_OF_RESC);
						phdr->param_length =
						    htons(sizeof(struct sctp_paramhdr));
					}
					sctp_abort_association(inp, stcb, m,
					    iphlen, sh, oper, vrf_id);
				}
				*offset = length;
				return (NULL);
			} else {
				struct mbuf *ret_buf;
				struct sctp_inpcb *linp;

				if (stcb) {
					linp = NULL;
				} else {
					linp = inp;
				}

				if (linp) {
					SCTP_ASOC_CREATE_LOCK(linp);
				}
				if (netp) {
					ret_buf =
					    sctp_handle_cookie_echo(m, iphlen,
					    *offset, sh,
					    (struct sctp_cookie_echo_chunk *)ch,
					    &inp, &stcb, netp,
					    auth_skipped,
					    auth_offset,
					    auth_len,
					    &locked_tcb,
					    vrf_id);
				} else {
					ret_buf = NULL;
				}
				if (linp) {
					SCTP_ASOC_CREATE_UNLOCK(linp);
				}
				if (ret_buf == NULL) {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					SCTPDBG(SCTP_DEBUG_INPUT3,
					    "GAK, null buffer\n");
					auth_skipped = 0;
					*offset = length;
					return (NULL);
				}
				/* if AUTH skipped, see if it verified... */
				if (auth_skipped) {
					got_auth = 1;
					auth_skipped = 0;
				}
				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
					/*
					 * Restart the timer if we have
					 * pending data
					 */
					struct sctp_tmit_chunk *chk;

					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
					if (chk) {
						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
						    stcb->sctp_ep, stcb,
						    chk->whoTo);
					}
				}
			}
			break;
		case SCTP_COOKIE_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					;
				} else if (stcb) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(inp);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					*offset = length;
					return (NULL);
				}
			}
			/* He's alive so give him credit */
			if ((stcb) && netp && *netp) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
			}
			break;
		case SCTP_ECN_ECHO:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
			/* He's alive so give him credit */
			if ((stcb == NULL) || (chk_length != sizeof(struct
			    sctp_ecne_chunk))) {
				/* It's not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (stcb) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
				    stcb);
			}
			break;
		case SCTP_ECN_CWR:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
			/* He's alive so give him credit */
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
				/* It's not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (stcb) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
			}
			break;
		case SCTP_SHUTDOWN_COMPLETE:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
			/* must be first and only chunk */
			if ((num_chunks > 1) ||
			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((stcb) && netp && *netp) {
				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
				    stcb, *netp);
			}
			*offset = length;
			return (NULL);
			break;
		case SCTP_ASCONF:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
			/* He's alive so give him credit */
			if (stcb) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_asconf(m, *offset,
				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
				asconf_cnt++;
			}
			break;
		case SCTP_ASCONF_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
				/* It's not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if ((stcb) && netp && *netp) {
				/* He's alive so give him credit */
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_asconf_ack(m, *offset,
				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
				if (abort_no_unlock)
					return (NULL);
			}
			break;
		case SCTP_FORWARD_CUM_TSN:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
				/* It's not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			/* He's alive so give him credit */
			if (stcb) {
				int abort_flag = 0;

				stcb->asoc.overall_error_count = 0;
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				*fwd_tsn_seen = 1;
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* We are not interested anymore */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(inp);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					*offset = length;
					return (NULL);
				}
				sctp_handle_forward_tsn(stcb,
				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
				if (abort_flag) {
					*offset = length;
					return (NULL);
				} else {
					if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
						    stcb->asoc.overall_error_count,
						    0,
						    SCTP_FROM_SCTP_INPUT,
						    __LINE__);
					}
					stcb->asoc.overall_error_count = 0;
				}

			}
			break;
		case SCTP_STREAM_RESET:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
				/* It's not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				so = SCTP_INP_SO(inp);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
				*offset = length;
				return (NULL);
			}
			if (stcb->asoc.peer_supports_strreset == 0) {
				/*
				 * hmm, peer should have announced this, but
				 * we will turn it on since he is sending us
				 * a stream reset.
				 */
				stcb->asoc.peer_supports_strreset = 1;
			}
			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
				/* stop processing */
				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_PACKET_DROPPED:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
			/* re-get it all please */
			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
				/* It's not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (ch && (stcb) && netp && (*netp)) {
				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
				    stcb, *netp,
				    min(chk_length, (sizeof(chunk_buf) - 4)));

			}
			break;

		case SCTP_AUTHENTICATION:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
			if (sctp_auth_disable)
				goto unknown_chunk;

			if (stcb == NULL) {
				/* save the first AUTH for later processing */
				if (auth_skipped == 0) {
					auth_offset = *offset;
					auth_len = chk_length;
					auth_skipped = 1;
				}
				/* skip this chunk (temporarily) */
				goto next_chunk;
			}
			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
			    (chk_length > (sizeof(struct sctp_auth_chunk) +
			    SCTP_AUTH_DIGEST_LEN_MAX))) {
				/* It's not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (got_auth == 1) {
				/* skip this chunk... it's already auth'd */
				goto next_chunk;
			}
			got_auth = 1;
			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
			    m, *offset)) {
				/* auth HMAC failed so dump the packet */
				*offset = length;
				return (stcb);
			} else {
				/* remaining chunks are HMAC checked */
				stcb->asoc.authenticated = 1;
			}
			break;

		default:
	unknown_chunk:
			/* it's an unknown chunk! */
			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
				/* upper two type bits 01/11: report back to the peer */
				struct mbuf *mm;
				struct sctp_paramhdr *phd;

				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (mm) {
					phd = mtod(mm, struct sctp_paramhdr *);
					/*
					 * We cheat and use param type since
					 * we did not bother to define an
					 * error cause struct. They are the
					 * same basic format with different
					 * names.
					 */
					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
					phd->param_length = htons(chk_length + sizeof(*phd));
					SCTP_BUF_LEN(mm) = sizeof(*phd);
					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
					    M_DONTWAIT);
					if (SCTP_BUF_NEXT(mm)) {
						sctp_queue_op_err(stcb, mm);
					} else {
						/* copy failed; drop the report */
						sctp_m_freem(mm);
					}
				}
			}
			if ((ch->chunk_type & 0x80) == 0) {
				/* discard this packet */
				*offset = length;
				return (stcb);
			}	/* else skip this bad chunk and continue... */
			break;
		}		/* switch (ch->chunk_type) */


next_chunk:
		/* get the next chunk */
		*offset += SCTP_SIZE32(chk_length);
		if (*offset >= length) {
			/* no more data left in the mbuf chain */
			break;
		}
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_chunkhdr), chunk_buf);
		if (ch == NULL) {
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			*offset = length;
			return (NULL);
		}
	}			/* while */

	if (asconf_cnt > 0 && stcb != NULL) {
		sctp_send_asconf_ack(stcb);
	}
	return (stcb);
}


/*
 * Process the ECN bits we have something set so we must look to see if it is
 * ECN(0) or ECN(1) or CE
 */
static void
sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		;
	} else if ((ecn_bits & SCTP_ECT1_BIT) ==
SCTP_ECT1_BIT) {
		/*
		 * we only add to the nonce sum for ECT1, ECT0 does not
		 * change the NS bit (that we have yet to find a way to send
		 * it yet).
		 */

		/* ECN Nonce stuff */
		stcb->asoc.receiver_nonce_sum++;
		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;

		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	}
}

/*
 * Second half of ECN processing: on a CE mark, queue an ECN-ECHO back to
 * the peer (once per advance of last_echo_tsn).
 */
static void
sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn, uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		/*
		 * we possibly must notify the sender that a congestion
		 * window reduction is in order. We do this by adding a ECNE
		 * chunk to the output chunk queue. The incoming CWR will
		 * remove this chunk.
		 */
		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
		    MAX_TSN)) {
			/* Yep, we need to add a ECNE */
			sctp_send_ecn_echo(stcb, net, high_tsn);
			stcb->asoc.last_echo_tsn = high_tsn;
		}
	}
}

#ifdef INVARIANTS
/*
 * Debug-only (INVARIANTS) sanity check: panic if any tcb on this endpoint
 * is still locked by us when input processing returns.
 */
static void
sctp_validate_no_locks(struct sctp_inpcb *inp)
{
	struct sctp_tcb *stcb;

	LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
		if (mtx_owned(&stcb->tcb_mtx)) {
			panic("Own lock on stcb at return from input");
		}
	}
}

#endif

/*
 * common input chunk processing (v4 and v6)
 */
void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits, uint32_t vrf_id)
{
	/*
	 * Control chunk processing
	 */
	uint32_t high_tsn;
	int fwd_tsn_seen = 0, data_processed = 0;
	struct mbuf *m = *mm;
	int abort_flag = 0;
	int un_sent;

	SCTP_STAT_INCR(sctps_recvdatagrams);
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 1);
	sctp_auditing(0, inp, stcb, net);
#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d stcb:%p\n",
	    m, iphlen, offset, stcb);
	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
		    stcb, stcb->asoc.state);

		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/*-
			 * If we hit here, we had a ref count
			 * up when the assoc was aborted and the
			 * timer is clearing out the assoc, we should
			 * NOT respond to any packet.. its OOTB.
4968 */ 4969 SCTP_TCB_UNLOCK(stcb); 4970 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 4971 vrf_id); 4972 goto out_now; 4973 } 4974 } 4975 if (IS_SCTP_CONTROL(ch)) { 4976 /* process the control portion of the SCTP packet */ 4977 /* sa_ignore NO_NULL_CHK */ 4978 stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch, 4979 inp, stcb, &net, &fwd_tsn_seen, vrf_id); 4980 if (stcb) { 4981 /* 4982 * This covers us if the cookie-echo was there and 4983 * it changes our INP. 4984 */ 4985 inp = stcb->sctp_ep; 4986 } 4987 } else { 4988 /* 4989 * no control chunks, so pre-process DATA chunks (these 4990 * checks are taken care of by control processing) 4991 */ 4992 4993 /* 4994 * if DATA only packet, and auth is required, then punt... 4995 * can't have authenticated without any AUTH (control) 4996 * chunks 4997 */ 4998 if ((stcb != NULL) && !sctp_auth_disable && 4999 sctp_auth_is_required_chunk(SCTP_DATA, 5000 stcb->asoc.local_auth_chunks)) { 5001 /* "silently" ignore */ 5002 SCTP_STAT_INCR(sctps_recvauthmissing); 5003 SCTP_TCB_UNLOCK(stcb); 5004 goto out_now; 5005 } 5006 if (stcb == NULL) { 5007 /* out of the blue DATA chunk */ 5008 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 5009 vrf_id); 5010 goto out_now; 5011 } 5012 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) { 5013 /* v_tag mismatch! */ 5014 SCTP_STAT_INCR(sctps_badvtag); 5015 SCTP_TCB_UNLOCK(stcb); 5016 goto out_now; 5017 } 5018 } 5019 5020 if (stcb == NULL) { 5021 /* 5022 * no valid TCB for this packet, or we found it's a bad 5023 * packet while processing control, or we're done with this 5024 * packet (done or skip rest of data), so we drop it... 5025 */ 5026 goto out_now; 5027 } 5028 /* 5029 * DATA chunk processing 5030 */ 5031 /* plow through the data chunks while length > offset */ 5032 5033 /* 5034 * Rest should be DATA only. Check authentication state if AUTH for 5035 * DATA is required. 
5036 */ 5037 if ((length > offset) && (stcb != NULL) && !sctp_auth_disable && 5038 sctp_auth_is_required_chunk(SCTP_DATA, 5039 stcb->asoc.local_auth_chunks) && 5040 !stcb->asoc.authenticated) { 5041 /* "silently" ignore */ 5042 SCTP_STAT_INCR(sctps_recvauthmissing); 5043 SCTPDBG(SCTP_DEBUG_AUTH1, 5044 "Data chunk requires AUTH, skipped\n"); 5045 goto trigger_send; 5046 } 5047 if (length > offset) { 5048 int retval; 5049 5050 /* 5051 * First check to make sure our state is correct. We would 5052 * not get here unless we really did have a tag, so we don't 5053 * abort if this happens, just dump the chunk silently. 5054 */ 5055 switch (SCTP_GET_STATE(&stcb->asoc)) { 5056 case SCTP_STATE_COOKIE_ECHOED: 5057 /* 5058 * we consider data with valid tags in this state 5059 * shows us the cookie-ack was lost. Imply it was 5060 * there. 5061 */ 5062 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) { 5063 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5064 stcb->asoc.overall_error_count, 5065 0, 5066 SCTP_FROM_SCTP_INPUT, 5067 __LINE__); 5068 } 5069 stcb->asoc.overall_error_count = 0; 5070 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net); 5071 break; 5072 case SCTP_STATE_COOKIE_WAIT: 5073 /* 5074 * We consider OOTB any data sent during asoc setup. 5075 */ 5076 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 5077 vrf_id); 5078 SCTP_TCB_UNLOCK(stcb); 5079 goto out_now; 5080 /* sa_ignore NOTREACHED */ 5081 break; 5082 case SCTP_STATE_EMPTY: /* should not happen */ 5083 case SCTP_STATE_INUSE: /* should not happen */ 5084 case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */ 5085 case SCTP_STATE_SHUTDOWN_ACK_SENT: 5086 default: 5087 SCTP_TCB_UNLOCK(stcb); 5088 goto out_now; 5089 /* sa_ignore NOTREACHED */ 5090 break; 5091 case SCTP_STATE_OPEN: 5092 case SCTP_STATE_SHUTDOWN_SENT: 5093 break; 5094 } 5095 /* take care of ECN, part 1. 
*/ 5096 if (stcb->asoc.ecn_allowed && 5097 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) { 5098 sctp_process_ecn_marked_a(stcb, net, ecn_bits); 5099 } 5100 /* plow through the data chunks while length > offset */ 5101 retval = sctp_process_data(mm, iphlen, &offset, length, sh, 5102 inp, stcb, net, &high_tsn); 5103 if (retval == 2) { 5104 /* 5105 * The association aborted, NO UNLOCK needed since 5106 * the association is destroyed. 5107 */ 5108 goto out_now; 5109 } 5110 data_processed = 1; 5111 if (retval == 0) { 5112 /* take care of ecn part 2. */ 5113 if (stcb->asoc.ecn_allowed && 5114 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) { 5115 sctp_process_ecn_marked_b(stcb, net, high_tsn, 5116 ecn_bits); 5117 } 5118 } 5119 /* 5120 * Anything important needs to have been m_copy'ed in 5121 * process_data 5122 */ 5123 } 5124 if ((data_processed == 0) && (fwd_tsn_seen)) { 5125 int was_a_gap = 0; 5126 5127 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 5128 stcb->asoc.cumulative_tsn, MAX_TSN)) { 5129 /* there was a gap before this data was processed */ 5130 was_a_gap = 1; 5131 } 5132 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 5133 if (abort_flag) { 5134 /* Again, we aborted so NO UNLOCK needed */ 5135 goto out_now; 5136 } 5137 } 5138 /* trigger send of any chunks in queue... 
*/ 5139 trigger_send: 5140 #ifdef SCTP_AUDITING_ENABLED 5141 sctp_audit_log(0xE0, 2); 5142 sctp_auditing(1, inp, stcb, net); 5143 #endif 5144 SCTPDBG(SCTP_DEBUG_INPUT1, 5145 "Check for chunk output prw:%d tqe:%d tf=%d\n", 5146 stcb->asoc.peers_rwnd, 5147 TAILQ_EMPTY(&stcb->asoc.control_send_queue), 5148 stcb->asoc.total_flight); 5149 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 5150 5151 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) || 5152 ((un_sent) && 5153 (stcb->asoc.peers_rwnd > 0 || 5154 (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) { 5155 SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n"); 5156 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 5157 SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n"); 5158 } 5159 #ifdef SCTP_AUDITING_ENABLED 5160 sctp_audit_log(0xE0, 3); 5161 sctp_auditing(2, inp, stcb, net); 5162 #endif 5163 SCTP_TCB_UNLOCK(stcb); 5164 out_now: 5165 #ifdef INVARIANTS 5166 sctp_validate_no_locks(inp); 5167 #endif 5168 return; 5169 } 5170 5171 5172 5173 void 5174 sctp_input(i_pak, off) 5175 struct mbuf *i_pak; 5176 int off; 5177 5178 { 5179 #ifdef SCTP_MBUF_LOGGING 5180 struct mbuf *mat; 5181 5182 #endif 5183 struct mbuf *m; 5184 int iphlen; 5185 uint32_t vrf_id = 0; 5186 uint8_t ecn_bits; 5187 struct ip *ip; 5188 struct sctphdr *sh; 5189 struct sctp_inpcb *inp = NULL; 5190 5191 uint32_t check, calc_check; 5192 struct sctp_nets *net; 5193 struct sctp_tcb *stcb = NULL; 5194 struct sctp_chunkhdr *ch; 5195 int refcount_up = 0; 5196 int length, mlen, offset; 5197 5198 5199 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) { 5200 SCTP_RELEASE_PKT(i_pak); 5201 return; 5202 } 5203 mlen = SCTP_HEADER_LEN(i_pak); 5204 iphlen = off; 5205 m = SCTP_HEADER_TO_CHAIN(i_pak); 5206 5207 net = NULL; 5208 SCTP_STAT_INCR(sctps_recvpackets); 5209 SCTP_STAT_INCR_COUNTER64(sctps_inpackets); 5210 5211 5212 #ifdef SCTP_MBUF_LOGGING 5213 /* Log in any input mbufs */ 5214 if (sctp_logging_level & 
SCTP_MBUF_LOGGING_ENABLE) { 5215 mat = m; 5216 while (mat) { 5217 if (SCTP_BUF_IS_EXTENDED(mat)) { 5218 sctp_log_mb(mat, SCTP_MBUF_INPUT); 5219 } 5220 mat = SCTP_BUF_NEXT(mat); 5221 } 5222 } 5223 #endif 5224 #ifdef SCTP_PACKET_LOGGING 5225 if (sctp_logging_level & SCTP_LAST_PACKET_TRACING) 5226 sctp_packet_log(m, mlen); 5227 #endif 5228 /* 5229 * Must take out the iphlen, since mlen expects this (only effect lb 5230 * case) 5231 */ 5232 mlen -= iphlen; 5233 5234 /* 5235 * Get IP, SCTP, and first chunk header together in first mbuf. 5236 */ 5237 ip = mtod(m, struct ip *); 5238 offset = iphlen + sizeof(*sh) + sizeof(*ch); 5239 if (SCTP_BUF_LEN(m) < offset) { 5240 if ((m = m_pullup(m, offset)) == 0) { 5241 SCTP_STAT_INCR(sctps_hdrops); 5242 return; 5243 } 5244 ip = mtod(m, struct ip *); 5245 } 5246 sh = (struct sctphdr *)((caddr_t)ip + iphlen); 5247 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh)); 5248 SCTPDBG(SCTP_DEBUG_INPUT1, 5249 "sctp_input() length:%d iphlen:%d\n", mlen, iphlen); 5250 5251 /* SCTP does not allow broadcasts or multicasts */ 5252 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { 5253 goto bad; 5254 } 5255 if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) { 5256 /* 5257 * We only look at broadcast if its a front state, All 5258 * others we will not have a tcb for anyway. 
5259 */ 5260 goto bad; 5261 } 5262 /* validate SCTP checksum */ 5263 check = sh->checksum; /* save incoming checksum */ 5264 if ((check == 0) && (sctp_no_csum_on_loopback) && 5265 ((ip->ip_src.s_addr == ip->ip_dst.s_addr) || 5266 (SCTP_IS_IT_LOOPBACK(m))) 5267 ) { 5268 goto sctp_skip_csum_4; 5269 } 5270 sh->checksum = 0; /* prepare for calc */ 5271 calc_check = sctp_calculate_sum(m, &mlen, iphlen); 5272 if (calc_check != check) { 5273 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n", 5274 calc_check, check, m, mlen, iphlen); 5275 5276 stcb = sctp_findassociation_addr(m, iphlen, 5277 offset - sizeof(*ch), 5278 sh, ch, &inp, &net, 5279 vrf_id); 5280 if ((inp) && (stcb)) { 5281 sctp_send_packet_dropped(stcb, net, m, iphlen, 1); 5282 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED); 5283 } else if ((inp != NULL) && (stcb == NULL)) { 5284 refcount_up = 1; 5285 } 5286 SCTP_STAT_INCR(sctps_badsum); 5287 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors); 5288 goto bad; 5289 } 5290 sh->checksum = calc_check; 5291 sctp_skip_csum_4: 5292 /* destination port of 0 is illegal, based on RFC2960. */ 5293 if (sh->dest_port == 0) { 5294 SCTP_STAT_INCR(sctps_hdrops); 5295 goto bad; 5296 } 5297 /* validate mbuf chain length with IP payload length */ 5298 if (mlen < (ip->ip_len - iphlen)) { 5299 SCTP_STAT_INCR(sctps_hdrops); 5300 goto bad; 5301 } 5302 /* 5303 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants 5304 * IP/SCTP/first chunk header... 5305 */ 5306 stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch), 5307 sh, ch, &inp, &net, vrf_id); 5308 /* inp's ref-count increased && stcb locked */ 5309 if (inp == NULL) { 5310 struct sctp_init_chunk *init_chk, chunk_buf; 5311 5312 SCTP_STAT_INCR(sctps_noport); 5313 #ifdef ICMP_BANDLIM 5314 /* 5315 * we use the bandwidth limiting to protect against sending 5316 * too many ABORTS all at once. 
In this case these count the 5317 * same as an ICMP message. 5318 */ 5319 if (badport_bandlim(0) < 0) 5320 goto bad; 5321 #endif /* ICMP_BANDLIM */ 5322 SCTPDBG(SCTP_DEBUG_INPUT1, 5323 "Sending a ABORT from packet entry!\n"); 5324 if (ch->chunk_type == SCTP_INITIATION) { 5325 /* 5326 * we do a trick here to get the INIT tag, dig in 5327 * and get the tag from the INIT and put it in the 5328 * common header. 5329 */ 5330 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 5331 iphlen + sizeof(*sh), sizeof(*init_chk), 5332 (uint8_t *) & chunk_buf); 5333 if (init_chk != NULL) 5334 sh->v_tag = init_chk->init.initiate_tag; 5335 } 5336 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 5337 sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id); 5338 goto bad; 5339 } 5340 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) { 5341 goto bad; 5342 } 5343 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) 5344 sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id); 5345 goto bad; 5346 } else if (stcb == NULL) { 5347 refcount_up = 1; 5348 } 5349 #ifdef IPSEC 5350 /* 5351 * I very much doubt any of the IPSEC stuff will work but I have no 5352 * idea, so I will leave it in place. 
5353 */ 5354 if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) { 5355 ipsec4stat.in_polvio++; 5356 SCTP_STAT_INCR(sctps_hdrops); 5357 goto bad; 5358 } 5359 #endif /* IPSEC */ 5360 5361 /* 5362 * common chunk processing 5363 */ 5364 length = ip->ip_len + iphlen; 5365 offset -= sizeof(struct sctp_chunkhdr); 5366 5367 ecn_bits = ip->ip_tos; 5368 5369 /* sa_ignore NO_NULL_CHK */ 5370 sctp_common_input_processing(&m, iphlen, offset, length, sh, ch, 5371 inp, stcb, net, ecn_bits, vrf_id); 5372 /* inp's ref-count reduced && stcb unlocked */ 5373 if (m) { 5374 sctp_m_freem(m); 5375 } 5376 if ((inp) && (refcount_up)) { 5377 /* reduce ref-count */ 5378 SCTP_INP_DECR_REF(inp); 5379 } 5380 return; 5381 bad: 5382 if (stcb) { 5383 SCTP_TCB_UNLOCK(stcb); 5384 } 5385 if ((inp) && (refcount_up)) { 5386 /* reduce ref-count */ 5387 SCTP_INP_DECR_REF(inp); 5388 } 5389 if (m) { 5390 sctp_m_freem(m); 5391 } 5392 return; 5393 } 5394