1 /*- 2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_timer.h>
#include <netinet/udp.h>



/*
 * Stop every pending COOKIE (and INIT) retransmission timer on all of the
 * association's destinations.  Caller must hold the TCB lock.
 */
static void
sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
{
    struct sctp_nets *net;

    /*
     * This now not only stops all cookie timers it also stops any INIT
     * timers as well. This will make sure that the timers are stopped
     * in all collision cases.
     */
    SCTP_TCB_LOCK_ASSERT(stcb);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
            sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
                stcb->sctp_ep,
                stcb,
                net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
        } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
            sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
                stcb->sctp_ep,
                stcb,
                net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
        }
    }
}

/*
 * INIT handler: validate the incoming INIT chunk (length, initiate tag,
 * a_rwnd, stream counts, AUTH parameters) and either abort the association
 * attempt or reply with an INIT-ACK carrying a cookie.  On an abort with a
 * live stcb, *abort_no_unlock is set so the caller skips the unlock.
 */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, uint16_t port)
{
    struct sctp_init *init;
    struct mbuf *op_err;
    uint32_t init_limit;

    SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
        stcb);
    if (stcb == NULL) {
        SCTP_INP_RLOCK(inp);
        if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
            goto outnow;
        }
    }
    op_err = NULL;
    init = &cp->init;
    /* First are we accepting? */
    if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
        SCTPDBG(SCTP_DEBUG_INPUT2,
            "sctp_handle_init: Abort, so_qlimit:%d\n",
            inp->sctp_socket->so_qlimit);
        /*
         * FIX ME ?? What about TCP model and we have a
         * match/restart case? Actually no fix is needed. the lookup
         * will always find the existing assoc so stcb would not be
         * NULL. It may be questionable to do this since we COULD
         * just send back the INIT-ACK and hope that the app did
         * accept()'s by the time the COOKIE was sent. But there is
         * a price to pay for COOKIE generation and I don't want to
         * pay it on the chance that the app will actually do some
         * accepts(). The App just loses and should NOT be in this
         * state :-)
         */
        sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
            vrf_id, port);
        if (stcb)
            *abort_no_unlock = 1;
        goto outnow;
    }
    if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
        /* Invalid length */
        op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
        sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
            vrf_id, port);
        if (stcb)
            *abort_no_unlock = 1;
        goto outnow;
    }
    /* validate parameters */
    if (init->initiate_tag == 0) {
        /* protocol error... send abort */
        op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
        sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
            vrf_id, port);
        if (stcb)
            *abort_no_unlock = 1;
        goto outnow;
    }
    if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
        /* invalid parameter... send abort */
        op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
        sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
            vrf_id, port);
        if (stcb)
            *abort_no_unlock = 1;
        goto outnow;
    }
    if (init->num_inbound_streams == 0) {
        /* protocol error... send abort */
        op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
        sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
            vrf_id, port);
        if (stcb)
            *abort_no_unlock = 1;
        goto outnow;
    }
    if (init->num_outbound_streams == 0) {
        /* protocol error... send abort */
        op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
        sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
            vrf_id, port);
        if (stcb)
            *abort_no_unlock = 1;
        goto outnow;
    }
    init_limit = offset + ntohs(cp->ch.chunk_length);
    if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
        init_limit)) {
        /* auth parameter(s) error... send abort */
        sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, port);
        if (stcb)
            *abort_no_unlock = 1;
        goto outnow;
    }
    /* send an INIT-ACK w/cookie */
    SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
    sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, port,
        ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
outnow:
    if (stcb == NULL) {
        SCTP_INP_RUNLOCK(inp);
    }
}

/*
 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
 */

int
sctp_is_there_unsent_data(struct sctp_tcb *stcb)
{
    int unsent_data = 0;
    struct sctp_stream_queue_pending *sp;
    struct sctp_stream_out *strq;
    struct sctp_association *asoc;

    /*
     * This function returns the number of streams that have true unsent
     * data on them. Note that as it looks through it will clean up any
     * places that have old data that has been sent but left at top of
     * stream queue.
     */
    asoc = &stcb->asoc;
    SCTP_TCB_SEND_LOCK(stcb);
    if (!TAILQ_EMPTY(&asoc->out_wheel)) {
        /* Check to see if some data queued */
        TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) {
    is_there_another:
            /* sa_ignore FREED_MEMORY */
            sp = TAILQ_FIRST(&strq->outqueue);
            if (sp == NULL) {
                continue;
            }
            if ((sp->msg_is_complete) &&
                (sp->length == 0) &&
                (sp->sender_all_done)) {
                /*
                 * We are doing deferred cleanup. Last time
                 * through when we took all the data the
                 * sender_all_done was not set.
                 */
                if (sp->put_last_out == 0) {
                    SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
                    SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
                        sp->sender_all_done,
                        sp->length,
                        sp->msg_is_complete,
                        sp->put_last_out);
                }
                atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
                TAILQ_REMOVE(&strq->outqueue, sp, next);
                sctp_free_remote_addr(sp->net);
                if (sp->data) {
                    sctp_m_freem(sp->data);
                    sp->data = NULL;
                }
                sctp_free_a_strmoq(stcb, sp);
                goto is_there_another;
            } else {
                unsent_data++;
                continue;
            }
        }
    }
    SCTP_TCB_SEND_UNLOCK(stcb);
    return (unsent_data);
}

/*
 * Absorb the peer's INIT/INIT-ACK parameters into the association:
 * verification tag, peer rwnd, stream counts and initial TSNs.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    struct sctp_init *init;
    struct sctp_association *asoc;
    struct sctp_nets *lnet;
    unsigned int i;

    init = &cp->init;
    asoc = &stcb->asoc;
    /* save off parameters */
    asoc->peer_vtag = ntohl(init->initiate_tag);
    asoc->peers_rwnd = ntohl(init->a_rwnd);
    if (TAILQ_FIRST(&asoc->nets)) {
        /* update any ssthresh's that may have a default */
        TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
            lnet->ssthresh = asoc->peers_rwnd;

            if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
                sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
            }
        }
    }
    SCTP_TCB_SEND_LOCK(stcb);
    if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
        unsigned int newcnt;
        struct sctp_stream_out *outs;
        struct sctp_stream_queue_pending *sp;

        /* cut back on number of streams */
        newcnt = ntohs(init->num_inbound_streams);
        /* This if is probably not needed but I am cautious */
        if (asoc->strmout) {
            /* First make sure no data chunks are trapped */
            for (i = newcnt; i < asoc->pre_open_streams; i++) {
                outs = &asoc->strmout[i];
                sp = TAILQ_FIRST(&outs->outqueue);
                while (sp) {
                    TAILQ_REMOVE(&outs->outqueue, sp,
                        next);
                    asoc->stream_queue_cnt--;
                    sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
                        stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
                        sp, SCTP_SO_NOT_LOCKED);
                    if (sp->data) {
                        sctp_m_freem(sp->data);
                        sp->data = NULL;
                    }
                    sctp_free_remote_addr(sp->net);
                    sp->net = NULL;
                    /* Free the chunk */
                    SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
                        sp, stcb);

                    sctp_free_a_strmoq(stcb, sp);
                    /* sa_ignore FREED_MEMORY */
                    sp = TAILQ_FIRST(&outs->outqueue);
                }
            }
        }
        /* cut back the count and abandon the upper streams */
        asoc->pre_open_streams = newcnt;
    }
    SCTP_TCB_SEND_UNLOCK(stcb);
    asoc->streamoutcnt = asoc->pre_open_streams;
    /* init tsn's */
    asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
    }
    /* This is the next one we expect */
    asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

    asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
    asoc->cumulative_tsn = asoc->asconf_seq_in;
    asoc->last_echo_tsn = asoc->asconf_seq_in;
    asoc->advanced_peer_ack_point = asoc->last_acked_seq;
    /* open the requested streams */

    if (asoc->strmin != NULL) {
        /* Free the old ones */
        struct sctp_queued_to_read *ctl;

        for (i = 0; i < asoc->streamincnt; i++) {
            ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
            while (ctl) {
                TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
                sctp_free_remote_addr(ctl->whoFrom);
                ctl->whoFrom = NULL;
                sctp_m_freem(ctl->data);
                ctl->data = NULL;
                sctp_free_a_readq(stcb, ctl);
                ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
            }
        }
        SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
    }
    asoc->streamincnt = ntohs(init->num_outbound_streams);
    if (asoc->streamincnt > MAX_SCTP_STREAMS) {
        asoc->streamincnt = MAX_SCTP_STREAMS;
    }
    SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
        sizeof(struct sctp_stream_in), SCTP_M_STRMI);
    if (asoc->strmin == NULL) {
        /* we didn't get memory for the streams! */
        SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
        return (-1);
    }
    for (i = 0; i < asoc->streamincnt; i++) {
        asoc->strmin[i].stream_no = i;
        asoc->strmin[i].last_sequence_delivered = 0xffff;
        /*
         * U-stream ranges will be set when the cookie is unpacked.
         * Or for the INIT sender they are un set (if pr-sctp not
         * supported) when the INIT-ACK arrives.
         */
        TAILQ_INIT(&asoc->strmin[i].inqueue);
        asoc->strmin[i].delivery_started = 0;
    }
    /*
     * load_address_from_init will put the addresses into the
     * association when the COOKIE is processed or the INIT-ACK is
     * processed. Both types of COOKIE's existing and new call this
     * routine. It will remove addresses that are no longer in the
     * association (for the restarting case where addresses are
     * removed). Up front when the INIT arrives we will discard it if it
     * is a restart and new addresses have been added.
     */
    /* sa_ignore MEMLEAK */
    return (0);
}

/*
 * INIT-ACK message processing/consumption returns value < 0 on error
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
    struct sctp_association *asoc;
    struct mbuf *op_err;
    int retval, abort_flag;
    uint32_t initack_limit;

    /* First verify that we have no illegal param's */
    abort_flag = 0;
    op_err = NULL;

    op_err = sctp_arethere_unrecognized_parameters(m,
        (offset + sizeof(struct sctp_init_chunk)),
        &abort_flag, (struct sctp_chunkhdr *)cp);
    if (abort_flag) {
        /* Send an abort and notify peer */
        sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
        *abort_no_unlock = 1;
        return (-1);
    }
    asoc = &stcb->asoc;
    /* process the peer's parameters in the INIT-ACK */
    retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
    if (retval < 0) {
        return (retval);
    }
    initack_limit = offset + ntohs(cp->ch.chunk_length);
    /* load all addresses */
    if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
        (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
        NULL))) {
        /* Huh, we should abort */
        SCTPDBG(SCTP_DEBUG_INPUT1,
            "Load addresses from INIT causes an abort %d\n",
            retval);
        sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
            NULL, 0, net->port);
        *abort_no_unlock = 1;
        return (-1);
    }
    /* if the peer doesn't support asconf, flush the asconf queue */
    if (asoc->peer_supports_asconf == 0) {
        struct sctp_asconf_addr *aparam;

        while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
            /* sa_ignore FREED_MEMORY */
            aparam = TAILQ_FIRST(&asoc->asconf_queue);
            TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
            SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
        }
    }
    stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
        stcb->asoc.local_hmacs);
    if (op_err) {
        sctp_queue_op_err(stcb, op_err);
        /* queuing will steal away the mbuf chain to the out queue */
        op_err = NULL;
    }
    /* extract the cookie and queue it to "echo" it back... */
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
        sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
            stcb->asoc.overall_error_count,
            0,
            SCTP_FROM_SCTP_INPUT,
            __LINE__);
    }
    stcb->asoc.overall_error_count = 0;
    net->error_count = 0;

    /*
     * Cancel the INIT timer, We do this first before queueing the
     * cookie. We always cancel at the primary to assure that we are
     * canceling the timer started by the INIT which always goes to the
     * primary.
     */
    sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
        asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

    /* calculate the RTO */
    net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy);

    retval = sctp_send_cookie_echo(m, offset, stcb, net);
    if (retval < 0) {
        /*
         * No cookie, we probably should send a op error. But in any
         * case if there is no cookie in the INIT-ACK, we can
         * abandon the peer, it is broken.
         */
        if (retval == -3) {
            /* We abort with an error of missing mandatory param */
            op_err =
                sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
            if (op_err) {
                /*
                 * Expand beyond to include the mandatory
                 * param cookie
                 */
                struct sctp_inv_mandatory_param *mp;

                SCTP_BUF_LEN(op_err) =
                    sizeof(struct sctp_inv_mandatory_param);
                mp = mtod(op_err,
                    struct sctp_inv_mandatory_param *);
                /* Subtract the reserved param */
                mp->length =
                    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
                mp->num_param = htonl(1);
                mp->param = htons(SCTP_STATE_COOKIE);
                mp->resv = 0;
            }
            sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
                sh, op_err, 0, net->port);
            *abort_no_unlock = 1;
        }
        return (retval);
    }
    return (0);
}

/*
 * Handle an incoming HEARTBEAT-ACK: confirm the echoed destination address,
 * reset its error count, recompute its RTO, and perform any pending
 * primary-address/mobility follow-up.
 */
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
    struct sockaddr_storage store;
    struct sockaddr_in *sin;
    struct sockaddr_in6 *sin6;
    struct sctp_nets *r_net;
    struct timeval tv;
    int req_prim = 0;

    if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
        /* Invalid length */
        return;
    }
    sin = (struct sockaddr_in *)&store;
    sin6 = (struct sockaddr_in6 *)&store;

    memset(&store, 0, sizeof(store));
    if (cp->heartbeat.hb_info.addr_family == AF_INET &&
        cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
        sin->sin_family = cp->heartbeat.hb_info.addr_family;
        sin->sin_len = cp->heartbeat.hb_info.addr_len;
        sin->sin_port = stcb->rport;
        memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
            sizeof(sin->sin_addr));
    } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
        cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
        sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
        sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
        sin6->sin6_port = stcb->rport;
        memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
            sizeof(sin6->sin6_addr));
    } else {
        return;
    }
    r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
    if (r_net == NULL) {
        SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
        return;
    }
    if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
        (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
        (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
        /*
         * If it is a HB and its random value is correct we can
         * confirm the destination.
         */
        r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
        if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
            stcb->asoc.primary_destination = r_net;
            r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
            r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
            r_net = TAILQ_FIRST(&stcb->asoc.nets);
            if (r_net != stcb->asoc.primary_destination) {
                /*
                 * first one on the list is NOT the primary
                 * sctp_cmpaddr() is much more efficient if
                 * the primary is the first on the list,
                 * make it so.
                 */
                TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
                TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
            }
            req_prim = 1;
        }
        sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
            stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
    }
    r_net->error_count = 0;
    r_net->hb_responded = 1;
    tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
    tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
    if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
        r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
        r_net->dest_state |= SCTP_ADDR_REACHABLE;
        sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
            SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED);
        /* now was it the primary? if so restore */
        if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
            (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
        }
    }
    /*
     * JRS 5/14/07 - If CMT PF is on and the destination is in PF state,
     * set the destination to active state and set the cwnd to one or
     * two MTU's based on whether PF1 or PF2 is being used. If a T3
     * timer is running, for the destination, stop the timer because a
     * PF-heartbeat was received.
     */
    if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
        SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
        (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
        if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
            sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                stcb, net,
                SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
        }
        net->dest_state &= ~SCTP_ADDR_PF;
        net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
        SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
            net, net->cwnd);
    }
    /* Now lets do a RTO with this */
    r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy);
    /* Mobility adaptation */
    if (req_prim) {
        if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
            SCTP_MOBILITY_BASE) ||
            sctp_is_mobility_feature_on(stcb->sctp_ep,
            SCTP_MOBILITY_FASTHANDOFF)) &&
            sctp_is_mobility_feature_on(stcb->sctp_ep,
            SCTP_MOBILITY_PRIM_DELETED)) {

            sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
            if (sctp_is_mobility_feature_on(stcb->sctp_ep,
                SCTP_MOBILITY_FASTHANDOFF)) {
                sctp_assoc_immediate_retrans(stcb,
                    stcb->asoc.primary_destination);
            }
            if (sctp_is_mobility_feature_on(stcb->sctp_ep,
                SCTP_MOBILITY_BASE)) {
                sctp_move_chunks_from_deleted_prim(stcb,
                    stcb->asoc.primary_destination);
            }
            sctp_delete_prim_timer(stcb->sctp_ep, stcb,
                stcb->asoc.deleted_primary);
        }
    }
}

/*
 * Handle an incoming ABORT: stop timers, notify the ULP and free the TCB.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
    struct socket *so;

#endif

    SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
    if (stcb == NULL)
        return;

    /* stop any receive timers */
    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
    /* notify user of the abort and clean up... */
    sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
    /* free the tcb */
#if defined(SCTP_PANIC_ON_ABORT)
    printf("stcb:%p state:%d rport:%d net:%p\n",
        stcb, stcb->asoc.state, stcb->rport, net);
    if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
        panic("Received an ABORT");
    } else {
        printf("No panic its in state %x closed\n", stcb->asoc.state);
    }
#endif
    SCTP_STAT_INCR_COUNTER32(sctps_aborted);
    if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
        (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
        SCTP_STAT_DECR_GAUGE32(sctps_currestab);
    }
#ifdef SCTP_ASOCLOG_OF_TSNS
    sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
    so = SCTP_INP_SO(stcb->sctp_ep);
    atomic_add_int(&stcb->asoc.refcnt, 1);
    SCTP_TCB_UNLOCK(stcb);
    SCTP_SOCKET_LOCK(so, 1);
    SCTP_TCB_LOCK(stcb);
    atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
    stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
    (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
        SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
    SCTP_SOCKET_UNLOCK(so, 1);
#endif
    SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}

/*
 * Handle an incoming SHUTDOWN: process the cumulative ack it carries, move
 * to SHUTDOWN-RECEIVED, and reply with SHUTDOWN-ACK once all queued data
 * has been sent.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
    struct sctp_association *asoc;
    int some_on_streamwheel;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
    struct socket *so;

#endif

    SCTPDBG(SCTP_DEBUG_INPUT2,
        "sctp_handle_shutdown: handling SHUTDOWN\n");
    if (stcb == NULL)
        return;
    asoc = &stcb->asoc;
    if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
        (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
        return;
    }
    if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
        /* Shutdown NOT the expected size */
        return;
    } else {
        sctp_update_acked(stcb, cp, net, abort_flag);
    }
    if (asoc->control_pdapi) {
        /*
         * With a normal shutdown we assume the end of last record.
         */
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
        asoc->control_pdapi->end_added = 1;
        asoc->control_pdapi->pdapi_aborted = 1;
        asoc->control_pdapi = NULL;
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        so = SCTP_INP_SO(stcb->sctp_ep);
        atomic_add_int(&stcb->asoc.refcnt, 1);
        SCTP_TCB_UNLOCK(stcb);
        SCTP_SOCKET_LOCK(so, 1);
        SCTP_TCB_LOCK(stcb);
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
        if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
            /* assoc was freed while we were unlocked */
            SCTP_SOCKET_UNLOCK(so, 1);
            return;
        }
#endif
        sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        SCTP_SOCKET_UNLOCK(so, 1);
#endif
    }
    /* goto SHUTDOWN_RECEIVED state to block new requests */
    if (stcb->sctp_socket) {
        if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
            (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
            (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
            SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
            SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
            /*
             * notify upper layer that peer has initiated a
             * shutdown
             */
            sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

            /* reset time */
            (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
        }
    }
    if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
        /*
         * stop the shutdown timer, since we WILL move to
         * SHUTDOWN-ACK-SENT.
         */
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
    }
    /* Now is there unsent data on a stream somewhere? */
    some_on_streamwheel = sctp_is_there_unsent_data(stcb);

    if (!TAILQ_EMPTY(&asoc->send_queue) ||
        !TAILQ_EMPTY(&asoc->sent_queue) ||
        some_on_streamwheel) {
        /* By returning we will push more data out */
        return;
    } else {
        /* no outstanding data to send, so move on... */
        /* send SHUTDOWN-ACK */
        sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
        /* move to SHUTDOWN-ACK-SENT state */
        if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
            (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
            SCTP_STAT_DECR_GAUGE32(sctps_currestab);
        }
        SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
        SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
        sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
            SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
        /* start SHUTDOWN timer */
        sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
            stcb, net);
    }
}

/*
 * Handle an incoming SHUTDOWN-ACK: reply with SHUTDOWN-COMPLETE, notify
 * the ULP and free the TCB.
 */
static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
    struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
    struct socket *so;

    so = SCTP_INP_SO(stcb->sctp_ep);
#endif
    SCTPDBG(SCTP_DEBUG_INPUT2,
        "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
    if (stcb == NULL)
        return;

    asoc = &stcb->asoc;
    /* process according to association state */
    if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
        (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
        /* unexpected SHUTDOWN-ACK... so ignore... */
        SCTP_TCB_UNLOCK(stcb);
        return;
    }
    if (asoc->control_pdapi) {
        /*
         * With a normal shutdown we assume the end of last record.
         */
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
        asoc->control_pdapi->end_added = 1;
        asoc->control_pdapi->pdapi_aborted = 1;
        asoc->control_pdapi = NULL;
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        atomic_add_int(&stcb->asoc.refcnt, 1);
        SCTP_TCB_UNLOCK(stcb);
        SCTP_SOCKET_LOCK(so, 1);
        SCTP_TCB_LOCK(stcb);
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
        if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
            /* assoc was freed while we were unlocked */
            SCTP_SOCKET_UNLOCK(so, 1);
            return;
        }
#endif
        sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        SCTP_SOCKET_UNLOCK(so, 1);
#endif
    }
    /* are the queues empty? */
    if (!TAILQ_EMPTY(&asoc->send_queue) ||
        !TAILQ_EMPTY(&asoc->sent_queue) ||
        !TAILQ_EMPTY(&asoc->out_wheel)) {
        sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
    }
    /* stop the timer */
    sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
    /* send SHUTDOWN-COMPLETE */
    sctp_send_shutdown_complete(stcb, net);
    /* notify upper layer protocol */
    if (stcb->sctp_socket) {
        sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
        if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
            (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
            /* Set the connected flag to disconnected */
            stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
        }
    }
    SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
    /* free the TCB but first save off the ep */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
    atomic_add_int(&stcb->asoc.refcnt, 1);
    SCTP_TCB_UNLOCK(stcb);
    SCTP_SOCKET_LOCK(so, 1);
    SCTP_TCB_LOCK(stcb);
    atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
    (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
        SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
    SCTP_SOCKET_UNLOCK(so, 1);
#endif
}

/*
 * Skip past the param header and then we will find the chunk that caused the
 * problem. There are two possibilities ASCONF or FWD-TSN other than that and
 * our peer must be broken.
 */
static void
sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
    struct sctp_nets *net)
{
    struct sctp_chunkhdr *chk;

    chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
    switch (chk->chunk_type) {
    case SCTP_ASCONF_ACK:
    case SCTP_ASCONF:
        sctp_asconf_cleanup(stcb, net);
        break;
    case SCTP_FORWARD_CUM_TSN:
        stcb->asoc.peer_supports_prsctp = 0;
        break;
    default:
        SCTPDBG(SCTP_DEBUG_INPUT2,
            "Peer does not support chunk type %d(%x)??\n",
            chk->chunk_type, (uint32_t) chk->chunk_type);
        break;
    }
}

/*
 * Skip past the param header and then we will find the param that caused the
 * problem. There are a number of param's in a ASCONF OR the prsctp param
 * these will turn off specific features.
 */
static void
sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
{
    struct sctp_paramhdr *pbad;

    pbad = phdr + 1;
    switch (ntohs(pbad->param_type)) {
        /* pr-sctp draft */
    case SCTP_PRSCTP_SUPPORTED:
        stcb->asoc.peer_supports_prsctp = 0;
        break;
    case SCTP_SUPPORTED_CHUNK_EXT:
        break;
        /* draft-ietf-tsvwg-addip-sctp */
    case SCTP_ECN_NONCE_SUPPORTED:
        stcb->asoc.peer_supports_ecn_nonce = 0;
        stcb->asoc.ecn_nonce_allowed = 0;
        stcb->asoc.ecn_allowed = 0;
        break;
    case SCTP_ADD_IP_ADDRESS:
    case SCTP_DEL_IP_ADDRESS:
    case SCTP_SET_PRIM_ADDR:
        stcb->asoc.peer_supports_asconf = 0;
        break;
    case SCTP_SUCCESS_REPORT:
    case SCTP_ERROR_CAUSE_IND:
        SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
        SCTPDBG(SCTP_DEBUG_INPUT2,
            "Turning off ASCONF to this strange peer\n");
        stcb->asoc.peer_supports_asconf = 0;
        break;
    default:
        SCTPDBG(SCTP_DEBUG_INPUT2,
            "Peer does not support param type %d(%x)??\n",
            pbad->param_type, (uint32_t) pbad->param_type);
        break;
    }
}

/*
 * Walk the error causes carried in an incoming ERROR chunk and react to
 * each one (stale cookie handling, unrecognized chunk/param processing, ...).
 */
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int chklen;
    struct sctp_paramhdr *phdr;
    uint16_t error_type;
    uint16_t error_len;
    struct sctp_association *asoc;
    int adjust;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
    struct socket *so;

#endif

    /* parse through all of the errors and process */
    asoc = &stcb->asoc;
    phdr = (struct sctp_paramhdr *)((caddr_t)ch +
        sizeof(struct sctp_chunkhdr));
    chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
    while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
        /* Process an Error Cause */
        error_type = ntohs(phdr->param_type);
        error_len = ntohs(phdr->param_length);
        if ((error_len > chklen) || (error_len == 0)) {
            /* invalid param length for this param */
            SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
                chklen, error_len);
            return (0);
        }
        switch (error_type) {
        case SCTP_CAUSE_INVALID_STREAM:
        case SCTP_CAUSE_MISSING_PARAM:
        case SCTP_CAUSE_INVALID_PARAM:
        case SCTP_CAUSE_NO_USER_DATA:
            SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
                error_type);
            break;
        case SCTP_CAUSE_STALE_COOKIE:
            /*
             * We only act if we have echoed a cookie and are
             * waiting.
997 */ 998 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 999 int *p; 1000 1001 p = (int *)((caddr_t)phdr + sizeof(*phdr)); 1002 /* Save the time doubled */ 1003 asoc->cookie_preserve_req = ntohl(*p) << 1; 1004 asoc->stale_cookie_count++; 1005 if (asoc->stale_cookie_count > 1006 asoc->max_init_times) { 1007 sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED); 1008 /* now free the asoc */ 1009 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1010 so = SCTP_INP_SO(stcb->sctp_ep); 1011 atomic_add_int(&stcb->asoc.refcnt, 1); 1012 SCTP_TCB_UNLOCK(stcb); 1013 SCTP_SOCKET_LOCK(so, 1); 1014 SCTP_TCB_LOCK(stcb); 1015 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1016 #endif 1017 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1018 SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); 1019 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1020 SCTP_SOCKET_UNLOCK(so, 1); 1021 #endif 1022 return (-1); 1023 } 1024 /* blast back to INIT state */ 1025 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED; 1026 asoc->state |= SCTP_STATE_COOKIE_WAIT; 1027 1028 sctp_stop_all_cookie_timers(stcb); 1029 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1030 } 1031 break; 1032 case SCTP_CAUSE_UNRESOLVABLE_ADDR: 1033 /* 1034 * Nothing we can do here, we don't do hostname 1035 * addresses so if the peer does not like my IPv6 1036 * (or IPv4 for that matter) it does not matter. If 1037 * they don't support that type of address, they can 1038 * NOT possibly get that packet type... i.e. with no 1039 * IPv6 you can't recieve a IPv6 packet. so we can 1040 * safely ignore this one. If we ever added support 1041 * for HOSTNAME Addresses, then we would need to do 1042 * something here. 
1043 */ 1044 break; 1045 case SCTP_CAUSE_UNRECOG_CHUNK: 1046 sctp_process_unrecog_chunk(stcb, phdr, net); 1047 break; 1048 case SCTP_CAUSE_UNRECOG_PARAM: 1049 sctp_process_unrecog_param(stcb, phdr); 1050 break; 1051 case SCTP_CAUSE_COOKIE_IN_SHUTDOWN: 1052 /* 1053 * We ignore this since the timer will drive out a 1054 * new cookie anyway and there timer will drive us 1055 * to send a SHUTDOWN_COMPLETE. We can't send one 1056 * here since we don't have their tag. 1057 */ 1058 break; 1059 case SCTP_CAUSE_DELETING_LAST_ADDR: 1060 case SCTP_CAUSE_RESOURCE_SHORTAGE: 1061 case SCTP_CAUSE_DELETING_SRC_ADDR: 1062 /* 1063 * We should NOT get these here, but in a 1064 * ASCONF-ACK. 1065 */ 1066 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n", 1067 error_type); 1068 break; 1069 case SCTP_CAUSE_OUT_OF_RESC: 1070 /* 1071 * And what, pray tell do we do with the fact that 1072 * the peer is out of resources? Not really sure we 1073 * could do anything but abort. I suspect this 1074 * should have came WITH an abort instead of in a 1075 * OP-ERROR. 
1076 */ 1077 break; 1078 default: 1079 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n", 1080 error_type); 1081 break; 1082 } 1083 adjust = SCTP_SIZE32(error_len); 1084 chklen -= adjust; 1085 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust); 1086 } 1087 return (0); 1088 } 1089 1090 static int 1091 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, 1092 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 1093 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 1094 { 1095 struct sctp_init_ack *init_ack; 1096 struct mbuf *op_err; 1097 1098 SCTPDBG(SCTP_DEBUG_INPUT2, 1099 "sctp_handle_init_ack: handling INIT-ACK\n"); 1100 1101 if (stcb == NULL) { 1102 SCTPDBG(SCTP_DEBUG_INPUT2, 1103 "sctp_handle_init_ack: TCB is null\n"); 1104 return (-1); 1105 } 1106 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) { 1107 /* Invalid length */ 1108 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1109 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1110 op_err, 0, net->port); 1111 *abort_no_unlock = 1; 1112 return (-1); 1113 } 1114 init_ack = &cp->init; 1115 /* validate parameters */ 1116 if (init_ack->initiate_tag == 0) { 1117 /* protocol error... send an abort */ 1118 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1119 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1120 op_err, 0, net->port); 1121 *abort_no_unlock = 1; 1122 return (-1); 1123 } 1124 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) { 1125 /* protocol error... send an abort */ 1126 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1127 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1128 op_err, 0, net->port); 1129 *abort_no_unlock = 1; 1130 return (-1); 1131 } 1132 if (init_ack->num_inbound_streams == 0) { 1133 /* protocol error... 
send an abort */ 1134 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1135 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1136 op_err, 0, net->port); 1137 *abort_no_unlock = 1; 1138 return (-1); 1139 } 1140 if (init_ack->num_outbound_streams == 0) { 1141 /* protocol error... send an abort */ 1142 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1143 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1144 op_err, 0, net->port); 1145 *abort_no_unlock = 1; 1146 return (-1); 1147 } 1148 /* process according to association state... */ 1149 switch (stcb->asoc.state & SCTP_STATE_MASK) { 1150 case SCTP_STATE_COOKIE_WAIT: 1151 /* this is the expected state for this chunk */ 1152 /* process the INIT-ACK parameters */ 1153 if (stcb->asoc.primary_destination->dest_state & 1154 SCTP_ADDR_UNCONFIRMED) { 1155 /* 1156 * The primary is where we sent the INIT, we can 1157 * always consider it confirmed when the INIT-ACK is 1158 * returned. Do this before we load addresses 1159 * though. 
1160 */ 1161 stcb->asoc.primary_destination->dest_state &= 1162 ~SCTP_ADDR_UNCONFIRMED; 1163 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 1164 stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED); 1165 } 1166 if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb, 1167 net, abort_no_unlock, vrf_id) < 0) { 1168 /* error in parsing parameters */ 1169 return (-1); 1170 } 1171 /* update our state */ 1172 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n"); 1173 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED); 1174 1175 /* reset the RTO calc */ 1176 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 1177 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 1178 stcb->asoc.overall_error_count, 1179 0, 1180 SCTP_FROM_SCTP_INPUT, 1181 __LINE__); 1182 } 1183 stcb->asoc.overall_error_count = 0; 1184 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 1185 /* 1186 * collapse the init timer back in case of a exponential 1187 * backoff 1188 */ 1189 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep, 1190 stcb, net); 1191 /* 1192 * the send at the end of the inbound data processing will 1193 * cause the cookie to be sent 1194 */ 1195 break; 1196 case SCTP_STATE_SHUTDOWN_SENT: 1197 /* incorrect state... discard */ 1198 break; 1199 case SCTP_STATE_COOKIE_ECHOED: 1200 /* incorrect state... discard */ 1201 break; 1202 case SCTP_STATE_OPEN: 1203 /* incorrect state... discard */ 1204 break; 1205 case SCTP_STATE_EMPTY: 1206 case SCTP_STATE_INUSE: 1207 default: 1208 /* incorrect state... 
discard */ 1209 return (-1); 1210 break; 1211 } 1212 SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n"); 1213 return (0); 1214 } 1215 1216 1217 /* 1218 * handle a state cookie for an existing association m: input packet mbuf 1219 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a 1220 * "split" mbuf and the cookie signature does not exist offset: offset into 1221 * mbuf to the cookie-echo chunk 1222 */ 1223 static struct sctp_tcb * 1224 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, 1225 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1226 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, 1227 struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id, 1228 uint32_t vrf_id) 1229 { 1230 struct sctp_association *asoc; 1231 struct sctp_init_chunk *init_cp, init_buf; 1232 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1233 int chk_length; 1234 int init_offset, initack_offset, i; 1235 int retval; 1236 int spec_flag = 0; 1237 uint32_t how_indx; 1238 1239 /* I know that the TCB is non-NULL from the caller */ 1240 asoc = &stcb->asoc; 1241 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) { 1242 if (asoc->cookie_how[how_indx] == 0) 1243 break; 1244 } 1245 if (how_indx < sizeof(asoc->cookie_how)) { 1246 asoc->cookie_how[how_indx] = 1; 1247 } 1248 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 1249 /* SHUTDOWN came in after sending INIT-ACK */ 1250 struct mbuf *op_err; 1251 struct sctp_paramhdr *ph; 1252 1253 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 1254 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1255 0, M_DONTWAIT, 1, MT_DATA); 1256 if (op_err == NULL) { 1257 /* FOOBAR */ 1258 return (NULL); 1259 } 1260 /* pre-reserve some space */ 1261 #ifdef INET6 1262 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1263 #else 1264 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 1265 #endif 1266 
SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1267 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1268 /* Set the len */ 1269 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr); 1270 ph = mtod(op_err, struct sctp_paramhdr *); 1271 ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN); 1272 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 1273 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 1274 vrf_id, net->port); 1275 if (how_indx < sizeof(asoc->cookie_how)) 1276 asoc->cookie_how[how_indx] = 2; 1277 return (NULL); 1278 } 1279 /* 1280 * find and validate the INIT chunk in the cookie (peer's info) the 1281 * INIT should start after the cookie-echo header struct (chunk 1282 * header, state cookie header struct) 1283 */ 1284 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk); 1285 1286 init_cp = (struct sctp_init_chunk *) 1287 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1288 (uint8_t *) & init_buf); 1289 if (init_cp == NULL) { 1290 /* could not pull a INIT chunk in cookie */ 1291 return (NULL); 1292 } 1293 chk_length = ntohs(init_cp->ch.chunk_length); 1294 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1295 return (NULL); 1296 } 1297 /* 1298 * find and validate the INIT-ACK chunk in the cookie (my info) the 1299 * INIT-ACK follows the INIT chunk 1300 */ 1301 initack_offset = init_offset + SCTP_SIZE32(chk_length); 1302 initack_cp = (struct sctp_init_ack_chunk *) 1303 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1304 (uint8_t *) & initack_buf); 1305 if (initack_cp == NULL) { 1306 /* could not pull INIT-ACK chunk in cookie */ 1307 return (NULL); 1308 } 1309 chk_length = ntohs(initack_cp->ch.chunk_length); 1310 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1311 return (NULL); 1312 } 1313 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1314 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) { 1315 /* 1316 * case D in Section 5.2.4 Table 2: MMAA process 
accordingly 1317 * to get into the OPEN state 1318 */ 1319 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1320 /*- 1321 * Opps, this means that we somehow generated two vtag's 1322 * the same. I.e. we did: 1323 * Us Peer 1324 * <---INIT(tag=a)------ 1325 * ----INIT-ACK(tag=t)--> 1326 * ----INIT(tag=t)------> *1 1327 * <---INIT-ACK(tag=a)--- 1328 * <----CE(tag=t)------------- *2 1329 * 1330 * At point *1 we should be generating a different 1331 * tag t'. Which means we would throw away the CE and send 1332 * ours instead. Basically this is case C (throw away side). 1333 */ 1334 if (how_indx < sizeof(asoc->cookie_how)) 1335 asoc->cookie_how[how_indx] = 17; 1336 return (NULL); 1337 1338 } 1339 switch SCTP_GET_STATE 1340 (asoc) { 1341 case SCTP_STATE_COOKIE_WAIT: 1342 case SCTP_STATE_COOKIE_ECHOED: 1343 /* 1344 * INIT was sent but got a COOKIE_ECHO with the 1345 * correct tags... just accept it...but we must 1346 * process the init so that we can make sure we have 1347 * the right seq no's. 1348 */ 1349 /* First we must process the INIT !! 
*/ 1350 retval = sctp_process_init(init_cp, stcb, net); 1351 if (retval < 0) { 1352 if (how_indx < sizeof(asoc->cookie_how)) 1353 asoc->cookie_how[how_indx] = 3; 1354 return (NULL); 1355 } 1356 /* we have already processed the INIT so no problem */ 1357 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, 1358 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12); 1359 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13); 1360 /* update current state */ 1361 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1362 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1363 else 1364 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1365 1366 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1367 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1368 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1369 stcb->sctp_ep, stcb, asoc->primary_destination); 1370 } 1371 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1372 sctp_stop_all_cookie_timers(stcb); 1373 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1374 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1375 (inp->sctp_socket->so_qlimit == 0) 1376 ) { 1377 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1378 struct socket *so; 1379 1380 #endif 1381 /* 1382 * Here is where collision would go if we 1383 * did a connect() and instead got a 1384 * init/init-ack/cookie done before the 1385 * init-ack came back.. 
1386 */ 1387 stcb->sctp_ep->sctp_flags |= 1388 SCTP_PCB_FLAGS_CONNECTED; 1389 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1390 so = SCTP_INP_SO(stcb->sctp_ep); 1391 atomic_add_int(&stcb->asoc.refcnt, 1); 1392 SCTP_TCB_UNLOCK(stcb); 1393 SCTP_SOCKET_LOCK(so, 1); 1394 SCTP_TCB_LOCK(stcb); 1395 atomic_add_int(&stcb->asoc.refcnt, -1); 1396 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1397 SCTP_SOCKET_UNLOCK(so, 1); 1398 return (NULL); 1399 } 1400 #endif 1401 soisconnected(stcb->sctp_socket); 1402 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1403 SCTP_SOCKET_UNLOCK(so, 1); 1404 #endif 1405 } 1406 /* notify upper layer */ 1407 *notification = SCTP_NOTIFY_ASSOC_UP; 1408 /* 1409 * since we did not send a HB make sure we don't 1410 * double things 1411 */ 1412 net->hb_responded = 1; 1413 net->RTO = sctp_calculate_rto(stcb, asoc, net, 1414 &cookie->time_entered, sctp_align_unsafe_makecopy); 1415 1416 if (stcb->asoc.sctp_autoclose_ticks && 1417 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) { 1418 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 1419 inp, stcb, NULL); 1420 } 1421 break; 1422 default: 1423 /* 1424 * we're in the OPEN state (or beyond), so peer must 1425 * have simply lost the COOKIE-ACK 1426 */ 1427 break; 1428 } /* end switch */ 1429 sctp_stop_all_cookie_timers(stcb); 1430 /* 1431 * We ignore the return code here.. not sure if we should 1432 * somehow abort.. but we do have an existing asoc. This 1433 * really should not fail. 
1434 */ 1435 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1436 init_offset + sizeof(struct sctp_init_chunk), 1437 initack_offset, sh, init_src)) { 1438 if (how_indx < sizeof(asoc->cookie_how)) 1439 asoc->cookie_how[how_indx] = 4; 1440 return (NULL); 1441 } 1442 /* respond with a COOKIE-ACK */ 1443 sctp_toss_old_cookies(stcb, asoc); 1444 sctp_send_cookie_ack(stcb); 1445 if (how_indx < sizeof(asoc->cookie_how)) 1446 asoc->cookie_how[how_indx] = 5; 1447 return (stcb); 1448 } 1449 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1450 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag && 1451 cookie->tie_tag_my_vtag == 0 && 1452 cookie->tie_tag_peer_vtag == 0) { 1453 /* 1454 * case C in Section 5.2.4 Table 2: XMOO silently discard 1455 */ 1456 if (how_indx < sizeof(asoc->cookie_how)) 1457 asoc->cookie_how[how_indx] = 6; 1458 return (NULL); 1459 } 1460 if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag && 1461 (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag || 1462 init_cp->init.initiate_tag == 0)) { 1463 /* 1464 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info 1465 * should be ok, re-accept peer info 1466 */ 1467 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1468 /* 1469 * Extension of case C. If we hit this, then the 1470 * random number generator returned the same vtag 1471 * when we first sent our INIT-ACK and when we later 1472 * sent our INIT. The side with the seq numbers that 1473 * are different will be the one that normnally 1474 * would have hit case C. This in effect "extends" 1475 * our vtags in this collision case to be 64 bits. 1476 * The same collision could occur aka you get both 1477 * vtag and seq number the same twice in a row.. but 1478 * is much less likely. If it did happen then we 1479 * would proceed through and bring up the assoc.. we 1480 * may end up with the wrong stream setup however.. 1481 * which would be bad.. but there is no way to 1482 * tell.. 
until we send on a stream that does not 1483 * exist :-) 1484 */ 1485 if (how_indx < sizeof(asoc->cookie_how)) 1486 asoc->cookie_how[how_indx] = 7; 1487 1488 return (NULL); 1489 } 1490 if (how_indx < sizeof(asoc->cookie_how)) 1491 asoc->cookie_how[how_indx] = 8; 1492 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14); 1493 sctp_stop_all_cookie_timers(stcb); 1494 /* 1495 * since we did not send a HB make sure we don't double 1496 * things 1497 */ 1498 net->hb_responded = 1; 1499 if (stcb->asoc.sctp_autoclose_ticks && 1500 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1501 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1502 NULL); 1503 } 1504 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1505 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1506 1507 /* Note last_cwr_tsn? where is this used? */ 1508 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1509 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) { 1510 /* 1511 * Ok the peer probably discarded our data (if we 1512 * echoed a cookie+data). So anything on the 1513 * sent_queue should be marked for retransmit, we 1514 * may not get something to kick us so it COULD 1515 * still take a timeout to move these.. but it can't 1516 * hurt to mark them. 
1517 */ 1518 struct sctp_tmit_chunk *chk; 1519 1520 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1521 if (chk->sent < SCTP_DATAGRAM_RESEND) { 1522 chk->sent = SCTP_DATAGRAM_RESEND; 1523 sctp_flight_size_decrease(chk); 1524 sctp_total_flight_decrease(stcb, chk); 1525 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1526 spec_flag++; 1527 } 1528 } 1529 1530 } 1531 /* process the INIT info (peer's info) */ 1532 retval = sctp_process_init(init_cp, stcb, net); 1533 if (retval < 0) { 1534 if (how_indx < sizeof(asoc->cookie_how)) 1535 asoc->cookie_how[how_indx] = 9; 1536 return (NULL); 1537 } 1538 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1539 init_offset + sizeof(struct sctp_init_chunk), 1540 initack_offset, sh, init_src)) { 1541 if (how_indx < sizeof(asoc->cookie_how)) 1542 asoc->cookie_how[how_indx] = 10; 1543 return (NULL); 1544 } 1545 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) || 1546 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) { 1547 *notification = SCTP_NOTIFY_ASSOC_UP; 1548 1549 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1550 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1551 (inp->sctp_socket->so_qlimit == 0)) { 1552 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1553 struct socket *so; 1554 1555 #endif 1556 stcb->sctp_ep->sctp_flags |= 1557 SCTP_PCB_FLAGS_CONNECTED; 1558 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1559 so = SCTP_INP_SO(stcb->sctp_ep); 1560 atomic_add_int(&stcb->asoc.refcnt, 1); 1561 SCTP_TCB_UNLOCK(stcb); 1562 SCTP_SOCKET_LOCK(so, 1); 1563 SCTP_TCB_LOCK(stcb); 1564 atomic_add_int(&stcb->asoc.refcnt, -1); 1565 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1566 SCTP_SOCKET_UNLOCK(so, 1); 1567 return (NULL); 1568 } 1569 #endif 1570 soisconnected(stcb->sctp_socket); 1571 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1572 SCTP_SOCKET_UNLOCK(so, 1); 1573 #endif 1574 } 1575 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1576 
SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1577 else 1578 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1579 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1580 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1581 SCTP_STAT_INCR_COUNTER32(sctps_restartestab); 1582 } else { 1583 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1584 } 1585 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1586 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1587 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1588 stcb->sctp_ep, stcb, asoc->primary_destination); 1589 } 1590 sctp_stop_all_cookie_timers(stcb); 1591 sctp_toss_old_cookies(stcb, asoc); 1592 sctp_send_cookie_ack(stcb); 1593 if (spec_flag) { 1594 /* 1595 * only if we have retrans set do we do this. What 1596 * this call does is get only the COOKIE-ACK out and 1597 * then when we return the normal call to 1598 * sctp_chunk_output will get the retrans out behind 1599 * this. 1600 */ 1601 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED); 1602 } 1603 if (how_indx < sizeof(asoc->cookie_how)) 1604 asoc->cookie_how[how_indx] = 11; 1605 1606 return (stcb); 1607 } 1608 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1609 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) && 1610 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce && 1611 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce && 1612 cookie->tie_tag_peer_vtag != 0) { 1613 struct sctpasochead *head; 1614 1615 /* 1616 * case A in Section 5.2.4 Table 2: XXMM (peer restarted) 1617 */ 1618 /* temp code */ 1619 if (how_indx < sizeof(asoc->cookie_how)) 1620 asoc->cookie_how[how_indx] = 12; 1621 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15); 1622 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1623 1624 *sac_assoc_id = sctp_get_associd(stcb); 1625 /* notify upper layer */ 1626 *notification = SCTP_NOTIFY_ASSOC_RESTART; 1627 
atomic_add_int(&stcb->asoc.refcnt, 1); 1628 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) && 1629 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1630 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 1631 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1632 } 1633 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1634 SCTP_STAT_INCR_GAUGE32(sctps_restartestab); 1635 } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1636 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab); 1637 } 1638 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1639 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1640 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1641 stcb->sctp_ep, stcb, asoc->primary_destination); 1642 1643 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) { 1644 /* move to OPEN state, if not in SHUTDOWN_SENT */ 1645 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1646 } 1647 asoc->pre_open_streams = 1648 ntohs(initack_cp->init.num_outbound_streams); 1649 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1650 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 1651 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 1652 1653 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1654 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1655 1656 asoc->str_reset_seq_in = asoc->init_seq_number; 1657 1658 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1659 if (asoc->mapping_array) { 1660 memset(asoc->mapping_array, 0, 1661 asoc->mapping_array_size); 1662 } 1663 SCTP_TCB_UNLOCK(stcb); 1664 SCTP_INP_INFO_WLOCK(); 1665 SCTP_INP_WLOCK(stcb->sctp_ep); 1666 SCTP_TCB_LOCK(stcb); 1667 atomic_add_int(&stcb->asoc.refcnt, -1); 1668 /* send up all the data */ 1669 SCTP_TCB_SEND_LOCK(stcb); 1670 1671 sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED); 1672 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1673 stcb->asoc.strmout[i].stream_no = i; 1674 stcb->asoc.strmout[i].next_sequence_sent = 0; 1675 
stcb->asoc.strmout[i].last_msg_incomplete = 0; 1676 } 1677 /* process the INIT-ACK info (my info) */ 1678 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1679 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1680 1681 /* pull from vtag hash */ 1682 LIST_REMOVE(stcb, sctp_asocs); 1683 /* re-insert to new vtag position */ 1684 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, 1685 SCTP_BASE_INFO(hashasocmark))]; 1686 /* 1687 * put it in the bucket in the vtag hash of assoc's for the 1688 * system 1689 */ 1690 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 1691 1692 /* Is this the first restart? */ 1693 if (stcb->asoc.in_restart_hash == 0) { 1694 /* Ok add it to assoc_id vtag hash */ 1695 head = &SCTP_BASE_INFO(sctp_restarthash)[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id, 1696 SCTP_BASE_INFO(hashrestartmark))]; 1697 LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash); 1698 stcb->asoc.in_restart_hash = 1; 1699 } 1700 /* process the INIT info (peer's info) */ 1701 SCTP_TCB_SEND_UNLOCK(stcb); 1702 SCTP_INP_WUNLOCK(stcb->sctp_ep); 1703 SCTP_INP_INFO_WUNLOCK(); 1704 1705 retval = sctp_process_init(init_cp, stcb, net); 1706 if (retval < 0) { 1707 if (how_indx < sizeof(asoc->cookie_how)) 1708 asoc->cookie_how[how_indx] = 13; 1709 1710 return (NULL); 1711 } 1712 /* 1713 * since we did not send a HB make sure we don't double 1714 * things 1715 */ 1716 net->hb_responded = 1; 1717 1718 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1719 init_offset + sizeof(struct sctp_init_chunk), 1720 initack_offset, sh, init_src)) { 1721 if (how_indx < sizeof(asoc->cookie_how)) 1722 asoc->cookie_how[how_indx] = 14; 1723 1724 return (NULL); 1725 } 1726 /* respond with a COOKIE-ACK */ 1727 sctp_stop_all_cookie_timers(stcb); 1728 sctp_toss_old_cookies(stcb, asoc); 1729 sctp_send_cookie_ack(stcb); 1730 if (how_indx < sizeof(asoc->cookie_how)) 1731 asoc->cookie_how[how_indx] = 15; 1732 1733 return (stcb); 1734 } 1735 if (how_indx < sizeof(asoc->cookie_how)) 1736 
asoc->cookie_how[how_indx] = 16;
	/* all other cases... */
	return (NULL);
}


/*
 * Handle a state cookie for a NEW association (the normal, non-collision
 * COOKIE-ECHO path).
 *
 * m:      input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO
 *         chunk; note: this is a "split" mbuf and the cookie signature does
 *         not exist (it has been split off by the caller)
 * offset: offset into mbuf to the cookie-echo chunk
 * length: length of the cookie chunk
 * to:     where the init was from
 * returns a new TCB on success, NULL on any failure (the packet is then
 * dropped or aborted by this function as appropriate)
 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sockaddr_storage sa_store;
	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_association *asoc;
	int chk_length;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	uint32_t old_tag;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate it
	 */

	/*
	 * Here we do a trick, we set in NULL for the proc/thread argument.
	 * We do this since in effect we only use the p argument when the
	 * socket is unbound and we must do an implicit bind.  Since we are
	 * getting a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    (struct thread *)NULL
	    );
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);

		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id, port);
		return (NULL);
	}
	/* get the correct sctp_nets */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->ipv4_local_scope = cookie->ipv4_scope;
	asoc->site_scope = cookie->site_scope;
	asoc->local_scope = cookie->local_scope;
	asoc->loopback_scope = cookie->loopback_scope;

	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston we have a problem.  The EP changed while the
		 * cookie was in flight.  Only recourse is to abort the
		 * association.
		 */
		/*
		 * NOTE(review): the refcnt bump keeps the stcb alive across
		 * the Apple lock juggling below until sctp_free_assoc runs.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id, port);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order: the socket lock must be taken before the TCB
		 * lock, so drop and re-acquire the TCB lock around it.
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	old_tag = asoc->my_vtag;
	asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/* process the INIT info (peer's info) */
	if (netp)
		retval = sctp_process_init(init_cp, stcb, *netp);
	else
		retval = 0;
	if (retval < 0) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m, iphlen,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
	    init_src)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct sctp_auth_chunk *)
		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
			    "COOKIE-ECHO: AUTH failed\n");
			atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}
	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
		    stcb->sctp_ep, stcb, asoc->primary_destination);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight).  This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing.  else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
		/* source addr is IPv4 */
		sin = (struct sockaddr_in *)initack_src;
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		sin->sin_addr.s_addr = cookie->laddress[0];
	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
		/* source addr is IPv6 */
		sin6 = (struct sockaddr_in6 *)initack_src;
		memset(sin6, 0, sizeof(*sin6));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		sin6->sin6_scope_id = cookie->scope_id;
		memcpy(&sin6->sin6_addr, cookie->laddress,
		    sizeof(sin6->sin6_addr));
	} else {
		/* unknown local address type in cookie -- drop the assoc */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect() how it got a
		 * cookie that is NEW is a bit of a mystery.  It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived.  But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having..
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		/* the socket may have been closed while unlocked */
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/*
		 * We don't want to do anything with this one.  Since it is
		 * the listening guy.  The timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	/* since we did not send a HB make sure we don't double things */
	if ((netp) && (*netp))
		(*netp)->hb_responded = 1;

	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	/* calculate the RTT */
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	if ((netp) && (*netp)) {
		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
		    &cookie->time_entered, sctp_align_unsafe_makecopy);
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    initack_src, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}


/*
 * handles a COOKIE-ECHO
message stcb: modified to either a new or left as 2095 * existing (non-NULL) TCB 2096 */ 2097 static struct mbuf * 2098 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2099 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2100 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2101 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2102 struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint16_t port) 2103 { 2104 struct sctp_state_cookie *cookie; 2105 struct sockaddr_in6 sin6; 2106 struct sockaddr_in sin; 2107 struct sctp_tcb *l_stcb = *stcb; 2108 struct sctp_inpcb *l_inp; 2109 struct sockaddr *to; 2110 sctp_assoc_t sac_restart_id; 2111 struct sctp_pcb *ep; 2112 struct mbuf *m_sig; 2113 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2114 uint8_t *sig; 2115 uint8_t cookie_ok = 0; 2116 unsigned int size_of_pkt, sig_offset, cookie_offset; 2117 unsigned int cookie_len; 2118 struct timeval now; 2119 struct timeval time_expires; 2120 struct sockaddr_storage dest_store; 2121 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store; 2122 struct ip *iph; 2123 int notification = 0; 2124 struct sctp_nets *netl; 2125 int had_a_existing_tcb = 0; 2126 2127 SCTPDBG(SCTP_DEBUG_INPUT2, 2128 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 2129 2130 if (inp_p == NULL) { 2131 return (NULL); 2132 } 2133 /* First get the destination address setup too. 
*/ 2134 iph = mtod(m, struct ip *); 2135 switch (iph->ip_v) { 2136 case IPVERSION: 2137 { 2138 /* its IPv4 */ 2139 struct sockaddr_in *lsin; 2140 2141 lsin = (struct sockaddr_in *)(localep_sa); 2142 memset(lsin, 0, sizeof(*lsin)); 2143 lsin->sin_family = AF_INET; 2144 lsin->sin_len = sizeof(*lsin); 2145 lsin->sin_port = sh->dest_port; 2146 lsin->sin_addr.s_addr = iph->ip_dst.s_addr; 2147 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph); 2148 break; 2149 } 2150 #ifdef INET6 2151 case IPV6_VERSION >> 4: 2152 { 2153 /* its IPv6 */ 2154 struct ip6_hdr *ip6; 2155 struct sockaddr_in6 *lsin6; 2156 2157 lsin6 = (struct sockaddr_in6 *)(localep_sa); 2158 memset(lsin6, 0, sizeof(*lsin6)); 2159 lsin6->sin6_family = AF_INET6; 2160 lsin6->sin6_len = sizeof(struct sockaddr_in6); 2161 ip6 = mtod(m, struct ip6_hdr *); 2162 lsin6->sin6_port = sh->dest_port; 2163 lsin6->sin6_addr = ip6->ip6_dst; 2164 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen; 2165 break; 2166 } 2167 #endif 2168 default: 2169 return (NULL); 2170 } 2171 2172 cookie = &cp->cookie; 2173 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2174 cookie_len = ntohs(cp->ch.chunk_length); 2175 2176 if ((cookie->peerport != sh->src_port) && 2177 (cookie->myport != sh->dest_port) && 2178 (cookie->my_vtag != sh->v_tag)) { 2179 /* 2180 * invalid ports or bad tag. Note that we always leave the 2181 * v_tag in the header in network order and when we stored 2182 * it in the my_vtag slot we also left it in network order. 2183 * This maintains the match even though it may be in the 2184 * opposite byte order of the machine :-> 2185 */ 2186 return (NULL); 2187 } 2188 if (cookie_len > size_of_pkt || 2189 cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2190 sizeof(struct sctp_init_chunk) + 2191 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 2192 /* cookie too long! 
or too small */ 2193 return (NULL); 2194 } 2195 /* 2196 * split off the signature into its own mbuf (since it should not be 2197 * calculated in the sctp_hmac_m() call). 2198 */ 2199 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2200 if (sig_offset > size_of_pkt) { 2201 /* packet not correct size! */ 2202 /* XXX this may already be accounted for earlier... */ 2203 return (NULL); 2204 } 2205 m_sig = m_split(m, sig_offset, M_DONTWAIT); 2206 if (m_sig == NULL) { 2207 /* out of memory or ?? */ 2208 return (NULL); 2209 } 2210 #ifdef SCTP_MBUF_LOGGING 2211 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2212 struct mbuf *mat; 2213 2214 mat = m_sig; 2215 while (mat) { 2216 if (SCTP_BUF_IS_EXTENDED(mat)) { 2217 sctp_log_mb(mat, SCTP_MBUF_SPLIT); 2218 } 2219 mat = SCTP_BUF_NEXT(mat); 2220 } 2221 } 2222 #endif 2223 2224 /* 2225 * compute the signature/digest for the cookie 2226 */ 2227 ep = &(*inp_p)->sctp_ep; 2228 l_inp = *inp_p; 2229 if (l_stcb) { 2230 SCTP_TCB_UNLOCK(l_stcb); 2231 } 2232 SCTP_INP_RLOCK(l_inp); 2233 if (l_stcb) { 2234 SCTP_TCB_LOCK(l_stcb); 2235 } 2236 /* which cookie is it? 
*/ 2237 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 2238 (ep->current_secret_number != ep->last_secret_number)) { 2239 /* it's the old cookie */ 2240 (void)sctp_hmac_m(SCTP_HMAC, 2241 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2242 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2243 } else { 2244 /* it's the current cookie */ 2245 (void)sctp_hmac_m(SCTP_HMAC, 2246 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 2247 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2248 } 2249 /* get the signature */ 2250 SCTP_INP_RUNLOCK(l_inp); 2251 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 2252 if (sig == NULL) { 2253 /* couldn't find signature */ 2254 sctp_m_freem(m_sig); 2255 return (NULL); 2256 } 2257 /* compare the received digest with the computed digest */ 2258 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2259 /* try the old cookie? */ 2260 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 2261 (ep->current_secret_number != ep->last_secret_number)) { 2262 /* compute digest with old */ 2263 (void)sctp_hmac_m(SCTP_HMAC, 2264 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2265 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2266 /* compare */ 2267 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2268 cookie_ok = 1; 2269 } 2270 } else { 2271 cookie_ok = 1; 2272 } 2273 2274 /* 2275 * Now before we continue we must reconstruct our mbuf so that 2276 * normal processing of any other chunks will work. 
2277 */ 2278 { 2279 struct mbuf *m_at; 2280 2281 m_at = m; 2282 while (SCTP_BUF_NEXT(m_at) != NULL) { 2283 m_at = SCTP_BUF_NEXT(m_at); 2284 } 2285 SCTP_BUF_NEXT(m_at) = m_sig; 2286 } 2287 2288 if (cookie_ok == 0) { 2289 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2290 SCTPDBG(SCTP_DEBUG_INPUT2, 2291 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2292 (uint32_t) offset, cookie_offset, sig_offset); 2293 return (NULL); 2294 } 2295 /* 2296 * check the cookie timestamps to be sure it's not stale 2297 */ 2298 (void)SCTP_GETTIME_TIMEVAL(&now); 2299 /* Expire time is in Ticks, so we convert to seconds */ 2300 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 2301 time_expires.tv_usec = cookie->time_entered.tv_usec; 2302 if (timevalcmp(&now, &time_expires, >)) { 2303 /* cookie is stale! */ 2304 struct mbuf *op_err; 2305 struct sctp_stale_cookie_msg *scm; 2306 uint32_t tim; 2307 2308 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 2309 0, M_DONTWAIT, 1, MT_DATA); 2310 if (op_err == NULL) { 2311 /* FOOBAR */ 2312 return (NULL); 2313 } 2314 /* pre-reserve some space */ 2315 #ifdef INET6 2316 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 2317 #else 2318 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 2319 #endif 2320 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 2321 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 2322 2323 /* Set the len */ 2324 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 2325 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 2326 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 2327 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 2328 (sizeof(uint32_t)))); 2329 /* seconds to usec */ 2330 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 2331 /* add in usec */ 2332 if (tim == 0) 2333 tim = now.tv_usec - cookie->time_entered.tv_usec; 2334 scm->time_usec = htonl(tim); 2335 sctp_send_operr_to(m, iphlen, 
op_err, cookie->peers_vtag, 2336 vrf_id, port); 2337 return (NULL); 2338 } 2339 /* 2340 * Now we must see with the lookup address if we have an existing 2341 * asoc. This will only happen if we were in the COOKIE-WAIT state 2342 * and a INIT collided with us and somewhere the peer sent the 2343 * cookie on another address besides the single address our assoc 2344 * had for him. In this case we will have one of the tie-tags set at 2345 * least AND the address field in the cookie can be used to look it 2346 * up. 2347 */ 2348 to = NULL; 2349 if (cookie->addr_type == SCTP_IPV6_ADDRESS) { 2350 memset(&sin6, 0, sizeof(sin6)); 2351 sin6.sin6_family = AF_INET6; 2352 sin6.sin6_len = sizeof(sin6); 2353 sin6.sin6_port = sh->src_port; 2354 sin6.sin6_scope_id = cookie->scope_id; 2355 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2356 sizeof(sin6.sin6_addr.s6_addr)); 2357 to = (struct sockaddr *)&sin6; 2358 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) { 2359 memset(&sin, 0, sizeof(sin)); 2360 sin.sin_family = AF_INET; 2361 sin.sin_len = sizeof(sin); 2362 sin.sin_port = sh->src_port; 2363 sin.sin_addr.s_addr = cookie->address[0]; 2364 to = (struct sockaddr *)&sin; 2365 } else { 2366 /* This should not happen */ 2367 return (NULL); 2368 } 2369 if ((*stcb == NULL) && to) { 2370 /* Yep, lets check */ 2371 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL); 2372 if (*stcb == NULL) { 2373 /* 2374 * We should have only got back the same inp. If we 2375 * got back a different ep we have a problem. The 2376 * original findep got back l_inp and now 2377 */ 2378 if (l_inp != *inp_p) { 2379 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2380 } 2381 } else { 2382 if (*locked_tcb == NULL) { 2383 /* 2384 * In this case we found the assoc only 2385 * after we locked the create lock. 
This 2386 * means we are in a colliding case and we 2387 * must make sure that we unlock the tcb if 2388 * its one of the cases where we throw away 2389 * the incoming packets. 2390 */ 2391 *locked_tcb = *stcb; 2392 2393 /* 2394 * We must also increment the inp ref count 2395 * since the ref_count flags was set when we 2396 * did not find the TCB, now we found it 2397 * which reduces the refcount.. we must 2398 * raise it back out to balance it all :-) 2399 */ 2400 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2401 if ((*stcb)->sctp_ep != l_inp) { 2402 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n", 2403 (*stcb)->sctp_ep, l_inp); 2404 } 2405 } 2406 } 2407 } 2408 if (to == NULL) 2409 return (NULL); 2410 2411 cookie_len -= SCTP_SIGNATURE_SIZE; 2412 if (*stcb == NULL) { 2413 /* this is the "normal" case... get a new TCB */ 2414 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie, 2415 cookie_len, *inp_p, netp, to, ¬ification, 2416 auth_skipped, auth_offset, auth_len, vrf_id, port); 2417 } else { 2418 /* this is abnormal... cookie-echo on existing TCB */ 2419 had_a_existing_tcb = 1; 2420 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh, 2421 cookie, cookie_len, *inp_p, *stcb, *netp, to, 2422 ¬ification, &sac_restart_id, vrf_id); 2423 } 2424 2425 if (*stcb == NULL) { 2426 /* still no TCB... must be bad cookie-echo */ 2427 return (NULL); 2428 } 2429 /* 2430 * Ok, we built an association so confirm the address we sent the 2431 * INIT-ACK to. 2432 */ 2433 netl = sctp_findnet(*stcb, to); 2434 /* 2435 * This code should in theory NOT run but 2436 */ 2437 if (netl == NULL) { 2438 /* TSNH! Huh, why do I need to add this address here? 
*/ 2439 int ret; 2440 2441 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE, 2442 SCTP_IN_COOKIE_PROC); 2443 netl = sctp_findnet(*stcb, to); 2444 } 2445 if (netl) { 2446 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2447 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2448 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2449 netl); 2450 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2451 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2452 } 2453 } 2454 if (*stcb) { 2455 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p, 2456 *stcb, NULL); 2457 } 2458 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2459 if (!had_a_existing_tcb || 2460 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2461 /* 2462 * If we have a NEW cookie or the connect never 2463 * reached the connected state during collision we 2464 * must do the TCP accept thing. 2465 */ 2466 struct socket *so, *oso; 2467 struct sctp_inpcb *inp; 2468 2469 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2470 /* 2471 * For a restart we will keep the same 2472 * socket, no need to do anything. I THINK!! 
2473 */ 2474 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED); 2475 return (m); 2476 } 2477 oso = (*inp_p)->sctp_socket; 2478 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2479 SCTP_TCB_UNLOCK((*stcb)); 2480 so = sonewconn(oso, 0 2481 ); 2482 SCTP_TCB_LOCK((*stcb)); 2483 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2484 2485 if (so == NULL) { 2486 struct mbuf *op_err; 2487 2488 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2489 struct socket *pcb_so; 2490 2491 #endif 2492 /* Too many sockets */ 2493 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2494 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2495 sctp_abort_association(*inp_p, NULL, m, iphlen, 2496 sh, op_err, vrf_id, port); 2497 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2498 pcb_so = SCTP_INP_SO(*inp_p); 2499 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2500 SCTP_TCB_UNLOCK((*stcb)); 2501 SCTP_SOCKET_LOCK(pcb_so, 1); 2502 SCTP_TCB_LOCK((*stcb)); 2503 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2504 #endif 2505 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2506 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2507 SCTP_SOCKET_UNLOCK(pcb_so, 1); 2508 #endif 2509 return (NULL); 2510 } 2511 inp = (struct sctp_inpcb *)so->so_pcb; 2512 SCTP_INP_INCR_REF(inp); 2513 /* 2514 * We add the unbound flag here so that if we get an 2515 * soabort() before we get the move_pcb done, we 2516 * will properly cleanup. 
2517 */ 2518 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2519 SCTP_PCB_FLAGS_CONNECTED | 2520 SCTP_PCB_FLAGS_IN_TCPPOOL | 2521 SCTP_PCB_FLAGS_UNBOUND | 2522 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2523 SCTP_PCB_FLAGS_DONT_WAKE); 2524 inp->sctp_features = (*inp_p)->sctp_features; 2525 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; 2526 inp->sctp_socket = so; 2527 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2528 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2529 inp->sctp_context = (*inp_p)->sctp_context; 2530 inp->inp_starting_point_for_iterator = NULL; 2531 /* 2532 * copy in the authentication parameters from the 2533 * original endpoint 2534 */ 2535 if (inp->sctp_ep.local_hmacs) 2536 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2537 inp->sctp_ep.local_hmacs = 2538 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2539 if (inp->sctp_ep.local_auth_chunks) 2540 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2541 inp->sctp_ep.local_auth_chunks = 2542 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2543 (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys, 2544 &inp->sctp_ep.shared_keys); 2545 2546 /* 2547 * Now we must move it from one hash table to 2548 * another and get the tcb in the right place. 2549 */ 2550 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2551 2552 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2553 SCTP_TCB_UNLOCK((*stcb)); 2554 2555 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, 2556 0); 2557 SCTP_TCB_LOCK((*stcb)); 2558 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2559 2560 2561 /* 2562 * now we must check to see if we were aborted while 2563 * the move was going on and the lock/unlock 2564 * happened. 2565 */ 2566 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2567 /* 2568 * yep it was, we leave the assoc attached 2569 * to the socket since the sctp_inpcb_free() 2570 * call will send an abort for us. 
2571 */ 2572 SCTP_INP_DECR_REF(inp); 2573 return (NULL); 2574 } 2575 SCTP_INP_DECR_REF(inp); 2576 /* Switch over to the new guy */ 2577 *inp_p = inp; 2578 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2579 2580 /* 2581 * Pull it from the incomplete queue and wake the 2582 * guy 2583 */ 2584 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2585 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2586 SCTP_TCB_UNLOCK((*stcb)); 2587 SCTP_SOCKET_LOCK(so, 1); 2588 #endif 2589 soisconnected(so); 2590 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2591 SCTP_TCB_LOCK((*stcb)); 2592 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2593 SCTP_SOCKET_UNLOCK(so, 1); 2594 #endif 2595 return (m); 2596 } 2597 } 2598 if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2599 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2600 } 2601 return (m); 2602 } 2603 2604 static void 2605 sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp, 2606 struct sctp_tcb *stcb, struct sctp_nets *net) 2607 { 2608 /* cp must not be used, others call this without a c-ack :-) */ 2609 struct sctp_association *asoc; 2610 2611 SCTPDBG(SCTP_DEBUG_INPUT2, 2612 "sctp_handle_cookie_ack: handling COOKIE-ACK\n"); 2613 if (stcb == NULL) 2614 return; 2615 2616 asoc = &stcb->asoc; 2617 2618 sctp_stop_all_cookie_timers(stcb); 2619 /* process according to association state */ 2620 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 2621 /* state change only needed when I am in right state */ 2622 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 2623 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 2624 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 2625 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 2626 stcb->sctp_ep, stcb, asoc->primary_destination); 2627 2628 } 2629 /* update RTO */ 2630 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 2631 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 2632 if (asoc->overall_error_count == 0) { 2633 net->RTO = 
sctp_calculate_rto(stcb, asoc, net, 2634 &asoc->time_entered, sctp_align_safe_nocopy); 2635 } 2636 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 2637 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2638 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2639 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2640 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2641 struct socket *so; 2642 2643 #endif 2644 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 2645 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2646 so = SCTP_INP_SO(stcb->sctp_ep); 2647 atomic_add_int(&stcb->asoc.refcnt, 1); 2648 SCTP_TCB_UNLOCK(stcb); 2649 SCTP_SOCKET_LOCK(so, 1); 2650 SCTP_TCB_LOCK(stcb); 2651 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2652 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2653 SCTP_SOCKET_UNLOCK(so, 1); 2654 return; 2655 } 2656 #endif 2657 soisconnected(stcb->sctp_socket); 2658 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2659 SCTP_SOCKET_UNLOCK(so, 1); 2660 #endif 2661 } 2662 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 2663 stcb, net); 2664 /* 2665 * since we did not send a HB make sure we don't double 2666 * things 2667 */ 2668 net->hb_responded = 1; 2669 2670 if (stcb->asoc.sctp_autoclose_ticks && 2671 sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2672 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 2673 stcb->sctp_ep, stcb, NULL); 2674 } 2675 /* 2676 * send ASCONF if parameters are pending and ASCONFs are 2677 * allowed (eg. 
addresses changed when init/cookie echo were 2678 * in flight) 2679 */ 2680 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) && 2681 (stcb->asoc.peer_supports_asconf) && 2682 (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) { 2683 #ifdef SCTP_TIMER_BASED_ASCONF 2684 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, 2685 stcb->sctp_ep, stcb, 2686 stcb->asoc.primary_destination); 2687 #else 2688 sctp_send_asconf(stcb, stcb->asoc.primary_destination, 2689 SCTP_ADDR_NOT_LOCKED); 2690 #endif 2691 } 2692 } 2693 /* Toss the cookie if I can */ 2694 sctp_toss_old_cookies(stcb, asoc); 2695 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 2696 /* Restart the timer if we have pending data */ 2697 struct sctp_tmit_chunk *chk; 2698 2699 chk = TAILQ_FIRST(&asoc->sent_queue); 2700 if (chk) { 2701 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2702 stcb, chk->whoTo); 2703 } 2704 } 2705 } 2706 2707 static void 2708 sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp, 2709 struct sctp_tcb *stcb) 2710 { 2711 struct sctp_nets *net; 2712 struct sctp_tmit_chunk *lchk; 2713 uint32_t tsn; 2714 2715 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) { 2716 return; 2717 } 2718 SCTP_STAT_INCR(sctps_recvecne); 2719 tsn = ntohl(cp->tsn); 2720 /* ECN Nonce stuff: need a resync and disable the nonce sum check */ 2721 /* Also we make sure we disable the nonce_wait */ 2722 lchk = TAILQ_FIRST(&stcb->asoc.send_queue); 2723 if (lchk == NULL) { 2724 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq; 2725 } else { 2726 stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq; 2727 } 2728 stcb->asoc.nonce_wait_for_ecne = 0; 2729 stcb->asoc.nonce_sum_check = 0; 2730 2731 /* Find where it was sent, if possible */ 2732 net = NULL; 2733 lchk = TAILQ_FIRST(&stcb->asoc.sent_queue); 2734 while (lchk) { 2735 if (lchk->rec.data.TSN_seq == tsn) { 2736 net = lchk->whoTo; 2737 break; 2738 } 2739 if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ)) 2740 break; 2741 lchk = TAILQ_NEXT(lchk, sctp_next); 
	}
	if (net == NULL)
		/* default is we use the primary */
		net = stcb->asoc.primary_destination;

	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net);
		/*
		 * we reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the resync_tsn.
		 */
		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer.
	 */
	sctp_send_cwr(stcb, net, tsn);
}

/*
 * Handle a received CWR chunk: the peer has reacted to our ECN-ECHO, so
 * remove the ECN-ECHO it covers from the control send queue.
 */
static void
sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
{
	/*
	 * Here we get a CWR from the peer. We must look in the outqueue and
	 * make sure that we have a covered ECNE in the control chunk part.
	 * If so remove it.
	 */
	struct sctp_tmit_chunk *chk;
	struct sctp_ecne_chunk *ecne;

	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
			continue;
		}
		/*
		 * Look for and remove if it is the right TSN. Since there
		 * is only ONE ECNE on the control queue at any one time we
		 * don't need to worry about more than one!
		 */
		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
		/*
		 * Covered when the CWR's TSN is newer than ours, or exactly
		 * equal (the equality test compares the raw network-order
		 * values, which is valid for an exact match).
		 */
		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
			/* this covers this ECNE, we can remove it */
			stcb->asoc.ecn_echo_cnt_onq--;
			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
			    sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			stcb->asoc.ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk);
			break;
		}
	}
}

/*
 * Handle a SHUTDOWN-COMPLETE chunk.  Only acted on in the
 * SHUTDOWN-ACK-SENT state; otherwise it is ignored and the TCB is
 * unlocked.  On success the ULP is notified, any stray outbound data is
 * reported, the SHUTDOWN-ACK timer is stopped and the association freed.
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty? they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !TAILQ_EMPTY(&asoc->out_wheel)) {
			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Bump the refcnt so the association survives while the TCB lock
	 * is dropped and re-taken around the socket lock — apparently to
	 * honor socket-before-TCB lock ordering on these platforms.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}

/*
 * Process one chunk descriptor from a PACKET-DROPPED report: locate the
 * corresponding queued chunk (DATA by TSN, or a control chunk by type) and
 * mark it for retransmission.  Returns 0 on success, -1 when the reported
 * data bytes do not match what we have queued.
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			/*
			 * First pass assumes the sent queue is TSN ordered,
			 * so we can stop once we pass the reported TSN.
			 */
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			while (tp1) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
				    MAX_TSN)) {
					/* not found */
					tp1 = NULL;
					break;
				}
				tp1 = TAILQ_NEXT(tp1, sctp_next);
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
2891 */ 2892 SCTP_STAT_INCR(sctps_pdrpdnfnd); 2893 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2894 while (tp1) { 2895 if (tp1->rec.data.TSN_seq == tsn) { 2896 /* found it */ 2897 break; 2898 } 2899 tp1 = TAILQ_NEXT(tp1, sctp_next); 2900 } 2901 } 2902 if (tp1 == NULL) { 2903 SCTP_STAT_INCR(sctps_pdrptsnnf); 2904 } 2905 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) { 2906 uint8_t *ddp; 2907 2908 if ((stcb->asoc.peers_rwnd == 0) && 2909 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { 2910 SCTP_STAT_INCR(sctps_pdrpdiwnp); 2911 return (0); 2912 } 2913 if (stcb->asoc.peers_rwnd == 0 && 2914 (flg & SCTP_FROM_MIDDLE_BOX)) { 2915 SCTP_STAT_INCR(sctps_pdrpdizrw); 2916 return (0); 2917 } 2918 ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+ 2919 sizeof(struct sctp_data_chunk)); 2920 { 2921 unsigned int iii; 2922 2923 for (iii = 0; iii < sizeof(desc->data_bytes); 2924 iii++) { 2925 if (ddp[iii] != desc->data_bytes[iii]) { 2926 SCTP_STAT_INCR(sctps_pdrpbadd); 2927 return (-1); 2928 } 2929 } 2930 } 2931 /* 2932 * We zero out the nonce so resync not 2933 * needed 2934 */ 2935 tp1->rec.data.ect_nonce = 0; 2936 2937 if (tp1->do_rtt) { 2938 /* 2939 * this guy had a RTO calculation 2940 * pending on it, cancel it 2941 */ 2942 tp1->do_rtt = 0; 2943 } 2944 SCTP_STAT_INCR(sctps_pdrpmark); 2945 if (tp1->sent != SCTP_DATAGRAM_RESEND) 2946 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2947 tp1->sent = SCTP_DATAGRAM_RESEND; 2948 /* 2949 * mark it as if we were doing a FR, since 2950 * we will be getting gap ack reports behind 2951 * the info from the router. 2952 */ 2953 tp1->rec.data.doing_fast_retransmit = 1; 2954 /* 2955 * mark the tsn with what sequences can 2956 * cause a new FR. 
2957 */ 2958 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 2959 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 2960 } else { 2961 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 2962 } 2963 2964 /* restart the timer */ 2965 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2966 stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24); 2967 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2968 stcb, tp1->whoTo); 2969 2970 /* fix counts and things */ 2971 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 2972 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP, 2973 tp1->whoTo->flight_size, 2974 tp1->book_size, 2975 (uintptr_t) stcb, 2976 tp1->rec.data.TSN_seq); 2977 } 2978 sctp_flight_size_decrease(tp1); 2979 sctp_total_flight_decrease(stcb, tp1); 2980 } { 2981 /* audit code */ 2982 unsigned int audit; 2983 2984 audit = 0; 2985 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 2986 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2987 audit++; 2988 } 2989 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue, 2990 sctp_next) { 2991 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2992 audit++; 2993 } 2994 if (audit != stcb->asoc.sent_queue_retran_cnt) { 2995 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n", 2996 audit, stcb->asoc.sent_queue_retran_cnt); 2997 #ifndef SCTP_AUDITING_ENABLED 2998 stcb->asoc.sent_queue_retran_cnt = audit; 2999 #endif 3000 } 3001 } 3002 } 3003 break; 3004 case SCTP_ASCONF: 3005 { 3006 struct sctp_tmit_chunk *asconf; 3007 3008 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue, 3009 sctp_next) { 3010 if (asconf->rec.chunk_id.id == SCTP_ASCONF) { 3011 break; 3012 } 3013 } 3014 if (asconf) { 3015 if (asconf->sent != SCTP_DATAGRAM_RESEND) 3016 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3017 asconf->sent = SCTP_DATAGRAM_RESEND; 3018 asconf->snd_count--; 3019 } 3020 } 3021 break; 3022 case SCTP_INITIATION: 3023 /* resend the INIT */ 3024 stcb->asoc.dropped_special_cnt++; 3025 if 
(stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) { 3026 /* 3027 * If we can get it in, in a few attempts we do 3028 * this, otherwise we let the timer fire. 3029 */ 3030 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, 3031 stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25); 3032 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 3033 } 3034 break; 3035 case SCTP_SELECTIVE_ACK: 3036 /* resend the sack */ 3037 sctp_send_sack(stcb); 3038 break; 3039 case SCTP_HEARTBEAT_REQUEST: 3040 /* resend a demand HB */ 3041 if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) { 3042 /* 3043 * Only retransmit if we KNOW we wont destroy the 3044 * tcb 3045 */ 3046 (void)sctp_send_hb(stcb, 1, net); 3047 } 3048 break; 3049 case SCTP_SHUTDOWN: 3050 sctp_send_shutdown(stcb, net); 3051 break; 3052 case SCTP_SHUTDOWN_ACK: 3053 sctp_send_shutdown_ack(stcb, net); 3054 break; 3055 case SCTP_COOKIE_ECHO: 3056 { 3057 struct sctp_tmit_chunk *cookie; 3058 3059 cookie = NULL; 3060 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, 3061 sctp_next) { 3062 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 3063 break; 3064 } 3065 } 3066 if (cookie) { 3067 if (cookie->sent != SCTP_DATAGRAM_RESEND) 3068 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3069 cookie->sent = SCTP_DATAGRAM_RESEND; 3070 sctp_stop_all_cookie_timers(stcb); 3071 } 3072 } 3073 break; 3074 case SCTP_COOKIE_ACK: 3075 sctp_send_cookie_ack(stcb); 3076 break; 3077 case SCTP_ASCONF_ACK: 3078 /* resend last asconf ack */ 3079 sctp_send_asconf_ack(stcb); 3080 break; 3081 case SCTP_FORWARD_CUM_TSN: 3082 send_forward_tsn(stcb, &stcb->asoc); 3083 break; 3084 /* can't do anything with these */ 3085 case SCTP_PACKET_DROPPED: 3086 case SCTP_INITIATION_ACK: /* this should not happen */ 3087 case SCTP_HEARTBEAT_ACK: 3088 case SCTP_ABORT_ASSOCIATION: 3089 case SCTP_OPERATION_ERROR: 3090 case SCTP_SHUTDOWN_COMPLETE: 3091 case SCTP_ECN_ECHO: 3092 case SCTP_ECN_CWR: 3093 default: 3094 break; 3095 } 
3096 return (0); 3097 } 3098 3099 void 3100 sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 3101 { 3102 int i; 3103 uint16_t temp; 3104 3105 /* 3106 * We set things to 0xffff since this is the last delivered sequence 3107 * and we will be sending in 0 after the reset. 3108 */ 3109 3110 if (number_entries) { 3111 for (i = 0; i < number_entries; i++) { 3112 temp = ntohs(list[i]); 3113 if (temp >= stcb->asoc.streamincnt) { 3114 continue; 3115 } 3116 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff; 3117 } 3118 } else { 3119 list = NULL; 3120 for (i = 0; i < stcb->asoc.streamincnt; i++) { 3121 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff; 3122 } 3123 } 3124 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3125 } 3126 3127 static void 3128 sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 3129 { 3130 int i; 3131 3132 if (number_entries == 0) { 3133 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3134 stcb->asoc.strmout[i].next_sequence_sent = 0; 3135 } 3136 } else if (number_entries) { 3137 for (i = 0; i < number_entries; i++) { 3138 uint16_t temp; 3139 3140 temp = ntohs(list[i]); 3141 if (temp >= stcb->asoc.streamoutcnt) { 3142 /* no such stream */ 3143 continue; 3144 } 3145 stcb->asoc.strmout[temp].next_sequence_sent = 0; 3146 } 3147 } 3148 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3149 } 3150 3151 3152 struct sctp_stream_reset_out_request * 3153 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk) 3154 { 3155 struct sctp_association *asoc; 3156 struct sctp_stream_reset_out_req *req; 3157 struct sctp_stream_reset_out_request *r; 3158 struct sctp_tmit_chunk *chk; 3159 int len, clen; 3160 3161 asoc = &stcb->asoc; 3162 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 3163 asoc->stream_reset_outstanding = 0; 3164 return (NULL); 3165 } 3166 if 
(stcb->asoc.str_reset == NULL) { 3167 asoc->stream_reset_outstanding = 0; 3168 return (NULL); 3169 } 3170 chk = stcb->asoc.str_reset; 3171 if (chk->data == NULL) { 3172 return (NULL); 3173 } 3174 if (bchk) { 3175 /* he wants a copy of the chk pointer */ 3176 *bchk = chk; 3177 } 3178 clen = chk->send_size; 3179 req = mtod(chk->data, struct sctp_stream_reset_out_req *); 3180 r = &req->sr_req; 3181 if (ntohl(r->request_seq) == seq) { 3182 /* found it */ 3183 return (r); 3184 } 3185 len = SCTP_SIZE32(ntohs(r->ph.param_length)); 3186 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) { 3187 /* move to the next one, there can only be a max of two */ 3188 r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len); 3189 if (ntohl(r->request_seq) == seq) { 3190 return (r); 3191 } 3192 } 3193 /* that seq is not here */ 3194 return (NULL); 3195 } 3196 3197 static void 3198 sctp_clean_up_stream_reset(struct sctp_tcb *stcb) 3199 { 3200 struct sctp_association *asoc; 3201 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset; 3202 3203 if (stcb->asoc.str_reset == NULL) { 3204 return; 3205 } 3206 asoc = &stcb->asoc; 3207 3208 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26); 3209 TAILQ_REMOVE(&asoc->control_send_queue, 3210 chk, 3211 sctp_next); 3212 if (chk->data) { 3213 sctp_m_freem(chk->data); 3214 chk->data = NULL; 3215 } 3216 asoc->ctrl_queue_cnt--; 3217 sctp_free_a_chunk(stcb, chk); 3218 /* sa_ignore NO_NULL_CHK */ 3219 stcb->asoc.str_reset = NULL; 3220 } 3221 3222 3223 static int 3224 sctp_handle_stream_reset_response(struct sctp_tcb *stcb, 3225 uint32_t seq, uint32_t action, 3226 struct sctp_stream_reset_response *respin) 3227 { 3228 uint16_t type; 3229 int lparm_len; 3230 struct sctp_association *asoc = &stcb->asoc; 3231 struct sctp_tmit_chunk *chk; 3232 struct sctp_stream_reset_out_request *srparam; 3233 int number_entries; 3234 3235 if (asoc->stream_reset_outstanding == 0) { 3236 /* duplicate */ 3237 
return (0); 3238 } 3239 if (seq == stcb->asoc.str_reset_seq_out) { 3240 srparam = sctp_find_stream_reset(stcb, seq, &chk); 3241 if (srparam) { 3242 stcb->asoc.str_reset_seq_out++; 3243 type = ntohs(srparam->ph.param_type); 3244 lparm_len = ntohs(srparam->ph.param_length); 3245 if (type == SCTP_STR_RESET_OUT_REQUEST) { 3246 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t); 3247 asoc->stream_reset_out_is_outstanding = 0; 3248 if (asoc->stream_reset_outstanding) 3249 asoc->stream_reset_outstanding--; 3250 if (action == SCTP_STREAM_RESET_PERFORMED) { 3251 /* do it */ 3252 sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams); 3253 } else { 3254 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3255 } 3256 } else if (type == SCTP_STR_RESET_IN_REQUEST) { 3257 /* Answered my request */ 3258 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t); 3259 if (asoc->stream_reset_outstanding) 3260 asoc->stream_reset_outstanding--; 3261 if (action != SCTP_STREAM_RESET_PERFORMED) { 3262 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3263 } 3264 } else if (type == SCTP_STR_RESET_TSN_REQUEST) { 3265 /** 3266 * a) Adopt the new in tsn. 3267 * b) reset the map 3268 * c) Adopt the new out-tsn 3269 */ 3270 struct sctp_stream_reset_response_tsn *resp; 3271 struct sctp_forward_tsn_chunk fwdtsn; 3272 int abort_flag = 0; 3273 3274 if (respin == NULL) { 3275 /* huh ? 
*/ 3276 return (0); 3277 } 3278 if (action == SCTP_STREAM_RESET_PERFORMED) { 3279 resp = (struct sctp_stream_reset_response_tsn *)respin; 3280 asoc->stream_reset_outstanding--; 3281 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3282 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3283 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1); 3284 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3285 if (abort_flag) { 3286 return (1); 3287 } 3288 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1); 3289 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 3290 sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 3291 } 3292 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3293 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn); 3294 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3295 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn); 3296 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn; 3297 3298 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3299 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3300 3301 } 3302 } 3303 /* get rid of the request and get the request flags */ 3304 if (asoc->stream_reset_outstanding == 0) { 3305 sctp_clean_up_stream_reset(stcb); 3306 } 3307 } 3308 } 3309 return (0); 3310 } 3311 3312 static void 3313 sctp_handle_str_reset_request_in(struct sctp_tcb *stcb, 3314 struct sctp_tmit_chunk *chk, 3315 struct sctp_stream_reset_in_request *req, int trunc) 3316 { 3317 uint32_t seq; 3318 int len, i; 3319 int number_entries; 3320 uint16_t temp; 3321 3322 /* 3323 * peer wants me to send a str-reset to him for my outgoing seq's if 3324 * seq_in is right. 
3325 */ 3326 struct sctp_association *asoc = &stcb->asoc; 3327 3328 seq = ntohl(req->request_seq); 3329 if (asoc->str_reset_seq_in == seq) { 3330 if (trunc) { 3331 /* Can't do it, since they exceeded our buffer size */ 3332 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3333 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3334 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3335 } else if (stcb->asoc.stream_reset_out_is_outstanding == 0) { 3336 len = ntohs(req->ph.param_length); 3337 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t)); 3338 for (i = 0; i < number_entries; i++) { 3339 temp = ntohs(req->list_of_streams[i]); 3340 req->list_of_streams[i] = temp; 3341 } 3342 /* move the reset action back one */ 3343 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3344 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3345 sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams, 3346 asoc->str_reset_seq_out, 3347 seq, (asoc->sending_seq - 1)); 3348 asoc->stream_reset_out_is_outstanding = 1; 3349 asoc->str_reset = chk; 3350 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 3351 stcb->asoc.stream_reset_outstanding++; 3352 } else { 3353 /* Can't do it, since we have sent one out */ 3354 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3355 asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER; 3356 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3357 } 3358 asoc->str_reset_seq_in++; 3359 } else if (asoc->str_reset_seq_in - 1 == seq) { 3360 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3361 } else if (asoc->str_reset_seq_in - 2 == seq) { 3362 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3363 } else { 3364 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3365 } 3366 } 3367 3368 static int 3369 sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb, 
3370 struct sctp_tmit_chunk *chk, 3371 struct sctp_stream_reset_tsn_request *req) 3372 { 3373 /* reset all in and out and update the tsn */ 3374 /* 3375 * A) reset my str-seq's on in and out. B) Select a receive next, 3376 * and set cum-ack to it. Also process this selected number as a 3377 * fwd-tsn as well. C) set in the response my next sending seq. 3378 */ 3379 struct sctp_forward_tsn_chunk fwdtsn; 3380 struct sctp_association *asoc = &stcb->asoc; 3381 int abort_flag = 0; 3382 uint32_t seq; 3383 3384 seq = ntohl(req->request_seq); 3385 if (asoc->str_reset_seq_in == seq) { 3386 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3387 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3388 fwdtsn.ch.chunk_flags = 0; 3389 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1); 3390 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3391 if (abort_flag) { 3392 return (1); 3393 } 3394 stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA; 3395 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 3396 sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 3397 } 3398 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3399 stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1; 3400 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3401 atomic_add_int(&stcb->asoc.sending_seq, 1); 3402 /* save off historical data for retrans */ 3403 stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0]; 3404 stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq; 3405 stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0]; 3406 stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn; 3407 3408 sctp_add_stream_reset_result_tsn(chk, 3409 ntohl(req->request_seq), 3410 SCTP_STREAM_RESET_PERFORMED, 3411 stcb->asoc.sending_seq, 3412 stcb->asoc.mapping_array_base_tsn); 3413 sctp_reset_out_streams(stcb, 0, (uint16_t *) 
NULL); 3414 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3415 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0]; 3416 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3417 3418 asoc->str_reset_seq_in++; 3419 } else if (asoc->str_reset_seq_in - 1 == seq) { 3420 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0], 3421 stcb->asoc.last_sending_seq[0], 3422 stcb->asoc.last_base_tsnsent[0] 3423 ); 3424 } else if (asoc->str_reset_seq_in - 2 == seq) { 3425 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1], 3426 stcb->asoc.last_sending_seq[1], 3427 stcb->asoc.last_base_tsnsent[1] 3428 ); 3429 } else { 3430 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3431 } 3432 return (0); 3433 } 3434 3435 static void 3436 sctp_handle_str_reset_request_out(struct sctp_tcb *stcb, 3437 struct sctp_tmit_chunk *chk, 3438 struct sctp_stream_reset_out_request *req, int trunc) 3439 { 3440 uint32_t seq, tsn; 3441 int number_entries, len; 3442 struct sctp_association *asoc = &stcb->asoc; 3443 3444 seq = ntohl(req->request_seq); 3445 3446 /* now if its not a duplicate we process it */ 3447 if (asoc->str_reset_seq_in == seq) { 3448 len = ntohs(req->ph.param_length); 3449 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t)); 3450 /* 3451 * the sender is resetting, handle the list issue.. we must 3452 * a) verify if we can do the reset, if so no problem b) If 3453 * we can't do the reset we must copy the request. c) queue 3454 * it, and setup the data in processor to trigger it off 3455 * when needed and dequeue all the queued data. 
3456 */ 3457 tsn = ntohl(req->send_reset_at_tsn); 3458 3459 /* move the reset action back one */ 3460 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3461 if (trunc) { 3462 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3463 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3464 } else if ((tsn == asoc->cumulative_tsn) || 3465 (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) { 3466 /* we can do it now */ 3467 sctp_reset_in_stream(stcb, number_entries, req->list_of_streams); 3468 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3469 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3470 } else { 3471 /* 3472 * we must queue it up and thus wait for the TSN's 3473 * to arrive that are at or before tsn 3474 */ 3475 struct sctp_stream_reset_list *liste; 3476 int siz; 3477 3478 siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t)); 3479 SCTP_MALLOC(liste, struct sctp_stream_reset_list *, 3480 siz, SCTP_M_STRESET); 3481 if (liste == NULL) { 3482 /* gak out of memory */ 3483 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3484 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3485 return; 3486 } 3487 liste->tsn = tsn; 3488 liste->number_entries = number_entries; 3489 memcpy(&liste->req, req, 3490 (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t)))); 3491 TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp); 3492 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3493 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3494 } 3495 asoc->str_reset_seq_in++; 3496 } else if ((asoc->str_reset_seq_in - 1) == seq) { 3497 /* 3498 * one seq back, just echo back last action since my 3499 * response was lost. 
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}

/*
 * Parse a STREAM-RESET chunk: walk its parameters (OUT/IN/TSN requests and
 * responses), dispatch each to its handler (the handlers append results to
 * the single response chunk built here), then queue that response chunk.
 * Returns non-zero if processing caused an abort.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static int
sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
    struct sctp_stream_reset_out_req *sr_req)
{
	int chk_length, param_len, ptype;
	struct sctp_paramhdr pstore;
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];

	uint32_t seq;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	chk_length = ntohs(sr_req->ch.chunk_length);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
strres_nochunk:
		/*
		 * Shared cleanup exit: also reached via goto from later
		 * failures, so chk->data may or may not be set here.
		 */
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = stcb->asoc.primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
		/* peek at the parameter header first */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
		if (ph == NULL)
			break;
		param_len = ntohs(ph->param_length);
		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
			/* bad param */
			break;
		}
		/* pull as much of the parameter as fits our buffer */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
		    (uint8_t *) & cstore);
		ptype = ntohs(ph->param_type);
		num_param++;
		if (param_len > (int)sizeof(cstore)) {
			/* parameter larger than our buffer: flag truncation */
			trunc = 1;
		} else {
			trunc = 0;
		}

		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;

			req_in = (struct sctp_stream_reset_in_request *)ph;

			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;

			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			break;
		}
		offset += SCTP_SIZE32(param_len);
		chk_length -= SCTP_SIZE32(param_len);
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}

/*
 * Handle a router or endpoints report of a packet loss, there are two ways
 * to handle this, either we get the whole packet and must disect it
 * ourselves (possibly with truncation and or corruption) or it is a summary
 * from a middle box that did the disectting for us.
 */
static void
sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
{
	uint32_t bottle_bw, on_queue;
	uint16_t trunc_len;
	unsigned int chlen;
	unsigned int at;
	struct sctp_chunk_desc desc;
	struct sctp_chunkhdr *ch;

	chlen = ntohs(cp->ch.chunk_length);
	chlen -= sizeof(struct sctp_pktdrop_chunk);
	/* XXX possible chlen underflow */
	if (chlen == 0) {
		ch = NULL;
		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
			SCTP_STAT_INCR(sctps_pdrpbwrpt);
	} else {
		/* the dropped packet's chunks follow the embedded SCTP header */
		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
		chlen -= sizeof(struct sctphdr);
		/* XXX possible chlen underflow */
		memset(&desc, 0, sizeof(desc));
	}
	trunc_len = (uint16_t) ntohs(cp->trunc_len);
	if (trunc_len > limit) {
		trunc_len = limit;
	}
	/* now the chunks themselves */
	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
		desc.chunk_type = ch->chunk_type;
		/* get amount we need to move */
		at = ntohs(ch->chunk_length);
		if (at < sizeof(struct sctp_chunkhdr)) {
			/* corrupt chunk, maybe at the end? */
			SCTP_STAT_INCR(sctps_pdrpcrupt);
			break;
		}
		if (trunc_len == 0) {
			/* we are supposed to have all of it */
			if (at > chlen) {
				/* corrupt skip it */
				SCTP_STAT_INCR(sctps_pdrpcrupt);
				break;
			}
		} else {
			/* is there enough of it left ? */
			if (desc.chunk_type == SCTP_DATA) {
				if (chlen < (sizeof(struct sctp_data_chunk) +
				    sizeof(desc.data_bytes))) {
					break;
				}
			} else {
				if (chlen < sizeof(struct sctp_chunkhdr)) {
					break;
				}
			}
		}
		if (desc.chunk_type == SCTP_DATA) {
			/* can we get out the tsn? */
			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
				SCTP_STAT_INCR(sctps_pdrpmbda);

			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
				/* yep */
				struct sctp_data_chunk *dcp;
				uint8_t *ddp;
				unsigned int iii;

				dcp = (struct sctp_data_chunk *)ch;
				ddp = (uint8_t *) (dcp + 1);
				/*
				 * save the first payload bytes so
				 * process_chunk_drop() can verify the report
				 * against what we actually sent
				 */
				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
					desc.data_bytes[iii] = ddp[iii];
				}
				desc.tsn_ifany = dcp->dp.tsn;
			} else {
				/* nope we are done. */
				SCTP_STAT_INCR(sctps_pdrpnedat);
				break;
			}
		} else {
			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
				SCTP_STAT_INCR(sctps_pdrpmbct);
		}

		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
			SCTP_STAT_INCR(sctps_pdrppdbrk);
			break;
		}
		if (SCTP_SIZE32(at) > chlen) {
			break;
		}
		chlen -= SCTP_SIZE32(at);
		if (chlen < sizeof(struct sctp_chunkhdr)) {
			/* done, none left */
			break;
		}
		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
	}
	/* Now update any rwnd --- possibly */
	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
		/* From a peer, we get a rwnd report */
		uint32_t a_rwnd;

		SCTP_STAT_INCR(sctps_pdrpfehos);

		bottle_bw = ntohl(cp->bottle_bw);
		on_queue = ntohl(cp->current_onq);
		if (bottle_bw && on_queue) {
			/* a rwnd report is in here */
			if (bottle_bw > on_queue)
				a_rwnd = bottle_bw - on_queue;
			else
				a_rwnd = 0;

			if (a_rwnd == 0)
				stcb->asoc.peers_rwnd = 0;
			else {
				if (a_rwnd > stcb->asoc.total_flight) {
					stcb->asoc.peers_rwnd =
					    a_rwnd - stcb->asoc.total_flight;
				} else {
					stcb->asoc.peers_rwnd = 0;
				}
				if (stcb->asoc.peers_rwnd <
				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
					/* SWS sender side engages */
					stcb->asoc.peers_rwnd = 0;
				}
			}
		}
	} else {
		SCTP_STAT_INCR(sctps_pdrpfmbox);
	}

	/* now middle boxes in sat networks get a cwnd bump */
	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
	    (stcb->asoc.sat_network)) {
		/*
		 * This is debateable but for sat networks it makes sense
		 * Note if a T3 timer has went off, we will prohibit any
		 * changes to cwnd until we exit the t3 loss recovery.
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
		    net, cp, &bottle_bw, &on_queue);
	}
}

/*
 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
 * offset: offset into the mbuf chain to first chunkhdr - length: is the
 * length of the complete packet outputs: - length: modified to remaining
 * length after control processing - netp: modified to new sctp_nets after
 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
 * bad packet,...)
otherwise return the tcb for this packet
 *
 * Locking: on entry, locked_tcb (== stcb when non-NULL) is held; on most
 * NULL returns the lock is dropped here first.  NOTE(review): the
 * abort_no_unlock and SACK abort_now paths return NULL without unlocking -
 * presumably the abort processing already released/freed the tcb; confirm.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static struct sctp_tcb *
sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_association *asoc;
	uint32_t vtag_in;
	int num_chunks = 0;	/* number of control chunks processed */
	uint32_t chk_length;
	int ret;
	int abort_no_unlock = 0;

	/*
	 * How big should this be, and should it be alloc'd? Lets try the
	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
	 * until we get into jumbo grams and such..
	 */
	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
	struct sctp_tcb *locked_tcb = stcb;
	int got_auth = 0;	/* set once an AUTH chunk verified this packet */
	uint32_t auth_offset = 0, auth_len = 0;
	int auth_skipped = 0;	/* AUTH seen before a tcb was found; re-check later */
	int asconf_cnt = 0;	/* ASCONFs handled; ack them all at the end */

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
	    iphlen, *offset, length, stcb);

	/* validate chunk header length... */
	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
		    ntohs(ch->chunk_length));
		if (locked_tcb) {
			SCTP_TCB_UNLOCK(locked_tcb);
		}
		return (NULL);
	}
	/*
	 * validate the verification tag
	 */
	vtag_in = ntohl(sh->v_tag);

	if (locked_tcb) {
		SCTP_TCB_LOCK_ASSERT(locked_tcb);
	}
	if (ch->chunk_type == SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
		    ntohs(ch->chunk_length), vtag_in);
		if (vtag_in != 0) {
			/* protocol error- silently discard... */
			SCTP_STAT_INCR(sctps_badvtag);
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
		/*
		 * If there is no stcb, skip the AUTH chunk and process
		 * later after a stcb is found (to validate the lookup was
		 * valid.
		 */
		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
		    (stcb == NULL) &&
		    !SCTP_BASE_SYSCTL(sctp_auth_disable)) {
			/* save this chunk for later processing */
			auth_skipped = 1;
			auth_offset = *offset;
			auth_len = ntohs(ch->chunk_length);

			/* (temporarily) move past this chunk */
			*offset += SCTP_SIZE32(auth_len);
			if (*offset >= length) {
				/* no more data left in the mbuf chain */
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
			    sizeof(struct sctp_chunkhdr), chunk_buf);
		}
		if (ch == NULL) {
			/* Help */
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
			goto process_control_chunks;
		}
		/*
		 * first check if it's an ASCONF with an unknown src addr we
		 * need to look inside to find the association
		 */
		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
			struct sctp_chunkhdr *asconf_ch = ch;
			uint32_t asconf_offset = 0, asconf_len = 0;

			/* inp's refcount may be reduced */
			SCTP_INP_INCR_REF(inp);

			/* walk consecutive ASCONFs until a lookup succeeds */
			asconf_offset = *offset;
			do {
				asconf_len = ntohs(asconf_ch->chunk_length);
				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
					break;
				stcb = sctp_findassociation_ep_asconf(m, iphlen,
				    *offset, sh, &inp, netp);
				if (stcb != NULL)
					break;
				asconf_offset += SCTP_SIZE32(asconf_len);
				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
				    sizeof(struct sctp_chunkhdr), chunk_buf);
			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
			if (stcb == NULL) {
				/*
				 * reduce inp's refcount if not reduced in
				 * sctp_findassociation_ep_asconf().
				 */
				SCTP_INP_DECR_REF(inp);
			} else {
				locked_tcb = stcb;
			}

			/* now go back and verify any auth chunk to be sure */
			if (auth_skipped && (stcb != NULL)) {
				struct sctp_auth_chunk *auth;

				auth = (struct sctp_auth_chunk *)
				    sctp_m_getptr(m, auth_offset,
				    auth_len, chunk_buf);
				got_auth = 1;
				auth_skipped = 0;
				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
				    auth_offset)) {
					/* auth HMAC failed so dump it */
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				} else {
					/* remaining chunks are HMAC checked */
					stcb->asoc.authenticated = 1;
				}
			}
		}
		if (stcb == NULL) {
			/* no association, so it's out of the blue... */
			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
			    vrf_id, port);
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		asoc = &stcb->asoc;
		/* ABORT and SHUTDOWN can use either v_tag... */
		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
			if ((vtag_in == asoc->my_vtag) ||
			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
			    (vtag_in == asoc->peer_vtag))) {
				/* this is valid */
			} else {
				/* drop this packet... */
				SCTP_STAT_INCR(sctps_badvtag);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			if (vtag_in != asoc->my_vtag) {
				/*
				 * this could be a stale SHUTDOWN-ACK or the
				 * peer never got the SHUTDOWN-COMPLETE and
				 * is still hung; we have started a new asoc
				 * but it won't complete until the shutdown
				 * is completed
				 */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
				    NULL, vrf_id, port);
				return (NULL);
			}
		} else {
			/* for all other chunks, vtag must match */
			if (vtag_in != asoc->my_vtag) {
				/* invalid vtag... */
				SCTPDBG(SCTP_DEBUG_INPUT3,
				    "invalid vtag: %xh, expect %xh\n",
				    vtag_in, asoc->my_vtag);
				SCTP_STAT_INCR(sctps_badvtag);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
		}
	}			/* end if !SCTP_COOKIE_ECHO */
	/*
	 * process all control chunks...
	 */
	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* implied cookie-ack.. we must have lost the ack */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
		    *netp);
	}
process_control_chunks:
	/* Main loop: one iteration per control chunk in the packet. */
	while (IS_SCTP_CONTROL(ch)) {
		/* validate chunk length */
		chk_length = ntohs(ch->chunk_length);
		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
		    ch->chunk_type, chk_length);
		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
		if (chk_length < sizeof(*ch) ||
		    (*offset + (int)chk_length) > length) {
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
		/*
		 * INIT-ACK only gets the init ack "header" portion only
		 * because we don't have to process the peer's COOKIE. All
		 * others get a complete chunk.
		 */
		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
		    (ch->chunk_type == SCTP_INITIATION)) {
			/* get an init-ack chunk */
			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
			if (ch == NULL) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
		} else {
			/* For cookies and all other chunks. */
			if (chk_length > sizeof(chunk_buf)) {
				/*
				 * use just the size of the chunk buffer so
				 * the front part of our chunks fit in
				 * contiguous space up to the chunk buffer
				 * size (508 bytes). For chunks that need to
				 * get more than that they must use the
				 * sctp_m_getptr() function or other means
				 * (e.g. know how to parse mbuf chains).
				 * Cookies do this already.
				 */
				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
				    (sizeof(chunk_buf) - 4),
				    chunk_buf);
				if (ch == NULL) {
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
			} else {
				/* We can fit it all */
				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
				    chk_length, chunk_buf);
				if (ch == NULL) {
					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
			}
		}
		num_chunks++;
		/* Save off the last place we got a control from */
		if (stcb != NULL) {
			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
				/*
				 * allow last_control to be NULL if
				 * ASCONF... ASCONF processing will find the
				 * right net later
				 */
				if ((netp != NULL) && (*netp != NULL))
					stcb->asoc.last_control_chunk_from = *netp;
			}
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xB0, ch->chunk_type);
#endif

		/* check to see if this chunk required auth, but isn't */
		if ((stcb != NULL) &&
		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
		    !stcb->asoc.authenticated) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			goto next_chunk;
		}
		switch (ch->chunk_type) {
		case SCTP_INITIATION:
			/* must be first and only chunk */
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore? */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					/*
					 * collision case where we are
					 * sending to them too
					 */
					;
				} else {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					*offset = length;
					return (NULL);
				}
			}
			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
			    (num_chunks > 1) ||
			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((stcb != NULL) &&
			    (SCTP_GET_STATE(&stcb->asoc) ==
			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
				/* INIT collision while shutting down: re-send SHUTDOWN-ACK */
				sctp_send_shutdown_ack(stcb,
				    stcb->asoc.primary_destination);
				*offset = length;
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (netp) {
				sctp_handle_init(m, iphlen, *offset, sh,
				    (struct sctp_init_chunk *)ch, inp,
				    stcb, *netp, &abort_no_unlock, vrf_id, port);
			}
			if (abort_no_unlock)
				return (NULL);

			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
			break;
		case SCTP_PAD_CHUNK:
			break;
		case SCTP_INITIATION_ACK:
			/* must be first and only chunk */
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					;
				} else {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					*offset = length;
					if (stcb) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
						/* socket lock must be taken before re-taking the tcb lock */
						so = SCTP_INP_SO(inp);
						atomic_add_int(&stcb->asoc.refcnt, 1);
						SCTP_TCB_UNLOCK(stcb);
						SCTP_SOCKET_LOCK(so, 1);
						SCTP_TCB_LOCK(stcb);
						atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
						SCTP_SOCKET_UNLOCK(so, 1);
#endif
					}
					return (NULL);
				}
			}
			if ((num_chunks > 1) ||
			    (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((netp) && (*netp)) {
				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
			} else {
				ret = -1;
			}
			/*
			 * Special case, I must call the output routine to
			 * get the cookie echoed
			 */
			if (abort_no_unlock)
				return (NULL);

			if ((stcb) && ret == 0)
				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
			break;
		case SCTP_SELECTIVE_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
			SCTP_STAT_INCR(sctps_recvsacks);
			{
				struct sctp_sack_chunk *sack;
				int abort_now = 0;
				uint32_t a_rwnd, cum_ack;
				uint16_t num_seg;
				int nonce_sum_flag;

				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
			ignore_sack:
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
					/*-
					 * If we have sent a shutdown-ack, we will pay no
					 * attention to a sack sent in to us since
					 * we don't care anymore.
					 */
					goto ignore_sack;
				}
				sack = (struct sctp_sack_chunk *)ch;
				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
				cum_ack = ntohl(sack->sack.cum_tsn_ack);
				num_seg = ntohs(sack->sack.num_gap_ack_blks);
				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
				    cum_ack,
				    num_seg,
				    a_rwnd
				    );
				stcb->asoc.seen_a_sack_this_pkt = 1;
				if ((stcb->asoc.pr_sctp_cnt == 0) &&
				    (num_seg == 0) &&
				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
				    (cum_ack == stcb->asoc.last_acked_seq)) &&
				    (stcb->asoc.saw_sack_with_frags == 0) &&
				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
				    ) {
					/*
					 * We have a SIMPLE sack having no
					 * prior segments and data on sent
					 * queue to be acked.. Use the
					 * faster path sack processing. We
					 * also allow window update sacks
					 * with no missing segments to go
					 * this way too.
					 */
					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
					    &abort_now);
				} else {
					if (netp && *netp)
						sctp_handle_sack(m, *offset,
						    sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
				}
				if (abort_now) {
					/* ABORT signal from sack processing */
					*offset = length;
					return (NULL);
				}
			}
			break;
		case SCTP_HEARTBEAT_REQUEST:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
			if ((stcb) && netp && *netp) {
				SCTP_STAT_INCR(sctps_recvheartbeat);
				sctp_send_heartbeat_ack(stcb, m, *offset,
				    chk_length, *netp);

				/* He's alive so give him credit */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
			}
			break;
		case SCTP_HEARTBEAT_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
				/* Its not ours */
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			/* He's alive so give him credit */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			SCTP_STAT_INCR(sctps_recvheartbeatack);
			if (netp && *netp)
				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
				    stcb, *netp);
			break;
		case SCTP_ABORT_ASSOCIATION:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
			    stcb);
			if ((stcb) && netp && *netp)
				sctp_handle_abort((struct sctp_abort_chunk *)ch,
				    stcb, *netp);
			*offset = length;
			return (NULL);
			break;
		case SCTP_SHUTDOWN:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
			    stcb);
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (netp && *netp) {
				int abort_flag = 0;

				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
				    stcb, *netp, &abort_flag);
				if (abort_flag) {
					*offset = length;
					return (NULL);
				}
			}
			break;
		case SCTP_SHUTDOWN_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
			if ((stcb) && (netp) && (*netp))
				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
			*offset = length;
			return (NULL);
			break;

		case SCTP_OPERATION_ERROR:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {

				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_COOKIE_ECHO:
			SCTPDBG(SCTP_DEBUG_INPUT3,
			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
				;
			} else {
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* We are not interested anymore */
					*offset = length;
					return (NULL);
				}
			}
			/*
			 * First are we accepting? We do this again here
			 * sincen it is possible that a previous endpoint
			 * WAS listening responded to a INIT-ACK and then
			 * closed. We opened and bound.. and are now no
			 * longer listening.
			 */

			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
				/* accept queue full: optionally abort with OUT_OF_RESC */
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
					struct mbuf *oper;
					struct sctp_paramhdr *phdr;

					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr);
						phdr = mtod(oper,
						    struct sctp_paramhdr *);
						phdr->param_type =
						    htons(SCTP_CAUSE_OUT_OF_RESC);
						phdr->param_length =
						    htons(sizeof(struct sctp_paramhdr));
					}
					sctp_abort_association(inp, stcb, m,
					    iphlen, sh, oper, vrf_id, port);
				}
				*offset = length;
				return (NULL);
			} else {
				struct mbuf *ret_buf;
				struct sctp_inpcb *linp;

				if (stcb) {
					linp = NULL;
				} else {
					linp = inp;
				}

				if (linp) {
					SCTP_ASOC_CREATE_LOCK(linp);
				}
				if (netp) {
					/* may create a new association and update inp/stcb/netp */
					ret_buf =
					    sctp_handle_cookie_echo(m, iphlen,
					    *offset, sh,
					    (struct sctp_cookie_echo_chunk *)ch,
					    &inp, &stcb, netp,
					    auth_skipped,
					    auth_offset,
					    auth_len,
					    &locked_tcb,
					    vrf_id,
					    port);
				} else {
					ret_buf = NULL;
				}
				if (linp) {
					SCTP_ASOC_CREATE_UNLOCK(linp);
				}
				if (ret_buf == NULL) {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					SCTPDBG(SCTP_DEBUG_INPUT3,
					    "GAK, null buffer\n");
					auth_skipped = 0;
					*offset = length;
					return (NULL);
				}
				/* if AUTH skipped, see if it verified... */
				if (auth_skipped) {
					got_auth = 1;
					auth_skipped = 0;
				}
				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
					/*
					 * Restart the timer if we have
					 * pending data
					 */
					struct sctp_tmit_chunk *chk;

					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
					if (chk) {
						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
						    stcb->sctp_ep, stcb,
						    chk->whoTo);
					}
				}
			}
			break;
		case SCTP_COOKIE_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					;
				} else if (stcb) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(inp);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					*offset = length;
					return (NULL);
				}
			}
			/* He's alive so give him credit */
			if ((stcb) && netp && *netp) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
			}
			break;
		case SCTP_ECN_ECHO:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
			/* He's alive so give him credit */
			if ((stcb == NULL) || (chk_length !=
			    sizeof(struct sctp_ecne_chunk))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (stcb) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
				    stcb);
			}
			break;
		case SCTP_ECN_CWR:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
			/* He's alive so give him credit */
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (stcb) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
			}
			break;
		case SCTP_SHUTDOWN_COMPLETE:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
			/* must be first and only chunk */
			if ((num_chunks > 1) ||
			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((stcb) && netp && *netp) {
				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
				    stcb, *netp);
			}
			*offset = length;
			return (NULL);
			break;
		case SCTP_ASCONF:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
			/* He's alive so give him credit */
			if (stcb) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_asconf(m, *offset,
				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
				asconf_cnt++;
			}
			break;
		case SCTP_ASCONF_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if ((stcb) && netp && *netp) {
				/* He's alive so give him credit */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_asconf_ack(m, *offset,
				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
				if (abort_no_unlock)
					return (NULL);
			}
			break;
		case SCTP_FORWARD_CUM_TSN:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			/* He's alive so give him credit */
			if (stcb) {
				int abort_flag = 0;

				stcb->asoc.overall_error_count = 0;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				*fwd_tsn_seen = 1;
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* We are not interested anymore */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(inp);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					*offset = length;
					return (NULL);
				}
				sctp_handle_forward_tsn(stcb,
				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
				if (abort_flag) {
					*offset = length;
					return (NULL);
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
						    stcb->asoc.overall_error_count,
						    0,
						    SCTP_FROM_SCTP_INPUT,
						    __LINE__);
					}
					stcb->asoc.overall_error_count = 0;
				}

			}
			break;
		case SCTP_STREAM_RESET:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				so = SCTP_INP_SO(inp);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
				*offset = length;
				return (NULL);
			}
			if (stcb->asoc.peer_supports_strreset == 0) {
				/*
				 * hmm, peer should have announced this, but
				 * we will turn it on since he is sending us
				 * a stream reset.
				 */
				stcb->asoc.peer_supports_strreset = 1;
			}
			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
				/* stop processing */
				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_PACKET_DROPPED:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
			/* re-get it all please */
			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (ch && (stcb) && netp && (*netp)) {
				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
				    stcb, *netp,
				    min(chk_length, (sizeof(chunk_buf) - 4)));

			}
			break;

		case SCTP_AUTHENTICATION:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
			if (SCTP_BASE_SYSCTL(sctp_auth_disable))
				goto unknown_chunk;

			if (stcb == NULL) {
				/* save the first AUTH for later processing */
				if (auth_skipped == 0) {
					auth_offset = *offset;
					auth_len = chk_length;
					auth_skipped = 1;
				}
				/* skip this chunk (temporarily) */
				goto next_chunk;
			}
			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
			    (chk_length > (sizeof(struct sctp_auth_chunk) +
			    SCTP_AUTH_DIGEST_LEN_MAX))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (got_auth == 1) {
				/* skip this chunk... it's already auth'd */
				goto next_chunk;
			}
			got_auth = 1;
			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
			    m, *offset)) {
				/* auth HMAC failed so dump the packet */
				*offset = length;
				return (stcb);
			} else {
				/* remaining chunks are HMAC checked */
				stcb->asoc.authenticated = 1;
			}
			break;

		default:
	unknown_chunk:
			/* it's an unknown chunk! */
			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
				/* upper two bits request an ERROR report for 0x40 */
				struct mbuf *mm;
				struct sctp_paramhdr *phd;

				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (mm) {
					phd = mtod(mm, struct sctp_paramhdr *);
					/*
					 * We cheat and use param type since
					 * we did not bother to define a
					 * error cause struct. They are the
					 * same basic format with different
					 * names.
					 */
					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
					phd->param_length = htons(chk_length + sizeof(*phd));
					SCTP_BUF_LEN(mm) = sizeof(*phd);
					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
					    M_DONTWAIT);
					if (SCTP_BUF_NEXT(mm)) {
#ifdef SCTP_MBUF_LOGGING
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
							struct mbuf *mat;

							mat = SCTP_BUF_NEXT(mm);
							while (mat) {
								if (SCTP_BUF_IS_EXTENDED(mat)) {
									sctp_log_mb(mat, SCTP_MBUF_ICOPY);
								}
								mat = SCTP_BUF_NEXT(mat);
							}
						}
#endif
						sctp_queue_op_err(stcb, mm);
					} else {
						sctp_m_freem(mm);
					}
				}
			}
			if ((ch->chunk_type & 0x80) == 0) {
				/* discard this packet */
				*offset = length;
				return (stcb);
			}	/* else skip this bad chunk and continue... */
			break;
		}		/* switch (ch->chunk_type) */


next_chunk:
		/* get the next chunk */
		*offset += SCTP_SIZE32(chk_length);
		if (*offset >= length) {
			/* no more data left in the mbuf chain */
			break;
		}
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_chunkhdr), chunk_buf);
		if (ch == NULL) {
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			*offset = length;
			return (NULL);
		}
	}			/* while */

	if (asconf_cnt > 0 && stcb != NULL) {
		sctp_send_asconf_ack(stcb);
	}
	return (stcb);
}


/*
 * Process the ECN bits we have something set so we must look to see if it is
 * ECN(0) or ECN(1) or CE
 */
static void
sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		;
	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
		/*
		 * we only add to the nonce sum for ECT1, ECT0 does not
		 * change the NS bit (that we have yet to find a way to send
		 * it yet).
		 */

		/* ECN Nonce stuff */
		stcb->asoc.receiver_nonce_sum++;
		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;

		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
4949 */ 4950 if (compare_with_wrap(stcb->asoc.cumulative_tsn, 4951 stcb->asoc.last_echo_tsn, MAX_TSN)) { 4952 stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn; 4953 } 4954 } 4955 } 4956 4957 static void 4958 sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net, 4959 uint32_t high_tsn, uint8_t ecn_bits) 4960 { 4961 if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) { 4962 /* 4963 * we possibly must notify the sender that a congestion 4964 * window reduction is in order. We do this by adding a ECNE 4965 * chunk to the output chunk queue. The incoming CWR will 4966 * remove this chunk. 4967 */ 4968 if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn, 4969 MAX_TSN)) { 4970 /* Yep, we need to add a ECNE */ 4971 sctp_send_ecn_echo(stcb, net, high_tsn); 4972 stcb->asoc.last_echo_tsn = high_tsn; 4973 } 4974 } 4975 } 4976 4977 #ifdef INVARIANTS 4978 static void 4979 sctp_validate_no_locks(struct sctp_inpcb *inp) 4980 { 4981 struct sctp_tcb *stcb; 4982 4983 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 4984 if (mtx_owned(&stcb->tcb_mtx)) { 4985 panic("Own lock on stcb at return from input"); 4986 } 4987 } 4988 } 4989 4990 #endif 4991 4992 /* 4993 * common input chunk processing (v4 and v6) 4994 */ 4995 void 4996 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, 4997 int length, struct sctphdr *sh, struct sctp_chunkhdr *ch, 4998 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, 4999 uint8_t ecn_bits, uint32_t vrf_id, uint16_t port) 5000 { 5001 /* 5002 * Control chunk processing 5003 */ 5004 uint32_t high_tsn; 5005 int fwd_tsn_seen = 0, data_processed = 0; 5006 struct mbuf *m = *mm; 5007 int abort_flag = 0; 5008 int un_sent; 5009 5010 SCTP_STAT_INCR(sctps_recvdatagrams); 5011 #ifdef SCTP_AUDITING_ENABLED 5012 sctp_audit_log(0xE0, 1); 5013 sctp_auditing(0, inp, stcb, net); 5014 #endif 5015 5016 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n", 
5017 m, iphlen, offset, length, stcb); 5018 if (stcb) { 5019 /* always clear this before beginning a packet */ 5020 stcb->asoc.authenticated = 0; 5021 stcb->asoc.seen_a_sack_this_pkt = 0; 5022 SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n", 5023 stcb, stcb->asoc.state); 5024 5025 if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) || 5026 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5027 /*- 5028 * If we hit here, we had a ref count 5029 * up when the assoc was aborted and the 5030 * timer is clearing out the assoc, we should 5031 * NOT respond to any packet.. its OOTB. 5032 */ 5033 SCTP_TCB_UNLOCK(stcb); 5034 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 5035 vrf_id, port); 5036 goto out_now; 5037 } 5038 } 5039 if (IS_SCTP_CONTROL(ch)) { 5040 /* process the control portion of the SCTP packet */ 5041 /* sa_ignore NO_NULL_CHK */ 5042 stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch, 5043 inp, stcb, &net, &fwd_tsn_seen, vrf_id, port); 5044 if (stcb) { 5045 /* 5046 * This covers us if the cookie-echo was there and 5047 * it changes our INP. 5048 */ 5049 inp = stcb->sctp_ep; 5050 if ((net) && (port)) { 5051 if (net->port == 0) { 5052 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr)); 5053 } 5054 net->port = port; 5055 } 5056 } 5057 } else { 5058 /* 5059 * no control chunks, so pre-process DATA chunks (these 5060 * checks are taken care of by control processing) 5061 */ 5062 5063 /* 5064 * if DATA only packet, and auth is required, then punt... 
5065 * can't have authenticated without any AUTH (control) 5066 * chunks 5067 */ 5068 if ((stcb != NULL) && 5069 !SCTP_BASE_SYSCTL(sctp_auth_disable) && 5070 sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) { 5071 /* "silently" ignore */ 5072 SCTP_STAT_INCR(sctps_recvauthmissing); 5073 SCTP_TCB_UNLOCK(stcb); 5074 goto out_now; 5075 } 5076 if (stcb == NULL) { 5077 /* out of the blue DATA chunk */ 5078 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 5079 vrf_id, port); 5080 goto out_now; 5081 } 5082 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) { 5083 /* v_tag mismatch! */ 5084 SCTP_STAT_INCR(sctps_badvtag); 5085 SCTP_TCB_UNLOCK(stcb); 5086 goto out_now; 5087 } 5088 } 5089 5090 if (stcb == NULL) { 5091 /* 5092 * no valid TCB for this packet, or we found it's a bad 5093 * packet while processing control, or we're done with this 5094 * packet (done or skip rest of data), so we drop it... 5095 */ 5096 goto out_now; 5097 } 5098 /* 5099 * DATA chunk processing 5100 */ 5101 /* plow through the data chunks while length > offset */ 5102 5103 /* 5104 * Rest should be DATA only. Check authentication state if AUTH for 5105 * DATA is required. 5106 */ 5107 if ((length > offset) && 5108 (stcb != NULL) && 5109 !SCTP_BASE_SYSCTL(sctp_auth_disable) && 5110 sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) && 5111 !stcb->asoc.authenticated) { 5112 /* "silently" ignore */ 5113 SCTP_STAT_INCR(sctps_recvauthmissing); 5114 SCTPDBG(SCTP_DEBUG_AUTH1, 5115 "Data chunk requires AUTH, skipped\n"); 5116 goto trigger_send; 5117 } 5118 if (length > offset) { 5119 int retval; 5120 5121 /* 5122 * First check to make sure our state is correct. We would 5123 * not get here unless we really did have a tag, so we don't 5124 * abort if this happens, just dump the chunk silently. 
5125 */ 5126 switch (SCTP_GET_STATE(&stcb->asoc)) { 5127 case SCTP_STATE_COOKIE_ECHOED: 5128 /* 5129 * we consider data with valid tags in this state 5130 * shows us the cookie-ack was lost. Imply it was 5131 * there. 5132 */ 5133 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5134 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5135 stcb->asoc.overall_error_count, 5136 0, 5137 SCTP_FROM_SCTP_INPUT, 5138 __LINE__); 5139 } 5140 stcb->asoc.overall_error_count = 0; 5141 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net); 5142 break; 5143 case SCTP_STATE_COOKIE_WAIT: 5144 /* 5145 * We consider OOTB any data sent during asoc setup. 5146 */ 5147 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 5148 vrf_id, port); 5149 SCTP_TCB_UNLOCK(stcb); 5150 goto out_now; 5151 /* sa_ignore NOTREACHED */ 5152 break; 5153 case SCTP_STATE_EMPTY: /* should not happen */ 5154 case SCTP_STATE_INUSE: /* should not happen */ 5155 case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */ 5156 case SCTP_STATE_SHUTDOWN_ACK_SENT: 5157 default: 5158 SCTP_TCB_UNLOCK(stcb); 5159 goto out_now; 5160 /* sa_ignore NOTREACHED */ 5161 break; 5162 case SCTP_STATE_OPEN: 5163 case SCTP_STATE_SHUTDOWN_SENT: 5164 break; 5165 } 5166 /* take care of ECN, part 1. */ 5167 if (stcb->asoc.ecn_allowed && 5168 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) { 5169 sctp_process_ecn_marked_a(stcb, net, ecn_bits); 5170 } 5171 /* plow through the data chunks while length > offset */ 5172 retval = sctp_process_data(mm, iphlen, &offset, length, sh, 5173 inp, stcb, net, &high_tsn); 5174 if (retval == 2) { 5175 /* 5176 * The association aborted, NO UNLOCK needed since 5177 * the association is destroyed. 5178 */ 5179 goto out_now; 5180 } 5181 data_processed = 1; 5182 if (retval == 0) { 5183 /* take care of ecn part 2. 
*/ 5184 if (stcb->asoc.ecn_allowed && 5185 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) { 5186 sctp_process_ecn_marked_b(stcb, net, high_tsn, 5187 ecn_bits); 5188 } 5189 } 5190 /* 5191 * Anything important needs to have been m_copy'ed in 5192 * process_data 5193 */ 5194 } 5195 if ((data_processed == 0) && (fwd_tsn_seen)) { 5196 int was_a_gap = 0; 5197 5198 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 5199 stcb->asoc.cumulative_tsn, MAX_TSN)) { 5200 /* there was a gap before this data was processed */ 5201 was_a_gap = 1; 5202 } 5203 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 5204 if (abort_flag) { 5205 /* Again, we aborted so NO UNLOCK needed */ 5206 goto out_now; 5207 } 5208 } 5209 /* trigger send of any chunks in queue... */ 5210 trigger_send: 5211 #ifdef SCTP_AUDITING_ENABLED 5212 sctp_audit_log(0xE0, 2); 5213 sctp_auditing(1, inp, stcb, net); 5214 #endif 5215 SCTPDBG(SCTP_DEBUG_INPUT1, 5216 "Check for chunk output prw:%d tqe:%d tf=%d\n", 5217 stcb->asoc.peers_rwnd, 5218 TAILQ_EMPTY(&stcb->asoc.control_send_queue), 5219 stcb->asoc.total_flight); 5220 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 5221 5222 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) || 5223 ((un_sent) && 5224 (stcb->asoc.peers_rwnd > 0 || 5225 (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) { 5226 SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n"); 5227 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 5228 SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n"); 5229 } 5230 #ifdef SCTP_AUDITING_ENABLED 5231 sctp_audit_log(0xE0, 3); 5232 sctp_auditing(2, inp, stcb, net); 5233 #endif 5234 SCTP_TCB_UNLOCK(stcb); 5235 out_now: 5236 #ifdef INVARIANTS 5237 sctp_validate_no_locks(inp); 5238 #endif 5239 return; 5240 } 5241 5242 5243 void 5244 sctp_input_with_port(i_pak, off, port) 5245 struct mbuf *i_pak; 5246 int off; 5247 uint16_t port; 5248 { 5249 #ifdef SCTP_MBUF_LOGGING 5250 struct mbuf *mat; 5251 5252 
#endif 5253 struct mbuf *m; 5254 int iphlen; 5255 uint32_t vrf_id = 0; 5256 uint8_t ecn_bits; 5257 struct ip *ip; 5258 struct sctphdr *sh; 5259 struct sctp_inpcb *inp = NULL; 5260 5261 uint32_t check, calc_check; 5262 struct sctp_nets *net; 5263 struct sctp_tcb *stcb = NULL; 5264 struct sctp_chunkhdr *ch; 5265 int refcount_up = 0; 5266 int length, mlen, offset; 5267 5268 5269 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) { 5270 SCTP_RELEASE_PKT(i_pak); 5271 return; 5272 } 5273 mlen = SCTP_HEADER_LEN(i_pak); 5274 iphlen = off; 5275 m = SCTP_HEADER_TO_CHAIN(i_pak); 5276 5277 net = NULL; 5278 SCTP_STAT_INCR(sctps_recvpackets); 5279 SCTP_STAT_INCR_COUNTER64(sctps_inpackets); 5280 5281 5282 #ifdef SCTP_MBUF_LOGGING 5283 /* Log in any input mbufs */ 5284 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 5285 mat = m; 5286 while (mat) { 5287 if (SCTP_BUF_IS_EXTENDED(mat)) { 5288 sctp_log_mb(mat, SCTP_MBUF_INPUT); 5289 } 5290 mat = SCTP_BUF_NEXT(mat); 5291 } 5292 } 5293 #endif 5294 #ifdef SCTP_PACKET_LOGGING 5295 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) 5296 sctp_packet_log(m, mlen); 5297 #endif 5298 /* 5299 * Must take out the iphlen, since mlen expects this (only effect lb 5300 * case) 5301 */ 5302 mlen -= iphlen; 5303 5304 /* 5305 * Get IP, SCTP, and first chunk header together in first mbuf. 
5306 */ 5307 ip = mtod(m, struct ip *); 5308 offset = iphlen + sizeof(*sh) + sizeof(*ch); 5309 if (SCTP_BUF_LEN(m) < offset) { 5310 if ((m = m_pullup(m, offset)) == 0) { 5311 SCTP_STAT_INCR(sctps_hdrops); 5312 return; 5313 } 5314 ip = mtod(m, struct ip *); 5315 } 5316 sh = (struct sctphdr *)((caddr_t)ip + iphlen); 5317 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh)); 5318 SCTPDBG(SCTP_DEBUG_INPUT1, 5319 "sctp_input() length:%d iphlen:%d\n", mlen, iphlen); 5320 5321 /* SCTP does not allow broadcasts or multicasts */ 5322 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { 5323 goto bad; 5324 } 5325 if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) { 5326 /* 5327 * We only look at broadcast if its a front state, All 5328 * others we will not have a tcb for anyway. 5329 */ 5330 goto bad; 5331 } 5332 /* validate SCTP checksum */ 5333 check = sh->checksum; /* save incoming checksum */ 5334 if ((check == 0) && (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback)) && 5335 ((ip->ip_src.s_addr == ip->ip_dst.s_addr) || 5336 (SCTP_IS_IT_LOOPBACK(m))) 5337 ) { 5338 goto sctp_skip_csum_4; 5339 } 5340 sh->checksum = 0; /* prepare for calc */ 5341 calc_check = sctp_calculate_sum(m, &mlen, iphlen); 5342 if (calc_check != check) { 5343 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n", 5344 calc_check, check, m, mlen, iphlen); 5345 5346 stcb = sctp_findassociation_addr(m, iphlen, 5347 offset - sizeof(*ch), 5348 sh, ch, &inp, &net, 5349 vrf_id); 5350 if ((net) && (port)) { 5351 if (net->port == 0) { 5352 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr)); 5353 } 5354 net->port = port; 5355 } 5356 if ((inp) && (stcb)) { 5357 sctp_send_packet_dropped(stcb, net, m, iphlen, 1); 5358 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED); 5359 } else if ((inp != NULL) && (stcb == NULL)) { 5360 refcount_up = 1; 5361 } 5362 SCTP_STAT_INCR(sctps_badsum); 5363 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors); 
5364 goto bad; 5365 } 5366 sh->checksum = calc_check; 5367 sctp_skip_csum_4: 5368 /* destination port of 0 is illegal, based on RFC2960. */ 5369 if (sh->dest_port == 0) { 5370 SCTP_STAT_INCR(sctps_hdrops); 5371 goto bad; 5372 } 5373 /* validate mbuf chain length with IP payload length */ 5374 if (mlen < (ip->ip_len - iphlen)) { 5375 SCTP_STAT_INCR(sctps_hdrops); 5376 goto bad; 5377 } 5378 /* 5379 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants 5380 * IP/SCTP/first chunk header... 5381 */ 5382 stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch), 5383 sh, ch, &inp, &net, vrf_id); 5384 if ((net) && (port)) { 5385 if (net->port == 0) { 5386 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr)); 5387 } 5388 net->port = port; 5389 } 5390 /* inp's ref-count increased && stcb locked */ 5391 if (inp == NULL) { 5392 struct sctp_init_chunk *init_chk, chunk_buf; 5393 5394 SCTP_STAT_INCR(sctps_noport); 5395 #ifdef ICMP_BANDLIM 5396 /* 5397 * we use the bandwidth limiting to protect against sending 5398 * too many ABORTS all at once. In this case these count the 5399 * same as an ICMP message. 5400 */ 5401 if (badport_bandlim(0) < 0) 5402 goto bad; 5403 #endif /* ICMP_BANDLIM */ 5404 SCTPDBG(SCTP_DEBUG_INPUT1, 5405 "Sending a ABORT from packet entry!\n"); 5406 if (ch->chunk_type == SCTP_INITIATION) { 5407 /* 5408 * we do a trick here to get the INIT tag, dig in 5409 * and get the tag from the INIT and put it in the 5410 * common header. 
5411 */ 5412 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 5413 iphlen + sizeof(*sh), sizeof(*init_chk), 5414 (uint8_t *) & chunk_buf); 5415 if (init_chk != NULL) 5416 sh->v_tag = init_chk->init.initiate_tag; 5417 } 5418 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 5419 sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port); 5420 goto bad; 5421 } 5422 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) { 5423 goto bad; 5424 } 5425 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) 5426 sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port); 5427 goto bad; 5428 } else if (stcb == NULL) { 5429 refcount_up = 1; 5430 } 5431 #ifdef IPSEC 5432 /* 5433 * I very much doubt any of the IPSEC stuff will work but I have no 5434 * idea, so I will leave it in place. 5435 */ 5436 if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) { 5437 ipsec4stat.in_polvio++; 5438 SCTP_STAT_INCR(sctps_hdrops); 5439 goto bad; 5440 } 5441 #endif /* IPSEC */ 5442 5443 /* 5444 * common chunk processing 5445 */ 5446 length = ip->ip_len + iphlen; 5447 offset -= sizeof(struct sctp_chunkhdr); 5448 5449 ecn_bits = ip->ip_tos; 5450 5451 /* sa_ignore NO_NULL_CHK */ 5452 sctp_common_input_processing(&m, iphlen, offset, length, sh, ch, 5453 inp, stcb, net, ecn_bits, vrf_id, port); 5454 /* inp's ref-count reduced && stcb unlocked */ 5455 if (m) { 5456 sctp_m_freem(m); 5457 } 5458 if ((inp) && (refcount_up)) { 5459 /* reduce ref-count */ 5460 SCTP_INP_DECR_REF(inp); 5461 } 5462 return; 5463 bad: 5464 if (stcb) { 5465 SCTP_TCB_UNLOCK(stcb); 5466 } 5467 if ((inp) && (refcount_up)) { 5468 /* reduce ref-count */ 5469 SCTP_INP_DECR_REF(inp); 5470 } 5471 if (m) { 5472 sctp_m_freem(m); 5473 } 5474 return; 5475 } 5476 void 5477 sctp_input(i_pak, off) 5478 struct mbuf *i_pak; 5479 int off; 5480 { 5481 sctp_input_with_port(i_pak, off, 0); 5482 } 5483