/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>



static void
sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	/*
	 * This now not only stops all cookie timers it also stops any INIT
	 * timers as well. This will make sure that the timers are stopped
	 * in all collision cases.
	 */
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
		}
	}
}

/* INIT handler */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id,
    uint32_t table_id)
{
	struct sctp_init *init;
	struct mbuf *op_err;
	uint32_t init_limit;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    stcb);
	op_err = NULL;
	init = &cp->init;
	/* First are we accepting?
*/ 93 if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) { 94 SCTPDBG(SCTP_DEBUG_INPUT2, 95 "sctp_handle_init: Abort, so_qlimit:%d\n", 96 inp->sctp_socket->so_qlimit); 97 /* 98 * FIX ME ?? What about TCP model and we have a 99 * match/restart case? 100 */ 101 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 102 vrf_id, table_id); 103 if (stcb) 104 *abort_no_unlock = 1; 105 return; 106 } 107 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) { 108 /* Invalid length */ 109 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 110 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 111 vrf_id, table_id); 112 if (stcb) 113 *abort_no_unlock = 1; 114 return; 115 } 116 /* validate parameters */ 117 if (init->initiate_tag == 0) { 118 /* protocol error... send abort */ 119 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 120 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 121 vrf_id, table_id); 122 if (stcb) 123 *abort_no_unlock = 1; 124 return; 125 } 126 if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) { 127 /* invalid parameter... send abort */ 128 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 129 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 130 vrf_id, table_id); 131 return; 132 } 133 if (init->num_inbound_streams == 0) { 134 /* protocol error... send abort */ 135 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 136 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 137 vrf_id, table_id); 138 if (stcb) 139 *abort_no_unlock = 1; 140 return; 141 } 142 if (init->num_outbound_streams == 0) { 143 /* protocol error... send abort */ 144 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 145 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 146 vrf_id, table_id); 147 if (stcb) 148 *abort_no_unlock = 1; 149 return; 150 } 151 init_limit = offset + ntohs(cp->ch.chunk_length); 152 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp), 153 init_limit)) { 154 /* auth parameter(s) error... 
send abort */ 155 sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, 156 table_id); 157 if (stcb) 158 *abort_no_unlock = 1; 159 return; 160 } 161 /* send an INIT-ACK w/cookie */ 162 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n"); 163 sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, 164 table_id); 165 } 166 167 /* 168 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error 169 */ 170 static int 171 sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb, 172 struct sctp_nets *net) 173 { 174 struct sctp_init *init; 175 struct sctp_association *asoc; 176 struct sctp_nets *lnet; 177 unsigned int i; 178 179 init = &cp->init; 180 asoc = &stcb->asoc; 181 /* save off parameters */ 182 asoc->peer_vtag = ntohl(init->initiate_tag); 183 asoc->peers_rwnd = ntohl(init->a_rwnd); 184 if (TAILQ_FIRST(&asoc->nets)) { 185 /* update any ssthresh's that may have a default */ 186 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 187 lnet->ssthresh = asoc->peers_rwnd; 188 189 #if defined(SCTP_CWND_MONITOR) || defined(SCTP_CWND_LOGGING) 190 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION); 191 #endif 192 193 } 194 } 195 SCTP_TCB_SEND_LOCK(stcb); 196 if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) { 197 unsigned int newcnt; 198 struct sctp_stream_out *outs; 199 struct sctp_stream_queue_pending *sp; 200 201 /* cut back on number of streams */ 202 newcnt = ntohs(init->num_inbound_streams); 203 /* This if is probably not needed but I am cautious */ 204 if (asoc->strmout) { 205 /* First make sure no data chunks are trapped */ 206 for (i = newcnt; i < asoc->pre_open_streams; i++) { 207 outs = &asoc->strmout[i]; 208 sp = TAILQ_FIRST(&outs->outqueue); 209 while (sp) { 210 TAILQ_REMOVE(&outs->outqueue, sp, 211 next); 212 asoc->stream_queue_cnt--; 213 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, 214 stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, 215 sp); 216 if (sp->data) { 217 sctp_m_freem(sp->data); 218 sp->data = NULL; 219 } 220 sctp_free_remote_addr(sp->net); 221 sp->net = NULL; 222 /* Free the chunk */ 223 SCTP_PRINTF("sp:%p tcb:%p weird free case\n", 224 sp, stcb); 225 226 sctp_free_a_strmoq(stcb, sp); 227 /* sa_ignore FREED_MEMORY */ 228 sp = TAILQ_FIRST(&outs->outqueue); 229 } 230 } 231 } 232 /* cut back the count and abandon the upper streams */ 233 asoc->pre_open_streams = newcnt; 234 } 235 SCTP_TCB_SEND_UNLOCK(stcb); 236 asoc->streamoutcnt = asoc->pre_open_streams; 237 /* init tsn's */ 238 asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1; 239 #ifdef SCTP_MAP_LOGGING 240 sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 241 #endif 242 /* This is the next one we expect */ 243 asoc->str_reset_seq_in = asoc->asconf_seq_in + 1; 244 245 asoc->mapping_array_base_tsn = ntohl(init->initial_tsn); 246 asoc->cumulative_tsn = asoc->asconf_seq_in; 247 asoc->last_echo_tsn = asoc->asconf_seq_in; 248 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 249 /* open the requested streams */ 250 if (asoc->strmin != NULL) { 251 /* Free the old ones */ 252 struct sctp_queued_to_read *ctl; 253 254 for (i = 0; i < asoc->streamincnt; i++) { 255 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 256 while (ctl) { 257 TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next); 258 sctp_free_remote_addr(ctl->whoFrom); 259 sctp_m_freem(ctl->data); 260 ctl->data = NULL; 261 sctp_free_a_readq(stcb, ctl); 262 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 263 } 264 } 265 SCTP_FREE(asoc->strmin); 266 } 267 asoc->streamincnt = 
ntohs(init->num_outbound_streams); 268 if (asoc->streamincnt > MAX_SCTP_STREAMS) { 269 asoc->streamincnt = MAX_SCTP_STREAMS; 270 } 271 SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt * 272 sizeof(struct sctp_stream_in), "StreamsIn"); 273 if (asoc->strmin == NULL) { 274 /* we didn't get memory for the streams! */ 275 SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n"); 276 return (-1); 277 } 278 for (i = 0; i < asoc->streamincnt; i++) { 279 asoc->strmin[i].stream_no = i; 280 asoc->strmin[i].last_sequence_delivered = 0xffff; 281 /* 282 * U-stream ranges will be set when the cookie is unpacked. 283 * Or for the INIT sender they are un set (if pr-sctp not 284 * supported) when the INIT-ACK arrives. 285 */ 286 TAILQ_INIT(&asoc->strmin[i].inqueue); 287 asoc->strmin[i].delivery_started = 0; 288 } 289 /* 290 * load_address_from_init will put the addresses into the 291 * association when the COOKIE is processed or the INIT-ACK is 292 * processed. Both types of COOKIE's existing and new call this 293 * routine. It will remove addresses that are no longer in the 294 * association (for the restarting case where addresses are 295 * removed). Up front when the INIT arrives we will discard it if it 296 * is a restart and new addresses have been added. 297 */ 298 /* sa_ignore MEMLEAK */ 299 return (0); 300 } 301 302 /* 303 * INIT-ACK message processing/consumption returns value < 0 on error 304 */ 305 static int 306 sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, 307 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 308 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, 309 uint32_t table_id) 310 { 311 struct sctp_association *asoc; 312 struct mbuf *op_err; 313 int retval, abort_flag; 314 uint32_t initack_limit; 315 316 /* First verify that we have no illegal param's */ 317 abort_flag = 0; 318 op_err = NULL; 319 320 op_err = sctp_arethere_unrecognized_parameters(m, 321 (offset + sizeof(struct sctp_init_chunk)), 322 &abort_flag, (struct sctp_chunkhdr *)cp); 323 if (abort_flag) { 324 /* Send an abort and notify peer */ 325 if (op_err != NULL) { 326 sctp_send_operr_to(m, iphlen, op_err, 327 cp->init.initiate_tag, vrf_id, 328 table_id); 329 } else { 330 /* 331 * Just notify (abort_assoc does this if we send an 332 * abort). 
333 */ 334 sctp_abort_notification(stcb, 0); 335 /* 336 * No sense in further INIT's since we will get the 337 * same param back 338 */ 339 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3); 340 *abort_no_unlock = 1; 341 } 342 return (-1); 343 } 344 asoc = &stcb->asoc; 345 /* process the peer's parameters in the INIT-ACK */ 346 retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net); 347 if (retval < 0) { 348 return (retval); 349 } 350 initack_limit = offset + ntohs(cp->ch.chunk_length); 351 /* load all addresses */ 352 if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen, 353 (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh, 354 NULL))) { 355 /* Huh, we should abort */ 356 SCTPDBG(SCTP_DEBUG_INPUT1, 357 "Load addresses from INIT causes an abort %d\n", 358 retval); 359 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 360 NULL, 0, 0); 361 *abort_no_unlock = 1; 362 return (-1); 363 } 364 stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs, 365 stcb->asoc.local_hmacs); 366 if (op_err) { 367 sctp_queue_op_err(stcb, op_err); 368 /* queuing will steal away the mbuf chain to the out queue */ 369 op_err = NULL; 370 } 371 /* extract the cookie and queue it to "echo" it back... */ 372 stcb->asoc.overall_error_count = 0; 373 net->error_count = 0; 374 375 /* 376 * Cancel the INIT timer, We do this first before queueing the 377 * cookie. We always cancel at the primary to assue that we are 378 * canceling the timer started by the INIT which always goes to the 379 * primary. 380 */ 381 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb, 382 asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4); 383 384 /* calculate the RTO */ 385 net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered); 386 387 retval = sctp_send_cookie_echo(m, offset, stcb, net); 388 if (retval < 0) { 389 /* 390 * No cookie, we probably should send a op error. But in any 391 * case if there is no cookie in the INIT-ACK, we can 392 * abandon the peer, its broke. 
393 */ 394 if (retval == -3) { 395 /* We abort with an error of missing mandatory param */ 396 struct mbuf *op_err; 397 398 op_err = 399 sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM); 400 if (op_err) { 401 /* 402 * Expand beyond to include the mandatory 403 * param cookie 404 */ 405 struct sctp_inv_mandatory_param *mp; 406 407 SCTP_BUF_LEN(op_err) = 408 sizeof(struct sctp_inv_mandatory_param); 409 mp = mtod(op_err, 410 struct sctp_inv_mandatory_param *); 411 /* Subtract the reserved param */ 412 mp->length = 413 htons(sizeof(struct sctp_inv_mandatory_param) - 2); 414 mp->num_param = htonl(1); 415 mp->param = htons(SCTP_STATE_COOKIE); 416 mp->resv = 0; 417 } 418 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 419 sh, op_err, 0, 0); 420 *abort_no_unlock = 1; 421 } 422 return (retval); 423 } 424 return (0); 425 } 426 427 static void 428 sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp, 429 struct sctp_tcb *stcb, struct sctp_nets *net) 430 { 431 struct sockaddr_storage store; 432 struct sockaddr_in *sin; 433 struct sockaddr_in6 *sin6; 434 struct sctp_nets *r_net; 435 struct timeval tv; 436 437 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) { 438 /* Invalid length */ 439 return; 440 } 441 sin = (struct sockaddr_in *)&store; 442 sin6 = (struct sockaddr_in6 *)&store; 443 444 memset(&store, 0, sizeof(store)); 445 if (cp->heartbeat.hb_info.addr_family == AF_INET && 446 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) { 447 sin->sin_family = cp->heartbeat.hb_info.addr_family; 448 sin->sin_len = cp->heartbeat.hb_info.addr_len; 449 sin->sin_port = stcb->rport; 450 memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address, 451 sizeof(sin->sin_addr)); 452 } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 && 453 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) { 454 sin6->sin6_family = cp->heartbeat.hb_info.addr_family; 455 sin6->sin6_len = cp->heartbeat.hb_info.addr_len; 456 sin6->sin6_port = stcb->rport; 457 memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address, 458 sizeof(sin6->sin6_addr)); 459 } else { 460 return; 461 } 462 r_net = sctp_findnet(stcb, (struct sockaddr *)sin); 463 if (r_net == NULL) { 464 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n"); 465 return; 466 } 467 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) && 468 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) && 469 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) { 470 /* 471 * If the its a HB and it's random value is correct when can 472 * confirm the destination. 473 */ 474 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 475 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) { 476 stcb->asoc.primary_destination = r_net; 477 r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY; 478 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 479 r_net = TAILQ_FIRST(&stcb->asoc.nets); 480 if (r_net != stcb->asoc.primary_destination) { 481 /* 482 * first one on the list is NOT the primary 483 * sctp_cmpaddr() is much more efficent if 484 * the primary is the first on the list, 485 * make it so. 
486 */ 487 TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 488 TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 489 } 490 } 491 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 492 stcb, 0, (void *)r_net); 493 } 494 r_net->error_count = 0; 495 r_net->hb_responded = 1; 496 tv.tv_sec = cp->heartbeat.hb_info.time_value_1; 497 tv.tv_usec = cp->heartbeat.hb_info.time_value_2; 498 if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 499 r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 500 r_net->dest_state |= SCTP_ADDR_REACHABLE; 501 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 502 SCTP_HEARTBEAT_SUCCESS, (void *)r_net); 503 /* now was it the primary? if so restore */ 504 if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 505 (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net); 506 } 507 } 508 /* Now lets do a RTO with this */ 509 r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv); 510 } 511 512 static void 513 sctp_handle_abort(struct sctp_abort_chunk *cp, 514 struct sctp_tcb *stcb, struct sctp_nets *net) 515 { 516 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n"); 517 if (stcb == NULL) 518 return; 519 520 /* stop any receive timers */ 521 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5); 522 /* notify user of the abort and clean up... */ 523 sctp_abort_notification(stcb, 0); 524 /* free the tcb */ 525 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 526 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 527 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 528 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 529 } 530 #ifdef SCTP_ASOCLOG_OF_TSNS 531 sctp_print_out_track_log(stcb); 532 #endif 533 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 534 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n"); 535 } 536 537 static void 538 sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, 539 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag) 540 { 541 struct sctp_association *asoc; 542 int some_on_streamwheel; 543 544 SCTPDBG(SCTP_DEBUG_INPUT2, 545 "sctp_handle_shutdown: handling SHUTDOWN\n"); 546 if (stcb == NULL) 547 return; 548 asoc = &stcb->asoc; 549 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 550 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 551 return; 552 } 553 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) { 554 /* Shutdown NOT the expected size */ 555 return; 556 } else { 557 sctp_update_acked(stcb, cp, net, abort_flag); 558 } 559 if (asoc->control_pdapi) { 560 /* 561 * With a normal shutdown we assume the end of last record. 
562 */ 563 SCTP_INP_READ_LOCK(stcb->sctp_ep); 564 asoc->control_pdapi->end_added = 1; 565 asoc->control_pdapi->pdapi_aborted = 1; 566 asoc->control_pdapi = NULL; 567 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 568 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 569 } 570 /* goto SHUTDOWN_RECEIVED state to block new requests */ 571 if (stcb->sctp_socket) { 572 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 573 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 574 asoc->state = SCTP_STATE_SHUTDOWN_RECEIVED; 575 /* 576 * notify upper layer that peer has initiated a 577 * shutdown 578 */ 579 sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL); 580 581 /* reset time */ 582 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 583 } 584 } 585 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 586 /* 587 * stop the shutdown timer, since we WILL move to 588 * SHUTDOWN-ACK-SENT. 589 */ 590 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_7); 591 } 592 /* Now are we there yet? */ 593 some_on_streamwheel = 0; 594 if (!TAILQ_EMPTY(&asoc->out_wheel)) { 595 /* Check to see if some data queued */ 596 struct sctp_stream_out *outs; 597 598 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) { 599 if (!TAILQ_EMPTY(&outs->outqueue)) { 600 some_on_streamwheel = 1; 601 break; 602 } 603 } 604 } 605 if (!TAILQ_EMPTY(&asoc->send_queue) || 606 !TAILQ_EMPTY(&asoc->sent_queue) || 607 some_on_streamwheel) { 608 /* By returning we will push more data out */ 609 return; 610 } else { 611 /* no outstanding data to send, so move on... */ 612 /* send SHUTDOWN-ACK */ 613 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 614 /* move to SHUTDOWN-ACK-SENT state */ 615 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 616 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 617 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 618 } 619 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 620 621 /* start SHUTDOWN timer */ 622 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, 623 stcb, net); 624 } 625 } 626 627 static void 628 sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp, 629 struct sctp_tcb *stcb, struct sctp_nets *net) 630 { 631 struct sctp_association *asoc; 632 633 SCTPDBG(SCTP_DEBUG_INPUT2, 634 "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n"); 635 if (stcb == NULL) 636 return; 637 638 asoc = &stcb->asoc; 639 /* process according to association state */ 640 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 641 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 642 /* unexpected SHUTDOWN-ACK... so ignore... */ 643 SCTP_TCB_UNLOCK(stcb); 644 return; 645 } 646 if (asoc->control_pdapi) { 647 /* 648 * With a normal shutdown we assume the end of last record. 649 */ 650 SCTP_INP_READ_LOCK(stcb->sctp_ep); 651 asoc->control_pdapi->end_added = 1; 652 asoc->control_pdapi->pdapi_aborted = 1; 653 asoc->control_pdapi = NULL; 654 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 655 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 656 } 657 /* are the queues empty? 
 */
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    !TAILQ_EMPTY(&asoc->out_wheel)) {
		sctp_report_all_outbound(stcb, 0);
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	/* send SHUTDOWN-COMPLETE */
	sctp_send_shutdown_complete(stcb, net);
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
			/* Set the connected flag to disconnected */
			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
		}
	}
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB but first save off the ep */
	sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
}

/*
 * Skip past the param header and then we will find the chunk that caused the
 * problem. There are two possibilities, ASCONF or FWD-TSN; other than that,
 * our peer must be broken.
 */
static void
sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
    struct sctp_nets *net)
{
	struct sctp_chunkhdr *chk;

	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
	switch (chk->chunk_type) {
	case SCTP_ASCONF_ACK:
	case SCTP_ASCONF:
		sctp_asconf_cleanup(stcb, net);
		break;
	case SCTP_FORWARD_CUM_TSN:
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support chunk type %d(%x)??\n",
		    chk->chunk_type, (uint32_t) chk->chunk_type);
		break;
	}
}

/*
 * Skip past the param header and then we will find the param that caused the
 * problem. There are a number of params in an ASCONF, or it is the pr-sctp
 * param; these will turn off specific features.
 */
static void
sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
{
	struct sctp_paramhdr *pbad;

	pbad = phdr + 1;
	switch (ntohs(pbad->param_type)) {
		/* pr-sctp draft */
	case SCTP_PRSCTP_SUPPORTED:
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	case SCTP_SUPPORTED_CHUNK_EXT:
		break;
		/* draft-ietf-tsvwg-addip-sctp */
	case SCTP_ECN_NONCE_SUPPORTED:
		stcb->asoc.peer_supports_ecn_nonce = 0;
		stcb->asoc.ecn_nonce_allowed = 0;
		stcb->asoc.ecn_allowed = 0;
		break;
	case SCTP_ADD_IP_ADDRESS:
	case SCTP_DEL_IP_ADDRESS:
	case SCTP_SET_PRIM_ADDR:
		stcb->asoc.peer_supports_asconf = 0;
		break;
	case SCTP_SUCCESS_REPORT:
	case SCTP_ERROR_CAUSE_IND:
		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Turning off ASCONF to this strange peer\n");
		stcb->asoc.peer_supports_asconf = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support param type %d(%x)??\n",
		    pbad->param_type, (uint32_t) pbad->param_type);
		break;
	}
}

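/*
 * Operation Error (ERROR) chunk handler: walk the list of error causes in
 * the chunk and react to each one in turn. Returns -1 only when a stale
 * cookie error pushes the association past its retry limit and the TCB is
 * freed; otherwise 0 is returned.
 */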
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int chklen;
	struct sctp_paramhdr *phdr;
	uint16_t error_type;
	uint16_t error_len;
	struct sctp_association *asoc;

	int adjust;

	/* parse through all of the errors and process */
	asoc = &stcb->asoc;
	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
	    sizeof(struct sctp_chunkhdr));
	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
		/* Process an Error Cause */
		error_type = ntohs(phdr->param_type);
		error_len = ntohs(phdr->param_length);
		if ((error_len > chklen) || (error_len == 0)) {
			/* invalid param length for this param */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
			    chklen, error_len);
			return (0);
		}
		switch (error_type) {
		case SCTP_CAUSE_INVALID_STREAM:
		case SCTP_CAUSE_MISSING_PARAM:
		case SCTP_CAUSE_INVALID_PARAM:
		case SCTP_CAUSE_NO_USER_DATA:
			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
			    error_type);
			break;
		case SCTP_CAUSE_STALE_COOKIE:
			/*
			 * We only act if we have echoed a cookie and are
			 * waiting.
			 */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
				int *p;

				p = (int *)((caddr_t)phdr + sizeof(*phdr));
				/* Save the time doubled */
				asoc->cookie_preserve_req = ntohl(*p) << 1;
				asoc->stale_cookie_count++;
				if (asoc->stale_cookie_count >
				    asoc->max_init_times) {
					sctp_abort_notification(stcb, 0);
					/* now free the asoc */
					sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
					return (-1);
				}
				/* blast back to INIT state */
				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
				asoc->state |= SCTP_STATE_COOKIE_WAIT;

				sctp_stop_all_cookie_timers(stcb);
				sctp_send_initiate(stcb->sctp_ep, stcb);
			}
			break;
		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
			/*
			 * Nothing we can do here, we don't do hostname
			 * addresses, so if the peer does not like my IPv6
			 * (or IPv4 for that matter) it does not matter. If
			 * they don't support that type of address, they can
			 * NOT possibly get that packet type... i.e. with no
			 * IPv6 you can't receive an IPv6 packet, so we can
			 * safely ignore this one. If we ever added support
			 * for HOSTNAME Addresses, then we would need to do
			 * something here.
			 */
			break;
		case SCTP_CAUSE_UNRECOG_CHUNK:
			sctp_process_unrecog_chunk(stcb, phdr, net);
			break;
		case SCTP_CAUSE_UNRECOG_PARAM:
			sctp_process_unrecog_param(stcb, phdr);
			break;
		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
			/*
			 * We ignore this since the timer will drive out a
			 * new cookie anyway and their timer will drive us
			 * to send a SHUTDOWN_COMPLETE. We can't send one
			 * here since we don't have their tag.
			 */
			break;
		case SCTP_CAUSE_DELETING_LAST_ADDR:
		case SCTP_CAUSE_RESOURCE_SHORTAGE:
		case SCTP_CAUSE_DELETING_SRC_ADDR:
			/*
			 * We should NOT get these here, but in an
			 * ASCONF-ACK.
849 */ 850 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n", 851 error_type); 852 break; 853 case SCTP_CAUSE_OUT_OF_RESC: 854 /* 855 * And what, pray tell do we do with the fact that 856 * the peer is out of resources? Not really sure we 857 * could do anything but abort. I suspect this 858 * should have came WITH an abort instead of in a 859 * OP-ERROR. 860 */ 861 break; 862 default: 863 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n", 864 error_type); 865 break; 866 } 867 adjust = SCTP_SIZE32(error_len); 868 chklen -= adjust; 869 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust); 870 } 871 return (0); 872 } 873 874 static int 875 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, 876 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 877 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, 878 uint32_t table_id) 879 { 880 struct sctp_init_ack *init_ack; 881 int *state; 882 struct mbuf *op_err; 883 884 SCTPDBG(SCTP_DEBUG_INPUT2, 885 "sctp_handle_init_ack: handling INIT-ACK\n"); 886 887 if (stcb == NULL) { 888 SCTPDBG(SCTP_DEBUG_INPUT2, 889 "sctp_handle_init_ack: TCB is null\n"); 890 return (-1); 891 } 892 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) { 893 /* Invalid length */ 894 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 895 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 896 op_err, 0, 0); 897 *abort_no_unlock = 1; 898 return (-1); 899 } 900 init_ack = &cp->init; 901 /* validate parameters */ 902 if (init_ack->initiate_tag == 0) { 903 /* protocol error... send an abort */ 904 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 905 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 906 op_err, 0, 0); 907 *abort_no_unlock = 1; 908 return (-1); 909 } 910 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) { 911 /* protocol error... send an abort */ 912 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 913 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 914 op_err, 0, 0); 915 *abort_no_unlock = 1; 916 return (-1); 917 } 918 if (init_ack->num_inbound_streams == 0) { 919 /* protocol error... send an abort */ 920 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 921 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 922 op_err, 0, 0); 923 *abort_no_unlock = 1; 924 return (-1); 925 } 926 if (init_ack->num_outbound_streams == 0) { 927 /* protocol error... send an abort */ 928 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 929 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 930 op_err, 0, 0); 931 *abort_no_unlock = 1; 932 return (-1); 933 } 934 /* process according to association state... */ 935 state = &stcb->asoc.state; 936 switch (*state & SCTP_STATE_MASK) { 937 case SCTP_STATE_COOKIE_WAIT: 938 /* this is the expected state for this chunk */ 939 /* process the INIT-ACK parameters */ 940 if (stcb->asoc.primary_destination->dest_state & 941 SCTP_ADDR_UNCONFIRMED) { 942 /* 943 * The primary is where we sent the INIT, we can 944 * always consider it confirmed when the INIT-ACK is 945 * returned. Do this before we load addresses 946 * though. 
947 */ 948 stcb->asoc.primary_destination->dest_state &= 949 ~SCTP_ADDR_UNCONFIRMED; 950 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 951 stcb, 0, (void *)stcb->asoc.primary_destination); 952 } 953 if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb, 954 net, abort_no_unlock, vrf_id, 955 table_id) < 0) { 956 /* error in parsing parameters */ 957 return (-1); 958 } 959 /* update our state */ 960 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n"); 961 if (*state & SCTP_STATE_SHUTDOWN_PENDING) { 962 *state = SCTP_STATE_COOKIE_ECHOED | 963 SCTP_STATE_SHUTDOWN_PENDING; 964 } else { 965 *state = SCTP_STATE_COOKIE_ECHOED; 966 } 967 968 /* reset the RTO calc */ 969 stcb->asoc.overall_error_count = 0; 970 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 971 /* 972 * collapse the init timer back in case of a exponential 973 * backoff 974 */ 975 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep, 976 stcb, net); 977 /* 978 * the send at the end of the inbound data processing will 979 * cause the cookie to be sent 980 */ 981 break; 982 case SCTP_STATE_SHUTDOWN_SENT: 983 /* incorrect state... discard */ 984 break; 985 case SCTP_STATE_COOKIE_ECHOED: 986 /* incorrect state... discard */ 987 break; 988 case SCTP_STATE_OPEN: 989 /* incorrect state... discard */ 990 break; 991 case SCTP_STATE_EMPTY: 992 case SCTP_STATE_INUSE: 993 default: 994 /* incorrect state... discard */ 995 return (-1); 996 break; 997 } 998 SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n"); 999 return (0); 1000 } 1001 1002 1003 /* 1004 * handle a state cookie for an existing association m: input packet mbuf 1005 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a 1006 * "split" mbuf and the cookie signature does not exist offset: offset into 1007 * mbuf to the cookie-echo chunk 1008 */ 1009 static struct sctp_tcb * 1010 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, 1011 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1012 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, 1013 struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id, 1014 uint32_t vrf_id, uint32_t table_id) 1015 { 1016 struct sctp_association *asoc; 1017 struct sctp_init_chunk *init_cp, init_buf; 1018 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1019 int chk_length; 1020 int init_offset, initack_offset, i; 1021 int retval; 1022 int spec_flag = 0; 1023 int how_indx; 1024 1025 /* I know that the TCB is non-NULL from the caller */ 1026 asoc = &stcb->asoc; 1027 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) { 1028 if (asoc->cookie_how[how_indx] == 0) 1029 break; 1030 } 1031 if (how_indx < sizeof(asoc->cookie_how)) { 1032 asoc->cookie_how[how_indx] = 1; 1033 } 1034 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 1035 /* SHUTDOWN came in after sending INIT-ACK */ 1036 struct mbuf *op_err; 1037 struct sctp_paramhdr *ph; 1038 1039 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 1040 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1041 0, M_DONTWAIT, 1, MT_DATA); 1042 if (op_err == NULL) { 1043 /* FOOBAR */ 1044 return (NULL); 1045 } 1046 /* pre-reserve some space */ 1047 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1048 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1049 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1050 /* Set the len */ 1051 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr); 1052 ph = mtod(op_err, struct sctp_paramhdr *); 1053 
ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN); 1054 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 1055 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 1056 vrf_id, table_id); 1057 if (how_indx < sizeof(asoc->cookie_how)) 1058 asoc->cookie_how[how_indx] = 2; 1059 return (NULL); 1060 } 1061 /* 1062 * find and validate the INIT chunk in the cookie (peer's info) the 1063 * INIT should start after the cookie-echo header struct (chunk 1064 * header, state cookie header struct) 1065 */ 1066 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk); 1067 1068 init_cp = (struct sctp_init_chunk *) 1069 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1070 (uint8_t *) & init_buf); 1071 if (init_cp == NULL) { 1072 /* could not pull a INIT chunk in cookie */ 1073 return (NULL); 1074 } 1075 chk_length = ntohs(init_cp->ch.chunk_length); 1076 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1077 return (NULL); 1078 } 1079 /* 1080 * find and validate the INIT-ACK chunk in the cookie (my info) the 1081 * INIT-ACK follows the INIT chunk 1082 */ 1083 initack_offset = init_offset + SCTP_SIZE32(chk_length); 1084 initack_cp = (struct sctp_init_ack_chunk *) 1085 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1086 (uint8_t *) & initack_buf); 1087 if (initack_cp == NULL) { 1088 /* could not pull INIT-ACK chunk in cookie */ 1089 return (NULL); 1090 } 1091 chk_length = ntohs(initack_cp->ch.chunk_length); 1092 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1093 return (NULL); 1094 } 1095 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1096 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) { 1097 /* 1098 * case D in Section 5.2.4 Table 2: MMAA process accordingly 1099 * to get into the OPEN state 1100 */ 1101 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1102 #ifdef INVARIANTS 1103 panic("Case D and non-match seq?"); 1104 #else 1105 SCTP_PRINTF("Case D, seq non-match %x vs %x?\n", 1106 ntohl(initack_cp->init.initial_tsn), 1107 asoc->init_seq_number); 1108 #endif 1109 } 1110 switch SCTP_GET_STATE 1111 (asoc) { 1112 case SCTP_STATE_COOKIE_WAIT: 1113 case SCTP_STATE_COOKIE_ECHOED: 1114 /* 1115 * INIT was sent but got a COOKIE_ECHO with the 1116 * correct tags... just accept it...but we must 1117 * process the init so that we can make sure we have 1118 * the right seq no's. 1119 */ 1120 /* First we must process the INIT !! 
*/ 1121 retval = sctp_process_init(init_cp, stcb, net); 1122 if (retval < 0) { 1123 if (how_indx < sizeof(asoc->cookie_how)) 1124 asoc->cookie_how[how_indx] = 3; 1125 return (NULL); 1126 } 1127 /* we have already processed the INIT so no problem */ 1128 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, 1129 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); 1130 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12); 1131 /* update current state */ 1132 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1133 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1134 else 1135 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1136 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1137 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING; 1138 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1139 stcb->sctp_ep, stcb, asoc->primary_destination); 1140 1141 } else { 1142 /* if ok, move to OPEN state */ 1143 asoc->state = SCTP_STATE_OPEN; 1144 } 1145 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1146 sctp_stop_all_cookie_timers(stcb); 1147 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1148 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1149 (inp->sctp_socket->so_qlimit == 0) 1150 ) { 1151 /* 1152 * Here is where collision would go if we 1153 * did a connect() and instead got a 1154 * init/init-ack/cookie done before the 1155 * init-ack came back.. 1156 */ 1157 stcb->sctp_ep->sctp_flags |= 1158 SCTP_PCB_FLAGS_CONNECTED; 1159 soisconnected(stcb->sctp_ep->sctp_socket); 1160 } 1161 /* notify upper layer */ 1162 *notification = SCTP_NOTIFY_ASSOC_UP; 1163 /* 1164 * since we did not send a HB make sure we don't 1165 * double things 1166 */ 1167 net->hb_responded = 1; 1168 1169 if (stcb->asoc.sctp_autoclose_ticks && 1170 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) { 1171 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 1172 inp, stcb, NULL); 1173 } 1174 break; 1175 default: 1176 /* 1177 * we're in the OPEN state (or beyond), so peer must 1178 * have simply lost the COOKIE-ACK 1179 */ 1180 break; 1181 } /* end switch */ 1182 sctp_stop_all_cookie_timers(stcb); 1183 /* 1184 * We ignore the return code here.. not sure if we should 1185 * somehow abort.. but we do have an existing asoc. This 1186 * really should not fail. 
1187 */ 1188 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1189 init_offset + sizeof(struct sctp_init_chunk), 1190 initack_offset, sh, init_src)) { 1191 if (how_indx < sizeof(asoc->cookie_how)) 1192 asoc->cookie_how[how_indx] = 4; 1193 return (NULL); 1194 } 1195 /* respond with a COOKIE-ACK */ 1196 sctp_toss_old_cookies(stcb, asoc); 1197 sctp_send_cookie_ack(stcb); 1198 if (how_indx < sizeof(asoc->cookie_how)) 1199 asoc->cookie_how[how_indx] = 5; 1200 return (stcb); 1201 } 1202 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1203 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag && 1204 cookie->tie_tag_my_vtag == 0 && 1205 cookie->tie_tag_peer_vtag == 0) { 1206 /* 1207 * case C in Section 5.2.4 Table 2: XMOO silently discard 1208 */ 1209 if (how_indx < sizeof(asoc->cookie_how)) 1210 asoc->cookie_how[how_indx] = 6; 1211 return (NULL); 1212 } 1213 if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag && 1214 (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag || 1215 init_cp->init.initiate_tag == 0)) { 1216 /* 1217 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info 1218 * should be ok, re-accept peer info 1219 */ 1220 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1221 /* 1222 * Extension of case C. If we hit this, then the 1223 * random number generator returned the same vtag 1224 * when we first sent our INIT-ACK and when we later 1225 * sent our INIT. The side with the seq numbers that 1226 * are different will be the one that normnally 1227 * would have hit case C. This in effect "extends" 1228 * our vtags in this collision case to be 64 bits. 1229 * The same collision could occur aka you get both 1230 * vtag and seq number the same twice in a row.. but 1231 * is much less likely. If it did happen then we 1232 * would proceed through and bring up the assoc.. we 1233 * may end up with the wrong stream setup however.. 1234 * which would be bad.. but there is no way to 1235 * tell.. until we send on a stream that does not 1236 * exist :-) 1237 */ 1238 if (how_indx < sizeof(asoc->cookie_how)) 1239 asoc->cookie_how[how_indx] = 7; 1240 1241 return (NULL); 1242 } 1243 if (how_indx < sizeof(asoc->cookie_how)) 1244 asoc->cookie_how[how_indx] = 8; 1245 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13); 1246 sctp_stop_all_cookie_timers(stcb); 1247 /* 1248 * since we did not send a HB make sure we don't double 1249 * things 1250 */ 1251 net->hb_responded = 1; 1252 if (stcb->asoc.sctp_autoclose_ticks && 1253 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1254 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1255 NULL); 1256 } 1257 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1258 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1259 1260 /* Note last_cwr_tsn? where is this used? */ 1261 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1262 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) { 1263 /* 1264 * Ok the peer probably discarded our data (if we 1265 * echoed a cookie+data). So anything on the 1266 * sent_queue should be marked for retransmit, we 1267 * may not get something to kick us so it COULD 1268 * still take a timeout to move these.. but it can't 1269 * hurt to mark them. 
1270 */ 1271 struct sctp_tmit_chunk *chk; 1272 1273 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1274 if (chk->sent < SCTP_DATAGRAM_RESEND) { 1275 chk->sent = SCTP_DATAGRAM_RESEND; 1276 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1277 spec_flag++; 1278 } 1279 } 1280 1281 } 1282 /* process the INIT info (peer's info) */ 1283 retval = sctp_process_init(init_cp, stcb, net); 1284 if (retval < 0) { 1285 if (how_indx < sizeof(asoc->cookie_how)) 1286 asoc->cookie_how[how_indx] = 9; 1287 return (NULL); 1288 } 1289 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1290 init_offset + sizeof(struct sctp_init_chunk), 1291 initack_offset, sh, init_src)) { 1292 if (how_indx < sizeof(asoc->cookie_how)) 1293 asoc->cookie_how[how_indx] = 10; 1294 return (NULL); 1295 } 1296 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) || 1297 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) { 1298 *notification = SCTP_NOTIFY_ASSOC_UP; 1299 1300 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1301 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1302 (inp->sctp_socket->so_qlimit == 0)) { 1303 stcb->sctp_ep->sctp_flags |= 1304 SCTP_PCB_FLAGS_CONNECTED; 1305 soisconnected(stcb->sctp_ep->sctp_socket); 1306 } 1307 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1308 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1309 else 1310 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1311 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1312 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1313 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1314 SCTP_STAT_INCR_COUNTER32(sctps_restartestab); 1315 } else { 1316 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1317 } 1318 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1319 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING; 1320 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1321 stcb->sctp_ep, stcb, asoc->primary_destination); 1322 1323 } else { 1324 asoc->state = SCTP_STATE_OPEN; 1325 } 1326 sctp_stop_all_cookie_timers(stcb); 1327 sctp_toss_old_cookies(stcb, asoc); 1328 sctp_send_cookie_ack(stcb); 1329 if (spec_flag) { 1330 /* 1331 * only if we have retrans set do we do this. What 1332 * this call does is get only the COOKIE-ACK out and 1333 * then when we return the normal call to 1334 * sctp_chunk_output will get the retrans out behind 1335 * this. 
1336 */ 1337 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK); 1338 } 1339 if (how_indx < sizeof(asoc->cookie_how)) 1340 asoc->cookie_how[how_indx] = 11; 1341 1342 return (stcb); 1343 } 1344 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1345 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) && 1346 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce && 1347 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce && 1348 cookie->tie_tag_peer_vtag != 0) { 1349 struct sctpasochead *head; 1350 1351 /* 1352 * case A in Section 5.2.4 Table 2: XXMM (peer restarted) 1353 */ 1354 /* temp code */ 1355 if (how_indx < sizeof(asoc->cookie_how)) 1356 asoc->cookie_how[how_indx] = 12; 1357 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14); 1358 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15); 1359 1360 *sac_assoc_id = sctp_get_associd(stcb); 1361 /* notify upper layer */ 1362 *notification = SCTP_NOTIFY_ASSOC_RESTART; 1363 atomic_add_int(&stcb->asoc.refcnt, 1); 1364 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) && 1365 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1366 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 1367 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1368 } 1369 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1370 SCTP_STAT_INCR_GAUGE32(sctps_restartestab); 1371 } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1372 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab); 1373 } 1374 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1375 asoc->state = SCTP_STATE_OPEN | 1376 SCTP_STATE_SHUTDOWN_PENDING; 1377 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1378 stcb->sctp_ep, stcb, asoc->primary_destination); 1379 1380 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) { 1381 /* move to OPEN state, if not in SHUTDOWN_SENT */ 1382 asoc->state = SCTP_STATE_OPEN; 1383 } 1384 asoc->pre_open_streams = 1385 ntohs(initack_cp->init.num_outbound_streams); 1386 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1387 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 1388 1389 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1390 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1391 1392 asoc->str_reset_seq_in = asoc->init_seq_number; 1393 1394 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1395 if (asoc->mapping_array) 1396 memset(asoc->mapping_array, 0, 1397 asoc->mapping_array_size); 1398 SCTP_TCB_UNLOCK(stcb); 1399 SCTP_INP_INFO_WLOCK(); 1400 SCTP_INP_WLOCK(stcb->sctp_ep); 1401 SCTP_TCB_LOCK(stcb); 1402 atomic_add_int(&stcb->asoc.refcnt, -1); 1403 /* send up all the data */ 1404 SCTP_TCB_SEND_LOCK(stcb); 1405 1406 sctp_report_all_outbound(stcb, 1); 1407 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1408 stcb->asoc.strmout[i].stream_no = i; 1409 stcb->asoc.strmout[i].next_sequence_sent = 0; 1410 stcb->asoc.strmout[i].last_msg_incomplete = 0; 1411 } 1412 /* process the INIT-ACK info (my info) */ 1413 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1414 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1415 1416 /* pull from vtag hash */ 1417 LIST_REMOVE(stcb, sctp_asocs); 1418 /* re-insert to new vtag position */ 1419 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, 1420 sctppcbinfo.hashasocmark)]; 1421 /* 1422 * put it in the bucket in the vtag hash of assoc's for the 1423 * system 1424 */ 1425 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 1426 1427 /* Is this the first 
restart? */ 1428 if (stcb->asoc.in_restart_hash == 0) { 1429 /* Ok add it to assoc_id vtag hash */ 1430 head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id, 1431 sctppcbinfo.hashrestartmark)]; 1432 LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash); 1433 stcb->asoc.in_restart_hash = 1; 1434 } 1435 /* process the INIT info (peer's info) */ 1436 SCTP_TCB_SEND_UNLOCK(stcb); 1437 SCTP_INP_WUNLOCK(stcb->sctp_ep); 1438 SCTP_INP_INFO_WUNLOCK(); 1439 1440 retval = sctp_process_init(init_cp, stcb, net); 1441 if (retval < 0) { 1442 if (how_indx < sizeof(asoc->cookie_how)) 1443 asoc->cookie_how[how_indx] = 13; 1444 1445 return (NULL); 1446 } 1447 /* 1448 * since we did not send a HB make sure we don't double 1449 * things 1450 */ 1451 net->hb_responded = 1; 1452 1453 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1454 init_offset + sizeof(struct sctp_init_chunk), 1455 initack_offset, sh, init_src)) { 1456 if (how_indx < sizeof(asoc->cookie_how)) 1457 asoc->cookie_how[how_indx] = 14; 1458 1459 return (NULL); 1460 } 1461 /* respond with a COOKIE-ACK */ 1462 sctp_stop_all_cookie_timers(stcb); 1463 sctp_toss_old_cookies(stcb, asoc); 1464 sctp_send_cookie_ack(stcb); 1465 if (how_indx < sizeof(asoc->cookie_how)) 1466 asoc->cookie_how[how_indx] = 15; 1467 1468 return (stcb); 1469 } 1470 if (how_indx < sizeof(asoc->cookie_how)) 1471 asoc->cookie_how[how_indx] = 16; 1472 /* all other cases... */ 1473 return (NULL); 1474 } 1475 1476 1477 /* 1478 * handle a state cookie for a new association m: input packet mbuf chain-- 1479 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf 1480 * and the cookie signature does not exist offset: offset into mbuf to the 1481 * cookie-echo chunk length: length of the cookie chunk to: where the init 1482 * was from returns a new TCB 1483 */ 1484 static struct sctp_tcb * 1485 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, 1486 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1487 struct sctp_inpcb *inp, struct sctp_nets **netp, 1488 struct sockaddr *init_src, int *notification, 1489 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1490 uint32_t vrf_id, uint32_t table_id) 1491 { 1492 struct sctp_tcb *stcb; 1493 struct sctp_init_chunk *init_cp, init_buf; 1494 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1495 struct sockaddr_storage sa_store; 1496 struct sockaddr *initack_src = (struct sockaddr *)&sa_store; 1497 struct sockaddr_in *sin; 1498 struct sockaddr_in6 *sin6; 1499 struct sctp_association *asoc; 1500 int chk_length; 1501 int init_offset, initack_offset, initack_limit; 1502 int retval; 1503 int error = 0; 1504 uint32_t old_tag; 1505 uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE]; 1506 1507 /* 1508 * find and validate the INIT chunk in the cookie (peer's info) the 1509 * INIT should start after the cookie-echo header struct (chunk 1510 * header, state cookie header struct) 1511 */ 1512 init_offset = offset + sizeof(struct sctp_cookie_echo_chunk); 1513 init_cp = (struct sctp_init_chunk *) 1514 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1515 (uint8_t *) & init_buf); 1516 if (init_cp == NULL) { 1517 /* could not pull a INIT chunk in cookie */ 1518 SCTPDBG(SCTP_DEBUG_INPUT1, 1519 "process_cookie_new: could not pull INIT chunk hdr\n"); 1520 return (NULL); 1521 } 1522 chk_length = ntohs(init_cp->ch.chunk_length); 1523 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1524 SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? 
process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value. This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate
	 */
	stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id);
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id, table_id);
		return (NULL);
	}
	/* get the correct sctp_nets */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* save the table id (vrf_id is done in aloc_assoc) */
	asoc->table_id = table_id;
	/* get scope variables out of cookie */
	asoc->ipv4_local_scope = cookie->ipv4_scope;
	asoc->site_scope = cookie->site_scope;
	asoc->local_scope = cookie->local_scope;
	asoc->loopback_scope = cookie->loopback_scope;

	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston, we have a problem. The EP changed while the
		 * cookie was in flight. The only recourse is to abort the
		 * association.
1590 */ 1591 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1592 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 1593 sh, op_err, vrf_id, table_id); 1594 return (NULL); 1595 } 1596 /* process the INIT-ACK info (my info) */ 1597 old_tag = asoc->my_vtag; 1598 asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1599 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1600 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1601 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1602 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 1603 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1604 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1605 asoc->str_reset_seq_in = asoc->init_seq_number; 1606 1607 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1608 1609 /* process the INIT info (peer's info) */ 1610 if (netp) 1611 retval = sctp_process_init(init_cp, stcb, *netp); 1612 else 1613 retval = 0; 1614 if (retval < 0) { 1615 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1616 return (NULL); 1617 } 1618 /* load all addresses */ 1619 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1620 init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh, 1621 init_src)) { 1622 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17); 1623 return (NULL); 1624 } 1625 /* 1626 * verify any preceding AUTH chunk that was skipped 1627 */ 1628 /* pull the local authentication parameters from the cookie/init-ack */ 1629 sctp_auth_get_cookie_params(stcb, m, 1630 initack_offset + sizeof(struct sctp_init_ack_chunk), 1631 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk))); 1632 if (auth_skipped) { 1633 struct sctp_auth_chunk *auth; 1634 1635 auth = (struct sctp_auth_chunk *) 1636 sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf); 1637 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) { 1638 /* auth HMAC failed, dump the assoc and packet */ 1639 SCTPDBG(SCTP_DEBUG_AUTH1, 1640 "COOKIE-ECHO: AUTH failed\n"); 1641 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18); 1642 return (NULL); 1643 } else { 1644 /* remaining chunks checked... good to go */ 1645 stcb->asoc.authenticated = 1; 1646 } 1647 } 1648 /* update current state */ 1649 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 1650 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1651 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING; 1652 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1653 stcb->sctp_ep, stcb, asoc->primary_destination); 1654 } else { 1655 asoc->state = SCTP_STATE_OPEN; 1656 } 1657 sctp_stop_all_cookie_timers(stcb); 1658 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab); 1659 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1660 1661 /* 1662 * if we're doing ASCONFs, check to see if we have any new local 1663 * addresses that need to get added to the peer (eg. addresses 1664 * changed while cookie echo in flight). This needs to be done 1665 * after we go to the OPEN state to do the correct asconf 1666 * processing. else, make sure we have the correct addresses in our 1667 * lists 1668 */ 1669 1670 /* warning, we re-use sin, sin6, sa_store here! 
*/
1671 /* pull in local_address (our "from" address) */
1672 if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
1673 /* source addr is IPv4 */
1674 sin = (struct sockaddr_in *)initack_src;
1675 memset(sin, 0, sizeof(*sin));
1676 sin->sin_family = AF_INET;
1677 sin->sin_len = sizeof(struct sockaddr_in);
1678 sin->sin_addr.s_addr = cookie->laddress[0];
1679 } else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
1680 /* source addr is IPv6 */
1681 sin6 = (struct sockaddr_in6 *)initack_src;
1682 memset(sin6, 0, sizeof(*sin6));
1683 sin6->sin6_family = AF_INET6;
1684 sin6->sin6_len = sizeof(struct sockaddr_in6);
1685 sin6->sin6_scope_id = cookie->scope_id;
1686 memcpy(&sin6->sin6_addr, cookie->laddress,
1687 sizeof(sin6->sin6_addr));
1688 } else {
1689 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
1690 return (NULL);
1691 }
1692
1693 sctp_check_address_list(stcb, m,
1694 initack_offset + sizeof(struct sctp_init_ack_chunk),
1695 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
1696 initack_src, cookie->local_scope, cookie->site_scope,
1697 cookie->ipv4_scope, cookie->loopback_scope);
1698
1699
1700 /* set up to notify upper layer */
1701 *notification = SCTP_NOTIFY_ASSOC_UP;
1702 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
1703 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
1704 (inp->sctp_socket->so_qlimit == 0)) {
1705 /*
1706 * This is an endpoint that called connect(); how it got a
1707 * cookie that is NEW is a bit of a mystery. It must be that
1708 * the INIT was sent, but before it got there a complete
1709 * INIT/INIT-ACK/COOKIE arrived. Of course it should then
1710 * have gone to the other code path, not here, but a bit of
1711 * protection is worth having.
1712 */
1713 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1714 soisconnected(stcb->sctp_ep->sctp_socket);
1715 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1716 (inp->sctp_socket->so_qlimit)) {
1717 /*
1718 * We don't want to do anything with this one, since it is
1719 * the listening socket. The timer will get started for
1720 * accepted connections in the caller.
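 * (Editor's note, illustrative only: the two branches here separate the
 * one-to-one style socket that itself called connect() -- so_qlimit is 0,
 * so we flag it SCTP_PCB_FLAGS_CONNECTED and call soisconnected() -- from
 * a listening one-to-one socket, where so_qlimit is non-zero and the
 * accept path in sctp_handle_cookie_echo() takes over instead:
 *
 *     if ((TCPTYPE || IN_TCPPOOL) && so_qlimit == 0)
 *             soisconnected(sctp_socket);
 *     else if (TCPTYPE && so_qlimit)
 *             ;                       nothing to do here
 * )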
1721 */ 1722 ; 1723 } 1724 /* since we did not send a HB make sure we don't double things */ 1725 if ((netp) && (*netp)) 1726 (*netp)->hb_responded = 1; 1727 1728 if (stcb->asoc.sctp_autoclose_ticks && 1729 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1730 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); 1731 } 1732 /* respond with a COOKIE-ACK */ 1733 /* calculate the RTT */ 1734 if ((netp) && (*netp)) 1735 (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp, 1736 &cookie->time_entered); 1737 sctp_send_cookie_ack(stcb); 1738 return (stcb); 1739 } 1740 1741 1742 /* 1743 * handles a COOKIE-ECHO message stcb: modified to either a new or left as 1744 * existing (non-NULL) TCB 1745 */ 1746 static struct mbuf * 1747 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 1748 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 1749 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 1750 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1751 struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint32_t table_id) 1752 { 1753 struct sctp_state_cookie *cookie; 1754 struct sockaddr_in6 sin6; 1755 struct sockaddr_in sin; 1756 struct sctp_tcb *l_stcb = *stcb; 1757 struct sctp_inpcb *l_inp; 1758 struct sockaddr *to; 1759 sctp_assoc_t sac_restart_id; 1760 struct sctp_pcb *ep; 1761 struct mbuf *m_sig; 1762 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 1763 uint8_t *sig; 1764 uint8_t cookie_ok = 0; 1765 unsigned int size_of_pkt, sig_offset, cookie_offset; 1766 unsigned int cookie_len; 1767 struct timeval now; 1768 struct timeval time_expires; 1769 struct sockaddr_storage dest_store; 1770 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store; 1771 struct ip *iph; 1772 int notification = 0; 1773 struct sctp_nets *netl; 1774 int had_a_existing_tcb = 0; 1775 1776 SCTPDBG(SCTP_DEBUG_INPUT2, 1777 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 1778 1779 if (inp_p == NULL) { 1780 return (NULL); 1781 } 1782 /* First get the destination address setup too. */ 1783 iph = mtod(m, struct ip *); 1784 if (iph->ip_v == IPVERSION) { 1785 /* its IPv4 */ 1786 struct sockaddr_in *sin; 1787 1788 sin = (struct sockaddr_in *)(localep_sa); 1789 memset(sin, 0, sizeof(*sin)); 1790 sin->sin_family = AF_INET; 1791 sin->sin_len = sizeof(*sin); 1792 sin->sin_port = sh->dest_port; 1793 sin->sin_addr.s_addr = iph->ip_dst.s_addr; 1794 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph); 1795 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 1796 /* its IPv6 */ 1797 struct ip6_hdr *ip6; 1798 struct sockaddr_in6 *sin6; 1799 1800 sin6 = (struct sockaddr_in6 *)(localep_sa); 1801 memset(sin6, 0, sizeof(*sin6)); 1802 sin6->sin6_family = AF_INET6; 1803 sin6->sin6_len = sizeof(struct sockaddr_in6); 1804 ip6 = mtod(m, struct ip6_hdr *); 1805 sin6->sin6_port = sh->dest_port; 1806 sin6->sin6_addr = ip6->ip6_dst; 1807 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen; 1808 } else { 1809 return (NULL); 1810 } 1811 1812 cookie = &cp->cookie; 1813 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 1814 cookie_len = ntohs(cp->ch.chunk_length); 1815 1816 if ((cookie->peerport != sh->src_port) && 1817 (cookie->myport != sh->dest_port) && 1818 (cookie->my_vtag != sh->v_tag)) { 1819 /* 1820 * invalid ports or bad tag. Note that we always leave the 1821 * v_tag in the header in network order and when we stored 1822 * it in the my_vtag slot we also left it in network order. 
1823 * This maintains the match even though it may be in the 1824 * opposite byte order of the machine :-> 1825 */ 1826 return (NULL); 1827 } 1828 if (cookie_len > size_of_pkt || 1829 cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 1830 sizeof(struct sctp_init_chunk) + 1831 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 1832 /* cookie too long! or too small */ 1833 return (NULL); 1834 } 1835 /* 1836 * split off the signature into its own mbuf (since it should not be 1837 * calculated in the sctp_hmac_m() call). 1838 */ 1839 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 1840 if (sig_offset > size_of_pkt) { 1841 /* packet not correct size! */ 1842 /* XXX this may already be accounted for earlier... */ 1843 return (NULL); 1844 } 1845 m_sig = m_split(m, sig_offset, M_DONTWAIT); 1846 if (m_sig == NULL) { 1847 /* out of memory or ?? */ 1848 return (NULL); 1849 } 1850 /* 1851 * compute the signature/digest for the cookie 1852 */ 1853 ep = &(*inp_p)->sctp_ep; 1854 l_inp = *inp_p; 1855 if (l_stcb) { 1856 SCTP_TCB_UNLOCK(l_stcb); 1857 } 1858 SCTP_INP_RLOCK(l_inp); 1859 if (l_stcb) { 1860 SCTP_TCB_LOCK(l_stcb); 1861 } 1862 /* which cookie is it? */ 1863 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 1864 (ep->current_secret_number != ep->last_secret_number)) { 1865 /* it's the old cookie */ 1866 (void)sctp_hmac_m(SCTP_HMAC, 1867 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 1868 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig); 1869 } else { 1870 /* it's the current cookie */ 1871 (void)sctp_hmac_m(SCTP_HMAC, 1872 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 1873 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig); 1874 } 1875 /* get the signature */ 1876 SCTP_INP_RUNLOCK(l_inp); 1877 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 1878 if (sig == NULL) { 1879 /* couldn't find signature */ 1880 sctp_m_freem(m_sig); 1881 return (NULL); 1882 } 1883 /* compare the received digest with the computed digest */ 1884 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 1885 /* try the old cookie? */ 1886 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 1887 (ep->current_secret_number != ep->last_secret_number)) { 1888 /* compute digest with old */ 1889 (void)sctp_hmac_m(SCTP_HMAC, 1890 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 1891 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig); 1892 /* compare */ 1893 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 1894 cookie_ok = 1; 1895 } 1896 } else { 1897 cookie_ok = 1; 1898 } 1899 1900 /* 1901 * Now before we continue we must reconstruct our mbuf so that 1902 * normal processing of any other chunks will work. 
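 * (Editor's summary of the verification just performed, illustrative only:
 *
 *     m_sig = m_split(m, sig_offset, M_DONTWAIT);      detach signature
 *     sctp_hmac_m(SCTP_HMAC, secret_key[n], SCTP_SECRET_SIZE,
 *         m, cookie_offset, calc_sig);                 digest cookie only
 *     memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE);      accept or reject
 *
 * where secret_key[n] stands for the current or previous endpoint secret.
 * The block below walks to the last mbuf in the chain and hangs m_sig
 * back on the end, so later chunk parsing sees the packet intact.)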
1903 */ 1904 { 1905 struct mbuf *m_at; 1906 1907 m_at = m; 1908 while (SCTP_BUF_NEXT(m_at) != NULL) { 1909 m_at = SCTP_BUF_NEXT(m_at); 1910 } 1911 SCTP_BUF_NEXT(m_at) = m_sig; 1912 } 1913 1914 if (cookie_ok == 0) { 1915 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 1916 SCTPDBG(SCTP_DEBUG_INPUT2, 1917 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 1918 (uint32_t) offset, cookie_offset, sig_offset); 1919 return (NULL); 1920 } 1921 /* 1922 * check the cookie timestamps to be sure it's not stale 1923 */ 1924 (void)SCTP_GETTIME_TIMEVAL(&now); 1925 /* Expire time is in Ticks, so we convert to seconds */ 1926 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 1927 time_expires.tv_usec = cookie->time_entered.tv_usec; 1928 if (timevalcmp(&now, &time_expires, >)) { 1929 /* cookie is stale! */ 1930 struct mbuf *op_err; 1931 struct sctp_stale_cookie_msg *scm; 1932 uint32_t tim; 1933 1934 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 1935 0, M_DONTWAIT, 1, MT_DATA); 1936 if (op_err == NULL) { 1937 /* FOOBAR */ 1938 return (NULL); 1939 } 1940 /* pre-reserve some space */ 1941 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1942 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1943 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1944 1945 /* Set the len */ 1946 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 1947 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 1948 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 1949 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 1950 (sizeof(uint32_t)))); 1951 /* seconds to usec */ 1952 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 1953 /* add in usec */ 1954 if (tim == 0) 1955 tim = now.tv_usec - cookie->time_entered.tv_usec; 1956 scm->time_usec = htonl(tim); 1957 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 1958 vrf_id, table_id); 1959 return (NULL); 1960 } 1961 /* 1962 * Now we must see with the lookup address if we have an existing 1963 * asoc. This will only happen if we were in the COOKIE-WAIT state 1964 * and a INIT collided with us and somewhere the peer sent the 1965 * cookie on another address besides the single address our assoc 1966 * had for him. In this case we will have one of the tie-tags set at 1967 * least AND the address field in the cookie can be used to look it 1968 * up. 1969 */ 1970 to = NULL; 1971 if (cookie->addr_type == SCTP_IPV6_ADDRESS) { 1972 memset(&sin6, 0, sizeof(sin6)); 1973 sin6.sin6_family = AF_INET6; 1974 sin6.sin6_len = sizeof(sin6); 1975 sin6.sin6_port = sh->src_port; 1976 sin6.sin6_scope_id = cookie->scope_id; 1977 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 1978 sizeof(sin6.sin6_addr.s6_addr)); 1979 to = (struct sockaddr *)&sin6; 1980 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) { 1981 memset(&sin, 0, sizeof(sin)); 1982 sin.sin_family = AF_INET; 1983 sin.sin_len = sizeof(sin); 1984 sin.sin_port = sh->src_port; 1985 sin.sin_addr.s_addr = cookie->address[0]; 1986 to = (struct sockaddr *)&sin; 1987 } else { 1988 /* This should not happen */ 1989 return (NULL); 1990 } 1991 if ((*stcb == NULL) && to) { 1992 /* Yep, lets check */ 1993 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL); 1994 if (*stcb == NULL) { 1995 /* 1996 * We should have only got back the same inp. If we 1997 * got back a different ep we have a problem. 
The 1998 * original findep got back l_inp and now 1999 */ 2000 if (l_inp != *inp_p) { 2001 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2002 } 2003 } else { 2004 if (*locked_tcb == NULL) { 2005 /* 2006 * In this case we found the assoc only 2007 * after we locked the create lock. This 2008 * means we are in a colliding case and we 2009 * must make sure that we unlock the tcb if 2010 * its one of the cases where we throw away 2011 * the incoming packets. 2012 */ 2013 *locked_tcb = *stcb; 2014 2015 /* 2016 * We must also increment the inp ref count 2017 * since the ref_count flags was set when we 2018 * did not find the TCB, now we found it 2019 * which reduces the refcount.. we must 2020 * raise it back out to balance it all :-) 2021 */ 2022 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2023 if ((*stcb)->sctp_ep != l_inp) { 2024 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n", 2025 (*stcb)->sctp_ep, l_inp); 2026 } 2027 } 2028 } 2029 } 2030 if (to == NULL) 2031 return (NULL); 2032 2033 cookie_len -= SCTP_SIGNATURE_SIZE; 2034 if (*stcb == NULL) { 2035 /* this is the "normal" case... get a new TCB */ 2036 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie, 2037 cookie_len, *inp_p, netp, to, ¬ification, 2038 auth_skipped, auth_offset, auth_len, vrf_id, table_id); 2039 } else { 2040 /* this is abnormal... cookie-echo on existing TCB */ 2041 had_a_existing_tcb = 1; 2042 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh, 2043 cookie, cookie_len, *inp_p, *stcb, *netp, to, ¬ification, 2044 &sac_restart_id, vrf_id, table_id); 2045 } 2046 2047 if (*stcb == NULL) { 2048 /* still no TCB... must be bad cookie-echo */ 2049 return (NULL); 2050 } 2051 /* 2052 * Ok, we built an association so confirm the address we sent the 2053 * INIT-ACK to. 2054 */ 2055 netl = sctp_findnet(*stcb, to); 2056 /* 2057 * This code should in theory NOT run but 2058 */ 2059 if (netl == NULL) { 2060 /* TSNH! Huh, why do I need to add this address here? */ 2061 int ret; 2062 2063 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE, 2064 SCTP_IN_COOKIE_PROC); 2065 netl = sctp_findnet(*stcb, to); 2066 } 2067 if (netl) { 2068 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2069 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2070 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2071 netl); 2072 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2073 (*stcb), 0, (void *)netl); 2074 } 2075 } 2076 if (*stcb) { 2077 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p, 2078 *stcb, NULL); 2079 } 2080 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2081 if (!had_a_existing_tcb || 2082 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2083 /* 2084 * If we have a NEW cookie or the connect never 2085 * reached the connected state during collision we 2086 * must do the TCP accept thing. 2087 */ 2088 struct socket *so, *oso; 2089 struct sctp_inpcb *inp; 2090 2091 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2092 /* 2093 * For a restart we will keep the same 2094 * socket, no need to do anything. I THINK!! 2095 */ 2096 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id); 2097 return (m); 2098 } 2099 oso = (*inp_p)->sctp_socket; 2100 /* 2101 * We do this to keep the sockets side happy durin 2102 * the sonewcon ONLY. 
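 * (Editor's roadmap of the block that follows, illustrative only:
 *
 *     so  = sonewconn(oso, 0);                  child socket for accept()
 *     inp = (struct sctp_inpcb *)so->so_pcb;    its fresh SCTP PCB
 *     ...copy flags, features, frag point and auth lists from *inp_p...
 *     sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
 *     soisconnected(so);                        hand it to the accept queue
 *
 * If SCTP_PCB_FLAGS_SOCKET_GONE is seen after the move, the association is
 * left attached so sctp_inpcb_free() can send the ABORT.)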
2103 */ 2104 NET_LOCK_GIANT(); 2105 SCTP_TCB_UNLOCK((*stcb)); 2106 so = sonewconn(oso, 0 2107 ); 2108 NET_UNLOCK_GIANT(); 2109 SCTP_INP_WLOCK((*stcb)->sctp_ep); 2110 SCTP_TCB_LOCK((*stcb)); 2111 SCTP_INP_WUNLOCK((*stcb)->sctp_ep); 2112 if (so == NULL) { 2113 struct mbuf *op_err; 2114 2115 /* Too many sockets */ 2116 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2117 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2118 sctp_abort_association(*inp_p, NULL, m, iphlen, 2119 sh, op_err, vrf_id, 2120 table_id); 2121 sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2122 return (NULL); 2123 } 2124 inp = (struct sctp_inpcb *)so->so_pcb; 2125 SCTP_INP_INCR_REF(inp); 2126 /* 2127 * We add the unbound flag here so that if we get an 2128 * soabort() before we get the move_pcb done, we 2129 * will properly cleanup. 2130 */ 2131 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2132 SCTP_PCB_FLAGS_CONNECTED | 2133 SCTP_PCB_FLAGS_IN_TCPPOOL | 2134 SCTP_PCB_FLAGS_UNBOUND | 2135 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2136 SCTP_PCB_FLAGS_DONT_WAKE); 2137 inp->sctp_features = (*inp_p)->sctp_features; 2138 inp->sctp_socket = so; 2139 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2140 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2141 inp->sctp_context = (*inp_p)->sctp_context; 2142 inp->inp_starting_point_for_iterator = NULL; 2143 /* 2144 * copy in the authentication parameters from the 2145 * original endpoint 2146 */ 2147 if (inp->sctp_ep.local_hmacs) 2148 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2149 inp->sctp_ep.local_hmacs = 2150 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2151 if (inp->sctp_ep.local_auth_chunks) 2152 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2153 inp->sctp_ep.local_auth_chunks = 2154 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2155 (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys, 2156 &inp->sctp_ep.shared_keys); 2157 2158 /* 2159 * Now we must move it from one hash table to 2160 * another and get the tcb in the right place. 2161 */ 2162 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2163 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT); 2164 2165 /* 2166 * now we must check to see if we were aborted while 2167 * the move was going on and the lock/unlock 2168 * happened. 2169 */ 2170 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2171 /* 2172 * yep it was, we leave the assoc attached 2173 * to the socket since the sctp_inpcb_free() 2174 * call will send an abort for us. 
2175 */ 2176 SCTP_INP_DECR_REF(inp); 2177 return (NULL); 2178 } 2179 SCTP_INP_DECR_REF(inp); 2180 /* Switch over to the new guy */ 2181 *inp_p = inp; 2182 sctp_ulp_notify(notification, *stcb, 0, NULL); 2183 2184 /* 2185 * Pull it from the incomplete queue and wake the 2186 * guy 2187 */ 2188 soisconnected(so); 2189 return (m); 2190 } 2191 } 2192 if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2193 sctp_ulp_notify(notification, *stcb, 0, NULL); 2194 } 2195 return (m); 2196 } 2197 2198 static void 2199 sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp, 2200 struct sctp_tcb *stcb, struct sctp_nets *net) 2201 { 2202 /* cp must not be used, others call this without a c-ack :-) */ 2203 struct sctp_association *asoc; 2204 2205 SCTPDBG(SCTP_DEBUG_INPUT2, 2206 "sctp_handle_cookie_ack: handling COOKIE-ACK\n"); 2207 if (stcb == NULL) 2208 return; 2209 2210 asoc = &stcb->asoc; 2211 2212 sctp_stop_all_cookie_timers(stcb); 2213 /* process according to association state */ 2214 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 2215 /* state change only needed when I am in right state */ 2216 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 2217 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 2218 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING; 2219 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 2220 stcb->sctp_ep, stcb, asoc->primary_destination); 2221 2222 } else { 2223 asoc->state = SCTP_STATE_OPEN; 2224 } 2225 /* update RTO */ 2226 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 2227 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 2228 if (asoc->overall_error_count == 0) { 2229 net->RTO = sctp_calculate_rto(stcb, asoc, net, 2230 &asoc->time_entered); 2231 } 2232 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 2233 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL); 2234 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2235 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2236 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 2237 soisconnected(stcb->sctp_ep->sctp_socket); 2238 } 2239 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 2240 stcb, net); 2241 /* 2242 * since we did not send a HB make sure we don't double 2243 * things 2244 */ 2245 net->hb_responded = 1; 2246 2247 if (stcb->asoc.sctp_autoclose_ticks && 2248 sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2249 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 2250 stcb->sctp_ep, stcb, NULL); 2251 } 2252 /* 2253 * set ASCONF timer if ASCONFs are pending and allowed (eg. 
2254 * addresses changed when init/cookie echo in flight) 2255 */ 2256 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) && 2257 (stcb->asoc.peer_supports_asconf) && 2258 (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) { 2259 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, 2260 stcb->sctp_ep, stcb, 2261 stcb->asoc.primary_destination); 2262 } 2263 } 2264 /* Toss the cookie if I can */ 2265 sctp_toss_old_cookies(stcb, asoc); 2266 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 2267 /* Restart the timer if we have pending data */ 2268 struct sctp_tmit_chunk *chk; 2269 2270 chk = TAILQ_FIRST(&asoc->sent_queue); 2271 if (chk) { 2272 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2273 stcb, chk->whoTo); 2274 } 2275 } 2276 } 2277 2278 static void 2279 sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp, 2280 struct sctp_tcb *stcb) 2281 { 2282 struct sctp_nets *net; 2283 struct sctp_tmit_chunk *lchk; 2284 uint32_t tsn; 2285 2286 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) { 2287 return; 2288 } 2289 SCTP_STAT_INCR(sctps_recvecne); 2290 tsn = ntohl(cp->tsn); 2291 /* ECN Nonce stuff: need a resync and disable the nonce sum check */ 2292 /* Also we make sure we disable the nonce_wait */ 2293 lchk = TAILQ_FIRST(&stcb->asoc.send_queue); 2294 if (lchk == NULL) { 2295 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq; 2296 } else { 2297 stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq; 2298 } 2299 stcb->asoc.nonce_wait_for_ecne = 0; 2300 stcb->asoc.nonce_sum_check = 0; 2301 2302 /* Find where it was sent, if possible */ 2303 net = NULL; 2304 lchk = TAILQ_FIRST(&stcb->asoc.sent_queue); 2305 while (lchk) { 2306 if (lchk->rec.data.TSN_seq == tsn) { 2307 net = lchk->whoTo; 2308 break; 2309 } 2310 if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ)) 2311 break; 2312 lchk = TAILQ_NEXT(lchk, sctp_next); 2313 } 2314 if (net == NULL) 2315 /* default is we use the primary */ 2316 net = stcb->asoc.primary_destination; 2317 2318 if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) { 2319 #ifdef SCTP_CWND_MONITOR 2320 int old_cwnd; 2321 2322 old_cwnd = net->cwnd; 2323 #endif 2324 SCTP_STAT_INCR(sctps_ecnereducedcwnd); 2325 net->ssthresh = net->cwnd / 2; 2326 if (net->ssthresh < net->mtu) { 2327 net->ssthresh = net->mtu; 2328 /* here back off the timer as well, to slow us down */ 2329 net->RTO <<= 2; 2330 } 2331 net->cwnd = net->ssthresh; 2332 #ifdef SCTP_CWND_MONITOR 2333 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT); 2334 #endif 2335 /* 2336 * we reduce once every RTT. So we will only lower cwnd at 2337 * the next sending seq i.e. the resync_tsn. 2338 */ 2339 stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn; 2340 } 2341 /* 2342 * We always send a CWR this way if our previous one was lost our 2343 * peer will get an update, or if it is not time again to reduce we 2344 * still get the cwr to the peer. 2345 */ 2346 sctp_send_cwr(stcb, net, tsn); 2347 } 2348 2349 static void 2350 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb) 2351 { 2352 /* 2353 * Here we get a CWR from the peer. We must look in the outqueue and 2354 * make sure that we have a covered ECNE in teh control chunk part. 2355 * If so remove it. 2356 */ 2357 struct sctp_tmit_chunk *chk; 2358 struct sctp_ecne_chunk *ecne; 2359 2360 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 2361 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) { 2362 continue; 2363 } 2364 /* 2365 * Look for and remove if it is the right TSN. 
Since there 2366 * is only ONE ECNE on the control queue at any one time we 2367 * don't need to worry about more than one! 2368 */ 2369 ecne = mtod(chk->data, struct sctp_ecne_chunk *); 2370 if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn), 2371 MAX_TSN) || (cp->tsn == ecne->tsn)) { 2372 /* this covers this ECNE, we can remove it */ 2373 stcb->asoc.ecn_echo_cnt_onq--; 2374 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, 2375 sctp_next); 2376 if (chk->data) { 2377 sctp_m_freem(chk->data); 2378 chk->data = NULL; 2379 } 2380 stcb->asoc.ctrl_queue_cnt--; 2381 sctp_free_remote_addr(chk->whoTo); 2382 sctp_free_a_chunk(stcb, chk); 2383 break; 2384 } 2385 } 2386 } 2387 2388 static void 2389 sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp, 2390 struct sctp_tcb *stcb, struct sctp_nets *net) 2391 { 2392 struct sctp_association *asoc; 2393 2394 SCTPDBG(SCTP_DEBUG_INPUT2, 2395 "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n"); 2396 if (stcb == NULL) 2397 return; 2398 2399 asoc = &stcb->asoc; 2400 /* process according to association state */ 2401 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) { 2402 /* unexpected SHUTDOWN-COMPLETE... so ignore... */ 2403 SCTP_TCB_UNLOCK(stcb); 2404 return; 2405 } 2406 /* notify upper layer protocol */ 2407 if (stcb->sctp_socket) { 2408 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL); 2409 /* are the queues empty? they should be */ 2410 if (!TAILQ_EMPTY(&asoc->send_queue) || 2411 !TAILQ_EMPTY(&asoc->sent_queue) || 2412 !TAILQ_EMPTY(&asoc->out_wheel)) { 2413 sctp_report_all_outbound(stcb, 0); 2414 } 2415 } 2416 /* stop the timer */ 2417 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_21); 2418 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 2419 /* free the TCB */ 2420 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22); 2421 return; 2422 } 2423 2424 static int 2425 process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc, 2426 struct sctp_nets *net, uint8_t flg) 2427 { 2428 switch (desc->chunk_type) { 2429 case SCTP_DATA: 2430 /* find the tsn to resend (possibly */ 2431 { 2432 uint32_t tsn; 2433 struct sctp_tmit_chunk *tp1; 2434 2435 tsn = ntohl(desc->tsn_ifany); 2436 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2437 while (tp1) { 2438 if (tp1->rec.data.TSN_seq == tsn) { 2439 /* found it */ 2440 break; 2441 } 2442 if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn, 2443 MAX_TSN)) { 2444 /* not found */ 2445 tp1 = NULL; 2446 break; 2447 } 2448 tp1 = TAILQ_NEXT(tp1, sctp_next); 2449 } 2450 if (tp1 == NULL) { 2451 /* 2452 * Do it the other way , aka without paying 2453 * attention to queue seq order. 
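 * (Illustrative note: the first pass above walks sent_queue in TSN order
 * and bails out early once the target has been passed --
 *
 *     if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn, MAX_TSN))
 *             break;          already beyond tsn, it cannot be later
 *
 * This second pass simply rescans every entry in case ordering did not
 * hold, and only then is sctps_pdrptsnnf counted as "TSN not found".)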
2454 */ 2455 SCTP_STAT_INCR(sctps_pdrpdnfnd); 2456 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2457 while (tp1) { 2458 if (tp1->rec.data.TSN_seq == tsn) { 2459 /* found it */ 2460 break; 2461 } 2462 tp1 = TAILQ_NEXT(tp1, sctp_next); 2463 } 2464 } 2465 if (tp1 == NULL) { 2466 SCTP_STAT_INCR(sctps_pdrptsnnf); 2467 } 2468 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) { 2469 uint8_t *ddp; 2470 2471 if ((stcb->asoc.peers_rwnd == 0) && 2472 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { 2473 SCTP_STAT_INCR(sctps_pdrpdiwnp); 2474 return (0); 2475 } 2476 if (stcb->asoc.peers_rwnd == 0 && 2477 (flg & SCTP_FROM_MIDDLE_BOX)) { 2478 SCTP_STAT_INCR(sctps_pdrpdizrw); 2479 return (0); 2480 } 2481 ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+ 2482 sizeof(struct sctp_data_chunk)); 2483 { 2484 unsigned int iii; 2485 2486 for (iii = 0; iii < sizeof(desc->data_bytes); 2487 iii++) { 2488 if (ddp[iii] != desc->data_bytes[iii]) { 2489 SCTP_STAT_INCR(sctps_pdrpbadd); 2490 return (-1); 2491 } 2492 } 2493 } 2494 /* 2495 * We zero out the nonce so resync not 2496 * needed 2497 */ 2498 tp1->rec.data.ect_nonce = 0; 2499 2500 if (tp1->do_rtt) { 2501 /* 2502 * this guy had a RTO calculation 2503 * pending on it, cancel it 2504 */ 2505 tp1->do_rtt = 0; 2506 } 2507 SCTP_STAT_INCR(sctps_pdrpmark); 2508 if (tp1->sent != SCTP_DATAGRAM_RESEND) 2509 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2510 tp1->sent = SCTP_DATAGRAM_RESEND; 2511 /* 2512 * mark it as if we were doing a FR, since 2513 * we will be getting gap ack reports behind 2514 * the info from the router. 2515 */ 2516 tp1->rec.data.doing_fast_retransmit = 1; 2517 /* 2518 * mark the tsn with what sequences can 2519 * cause a new FR. 2520 */ 2521 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 2522 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 2523 } else { 2524 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 2525 } 2526 2527 /* restart the timer */ 2528 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2529 stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23); 2530 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2531 stcb, tp1->whoTo); 2532 2533 /* fix counts and things */ 2534 #ifdef SCTP_FLIGHT_LOGGING 2535 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP, 2536 tp1->whoTo->flight_size, 2537 tp1->book_size, 2538 (uintptr_t) stcb, 2539 tp1->rec.data.TSN_seq); 2540 #endif 2541 sctp_flight_size_decrease(tp1); 2542 sctp_total_flight_decrease(stcb, tp1); 2543 } { 2544 /* audit code */ 2545 unsigned int audit; 2546 2547 audit = 0; 2548 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 2549 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2550 audit++; 2551 } 2552 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue, 2553 sctp_next) { 2554 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2555 audit++; 2556 } 2557 if (audit != stcb->asoc.sent_queue_retran_cnt) { 2558 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n", 2559 audit, stcb->asoc.sent_queue_retran_cnt); 2560 #ifndef SCTP_AUDITING_ENABLED 2561 stcb->asoc.sent_queue_retran_cnt = audit; 2562 #endif 2563 } 2564 } 2565 } 2566 break; 2567 case SCTP_ASCONF: 2568 { 2569 struct sctp_tmit_chunk *asconf; 2570 2571 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue, 2572 sctp_next) { 2573 if (asconf->rec.chunk_id.id == SCTP_ASCONF) { 2574 break; 2575 } 2576 } 2577 if (asconf) { 2578 if (asconf->sent != SCTP_DATAGRAM_RESEND) 2579 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2580 asconf->sent = SCTP_DATAGRAM_RESEND; 2581 asconf->snd_count--; 2582 } 2583 } 2584 break; 2585 case 
SCTP_INITIATION: 2586 /* resend the INIT */ 2587 stcb->asoc.dropped_special_cnt++; 2588 if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) { 2589 /* 2590 * If we can get it in, in a few attempts we do 2591 * this, otherwise we let the timer fire. 2592 */ 2593 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, 2594 stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24); 2595 sctp_send_initiate(stcb->sctp_ep, stcb); 2596 } 2597 break; 2598 case SCTP_SELECTIVE_ACK: 2599 /* resend the sack */ 2600 sctp_send_sack(stcb); 2601 break; 2602 case SCTP_HEARTBEAT_REQUEST: 2603 /* resend a demand HB */ 2604 (void)sctp_send_hb(stcb, 1, net); 2605 break; 2606 case SCTP_SHUTDOWN: 2607 sctp_send_shutdown(stcb, net); 2608 break; 2609 case SCTP_SHUTDOWN_ACK: 2610 sctp_send_shutdown_ack(stcb, net); 2611 break; 2612 case SCTP_COOKIE_ECHO: 2613 { 2614 struct sctp_tmit_chunk *cookie; 2615 2616 cookie = NULL; 2617 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, 2618 sctp_next) { 2619 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 2620 break; 2621 } 2622 } 2623 if (cookie) { 2624 if (cookie->sent != SCTP_DATAGRAM_RESEND) 2625 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2626 cookie->sent = SCTP_DATAGRAM_RESEND; 2627 sctp_stop_all_cookie_timers(stcb); 2628 } 2629 } 2630 break; 2631 case SCTP_COOKIE_ACK: 2632 sctp_send_cookie_ack(stcb); 2633 break; 2634 case SCTP_ASCONF_ACK: 2635 /* resend last asconf ack */ 2636 sctp_send_asconf_ack(stcb, 1); 2637 break; 2638 case SCTP_FORWARD_CUM_TSN: 2639 send_forward_tsn(stcb, &stcb->asoc); 2640 break; 2641 /* can't do anything with these */ 2642 case SCTP_PACKET_DROPPED: 2643 case SCTP_INITIATION_ACK: /* this should not happen */ 2644 case SCTP_HEARTBEAT_ACK: 2645 case SCTP_ABORT_ASSOCIATION: 2646 case SCTP_OPERATION_ERROR: 2647 case SCTP_SHUTDOWN_COMPLETE: 2648 case SCTP_ECN_ECHO: 2649 case SCTP_ECN_CWR: 2650 default: 2651 break; 2652 } 2653 return (0); 2654 } 2655 2656 void 2657 sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 2658 { 2659 int i; 2660 uint16_t temp; 2661 2662 /* 2663 * We set things to 0xffff since this is the last delivered sequence 2664 * and we will be sending in 0 after the reset. 
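 * (Editor's aside: stream sequence numbers are 16 bits and restart at 0
 * after a reset, so priming the slot with 0xffff makes SSN 0 look like
 * the in-order successor --
 *
 *     stcb->asoc.strmin[sid].last_sequence_delivered = 0xffff;
 *
 * where sid here stands for any stream listed in the request, or every
 * inbound stream when no list is given.)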
2665 */ 2666 2667 if (number_entries) { 2668 for (i = 0; i < number_entries; i++) { 2669 temp = ntohs(list[i]); 2670 if (temp >= stcb->asoc.streamincnt) { 2671 continue; 2672 } 2673 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff; 2674 } 2675 } else { 2676 list = NULL; 2677 for (i = 0; i < stcb->asoc.streamincnt; i++) { 2678 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff; 2679 } 2680 } 2681 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list); 2682 } 2683 2684 static void 2685 sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 2686 { 2687 int i; 2688 2689 if (number_entries == 0) { 2690 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 2691 stcb->asoc.strmout[i].next_sequence_sent = 0; 2692 } 2693 } else if (number_entries) { 2694 for (i = 0; i < number_entries; i++) { 2695 uint16_t temp; 2696 2697 temp = ntohs(list[i]); 2698 if (temp >= stcb->asoc.streamoutcnt) { 2699 /* no such stream */ 2700 continue; 2701 } 2702 stcb->asoc.strmout[temp].next_sequence_sent = 0; 2703 } 2704 } 2705 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list); 2706 } 2707 2708 2709 struct sctp_stream_reset_out_request * 2710 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk) 2711 { 2712 struct sctp_association *asoc; 2713 struct sctp_stream_reset_out_req *req; 2714 struct sctp_stream_reset_out_request *r; 2715 struct sctp_tmit_chunk *chk; 2716 int len, clen; 2717 2718 asoc = &stcb->asoc; 2719 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 2720 asoc->stream_reset_outstanding = 0; 2721 return (NULL); 2722 } 2723 if (stcb->asoc.str_reset == NULL) { 2724 asoc->stream_reset_outstanding = 0; 2725 return (NULL); 2726 } 2727 chk = stcb->asoc.str_reset; 2728 if (chk->data == NULL) { 2729 return (NULL); 2730 } 2731 if (bchk) { 2732 /* he wants a copy of the chk pointer */ 2733 *bchk = chk; 2734 } 2735 clen = chk->send_size; 2736 req = mtod(chk->data, struct sctp_stream_reset_out_req *); 2737 r = &req->sr_req; 2738 if (ntohl(r->request_seq) == seq) { 2739 /* found it */ 2740 return (r); 2741 } 2742 len = SCTP_SIZE32(ntohs(r->ph.param_length)); 2743 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) { 2744 /* move to the next one, there can only be a max of two */ 2745 r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len); 2746 if (ntohl(r->request_seq) == seq) { 2747 return (r); 2748 } 2749 } 2750 /* that seq is not here */ 2751 return (NULL); 2752 } 2753 2754 static void 2755 sctp_clean_up_stream_reset(struct sctp_tcb *stcb) 2756 { 2757 struct sctp_association *asoc; 2758 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset; 2759 2760 if (stcb->asoc.str_reset == NULL) { 2761 return; 2762 } 2763 asoc = &stcb->asoc; 2764 2765 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25); 2766 TAILQ_REMOVE(&asoc->control_send_queue, 2767 chk, 2768 sctp_next); 2769 if (chk->data) { 2770 sctp_m_freem(chk->data); 2771 chk->data = NULL; 2772 } 2773 asoc->ctrl_queue_cnt--; 2774 sctp_free_remote_addr(chk->whoTo); 2775 2776 sctp_free_a_chunk(stcb, chk); 2777 stcb->asoc.str_reset = NULL; 2778 } 2779 2780 2781 static int 2782 sctp_handle_stream_reset_response(struct sctp_tcb *stcb, 2783 uint32_t seq, uint32_t action, 2784 struct sctp_stream_reset_response *respin) 2785 { 2786 uint16_t type; 2787 int lparm_len; 2788 struct sctp_association *asoc = &stcb->asoc; 2789 struct sctp_tmit_chunk *chk; 2790 struct sctp_stream_reset_out_request 
*srparam; 2791 int number_entries; 2792 2793 if (asoc->stream_reset_outstanding == 0) { 2794 /* duplicate */ 2795 return (0); 2796 } 2797 if (seq == stcb->asoc.str_reset_seq_out) { 2798 srparam = sctp_find_stream_reset(stcb, seq, &chk); 2799 if (srparam) { 2800 stcb->asoc.str_reset_seq_out++; 2801 type = ntohs(srparam->ph.param_type); 2802 lparm_len = ntohs(srparam->ph.param_length); 2803 if (type == SCTP_STR_RESET_OUT_REQUEST) { 2804 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t); 2805 asoc->stream_reset_out_is_outstanding = 0; 2806 if (asoc->stream_reset_outstanding) 2807 asoc->stream_reset_outstanding--; 2808 if (action == SCTP_STREAM_RESET_PERFORMED) { 2809 /* do it */ 2810 sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams); 2811 } else { 2812 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams); 2813 } 2814 } else if (type == SCTP_STR_RESET_IN_REQUEST) { 2815 /* Answered my request */ 2816 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t); 2817 if (asoc->stream_reset_outstanding) 2818 asoc->stream_reset_outstanding--; 2819 if (action != SCTP_STREAM_RESET_PERFORMED) { 2820 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams); 2821 } 2822 } else if (type == SCTP_STR_RESET_TSN_REQUEST) { 2823 /** 2824 * a) Adopt the new in tsn. 2825 * b) reset the map 2826 * c) Adopt the new out-tsn 2827 */ 2828 struct sctp_stream_reset_response_tsn *resp; 2829 struct sctp_forward_tsn_chunk fwdtsn; 2830 int abort_flag = 0; 2831 2832 if (respin == NULL) { 2833 /* huh ? */ 2834 return (0); 2835 } 2836 if (action == SCTP_STREAM_RESET_PERFORMED) { 2837 resp = (struct sctp_stream_reset_response_tsn *)respin; 2838 asoc->stream_reset_outstanding--; 2839 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 2840 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 2841 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1); 2842 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag); 2843 if (abort_flag) { 2844 return (1); 2845 } 2846 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1); 2847 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 2848 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn); 2849 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 2850 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn); 2851 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn; 2852 2853 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 2854 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 2855 2856 } 2857 } 2858 /* get rid of the request and get the request flags */ 2859 if (asoc->stream_reset_outstanding == 0) { 2860 sctp_clean_up_stream_reset(stcb); 2861 } 2862 } 2863 } 2864 return (0); 2865 } 2866 2867 static void 2868 sctp_handle_str_reset_request_in(struct sctp_tcb *stcb, 2869 struct sctp_tmit_chunk *chk, 2870 struct sctp_stream_reset_in_request *req) 2871 { 2872 uint32_t seq; 2873 int len, i; 2874 int number_entries; 2875 uint16_t temp; 2876 2877 /* 2878 * peer wants me to send a str-reset to him for my outgoing seq's if 2879 * seq_in is right. 
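 * (Illustrative summary of the sequence check below: requests carry an
 * increasing request_seq and we keep a two-deep history of results so a
 * lost response can simply be replayed --
 *
 *     seq == str_reset_seq_in      handle it, then str_reset_seq_in++
 *     seq == str_reset_seq_in - 1  re-send last_reset_action[0]
 *     seq == str_reset_seq_in - 2  re-send last_reset_action[1]
 *     anything else                answer SCTP_STREAM_RESET_BAD_SEQNO
 * )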
2880 */ 2881 struct sctp_association *asoc = &stcb->asoc; 2882 2883 seq = ntohl(req->request_seq); 2884 if (asoc->str_reset_seq_in == seq) { 2885 if (stcb->asoc.stream_reset_out_is_outstanding == 0) { 2886 len = ntohs(req->ph.param_length); 2887 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t)); 2888 for (i = 0; i < number_entries; i++) { 2889 temp = ntohs(req->list_of_streams[i]); 2890 req->list_of_streams[i] = temp; 2891 } 2892 /* move the reset action back one */ 2893 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 2894 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 2895 sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams, 2896 asoc->str_reset_seq_out, 2897 seq, (asoc->sending_seq - 1)); 2898 asoc->stream_reset_out_is_outstanding = 1; 2899 asoc->str_reset = chk; 2900 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 2901 stcb->asoc.stream_reset_outstanding++; 2902 } else { 2903 /* Can't do it, since we have sent one out */ 2904 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 2905 asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER; 2906 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 2907 } 2908 asoc->str_reset_seq_in++; 2909 } else if (asoc->str_reset_seq_in - 1 == seq) { 2910 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 2911 } else if (asoc->str_reset_seq_in - 2 == seq) { 2912 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 2913 } else { 2914 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 2915 } 2916 } 2917 2918 static int 2919 sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb, 2920 struct sctp_tmit_chunk *chk, 2921 struct sctp_stream_reset_tsn_request *req) 2922 { 2923 /* reset all in and out and update the tsn */ 2924 /* 2925 * A) reset my str-seq's on in and out. B) Select a receive next, 2926 * and set cum-ack to it. Also process this selected number as a 2927 * fwd-tsn as well. C) set in the response my next sending seq. 
2928 */ 2929 struct sctp_forward_tsn_chunk fwdtsn; 2930 struct sctp_association *asoc = &stcb->asoc; 2931 int abort_flag = 0; 2932 uint32_t seq; 2933 2934 seq = ntohl(req->request_seq); 2935 if (asoc->str_reset_seq_in == seq) { 2936 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 2937 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 2938 fwdtsn.ch.chunk_flags = 0; 2939 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1); 2940 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag); 2941 if (abort_flag) { 2942 return (1); 2943 } 2944 stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA; 2945 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 2946 stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1; 2947 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 2948 atomic_add_int(&stcb->asoc.sending_seq, 1); 2949 /* save off historical data for retrans */ 2950 stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0]; 2951 stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq; 2952 stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0]; 2953 stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn; 2954 2955 sctp_add_stream_reset_result_tsn(chk, 2956 ntohl(req->request_seq), 2957 SCTP_STREAM_RESET_PERFORMED, 2958 stcb->asoc.sending_seq, 2959 stcb->asoc.mapping_array_base_tsn); 2960 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 2961 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 2962 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0]; 2963 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 2964 2965 asoc->str_reset_seq_in++; 2966 } else if (asoc->str_reset_seq_in - 1 == seq) { 2967 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0], 2968 stcb->asoc.last_sending_seq[0], 2969 stcb->asoc.last_base_tsnsent[0] 2970 ); 2971 } else if (asoc->str_reset_seq_in - 2 == seq) { 2972 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1], 2973 stcb->asoc.last_sending_seq[1], 2974 stcb->asoc.last_base_tsnsent[1] 2975 ); 2976 } else { 2977 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 2978 } 2979 return (0); 2980 } 2981 2982 static void 2983 sctp_handle_str_reset_request_out(struct sctp_tcb *stcb, 2984 struct sctp_tmit_chunk *chk, 2985 struct sctp_stream_reset_out_request *req) 2986 { 2987 uint32_t seq, tsn; 2988 int number_entries, len; 2989 struct sctp_association *asoc = &stcb->asoc; 2990 2991 seq = ntohl(req->request_seq); 2992 2993 /* now if its not a duplicate we process it */ 2994 if (asoc->str_reset_seq_in == seq) { 2995 len = ntohs(req->ph.param_length); 2996 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t)); 2997 /* 2998 * the sender is resetting, handle the list issue.. we must 2999 * a) verify if we can do the reset, if so no problem b) If 3000 * we can't do the reset we must copy the request. c) queue 3001 * it, and setup the data in processor to trigger it off 3002 * when needed and dequeue all the queued data. 
3003 */ 3004 tsn = ntohl(req->send_reset_at_tsn); 3005 3006 /* move the reset action back one */ 3007 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3008 if ((tsn == asoc->cumulative_tsn) || 3009 (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) { 3010 /* we can do it now */ 3011 sctp_reset_in_stream(stcb, number_entries, req->list_of_streams); 3012 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3013 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3014 } else { 3015 /* 3016 * we must queue it up and thus wait for the TSN's 3017 * to arrive that are at or before tsn 3018 */ 3019 struct sctp_stream_reset_list *liste; 3020 int siz; 3021 3022 siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t)); 3023 SCTP_MALLOC(liste, struct sctp_stream_reset_list *, 3024 siz, "StrRstList"); 3025 if (liste == NULL) { 3026 /* gak out of memory */ 3027 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3028 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3029 return; 3030 } 3031 liste->tsn = tsn; 3032 liste->number_entries = number_entries; 3033 memcpy(&liste->req, req, 3034 (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t)))); 3035 TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp); 3036 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3037 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3038 } 3039 asoc->str_reset_seq_in++; 3040 } else if ((asoc->str_reset_seq_in - 1) == seq) { 3041 /* 3042 * one seq back, just echo back last action since my 3043 * response was lost. 3044 */ 3045 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3046 } else if ((asoc->str_reset_seq_in - 2) == seq) { 3047 /* 3048 * two seq back, just echo back last action since my 3049 * response was lost. 
3050 */ 3051 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3052 } else { 3053 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3054 } 3055 } 3056 3057 static int 3058 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct sctp_stream_reset_out_req *sr_req) 3059 { 3060 int chk_length, param_len, ptype; 3061 uint32_t seq; 3062 int num_req = 0; 3063 struct sctp_tmit_chunk *chk; 3064 struct sctp_chunkhdr *ch; 3065 struct sctp_paramhdr *ph; 3066 int ret_code = 0; 3067 int num_param = 0; 3068 3069 /* now it may be a reset or a reset-response */ 3070 chk_length = ntohs(sr_req->ch.chunk_length); 3071 3072 /* setup for adding the response */ 3073 sctp_alloc_a_chunk(stcb, chk); 3074 if (chk == NULL) { 3075 return (ret_code); 3076 } 3077 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 3078 chk->rec.chunk_id.can_take_data = 0; 3079 chk->asoc = &stcb->asoc; 3080 chk->no_fr_allowed = 0; 3081 chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr); 3082 chk->book_size_scale = 0; 3083 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3084 if (chk->data == NULL) { 3085 strres_nochunk: 3086 if (chk->data) { 3087 sctp_m_freem(chk->data); 3088 chk->data = NULL; 3089 } 3090 sctp_free_a_chunk(stcb, chk); 3091 return (ret_code); 3092 } 3093 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 3094 3095 /* setup chunk parameters */ 3096 chk->sent = SCTP_DATAGRAM_UNSENT; 3097 chk->snd_count = 0; 3098 chk->whoTo = stcb->asoc.primary_destination; 3099 atomic_add_int(&chk->whoTo->ref_count, 1); 3100 3101 ch = mtod(chk->data, struct sctp_chunkhdr *); 3102 ch->chunk_type = SCTP_STREAM_RESET; 3103 ch->chunk_flags = 0; 3104 ch->chunk_length = htons(chk->send_size); 3105 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 3106 ph = (struct sctp_paramhdr *)&sr_req->sr_req; 3107 while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) { 3108 param_len = ntohs(ph->param_length); 3109 if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) { 3110 /* bad param */ 3111 break; 3112 } 3113 ptype = ntohs(ph->param_type); 3114 num_param++; 3115 if (num_param > SCTP_MAX_RESET_PARAMS) { 3116 /* hit the max of parameters already sorry.. 
*/ 3117 break; 3118 } 3119 if (ptype == SCTP_STR_RESET_OUT_REQUEST) { 3120 struct sctp_stream_reset_out_request *req_out; 3121 3122 req_out = (struct sctp_stream_reset_out_request *)ph; 3123 num_req++; 3124 if (stcb->asoc.stream_reset_outstanding) { 3125 seq = ntohl(req_out->response_seq); 3126 if (seq == stcb->asoc.str_reset_seq_out) { 3127 /* implicit ack */ 3128 (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL); 3129 } 3130 } 3131 sctp_handle_str_reset_request_out(stcb, chk, req_out); 3132 } else if (ptype == SCTP_STR_RESET_IN_REQUEST) { 3133 struct sctp_stream_reset_in_request *req_in; 3134 3135 num_req++; 3136 req_in = (struct sctp_stream_reset_in_request *)ph; 3137 sctp_handle_str_reset_request_in(stcb, chk, req_in); 3138 } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) { 3139 struct sctp_stream_reset_tsn_request *req_tsn; 3140 3141 num_req++; 3142 req_tsn = (struct sctp_stream_reset_tsn_request *)ph; 3143 if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) { 3144 ret_code = 1; 3145 goto strres_nochunk; 3146 } 3147 /* no more */ 3148 break; 3149 } else if (ptype == SCTP_STR_RESET_RESPONSE) { 3150 struct sctp_stream_reset_response *resp; 3151 uint32_t result; 3152 3153 resp = (struct sctp_stream_reset_response *)ph; 3154 seq = ntohl(resp->response_seq); 3155 result = ntohl(resp->result); 3156 if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) { 3157 ret_code = 1; 3158 goto strres_nochunk; 3159 } 3160 } else { 3161 break; 3162 } 3163 3164 ph = (struct sctp_paramhdr *)((caddr_t)ph + SCTP_SIZE32(param_len)); 3165 chk_length -= SCTP_SIZE32(param_len); 3166 } 3167 if (num_req == 0) { 3168 /* we have no response free the stuff */ 3169 goto strres_nochunk; 3170 } 3171 /* ok we have a chunk to link in */ 3172 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, 3173 chk, 3174 sctp_next); 3175 stcb->asoc.ctrl_queue_cnt++; 3176 return (ret_code); 3177 } 3178 3179 /* 3180 * Handle a router or endpoints report of a packet loss, there are two ways 3181 * to handle this, either we get the whole packet and must disect it 3182 * ourselves (possibly with truncation and or corruption) or it is a summary 3183 * from a middle box that did the disectting for us. 3184 */ 3185 static void 3186 sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, 3187 struct sctp_tcb *stcb, struct sctp_nets *net) 3188 { 3189 uint32_t bottle_bw, on_queue; 3190 uint16_t trunc_len; 3191 unsigned int chlen; 3192 unsigned int at; 3193 struct sctp_chunk_desc desc; 3194 struct sctp_chunkhdr *ch; 3195 3196 chlen = ntohs(cp->ch.chunk_length); 3197 chlen -= sizeof(struct sctp_pktdrop_chunk); 3198 /* XXX possible chlen underflow */ 3199 if (chlen == 0) { 3200 ch = NULL; 3201 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) 3202 SCTP_STAT_INCR(sctps_pdrpbwrpt); 3203 } else { 3204 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr)); 3205 chlen -= sizeof(struct sctphdr); 3206 /* XXX possible chlen underflow */ 3207 memset(&desc, 0, sizeof(desc)); 3208 } 3209 trunc_len = (uint16_t) ntohs(cp->trunc_len); 3210 /* now the chunks themselves */ 3211 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) { 3212 desc.chunk_type = ch->chunk_type; 3213 /* get amount we need to move */ 3214 at = ntohs(ch->chunk_length); 3215 if (at < sizeof(struct sctp_chunkhdr)) { 3216 /* corrupt chunk, maybe at the end? 
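 * (Editor's note: besides outright corruption, a middle box may have
 * truncated the reflected packet. When trunc_len is non-zero the loop
 * below therefore only requires enough bytes to identify the chunk,
 * e.g. for DATA --
 *
 *     if (chlen < sizeof(struct sctp_data_chunk) + sizeof(desc.data_bytes))
 *             break;          not enough of the chunk survived
 *
 * instead of demanding the full chunk_length.)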
*/ 3217 SCTP_STAT_INCR(sctps_pdrpcrupt); 3218 break; 3219 } 3220 if (trunc_len == 0) { 3221 /* we are supposed to have all of it */ 3222 if (at > chlen) { 3223 /* corrupt skip it */ 3224 SCTP_STAT_INCR(sctps_pdrpcrupt); 3225 break; 3226 } 3227 } else { 3228 /* is there enough of it left ? */ 3229 if (desc.chunk_type == SCTP_DATA) { 3230 if (chlen < (sizeof(struct sctp_data_chunk) + 3231 sizeof(desc.data_bytes))) { 3232 break; 3233 } 3234 } else { 3235 if (chlen < sizeof(struct sctp_chunkhdr)) { 3236 break; 3237 } 3238 } 3239 } 3240 if (desc.chunk_type == SCTP_DATA) { 3241 /* can we get out the tsn? */ 3242 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 3243 SCTP_STAT_INCR(sctps_pdrpmbda); 3244 3245 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) { 3246 /* yep */ 3247 struct sctp_data_chunk *dcp; 3248 uint8_t *ddp; 3249 unsigned int iii; 3250 3251 dcp = (struct sctp_data_chunk *)ch; 3252 ddp = (uint8_t *) (dcp + 1); 3253 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) { 3254 desc.data_bytes[iii] = ddp[iii]; 3255 } 3256 desc.tsn_ifany = dcp->dp.tsn; 3257 } else { 3258 /* nope we are done. */ 3259 SCTP_STAT_INCR(sctps_pdrpnedat); 3260 break; 3261 } 3262 } else { 3263 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 3264 SCTP_STAT_INCR(sctps_pdrpmbct); 3265 } 3266 3267 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) { 3268 SCTP_STAT_INCR(sctps_pdrppdbrk); 3269 break; 3270 } 3271 if (SCTP_SIZE32(at) > chlen) { 3272 break; 3273 } 3274 chlen -= SCTP_SIZE32(at); 3275 if (chlen < sizeof(struct sctp_chunkhdr)) { 3276 /* done, none left */ 3277 break; 3278 } 3279 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at)); 3280 } 3281 /* Now update any rwnd --- possibly */ 3282 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) { 3283 /* From a peer, we get a rwnd report */ 3284 uint32_t a_rwnd; 3285 3286 SCTP_STAT_INCR(sctps_pdrpfehos); 3287 3288 bottle_bw = ntohl(cp->bottle_bw); 3289 on_queue = ntohl(cp->current_onq); 3290 if (bottle_bw && on_queue) { 3291 /* a rwnd report is in here */ 3292 if (bottle_bw > on_queue) 3293 a_rwnd = bottle_bw - on_queue; 3294 else 3295 a_rwnd = 0; 3296 3297 if (a_rwnd == 0) 3298 stcb->asoc.peers_rwnd = 0; 3299 else { 3300 if (a_rwnd > stcb->asoc.total_flight) { 3301 stcb->asoc.peers_rwnd = 3302 a_rwnd - stcb->asoc.total_flight; 3303 } else { 3304 stcb->asoc.peers_rwnd = 0; 3305 } 3306 if (stcb->asoc.peers_rwnd < 3307 stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3308 /* SWS sender side engages */ 3309 stcb->asoc.peers_rwnd = 0; 3310 } 3311 } 3312 } 3313 } else { 3314 SCTP_STAT_INCR(sctps_pdrpfmbox); 3315 } 3316 3317 /* now middle boxes in sat networks get a cwnd bump */ 3318 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) && 3319 (stcb->asoc.sat_t3_loss_recovery == 0) && 3320 (stcb->asoc.sat_network)) { 3321 /* 3322 * This is debateable but for sat networks it makes sense 3323 * Note if a T3 timer has went off, we will prohibit any 3324 * changes to cwnd until we exit the t3 loss recovery. 
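 * (Editor's sketch of the arithmetic below; the millisecond unit for rtt
 * is an assumption based on the surrounding RTO code:
 *
 *     rtt      = ((net->lastsa >> 2) + net->lastsv) >> 1;
 *     bw_avail = (bottle_bw * rtt) / 1000;    bytes the bottleneck can hold
 *     if (bw_avail > bottle_bw)
 *             bw_avail = bottle_bw;           cap at one second's worth
 *
 * If on_queue exceeds bw_avail, cwnd is pulled back by our share of the
 * overage; otherwise it may grow, as worked through further below.)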
3325 */ 3326 uint32_t bw_avail; 3327 int rtt, incr; 3328 3329 #ifdef SCTP_CWND_MONITOR 3330 int old_cwnd = net->cwnd; 3331 3332 #endif 3333 /* need real RTT for this calc */ 3334 rtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 3335 /* get bottle neck bw */ 3336 bottle_bw = ntohl(cp->bottle_bw); 3337 /* and whats on queue */ 3338 on_queue = ntohl(cp->current_onq); 3339 /* 3340 * adjust the on-queue if our flight is more it could be 3341 * that the router has not yet gotten data "in-flight" to it 3342 */ 3343 if (on_queue < net->flight_size) 3344 on_queue = net->flight_size; 3345 3346 /* calculate the available space */ 3347 bw_avail = (bottle_bw * rtt) / 1000; 3348 if (bw_avail > bottle_bw) { 3349 /* 3350 * Cap the growth to no more than the bottle neck. 3351 * This can happen as RTT slides up due to queues. 3352 * It also means if you have more than a 1 second 3353 * RTT with a empty queue you will be limited to the 3354 * bottle_bw per second no matter if other points 3355 * have 1/2 the RTT and you could get more out... 3356 */ 3357 bw_avail = bottle_bw; 3358 } 3359 if (on_queue > bw_avail) { 3360 /* 3361 * No room for anything else don't allow anything 3362 * else to be "added to the fire". 3363 */ 3364 int seg_inflight, seg_onqueue, my_portion; 3365 3366 net->partial_bytes_acked = 0; 3367 3368 /* how much are we over queue size? */ 3369 incr = on_queue - bw_avail; 3370 if (stcb->asoc.seen_a_sack_this_pkt) { 3371 /* 3372 * undo any cwnd adjustment that the sack 3373 * might have made 3374 */ 3375 net->cwnd = net->prev_cwnd; 3376 } 3377 /* Now how much of that is mine? */ 3378 seg_inflight = net->flight_size / net->mtu; 3379 seg_onqueue = on_queue / net->mtu; 3380 my_portion = (incr * seg_inflight) / seg_onqueue; 3381 3382 /* Have I made an adjustment already */ 3383 if (net->cwnd > net->flight_size) { 3384 /* 3385 * for this flight I made an adjustment we 3386 * need to decrease the portion by a share 3387 * our previous adjustment. 3388 */ 3389 int diff_adj; 3390 3391 diff_adj = net->cwnd - net->flight_size; 3392 if (diff_adj > my_portion) 3393 my_portion = 0; 3394 else 3395 my_portion -= diff_adj; 3396 } 3397 /* 3398 * back down to the previous cwnd (assume we have 3399 * had a sack before this packet). minus what ever 3400 * portion of the overage is my fault. 3401 */ 3402 net->cwnd -= my_portion; 3403 3404 /* we will NOT back down more than 1 MTU */ 3405 if (net->cwnd <= net->mtu) { 3406 net->cwnd = net->mtu; 3407 } 3408 /* force into CA */ 3409 net->ssthresh = net->cwnd - 1; 3410 } else { 3411 /* 3412 * Take 1/4 of the space left or max burst up .. 3413 * whichever is less. 
			 */
			incr = min((bw_avail - on_queue) >> 2,
			    (int)stcb->asoc.max_burst * (int)net->mtu);
			net->cwnd += incr;
		}
		if (net->cwnd > bw_avail) {
			/* We can't exceed the pipe size */
			net->cwnd = bw_avail;
		}
		if (net->cwnd < net->mtu) {
			/* We always have 1 MTU */
			net->cwnd = net->mtu;
		}
#ifdef SCTP_CWND_MONITOR
		if (net->cwnd - old_cwnd != 0) {
			/* log only changes */
			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
			    SCTP_CWND_LOG_FROM_SAT);
		}
#endif
	}
}

/*
 * Handles all control chunks in a packet.
 *
 * inputs:
 * - m: mbuf chain, assumed to still contain the IP/SCTP header
 * - stcb: the tcb found for this packet
 * - offset: offset into the mbuf chain to the first chunkhdr
 * - length: length of the complete packet
 * outputs:
 * - length: modified to the remaining length after control processing
 * - netp: modified to the new sctp_nets after cookie-echo processing
 * - return NULL to discard the packet (i.e. no asoc, bad packet, ...),
 *   otherwise return the tcb for this packet
 */
static struct sctp_tcb *
sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
    uint32_t vrf_id, uint32_t table_id)
{
	struct sctp_association *asoc;
	uint32_t vtag_in;
	int num_chunks = 0;	/* number of control chunks processed */
	int chk_length;
	int ret;
	int abort_no_unlock = 0;

	/*
	 * How big should this be, and should it be alloc'd? Let's try the
	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
	 * until we get into jumbo grams and such..
	 */
	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
	struct sctp_tcb *locked_tcb = stcb;
	int got_auth = 0;
	uint32_t auth_offset = 0, auth_len = 0;
	int auth_skipped = 0;

	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
	    iphlen, *offset, length, stcb);

	/* validate chunk header length... */
	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
		return (NULL);
	}
	/*
	 * validate the verification tag
	 */
	vtag_in = ntohl(sh->v_tag);

	if (locked_tcb) {
		SCTP_TCB_LOCK_ASSERT(locked_tcb);
	}
	if (ch->chunk_type == SCTP_INITIATION) {
		if (vtag_in != 0) {
			/* protocol error, silently discard... */
			SCTP_STAT_INCR(sctps_badvtag);
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
		/*
		 * If there is no stcb, skip the AUTH chunk and process it
		 * later, after a stcb is found (to validate that the
		 * lookup was valid).
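		 *
		 * The skipped chunk's offset and length are remembered in
		 * auth_offset and auth_len; once the ASCONF lookup below
		 * (or later cookie processing) gives us a tcb, the chunk
		 * is pulled back out with sctp_m_getptr() and run through
		 * sctp_handle_auth() before its contents are trusted.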
3499 */ 3500 if ((ch->chunk_type == SCTP_AUTHENTICATION) && 3501 (stcb == NULL) && !sctp_auth_disable) { 3502 /* save this chunk for later processing */ 3503 auth_skipped = 1; 3504 auth_offset = *offset; 3505 auth_len = ntohs(ch->chunk_length); 3506 3507 /* (temporarily) move past this chunk */ 3508 *offset += SCTP_SIZE32(auth_len); 3509 if (*offset >= length) { 3510 /* no more data left in the mbuf chain */ 3511 *offset = length; 3512 return (NULL); 3513 } 3514 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 3515 sizeof(struct sctp_chunkhdr), chunk_buf); 3516 } 3517 if (ch == NULL) { 3518 /* Help */ 3519 *offset = length; 3520 return (NULL); 3521 } 3522 if (ch->chunk_type == SCTP_COOKIE_ECHO) { 3523 goto process_control_chunks; 3524 } 3525 /* 3526 * first check if it's an ASCONF with an unknown src addr we 3527 * need to look inside to find the association 3528 */ 3529 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) { 3530 /* inp's refcount may be reduced */ 3531 SCTP_INP_INCR_REF(inp); 3532 3533 stcb = sctp_findassociation_ep_asconf(m, iphlen, 3534 *offset, sh, &inp, netp); 3535 if (stcb == NULL) { 3536 /* 3537 * reduce inp's refcount if not reduced in 3538 * sctp_findassociation_ep_asconf(). 3539 */ 3540 SCTP_INP_DECR_REF(inp); 3541 } 3542 /* now go back and verify any auth chunk to be sure */ 3543 if (auth_skipped && (stcb != NULL)) { 3544 struct sctp_auth_chunk *auth; 3545 3546 auth = (struct sctp_auth_chunk *) 3547 sctp_m_getptr(m, auth_offset, 3548 auth_len, chunk_buf); 3549 got_auth = 1; 3550 auth_skipped = 0; 3551 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, 3552 auth_offset)) { 3553 /* auth HMAC failed so dump it */ 3554 *offset = length; 3555 return (NULL); 3556 } else { 3557 /* remaining chunks are HMAC checked */ 3558 stcb->asoc.authenticated = 1; 3559 } 3560 } 3561 } 3562 if (stcb == NULL) { 3563 /* no association, so it's out of the blue... */ 3564 sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL, 3565 vrf_id, table_id); 3566 *offset = length; 3567 if (locked_tcb) { 3568 SCTP_TCB_UNLOCK(locked_tcb); 3569 } 3570 return (NULL); 3571 } 3572 asoc = &stcb->asoc; 3573 /* ABORT and SHUTDOWN can use either v_tag... */ 3574 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) || 3575 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) || 3576 (ch->chunk_type == SCTP_PACKET_DROPPED)) { 3577 if ((vtag_in == asoc->my_vtag) || 3578 ((ch->chunk_flags & SCTP_HAD_NO_TCB) && 3579 (vtag_in == asoc->peer_vtag))) { 3580 /* this is valid */ 3581 } else { 3582 /* drop this packet... */ 3583 SCTP_STAT_INCR(sctps_badvtag); 3584 if (locked_tcb) { 3585 SCTP_TCB_UNLOCK(locked_tcb); 3586 } 3587 return (NULL); 3588 } 3589 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 3590 if (vtag_in != asoc->my_vtag) { 3591 /* 3592 * this could be a stale SHUTDOWN-ACK or the 3593 * peer never got the SHUTDOWN-COMPLETE and 3594 * is still hung; we have started a new asoc 3595 * but it won't complete until the shutdown 3596 * is completed 3597 */ 3598 if (locked_tcb) { 3599 SCTP_TCB_UNLOCK(locked_tcb); 3600 } 3601 sctp_handle_ootb(m, iphlen, *offset, sh, inp, 3602 NULL, vrf_id, table_id); 3603 return (NULL); 3604 } 3605 } else { 3606 /* for all other chunks, vtag must match */ 3607 if (vtag_in != asoc->my_vtag) { 3608 /* invalid vtag... 
*/ 3609 SCTPDBG(SCTP_DEBUG_INPUT3, 3610 "invalid vtag: %xh, expect %xh\n", 3611 vtag_in, asoc->my_vtag); 3612 SCTP_STAT_INCR(sctps_badvtag); 3613 if (locked_tcb) { 3614 SCTP_TCB_UNLOCK(locked_tcb); 3615 } 3616 *offset = length; 3617 return (NULL); 3618 } 3619 } 3620 } /* end if !SCTP_COOKIE_ECHO */ 3621 /* 3622 * process all control chunks... 3623 */ 3624 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) || 3625 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) && 3626 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) { 3627 /* implied cookie-ack.. we must have lost the ack */ 3628 stcb->asoc.overall_error_count = 0; 3629 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, 3630 *netp); 3631 } 3632 process_control_chunks: 3633 while (IS_SCTP_CONTROL(ch)) { 3634 /* validate chunk length */ 3635 chk_length = ntohs(ch->chunk_length); 3636 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n", 3637 ch->chunk_type, chk_length); 3638 if ((size_t)chk_length < sizeof(*ch) || 3639 (*offset + chk_length) > length) { 3640 *offset = length; 3641 if (locked_tcb) { 3642 SCTP_TCB_UNLOCK(locked_tcb); 3643 } 3644 return (NULL); 3645 } 3646 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks); 3647 /* 3648 * INIT-ACK only gets the init ack "header" portion only 3649 * because we don't have to process the peer's COOKIE. All 3650 * others get a complete chunk. 3651 */ 3652 if ((ch->chunk_type == SCTP_INITIATION_ACK) || 3653 (ch->chunk_type == SCTP_INITIATION)) { 3654 /* get an init-ack chunk */ 3655 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 3656 sizeof(struct sctp_init_ack_chunk), chunk_buf); 3657 if (ch == NULL) { 3658 *offset = length; 3659 if (locked_tcb) { 3660 SCTP_TCB_UNLOCK(locked_tcb); 3661 } 3662 return (NULL); 3663 } 3664 } else if (ch->chunk_type == SCTP_COOKIE_ECHO) { 3665 if (chk_length > sizeof(chunk_buf)) { 3666 /* 3667 * use just the size of the chunk buffer so 3668 * the front part of our cookie is intact. 3669 * The rest of cookie processing should use 3670 * the sctp_m_getptr() function to access 3671 * the other parts. 3672 */ 3673 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 3674 (sizeof(chunk_buf) - 4), 3675 chunk_buf); 3676 if (ch == NULL) { 3677 *offset = length; 3678 if (locked_tcb) { 3679 SCTP_TCB_UNLOCK(locked_tcb); 3680 } 3681 return (NULL); 3682 } 3683 } else { 3684 /* We can fit it all */ 3685 goto all_fits; 3686 } 3687 } else { 3688 /* get a complete chunk... 
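			 * If the chunk does not fit in the on-stack
			 * chunk_buf, we do not try to process it at all: an
			 * operation error with cause SCTP_CAUSE_OUT_OF_RESC
			 * is queued (when we have a tcb) and the rest of
			 * the packet is dropped.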
*/ 3689 if ((size_t)chk_length > sizeof(chunk_buf)) { 3690 struct mbuf *oper; 3691 struct sctp_paramhdr *phdr; 3692 3693 oper = NULL; 3694 if (stcb) { 3695 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 3696 0, M_DONTWAIT, 1, MT_DATA); 3697 3698 if (oper) { 3699 /* pre-reserve some space */ 3700 SCTP_BUF_RESV_UF(oper, sizeof(struct sctp_chunkhdr)); 3701 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr); 3702 phdr = mtod(oper, struct sctp_paramhdr *); 3703 phdr->param_type = htons(SCTP_CAUSE_OUT_OF_RESC); 3704 phdr->param_length = htons(sizeof(struct sctp_paramhdr)); 3705 sctp_queue_op_err(stcb, oper); 3706 } 3707 } 3708 if (locked_tcb) { 3709 SCTP_TCB_UNLOCK(locked_tcb); 3710 } 3711 return (NULL); 3712 } 3713 all_fits: 3714 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 3715 chk_length, chunk_buf); 3716 if (ch == NULL) { 3717 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n"); 3718 *offset = length; 3719 if (locked_tcb) { 3720 SCTP_TCB_UNLOCK(locked_tcb); 3721 } 3722 return (NULL); 3723 } 3724 } 3725 num_chunks++; 3726 /* Save off the last place we got a control from */ 3727 if (stcb != NULL) { 3728 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) { 3729 /* 3730 * allow last_control to be NULL if 3731 * ASCONF... ASCONF processing will find the 3732 * right net later 3733 */ 3734 if ((netp != NULL) && (*netp != NULL)) 3735 stcb->asoc.last_control_chunk_from = *netp; 3736 } 3737 } 3738 #ifdef SCTP_AUDITING_ENABLED 3739 sctp_audit_log(0xB0, ch->chunk_type); 3740 #endif 3741 3742 /* check to see if this chunk required auth, but isn't */ 3743 if ((stcb != NULL) && !sctp_auth_disable && 3744 sctp_auth_is_required_chunk(ch->chunk_type, 3745 stcb->asoc.local_auth_chunks) && 3746 !stcb->asoc.authenticated) { 3747 /* "silently" ignore */ 3748 SCTP_STAT_INCR(sctps_recvauthmissing); 3749 goto next_chunk; 3750 } 3751 switch (ch->chunk_type) { 3752 case SCTP_INITIATION: 3753 /* must be first and only chunk */ 3754 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n"); 3755 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3756 /* We are not interested anymore? 
*/ 3757 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 3758 /* 3759 * collision case where we are 3760 * sending to them too 3761 */ 3762 ; 3763 } else { 3764 if (locked_tcb) { 3765 SCTP_TCB_UNLOCK(locked_tcb); 3766 } 3767 *offset = length; 3768 return (NULL); 3769 } 3770 } 3771 if ((num_chunks > 1) || 3772 (sctp_strict_init && (length - *offset > SCTP_SIZE32(chk_length)))) { 3773 *offset = length; 3774 if (locked_tcb) { 3775 SCTP_TCB_UNLOCK(locked_tcb); 3776 } 3777 return (NULL); 3778 } 3779 if ((stcb != NULL) && 3780 (SCTP_GET_STATE(&stcb->asoc) == 3781 SCTP_STATE_SHUTDOWN_ACK_SENT)) { 3782 sctp_send_shutdown_ack(stcb, 3783 stcb->asoc.primary_destination); 3784 *offset = length; 3785 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC); 3786 if (locked_tcb) { 3787 SCTP_TCB_UNLOCK(locked_tcb); 3788 } 3789 return (NULL); 3790 } 3791 if (netp) { 3792 sctp_handle_init(m, iphlen, *offset, sh, 3793 (struct sctp_init_chunk *)ch, inp, 3794 stcb, *netp, &abort_no_unlock, vrf_id, table_id); 3795 } 3796 if (abort_no_unlock) 3797 return (NULL); 3798 3799 *offset = length; 3800 if (locked_tcb) { 3801 SCTP_TCB_UNLOCK(locked_tcb); 3802 } 3803 return (NULL); 3804 break; 3805 case SCTP_INITIATION_ACK: 3806 /* must be first and only chunk */ 3807 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n"); 3808 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3809 /* We are not interested anymore */ 3810 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 3811 ; 3812 } else { 3813 if (locked_tcb) { 3814 SCTP_TCB_UNLOCK(locked_tcb); 3815 } 3816 *offset = length; 3817 if (stcb) { 3818 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26); 3819 } 3820 return (NULL); 3821 } 3822 } 3823 if ((num_chunks > 1) || 3824 (sctp_strict_init && (length - *offset > SCTP_SIZE32(chk_length)))) { 3825 *offset = length; 3826 if (locked_tcb) { 3827 SCTP_TCB_UNLOCK(locked_tcb); 3828 } 3829 return (NULL); 3830 } 3831 if ((netp) && (*netp)) { 3832 ret = sctp_handle_init_ack(m, iphlen, *offset, sh, 3833 (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id, table_id); 3834 } else { 3835 ret = -1; 3836 } 3837 /* 3838 * Special case, I must call the output routine to 3839 * get the cookie echoed 3840 */ 3841 if (abort_no_unlock) 3842 return (NULL); 3843 3844 if ((stcb) && ret == 0) 3845 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC); 3846 *offset = length; 3847 if (locked_tcb) { 3848 SCTP_TCB_UNLOCK(locked_tcb); 3849 } 3850 return (NULL); 3851 break; 3852 case SCTP_SELECTIVE_ACK: 3853 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n"); 3854 SCTP_STAT_INCR(sctps_recvsacks); 3855 { 3856 struct sctp_sack_chunk *sack; 3857 int abort_now = 0; 3858 uint32_t a_rwnd, cum_ack; 3859 uint16_t num_seg; 3860 int nonce_sum_flag; 3861 3862 if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) { 3863 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n"); 3864 *offset = length; 3865 if (locked_tcb) { 3866 SCTP_TCB_UNLOCK(locked_tcb); 3867 } 3868 return (NULL); 3869 } 3870 sack = (struct sctp_sack_chunk *)ch; 3871 nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM; 3872 cum_ack = ntohl(sack->sack.cum_tsn_ack); 3873 num_seg = ntohs(sack->sack.num_gap_ack_blks); 3874 a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd); 3875 stcb->asoc.seen_a_sack_this_pkt = 1; 3876 if ((stcb->asoc.pr_sctp_cnt == 0) && 3877 (num_seg == 0) && 3878 ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) || 3879 (cum_ack == stcb->asoc.last_acked_seq)) && 3880 
				    (stcb->asoc.saw_sack_with_frags == 0) &&
				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
				    ) {
					/*
					 * We have a SIMPLE sack, with no
					 * gap-ack segments and data on the
					 * sent queue to be acked.  Use the
					 * faster-path sack processing.  We
					 * also allow window-update sacks
					 * with no missing segments to go
					 * this way too.
					 */
					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
					    &abort_now);
				} else {
					if (netp && *netp)
						sctp_handle_sack(sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
				}
				if (abort_now) {
					/* ABORT signal from sack processing */
					*offset = length;
					return (NULL);
				}
			}
			break;
		case SCTP_HEARTBEAT_REQUEST:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
			if ((stcb) && netp && *netp) {
				SCTP_STAT_INCR(sctps_recvheartbeat);
				sctp_send_heartbeat_ack(stcb, m, *offset,
				    chk_length, *netp);

				/* He's alive so give him credit */
				stcb->asoc.overall_error_count = 0;
			}
			break;
		case SCTP_HEARTBEAT_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
				/* It's not ours */
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			/* He's alive so give him credit */
			stcb->asoc.overall_error_count = 0;
			SCTP_STAT_INCR(sctps_recvheartbeatack);
			if (netp && *netp)
				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
				    stcb, *netp);
			break;
		case SCTP_ABORT_ASSOCIATION:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT\n");
			if ((stcb) && netp && *netp)
				sctp_handle_abort((struct sctp_abort_chunk *)ch,
				    stcb, *netp);
			*offset = length;
			return (NULL);
			break;
		case SCTP_SHUTDOWN:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN\n");
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);

			}
			if (netp && *netp) {
				int abort_flag = 0;

				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
				    stcb, *netp, &abort_flag);
				if (abort_flag) {
					*offset = length;
					return (NULL);
				}
			}
			break;
		case SCTP_SHUTDOWN_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK\n");
			if ((stcb) && (netp) && (*netp))
				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
			*offset = length;
			return (NULL);
			break;

		case SCTP_OPERATION_ERROR:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {

				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_COOKIE_ECHO:
			SCTPDBG(SCTP_DEBUG_INPUT3,
			    "SCTP_COOKIE-ECHO stcb is %p\n", stcb);
			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
				;
			} else {
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* We are not interested anymore */
					*offset = length;
					return (NULL);
				}
			}
			/*
			 * First, are we accepting? We do this check again
			 * here since it is possible that a previous
			 * endpoint WAS listening, responded to an INIT-ACK
			 * and then closed.  We then opened and bound... and
			 * are now no longer listening.
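			 *
			 * A so_qlimit of 0 means the socket is not
			 * listening, so unless this is an existing
			 * one-to-one (TCP pool) association seeing a
			 * retransmitted COOKIE-ECHO, the chunk is answered
			 * with an ABORT below.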
3996 */ 3997 if (inp->sctp_socket->so_qlimit == 0) { 3998 if ((stcb) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3999 /* 4000 * special case, is this a retran'd 4001 * COOKIE-ECHO or a restarting assoc 4002 * that is a peeled off or 4003 * one-to-one style socket. 4004 */ 4005 goto process_cookie_anyway; 4006 } 4007 sctp_abort_association(inp, stcb, m, iphlen, 4008 sh, NULL, vrf_id, 4009 table_id); 4010 *offset = length; 4011 return (NULL); 4012 } else if (inp->sctp_socket->so_qlimit) { 4013 /* we are accepting so check limits like TCP */ 4014 if (inp->sctp_socket->so_qlen > 4015 inp->sctp_socket->so_qlimit) { 4016 /* no space */ 4017 struct mbuf *oper; 4018 struct sctp_paramhdr *phdr; 4019 4020 if (sctp_abort_if_one_2_one_hits_limit) { 4021 oper = NULL; 4022 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 4023 0, M_DONTWAIT, 1, MT_DATA); 4024 if (oper) { 4025 SCTP_BUF_LEN(oper) = 4026 sizeof(struct sctp_paramhdr); 4027 phdr = mtod(oper, 4028 struct sctp_paramhdr *); 4029 phdr->param_type = 4030 htons(SCTP_CAUSE_OUT_OF_RESC); 4031 phdr->param_length = 4032 htons(sizeof(struct sctp_paramhdr)); 4033 } 4034 sctp_abort_association(inp, stcb, m, 4035 iphlen, sh, oper, vrf_id, table_id); 4036 } 4037 *offset = length; 4038 return (NULL); 4039 } 4040 } 4041 process_cookie_anyway: 4042 { 4043 struct mbuf *ret_buf; 4044 struct sctp_inpcb *linp; 4045 4046 if (stcb) { 4047 linp = NULL; 4048 } else { 4049 linp = inp; 4050 } 4051 4052 if (linp) { 4053 SCTP_ASOC_CREATE_LOCK(linp); 4054 } 4055 if (netp) { 4056 ret_buf = 4057 sctp_handle_cookie_echo(m, iphlen, 4058 *offset, sh, 4059 (struct sctp_cookie_echo_chunk *)ch, 4060 &inp, &stcb, netp, 4061 auth_skipped, 4062 auth_offset, 4063 auth_len, 4064 &locked_tcb, 4065 vrf_id, 4066 table_id); 4067 } else { 4068 ret_buf = NULL; 4069 } 4070 if (linp) { 4071 SCTP_ASOC_CREATE_UNLOCK(linp); 4072 } 4073 if (ret_buf == NULL) { 4074 if (locked_tcb) { 4075 SCTP_TCB_UNLOCK(locked_tcb); 4076 } 4077 SCTPDBG(SCTP_DEBUG_INPUT3, 4078 "GAK, null buffer\n"); 4079 auth_skipped = 0; 4080 *offset = length; 4081 return (NULL); 4082 } 4083 /* if AUTH skipped, see if it verified... 
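				 * sctp_handle_cookie_echo() was handed
				 * auth_skipped/auth_offset/auth_len above
				 * and is expected to have validated the
				 * saved AUTH chunk while processing the
				 * cookie; all that is left here is to note
				 * that it has been consumed.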
*/ 4084 if (auth_skipped) { 4085 got_auth = 1; 4086 auth_skipped = 0; 4087 } 4088 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) { 4089 /* 4090 * Restart the timer if we have 4091 * pending data 4092 */ 4093 struct sctp_tmit_chunk *chk; 4094 4095 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 4096 if (chk) { 4097 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4098 stcb->sctp_ep, stcb, 4099 chk->whoTo); 4100 } 4101 } 4102 } 4103 break; 4104 case SCTP_COOKIE_ACK: 4105 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK\n"); 4106 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) { 4107 if (locked_tcb) { 4108 SCTP_TCB_UNLOCK(locked_tcb); 4109 } 4110 return (NULL); 4111 } 4112 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4113 /* We are not interested anymore */ 4114 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4115 ; 4116 } else if (stcb) { 4117 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27); 4118 *offset = length; 4119 return (NULL); 4120 } 4121 } 4122 /* He's alive so give him credit */ 4123 if ((stcb) && netp && *netp) { 4124 stcb->asoc.overall_error_count = 0; 4125 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp); 4126 } 4127 break; 4128 case SCTP_ECN_ECHO: 4129 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n"); 4130 /* He's alive so give him credit */ 4131 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) { 4132 /* Its not ours */ 4133 if (locked_tcb) { 4134 SCTP_TCB_UNLOCK(locked_tcb); 4135 } 4136 *offset = length; 4137 return (NULL); 4138 } 4139 if (stcb) { 4140 stcb->asoc.overall_error_count = 0; 4141 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, 4142 stcb); 4143 } 4144 break; 4145 case SCTP_ECN_CWR: 4146 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n"); 4147 /* He's alive so give him credit */ 4148 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) { 4149 /* Its not ours */ 4150 if (locked_tcb) { 4151 SCTP_TCB_UNLOCK(locked_tcb); 4152 } 4153 *offset = length; 4154 return (NULL); 4155 } 4156 if (stcb) { 4157 stcb->asoc.overall_error_count = 0; 4158 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb); 4159 } 4160 break; 4161 case SCTP_SHUTDOWN_COMPLETE: 4162 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE\n"); 4163 /* must be first and only chunk */ 4164 if ((num_chunks > 1) || 4165 (length - *offset > SCTP_SIZE32(chk_length))) { 4166 *offset = length; 4167 if (locked_tcb) { 4168 SCTP_TCB_UNLOCK(locked_tcb); 4169 } 4170 return (NULL); 4171 } 4172 if ((stcb) && netp && *netp) { 4173 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch, 4174 stcb, *netp); 4175 } 4176 *offset = length; 4177 return (NULL); 4178 break; 4179 case SCTP_ASCONF: 4180 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n"); 4181 /* He's alive so give him credit */ 4182 if (stcb) { 4183 stcb->asoc.overall_error_count = 0; 4184 sctp_handle_asconf(m, *offset, 4185 (struct sctp_asconf_chunk *)ch, stcb); 4186 } 4187 break; 4188 case SCTP_ASCONF_ACK: 4189 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n"); 4190 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) { 4191 /* Its not ours */ 4192 if (locked_tcb) { 4193 SCTP_TCB_UNLOCK(locked_tcb); 4194 } 4195 *offset = length; 4196 return (NULL); 4197 } 4198 if ((stcb) && netp && *netp) { 4199 /* He's alive so give him credit */ 4200 stcb->asoc.overall_error_count = 0; 4201 sctp_handle_asconf_ack(m, *offset, 4202 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp); 4203 } 4204 break; 4205 case SCTP_FORWARD_CUM_TSN: 4206 SCTPDBG(SCTP_DEBUG_INPUT3, 
"SCTP_FWD-TSN\n"); 4207 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) { 4208 /* Its not ours */ 4209 if (locked_tcb) { 4210 SCTP_TCB_UNLOCK(locked_tcb); 4211 } 4212 *offset = length; 4213 return (NULL); 4214 } 4215 /* He's alive so give him credit */ 4216 if (stcb) { 4217 int abort_flag = 0; 4218 4219 stcb->asoc.overall_error_count = 0; 4220 *fwd_tsn_seen = 1; 4221 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4222 /* We are not interested anymore */ 4223 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28); 4224 *offset = length; 4225 return (NULL); 4226 } 4227 sctp_handle_forward_tsn(stcb, 4228 (struct sctp_forward_tsn_chunk *)ch, &abort_flag); 4229 if (abort_flag) { 4230 *offset = length; 4231 return (NULL); 4232 } else { 4233 stcb->asoc.overall_error_count = 0; 4234 } 4235 4236 } 4237 break; 4238 case SCTP_STREAM_RESET: 4239 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n"); 4240 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4241 chk_length, chunk_buf); 4242 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) { 4243 /* Its not ours */ 4244 if (locked_tcb) { 4245 SCTP_TCB_UNLOCK(locked_tcb); 4246 } 4247 *offset = length; 4248 return (NULL); 4249 } 4250 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4251 /* We are not interested anymore */ 4252 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29); 4253 *offset = length; 4254 return (NULL); 4255 } 4256 if (stcb->asoc.peer_supports_strreset == 0) { 4257 /* 4258 * hmm, peer should have announced this, but 4259 * we will turn it on since he is sending us 4260 * a stream reset. 4261 */ 4262 stcb->asoc.peer_supports_strreset = 1; 4263 } 4264 if (sctp_handle_stream_reset(stcb, (struct sctp_stream_reset_out_req *)ch)) { 4265 /* stop processing */ 4266 *offset = length; 4267 return (NULL); 4268 } 4269 break; 4270 case SCTP_PACKET_DROPPED: 4271 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n"); 4272 /* re-get it all please */ 4273 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) { 4274 /* Its not ours */ 4275 if (locked_tcb) { 4276 SCTP_TCB_UNLOCK(locked_tcb); 4277 } 4278 *offset = length; 4279 return (NULL); 4280 } 4281 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4282 chk_length, chunk_buf); 4283 4284 if (ch && (stcb) && netp && (*netp)) { 4285 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch, 4286 stcb, *netp); 4287 } 4288 break; 4289 4290 case SCTP_AUTHENTICATION: 4291 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n"); 4292 if (sctp_auth_disable) 4293 goto unknown_chunk; 4294 4295 if (stcb == NULL) { 4296 /* save the first AUTH for later processing */ 4297 if (auth_skipped == 0) { 4298 auth_offset = *offset; 4299 auth_len = chk_length; 4300 auth_skipped = 1; 4301 } 4302 /* skip this chunk (temporarily) */ 4303 goto next_chunk; 4304 } 4305 if ((chk_length < (sizeof(struct sctp_auth_chunk))) || 4306 (chk_length > (sizeof(struct sctp_auth_chunk) + 4307 SCTP_AUTH_DIGEST_LEN_MAX))) { 4308 /* Its not ours */ 4309 if (locked_tcb) { 4310 SCTP_TCB_UNLOCK(locked_tcb); 4311 } 4312 *offset = length; 4313 return (NULL); 4314 } 4315 if (got_auth == 1) { 4316 /* skip this chunk... 
it's already auth'd */
				goto next_chunk;
			}
			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
			    chk_length, chunk_buf);
			got_auth = 1;
			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
			    m, *offset)) {
				/* auth HMAC failed so dump the packet */
				*offset = length;
				return (stcb);
			} else {
				/* remaining chunks are HMAC checked */
				stcb->asoc.authenticated = 1;
			}
			break;

		default:
	unknown_chunk:
			/* it's an unknown chunk! */
			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
				struct mbuf *mm;
				struct sctp_paramhdr *phd;

				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (mm) {
					phd = mtod(mm, struct sctp_paramhdr *);
					/*
					 * We cheat and use the param type
					 * since we did not bother to define
					 * an error cause struct.  They are
					 * the same basic format with
					 * different names.
					 */
					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
					phd->param_length = htons(chk_length + sizeof(*phd));
					SCTP_BUF_LEN(mm) = sizeof(*phd);
					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
					    M_DONTWAIT);
					if (SCTP_BUF_NEXT(mm)) {
						sctp_queue_op_err(stcb, mm);
					} else {
						sctp_m_freem(mm);
					}
				}
			}
			if ((ch->chunk_type & 0x80) == 0) {
				/* discard this packet */
				*offset = length;
				return (stcb);
			}	/* else skip this bad chunk and continue... */
			break;
		}		/* switch (ch->chunk_type) */


next_chunk:
		/* get the next chunk */
		*offset += SCTP_SIZE32(chk_length);
		if (*offset >= length) {
			/* no more data left in the mbuf chain */
			break;
		}
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_chunkhdr), chunk_buf);
		if (ch == NULL) {
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			*offset = length;
			return (NULL);
		}
	}			/* while */
	return (stcb);
}


/*
 * Process the ECN bits.  We have something set, so we must look to see
 * whether it is ECN(0), ECN(1), or CE.
 */
static __inline void
sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		;
	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
		/*
		 * we only add to the nonce sum for ECT1; ECT0 does not
		 * change the NS bit (we have not yet found a way to send
		 * it).
		 */

		/* ECN Nonce stuff */
		stcb->asoc.receiver_nonce_sum++;
		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;

		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^31 and then having it be incorrect.
4428 */ 4429 if (compare_with_wrap(stcb->asoc.cumulative_tsn, 4430 stcb->asoc.last_echo_tsn, MAX_TSN)) { 4431 stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn; 4432 } 4433 } 4434 } 4435 4436 static __inline void 4437 sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net, 4438 uint32_t high_tsn, uint8_t ecn_bits) 4439 { 4440 if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) { 4441 /* 4442 * we possibly must notify the sender that a congestion 4443 * window reduction is in order. We do this by adding a ECNE 4444 * chunk to the output chunk queue. The incoming CWR will 4445 * remove this chunk. 4446 */ 4447 if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn, 4448 MAX_TSN)) { 4449 /* Yep, we need to add a ECNE */ 4450 sctp_send_ecn_echo(stcb, net, high_tsn); 4451 stcb->asoc.last_echo_tsn = high_tsn; 4452 } 4453 } 4454 } 4455 4456 /* 4457 * common input chunk processing (v4 and v6) 4458 */ 4459 void 4460 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, 4461 int length, struct sctphdr *sh, struct sctp_chunkhdr *ch, 4462 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, 4463 uint8_t ecn_bits, uint32_t vrf_id, uint32_t table_id) 4464 { 4465 /* 4466 * Control chunk processing 4467 */ 4468 uint32_t high_tsn; 4469 int fwd_tsn_seen = 0, data_processed = 0; 4470 struct mbuf *m = *mm; 4471 int abort_flag = 0; 4472 int un_sent; 4473 4474 SCTP_STAT_INCR(sctps_recvdatagrams); 4475 #ifdef SCTP_AUDITING_ENABLED 4476 sctp_audit_log(0xE0, 1); 4477 sctp_auditing(0, inp, stcb, net); 4478 #endif 4479 4480 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d\n", 4481 m, iphlen, offset); 4482 4483 if (stcb) { 4484 /* always clear this before beginning a packet */ 4485 stcb->asoc.authenticated = 0; 4486 stcb->asoc.seen_a_sack_this_pkt = 0; 4487 } 4488 if (IS_SCTP_CONTROL(ch)) { 4489 /* process the control portion of the SCTP packet */ 4490 /* sa_ignore NO_NULL_CHK */ 4491 stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch, 4492 inp, stcb, &net, &fwd_tsn_seen, vrf_id, table_id); 4493 if (stcb) { 4494 /* 4495 * This covers us if the cookie-echo was there and 4496 * it changes our INP. 4497 */ 4498 inp = stcb->sctp_ep; 4499 } 4500 } else { 4501 /* 4502 * no control chunks, so pre-process DATA chunks (these 4503 * checks are taken care of by control processing) 4504 */ 4505 4506 /* 4507 * if DATA only packet, and auth is required, then punt... 4508 * can't have authenticated without any AUTH (control) 4509 * chunks 4510 */ 4511 if ((stcb != NULL) && !sctp_auth_disable && 4512 sctp_auth_is_required_chunk(SCTP_DATA, 4513 stcb->asoc.local_auth_chunks)) { 4514 /* "silently" ignore */ 4515 SCTP_STAT_INCR(sctps_recvauthmissing); 4516 SCTP_TCB_UNLOCK(stcb); 4517 return; 4518 } 4519 if (stcb == NULL) { 4520 /* out of the blue DATA chunk */ 4521 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 4522 vrf_id, table_id); 4523 return; 4524 } 4525 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) { 4526 /* v_tag mismatch! */ 4527 SCTP_STAT_INCR(sctps_badvtag); 4528 SCTP_TCB_UNLOCK(stcb); 4529 return; 4530 } 4531 } 4532 4533 if (stcb == NULL) { 4534 /* 4535 * no valid TCB for this packet, or we found it's a bad 4536 * packet while processing control, or we're done with this 4537 * packet (done or skip rest of data), so we drop it... 4538 */ 4539 return; 4540 } 4541 /* 4542 * DATA chunk processing 4543 */ 4544 /* plow through the data chunks while length > offset */ 4545 4546 /* 4547 * Rest should be DATA only. 
Check authentication state if AUTH for 4548 * DATA is required. 4549 */ 4550 if ((length > offset) && (stcb != NULL) && !sctp_auth_disable && 4551 sctp_auth_is_required_chunk(SCTP_DATA, 4552 stcb->asoc.local_auth_chunks) && 4553 !stcb->asoc.authenticated) { 4554 /* "silently" ignore */ 4555 SCTP_STAT_INCR(sctps_recvauthmissing); 4556 SCTPDBG(SCTP_DEBUG_AUTH1, 4557 "Data chunk requires AUTH, skipped\n"); 4558 goto trigger_send; 4559 } 4560 if (length > offset) { 4561 int retval; 4562 4563 /* 4564 * First check to make sure our state is correct. We would 4565 * not get here unless we really did have a tag, so we don't 4566 * abort if this happens, just dump the chunk silently. 4567 */ 4568 switch (SCTP_GET_STATE(&stcb->asoc)) { 4569 case SCTP_STATE_COOKIE_ECHOED: 4570 /* 4571 * we consider data with valid tags in this state 4572 * shows us the cookie-ack was lost. Imply it was 4573 * there. 4574 */ 4575 stcb->asoc.overall_error_count = 0; 4576 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net); 4577 break; 4578 case SCTP_STATE_COOKIE_WAIT: 4579 /* 4580 * We consider OOTB any data sent during asoc setup. 4581 */ 4582 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 4583 vrf_id, table_id); 4584 SCTP_TCB_UNLOCK(stcb); 4585 return; 4586 break; 4587 case SCTP_STATE_EMPTY: /* should not happen */ 4588 case SCTP_STATE_INUSE: /* should not happen */ 4589 case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */ 4590 case SCTP_STATE_SHUTDOWN_ACK_SENT: 4591 default: 4592 SCTP_TCB_UNLOCK(stcb); 4593 return; 4594 break; 4595 case SCTP_STATE_OPEN: 4596 case SCTP_STATE_SHUTDOWN_SENT: 4597 break; 4598 } 4599 /* take care of ECN, part 1. */ 4600 if (stcb->asoc.ecn_allowed && 4601 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) { 4602 sctp_process_ecn_marked_a(stcb, net, ecn_bits); 4603 } 4604 /* plow through the data chunks while length > offset */ 4605 retval = sctp_process_data(mm, iphlen, &offset, length, sh, 4606 inp, stcb, net, &high_tsn); 4607 if (retval == 2) { 4608 /* 4609 * The association aborted, NO UNLOCK needed since 4610 * the association is destroyed. 4611 */ 4612 return; 4613 } 4614 data_processed = 1; 4615 if (retval == 0) { 4616 /* take care of ecn part 2. */ 4617 if (stcb->asoc.ecn_allowed && 4618 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) { 4619 sctp_process_ecn_marked_b(stcb, net, high_tsn, 4620 ecn_bits); 4621 } 4622 } 4623 /* 4624 * Anything important needs to have been m_copy'ed in 4625 * process_data 4626 */ 4627 } 4628 if ((data_processed == 0) && (fwd_tsn_seen)) { 4629 int was_a_gap = 0; 4630 4631 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 4632 stcb->asoc.cumulative_tsn, MAX_TSN)) { 4633 /* there was a gap before this data was processed */ 4634 was_a_gap = 1; 4635 } 4636 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 4637 if (abort_flag) { 4638 /* Again, we aborted so NO UNLOCK needed */ 4639 return; 4640 } 4641 } 4642 /* trigger send of any chunks in queue... 
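	 * We call sctp_chunk_output() if anything is waiting on the control
	 * queue, or if there is unsent data and either the peer's rwnd is
	 * open or the rwnd is closed with nothing in flight (so that a
	 * window probe can go out).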
*/ 4643 trigger_send: 4644 #ifdef SCTP_AUDITING_ENABLED 4645 sctp_audit_log(0xE0, 2); 4646 sctp_auditing(1, inp, stcb, net); 4647 #endif 4648 SCTPDBG(SCTP_DEBUG_INPUT1, 4649 "Check for chunk output prw:%d tqe:%d tf=%d\n", 4650 stcb->asoc.peers_rwnd, 4651 TAILQ_EMPTY(&stcb->asoc.control_send_queue), 4652 stcb->asoc.total_flight); 4653 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 4654 4655 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) || 4656 ((un_sent) && 4657 (stcb->asoc.peers_rwnd > 0 || 4658 (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) { 4659 SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n"); 4660 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC); 4661 SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n"); 4662 } 4663 #ifdef SCTP_AUDITING_ENABLED 4664 sctp_audit_log(0xE0, 3); 4665 sctp_auditing(2, inp, stcb, net); 4666 #endif 4667 SCTP_TCB_UNLOCK(stcb); 4668 return; 4669 } 4670 4671 4672 4673 void 4674 sctp_input(i_pak, off) 4675 struct mbuf *i_pak; 4676 int off; 4677 4678 { 4679 #ifdef SCTP_MBUF_LOGGING 4680 struct mbuf *mat; 4681 4682 #endif 4683 struct mbuf *m; 4684 int iphlen; 4685 uint32_t vrf_id = 0, table_id = 0; 4686 uint8_t ecn_bits; 4687 struct ip *ip; 4688 struct sctphdr *sh; 4689 struct sctp_inpcb *inp = NULL; 4690 4691 uint32_t check, calc_check; 4692 struct sctp_nets *net; 4693 struct sctp_tcb *stcb = NULL; 4694 struct sctp_chunkhdr *ch; 4695 int refcount_up = 0; 4696 int length, mlen, offset; 4697 4698 4699 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) { 4700 SCTP_RELEASE_PKT(i_pak); 4701 return; 4702 } 4703 if (SCTP_GET_PKT_TABLEID(i_pak, table_id)) { 4704 SCTP_RELEASE_PKT(i_pak); 4705 return; 4706 } 4707 mlen = SCTP_HEADER_LEN(i_pak); 4708 iphlen = off; 4709 m = SCTP_HEADER_TO_CHAIN(i_pak); 4710 net = NULL; 4711 SCTP_STAT_INCR(sctps_recvpackets); 4712 SCTP_STAT_INCR_COUNTER64(sctps_inpackets); 4713 4714 #ifdef SCTP_MBUF_LOGGING 4715 /* Log in any input mbufs */ 4716 mat = m; 4717 while (mat) { 4718 if (SCTP_BUF_IS_EXTENDED(mat)) { 4719 sctp_log_mb(mat, SCTP_MBUF_INPUT); 4720 } 4721 mat = SCTP_BUF_NEXT(mat); 4722 } 4723 #endif 4724 4725 /* 4726 * Get IP, SCTP, and first chunk header together in first mbuf. 4727 */ 4728 ip = mtod(m, struct ip *); 4729 offset = iphlen + sizeof(*sh) + sizeof(*ch); 4730 if (SCTP_BUF_LEN(m) < offset) { 4731 if ((m = m_pullup(m, offset)) == 0) { 4732 SCTP_STAT_INCR(sctps_hdrops); 4733 return; 4734 } 4735 ip = mtod(m, struct ip *); 4736 } 4737 sh = (struct sctphdr *)((caddr_t)ip + iphlen); 4738 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh)); 4739 4740 /* SCTP does not allow broadcasts or multicasts */ 4741 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { 4742 goto bad; 4743 } 4744 if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) { 4745 /* 4746 * We only look at broadcast if its a front state, All 4747 * others we will not have a tcb for anyway. 4748 */ 4749 goto bad; 4750 } 4751 /* validate SCTP checksum */ 4752 if ((sctp_no_csum_on_loopback == 0) || !SCTP_IS_IT_LOOPBACK(m)) { 4753 /* 4754 * we do NOT validate things from the loopback if the sysctl 4755 * is set to 1. 
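	 * Otherwise the received checksum is saved, the checksum field is
	 * zeroed, and the packet is summed again with sctp_calculate_sum();
	 * on a mismatch we report a packet-dropped to the peer when an
	 * association can be found, bump the checksum error counters, and
	 * discard the packet.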
4756 */ 4757 check = sh->checksum; /* save incoming checksum */ 4758 if ((check == 0) && (sctp_no_csum_on_loopback)) { 4759 /* 4760 * special hook for where we got a local address 4761 * somehow routed across a non IFT_LOOP type 4762 * interface 4763 */ 4764 if (ip->ip_src.s_addr == ip->ip_dst.s_addr) 4765 goto sctp_skip_csum_4; 4766 } 4767 sh->checksum = 0; /* prepare for calc */ 4768 calc_check = sctp_calculate_sum(m, &mlen, iphlen); 4769 if (calc_check != check) { 4770 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n", 4771 calc_check, check, m, mlen, iphlen); 4772 4773 stcb = sctp_findassociation_addr(m, iphlen, 4774 offset - sizeof(*ch), 4775 sh, ch, &inp, &net, 4776 vrf_id); 4777 if ((inp) && (stcb)) { 4778 sctp_send_packet_dropped(stcb, net, m, iphlen, 1); 4779 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR); 4780 } else if ((inp != NULL) && (stcb == NULL)) { 4781 refcount_up = 1; 4782 } 4783 SCTP_STAT_INCR(sctps_badsum); 4784 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors); 4785 goto bad; 4786 } 4787 sh->checksum = calc_check; 4788 } 4789 sctp_skip_csum_4: 4790 /* destination port of 0 is illegal, based on RFC2960. */ 4791 if (sh->dest_port == 0) { 4792 SCTP_STAT_INCR(sctps_hdrops); 4793 goto bad; 4794 } 4795 /* validate mbuf chain length with IP payload length */ 4796 if (mlen < (ip->ip_len - iphlen)) { 4797 SCTP_STAT_INCR(sctps_hdrops); 4798 goto bad; 4799 } 4800 /* 4801 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants 4802 * IP/SCTP/first chunk header... 4803 */ 4804 stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch), 4805 sh, ch, &inp, &net, vrf_id); 4806 /* inp's ref-count increased && stcb locked */ 4807 if (inp == NULL) { 4808 struct sctp_init_chunk *init_chk, chunk_buf; 4809 4810 SCTP_STAT_INCR(sctps_noport); 4811 #ifdef ICMP_BANDLIM 4812 /* 4813 * we use the bandwidth limiting to protect against sending 4814 * too many ABORTS all at once. In this case these count the 4815 * same as an ICMP message. 4816 */ 4817 if (badport_bandlim(0) < 0) 4818 goto bad; 4819 #endif /* ICMP_BANDLIM */ 4820 SCTPDBG(SCTP_DEBUG_INPUT1, 4821 "Sending a ABORT from packet entry!\n"); 4822 if (ch->chunk_type == SCTP_INITIATION) { 4823 /* 4824 * we do a trick here to get the INIT tag, dig in 4825 * and get the tag from the INIT and put it in the 4826 * common header. 4827 */ 4828 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4829 iphlen + sizeof(*sh), sizeof(*init_chk), 4830 (uint8_t *) & chunk_buf); 4831 if (init_chk != NULL) 4832 sh->v_tag = init_chk->init.initiate_tag; 4833 } 4834 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 4835 sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, 4836 table_id); 4837 goto bad; 4838 } 4839 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) { 4840 goto bad; 4841 } 4842 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) 4843 sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, 4844 table_id); 4845 goto bad; 4846 } else if (stcb == NULL) { 4847 refcount_up = 1; 4848 } 4849 #ifdef IPSEC 4850 /* 4851 * I very much doubt any of the IPSEC stuff will work but I have no 4852 * idea, so I will leave it in place. 
4853 */ 4854 4855 if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) { 4856 ipsecstat.in_polvio++; 4857 SCTP_STAT_INCR(sctps_hdrops); 4858 goto bad; 4859 } 4860 #endif /* IPSEC */ 4861 4862 /* 4863 * common chunk processing 4864 */ 4865 length = ip->ip_len + iphlen; 4866 offset -= sizeof(struct sctp_chunkhdr); 4867 4868 ecn_bits = ip->ip_tos; 4869 4870 /* sa_ignore NO_NULL_CHK */ 4871 sctp_common_input_processing(&m, iphlen, offset, length, sh, ch, 4872 inp, stcb, net, ecn_bits, vrf_id, 4873 table_id); 4874 /* inp's ref-count reduced && stcb unlocked */ 4875 if (m) { 4876 sctp_m_freem(m); 4877 } 4878 if ((inp) && (refcount_up)) { 4879 /* reduce ref-count */ 4880 SCTP_INP_WLOCK(inp); 4881 SCTP_INP_DECR_REF(inp); 4882 SCTP_INP_WUNLOCK(inp); 4883 } 4884 return; 4885 bad: 4886 if (stcb) { 4887 SCTP_TCB_UNLOCK(stcb); 4888 } 4889 if ((inp) && (refcount_up)) { 4890 /* reduce ref-count */ 4891 SCTP_INP_WLOCK(inp); 4892 SCTP_INP_DECR_REF(inp); 4893 SCTP_INP_WUNLOCK(inp); 4894 } 4895 if (m) { 4896 sctp_m_freem(m); 4897 } 4898 return; 4899 } 4900