1 /*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_crc32.h>
#if defined(INET) || defined(INET6)
#include <netinet/udp.h>
#endif
#include <sys/smp.h>



/*
 * Stop the COOKIE (or INIT) retransmission timer on every net of the
 * association.  Used when INIT/COOKIE collision cases are resolved, so
 * that no stale handshake timer keeps running.
 */
static void
sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	/*
	 * This now not only stops all cookie timers it also stops any INIT
	 * timers as well. This will make sure that the timers are stopped
	 * in all collision cases.
	 */
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
		}
	}
}

/*
 * INIT handler.
 *
 * Validates the incoming INIT chunk (length, initiate_tag != 0,
 * a_rwnd >= SCTP_MIN_RWND, non-zero stream counts, AUTH parameters) and
 * either aborts the association or responds with an INIT-ACK (or a
 * SHUTDOWN-ACK if we are already in SHUTDOWN-ACK-SENT).  When stcb is
 * NULL (no existing association) the inp is read-locked for the duration
 * of the call.  On the abort paths *abort_no_unlock is set when stcb is
 * non-NULL, telling the caller the TCB lock was already released by
 * sctp_abort_association().
 */
/* INIT handler */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_init *init;
	struct mbuf *op_err;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    (void *)stcb);
	if (stcb == NULL) {
		SCTP_INP_RLOCK(inp);
	}
	/* validate length */
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	init = &cp->init;
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    offset + ntohs(cp->ch.chunk_length))) {
		/* auth parameter(s) error... send abort */
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Problem with AUTH parameters");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/*
	 * We are only accepting if we have a socket with positive
	 * so_qlimit.
	 */
	if ((stcb == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_socket == NULL) ||
	    (inp->sctp_socket->so_qlimit == 0))) {
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case? Actually no fix is needed. the lookup
		 * will always find the existing assoc so stcb would not be
		 * NULL. It may be questionable to do this since we COULD
		 * just send back the INIT-ACK and hope that the app did
		 * accept()'s by the time the COOKIE was sent. But there is
		 * a price to pay for COOKIE generation and I don't want to
		 * pay it on the chance that the app will actually do some
		 * accepts(). The App just looses and should NOT be in this
		 * state :-)
		 */
		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
			/* No listener: send an ABORT unless blackholing */
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    "No listener");
			sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
			    mflowtype, mflowid, inp->fibnum,
			    vrf_id, port);
		}
		goto outnow;
	}
	if ((stcb != NULL) &&
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
		sctp_send_shutdown_ack(stcb, NULL);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
	} else {
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
		sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset,
		    src, dst, sh, cp,
		    mflowtype, mflowid,
		    vrf_id, port,
		    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
	}
outnow:
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}

/*
 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
 */

/*
 * Return the number of streams that still have true unsent data queued.
 * As a side effect, performs deferred cleanup of fully-sent messages
 * left at the top of a stream queue.  Takes/releases the TCB send lock.
 */
int
sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	int unsent_data = 0;
	unsigned int i;
	struct sctp_stream_queue_pending *sp;
	struct sctp_association *asoc;

	/*
	 * This function returns the number of streams that have true unsent
	 * data on them. Note that as it looks through it will clean up any
	 * places that have old data that has been sent but left at top of
	 * stream queue.
	 */
	asoc = &stcb->asoc;
	SCTP_TCB_SEND_LOCK(stcb);
	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		/* Check to see if some data queued */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			/* sa_ignore FREED_MEMORY */
			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
			if (sp == NULL) {
				continue;
			}
			if ((sp->msg_is_complete) &&
			    (sp->length == 0) &&
			    (sp->sender_all_done)) {
				/*
				 * We are doing deferred cleanup. Last time
				 * through when we took all the data the
				 * sender_all_done was not set.
				 */
				if (sp->put_last_out == 0) {
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					    sp->sender_all_done,
					    sp->length,
					    sp->msg_is_complete,
					    sp->put_last_out);
				}
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
				if (sp->net) {
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
				}
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp, so_locked);
			} else {
				/* first entry is real pending data */
				unsent_data++;
				break;
			}
		}
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	return (unsent_data);
}

/*
 * Absorb the peer's parameters from an INIT (or INIT-ACK) chunk into the
 * association: peer vtag, peer rwnd, initial TSN bookkeeping, and the
 * negotiated inbound/outbound stream counts.  If the peer advertises
 * fewer inbound streams than we pre-opened, the excess outgoing streams
 * (and any data queued on them) are abandoned with notifications to the
 * ULP.  Rebuilds the inbound stream array.  Returns 0 on success, -1 if
 * memory for the inbound streams could not be allocated.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* drop already-queued chunks destined for dropped streams */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.stream_number >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
				} else {
					panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
				}
				if (chk->data != NULL) {
					sctp_free_bufspace(stcb, asoc, chk, 1);
					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
					    0, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				/* sa_ignore FREED_MEMORY */
			}
		}
		if (asoc->strmout) {
			/* flush pending messages on the dropped streams */
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					asoc->stream_queue_cnt--;
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, 0, sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
					/* sa_ignore FREED_MEMORY */
				}
				outs->state = SCTP_STREAM_CLOSED;
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->streamoutcnt = asoc->pre_open_streams;
	if (asoc->strmout) {
		for (i = 0; i < asoc->streamoutcnt; i++) {
			asoc->strmout[i].state = SCTP_STREAM_OPEN;
		}
	}
	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		for (i = 0; i < asoc->streamincnt; i++) {
			sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
			sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* inbound streams = min(our limit, peer's outbound count) */
	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
		asoc->streamincnt = ntohs(init->num_outbound_streams);
	} else {
		asoc->streamincnt = asoc->max_inbound_streams;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		asoc->strmin[i].last_sequence_delivered = 0xffffffff;
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
		asoc->strmin[i].pd_api_started = 0;
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}

/*
 * INIT-ACK message processing/consumption returns value < 0 on error
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;
	int nat_friendly = 0;

	/* First verify that we have no illegal param's */
	abort_flag = 0;

	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t) nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
	if (retval < 0) {
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit,
	    src, dst, NULL))) {
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Problem with address parameters");
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->asconf_supported == 0) {
		struct sctp_asconf_addr *param, *nparam;

		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
			SCTP_FREE(param, SCTP_M_ASC_ADDR);
		}
	}
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		/* report unrecognized (but non-fatal) parameters back */
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assure that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy,
	    SCTP_RTT_FROM_NON_DATA);
	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send a op error. But in any
		 * case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, its broke.
		 */
		if (retval == -3) {
			uint16_t len;

			len = (uint16_t) (sizeof(struct sctp_error_missing_param) + sizeof(uint16_t));
			/* We abort with an error of missing mandatory param */
			op_err = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
			if (op_err != NULL) {
				struct sctp_error_missing_param *cause;

				SCTP_BUF_LEN(op_err) = len;
				cause = mtod(op_err, struct sctp_error_missing_param *);
				/* Subtract the reserved param */
				cause->cause.code = htons(SCTP_CAUSE_MISSING_PARAM);
				cause->cause.length = htons(len);
				cause->num_missing_params = htonl(1);
				cause->type[0] = htons(SCTP_STATE_COOKIE);
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    src, dst, sh, op_err,
			    mflowtype, mflowid,
			    vrf_id, net->port);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}

/*
 * HEARTBEAT-ACK handler.  Looks up the net the echoed address belongs
 * to, confirms an unconfirmed destination when the echoed random values
 * match, resets its error count, updates its RTO from the echoed
 * timestamps, marks it reachable, and restarts HB timers as needed.
 * Also completes a pending set-primary request (mobility adaptation).
 */
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	union sctp_sockstore store;
	struct sctp_nets *r_net, *f_net;
	struct timeval tv;
	int req_prim = 0;
	uint16_t old_error_counter;

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}
	/* reconstruct the echoed address to find the matching net */
	memset(&store, 0, sizeof(store));
	switch (cp->heartbeat.hb_info.addr_family) {
#ifdef INET
	case AF_INET:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
			store.sin.sin_family = cp->heartbeat.hb_info.addr_family;
			store.sin.sin_len = cp->heartbeat.hb_info.addr_len;
			store.sin.sin_port = stcb->rport;
			memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address,
			    sizeof(store.sin.sin_addr));
		} else {
			return;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
			store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family;
			store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len;
			store.sin6.sin6_port = stcb->rport;
			memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr));
		} else {
			return;
		}
		break;
#endif
	default:
		return;
	}
	r_net = sctp_findnet(stcb, &store.sa);
	if (r_net == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
		return;
	}
	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
		/*
		 * If it is a HB and its random value is correct we can
		 * confirm the destination.
		 */
		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			stcb->asoc.primary_destination = r_net;
			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
			f_net = TAILQ_FIRST(&stcb->asoc.nets);
			if (f_net != r_net) {
				/*
				 * first one on the list is NOT the primary
				 * sctp_cmpaddr() is much more efficient if
				 * the primary is the first on the list,
				 * make it so.
				 */
				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
			}
			req_prim = 1;
		}
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb,
		    r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
	}
	old_error_counter = r_net->error_count;
	r_net->error_count = 0;
	r_net->hb_responded = 1;
	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
	/* Now lets do a RTO with this */
	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy,
	    SCTP_RTT_FROM_NON_DATA);
	if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
		r_net->dest_state |= SCTP_ADDR_REACHABLE;
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
		    0, (void *)r_net, SCTP_SO_NOT_LOCKED);
	}
	if (r_net->dest_state & SCTP_ADDR_PF) {
		r_net->dest_state &= ~SCTP_ADDR_PF;
		/*
		 * NOTE(review): this passes 'net' (the net the HB-ACK
		 * arrived on) while the surrounding code operates on
		 * 'r_net' (the net being un-PF'd).  Verify whether
		 * r_net was intended here.
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
	}
	if (old_error_counter > 0) {
		/* net had errors before; restart its HB timer afresh */
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
	}
	if (r_net == stcb->asoc.primary_destination) {
		if (stcb->asoc.alternate) {
			/* release the alternate, primary is good */
			sctp_free_remote_addr(stcb->asoc.alternate);
			stcb->asoc.alternate = NULL;
		}
	}
	/* Mobility adaptation */
	if (req_prim) {
		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_BASE) ||
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_FASTHANDOFF)) &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_PRIM_DELETED)) {

			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
			    stcb->sctp_ep, stcb, NULL,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_FASTHANDOFF)) {
				sctp_assoc_immediate_retrans(stcb,
				    stcb->asoc.primary_destination);
			}
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_BASE)) {
				sctp_move_chunks_from_net(stcb,
				    stcb->asoc.deleted_primary);
			}
			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
			    stcb->asoc.deleted_primary);
		}
	}
}

/*
 * Handle the NAT "colliding state" abort cause: restart the handshake
 * with a freshly selected vtag (re-hashing the assoc under the new tag)
 * instead of tearing the association down.
 */
static int
sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
{
	/*
	 * return 0 means we want you to proceed with the abort non-zero
	 * means no abort processing
	 */
	struct sctpasochead *head;

	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * treat like a case where the cookie expired i.e.: - dump
		 * current cookie. - generate a new vtag. - resend init.
		 */
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		/* fall back from COOKIE-ECHOED to COOKIE-WAIT */
		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, &stcb->asoc);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	return (0);
}

/*
 * Handle the NAT "missing state" abort cause by sending an ASCONF-based
 * NAT state update instead of aborting.  Requires peer AUTH support;
 * returns 0 (proceed with abort) if the peer does not support AUTH.
 */
static int
sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*
	 * return 0 means we want you to proceed with the abort non-zero
	 * means no abort processing
	 */
	if (stcb->asoc.auth_supported == 0) {
		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
		return (0);
	}
	sctp_asconf_send_nat_state_update(stcb, net);
	return (1);
}


/*
 * ABORT chunk handler.  Checks first for the two magic NAT error causes
 * (which rescue rather than kill the association); otherwise notifies
 * the ULP and frees the association.  On return the TCB is gone (freed
 * by sctp_free_assoc) unless a NAT cause was handled.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *abort,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	uint16_t len;
	uint16_t error;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	len = ntohs(abort->ch.chunk_length);
	if (len > sizeof(struct sctp_chunkhdr)) {
		/*
		 * Need to check the cause codes for our two magic nat
		 * aborts which don't kill the assoc necessarily.
		 */
		struct sctp_gen_error_cause *cause;

		cause = (struct sctp_gen_error_cause *)(abort + 1);
		error = ntohs(cause->code);
		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    abort->ch.chunk_flags);
			if (sctp_handle_nat_colliding_state(stcb)) {
				return;
			}
		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    abort->ch.chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return;
			}
		}
	} else {
		error = 0;
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* take the socket lock before freeing; hold a ref across the gap */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}

/*
 * Start the per-net PMTU-raise and HEARTBEAT timers for every net, and
 * send up to sctp_hb_maxburst heartbeats to unconfirmed destinations.
 */
static void
sctp_start_net_timers(struct sctp_tcb *stcb)
{
	uint32_t cnt_hb_sent;
	struct sctp_nets *net;

	cnt_hb_sent = 0;
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		/*
		 * For each network start: 1) A pmtu timer. 2) A HB timer 3)
		 * If the dest in unconfirmed send a hb as well if under
		 * max_hb_burst have been sent.
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
		    (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
			cnt_hb_sent++;
		}
	}
	if (cnt_hb_sent) {
		/* push the queued heartbeats out now */
		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_COOKIE_ACK,
		    SCTP_SO_NOT_LOCKED);
	}
}


/*
 * SHUTDOWN chunk handler.  Validates length and state, processes the
 * piggybacked cumulative ack, terminates any partial-delivery API in
 * progress, moves the association toward SHUTDOWN-RECEIVED, and — once
 * no data remains queued or in flight — replies with a SHUTDOWN-ACK and
 * enters SHUTDOWN-ACK-SENT.  Sets *abort_flag if sctp_update_acked()
 * aborted the association.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;
	int old_state;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	}
	old_state = SCTP_GET_STATE(asoc);
	sctp_update_acked(stcb, cp, abort_flag);
	if (*abort_flag) {
		return;
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->control_pdapi->on_strm_q) {
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[asoc->control_pdapi->sinfo_stream];
			if (asoc->control_pdapi->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, asoc->control_pdapi, next_instrm);
				asoc->control_pdapi->on_strm_q = 0;
			} else if (asoc->control_pdapi->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, asoc->control_pdapi, next_instrm);
				asoc->control_pdapi->on_strm_q = 0;
			} else {
				panic("Unknown state on ctrl:%p on_strm_q:%d",
				    asoc->control_pdapi,
				    asoc->control_pdapi->on_strm_q);
			}
		}
		/* mark the PD-API record finished/aborted for the reader */
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		if (stcb->sctp_socket) {
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
		    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
			sctp_stop_timers_for_shutdown(stcb);
			sctp_send_shutdown_ack(stcb, net);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, net);
		} else if (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) {
			/* retransmitted SHUTDOWN: re-ACK it */
			sctp_send_shutdown_ack(stcb, net);
		}
	}
}

static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(stcb->sctp_ep);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
		sctp_send_shutdown_complete(stcb, net, 1);
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* unexpected SHUTDOWN-ACK... so ignore... */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Hold a refcount while the TCB lock is dropped to take the
		 * socket lock; re-check that the assoc was not closed in
		 * the window before waking the reader.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
#ifdef INVARIANTS
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		panic("Queues are not empty when handling SHUTDOWN-ACK");
	}
#endif
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
	/* send SHUTDOWN-COMPLETE */
	sctp_send_shutdown_complete(stcb, net, 0);
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
			/* 1-to-1 style sockets report an empty send buffer now. */
			stcb->sctp_socket->so_snd.sb_cc = 0;
		}
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
	}
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB but first save off the ep */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
}

/*
 * Skip past the param header and then we will find the chunk that caused the
 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
 * our peer must be broken.
 */
static void
sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
    struct sctp_nets *net)
{
	struct sctp_chunkhdr *chk;

	/*
	 * NOTE(review): this assumes a chunk header follows the error-cause
	 * header; sctp_handle_error() only guarantees error_len > 0 --
	 * confirm short UNRECOG_CHUNK causes cannot reach here.
	 */
	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
	switch (chk->chunk_type) {
	case SCTP_ASCONF_ACK:
	case SCTP_ASCONF:
		/* Peer rejected ASCONF: tear down pending ASCONF state. */
		sctp_asconf_cleanup(stcb, net);
		break;
	case SCTP_IFORWARD_CUM_TSN:
	case SCTP_FORWARD_CUM_TSN:
		/* Peer rejected FWD-TSN: disable PR-SCTP for this assoc. */
		stcb->asoc.prsctp_supported = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support chunk type %d(%x)??\n",
		    chk->chunk_type, (uint32_t) chk->chunk_type);
		break;
	}
}

/*
 * Skip past the param header and then we will find the param that caused the
 * problem. There are a number of param's in a ASCONF OR the prsctp param
 * these will turn of specific features.
 * XXX: Is this the right thing to do?
 */
static void
sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
{
	struct sctp_paramhdr *pbad;

	/*
	 * The offending parameter header sits immediately after the
	 * error-cause header. NOTE(review): assumes the cause carries at
	 * least one embedded param header -- confirm callers cannot pass a
	 * shorter cause.
	 */
	pbad = phdr + 1;
	switch (ntohs(pbad->param_type)) {
		/* pr-sctp draft */
	case SCTP_PRSCTP_SUPPORTED:
		stcb->asoc.prsctp_supported = 0;
		break;
	case SCTP_SUPPORTED_CHUNK_EXT:
		break;
		/* draft-ietf-tsvwg-addip-sctp */
	case SCTP_HAS_NAT_SUPPORT:
		stcb->asoc.peer_supports_nat = 0;
		break;
	case SCTP_ADD_IP_ADDRESS:
	case SCTP_DEL_IP_ADDRESS:
	case SCTP_SET_PRIM_ADDR:
		stcb->asoc.asconf_supported = 0;
		break;
	case SCTP_SUCCESS_REPORT:
	case SCTP_ERROR_CAUSE_IND:
		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Turning off ASCONF to this strange peer\n");
		stcb->asoc.asconf_supported = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support param type %d(%x)??\n",
		    pbad->param_type, (uint32_t) pbad->param_type);
		break;
	}
}

/*
 * OPERATION-ERROR chunk handler: walk the list of error causes in the chunk
 * and react to each. Returns 0 normally; returns -1 when the association was
 * freed (too many stale cookies). The first cause seen is reported to the
 * ULP via SCTP_NOTIFY_REMOTE_ERROR at the end of the walk.
 */
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int chklen;
	struct sctp_paramhdr *phdr;
	uint16_t error, error_type;
	uint16_t error_len;
	struct sctp_association *asoc;
	int adjust;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/* parse through all of the errors and process */
	asoc = &stcb->asoc;
	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
	    sizeof(struct sctp_chunkhdr));
	/* Bytes of cause data remaining after the chunk header. */
	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
	error = 0;
	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
		/* Process an Error Cause */
		error_type = ntohs(phdr->param_type);
		error_len = ntohs(phdr->param_length);
		if ((error_len > chklen) || (error_len == 0)) {
			/* invalid param length for this param */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
			    chklen, error_len);
			return (0);
		}
		if (error == 0) {
			/* report the first error cause */
			error = error_type;
		}
		switch (error_type) {
		case SCTP_CAUSE_INVALID_STREAM:
		case SCTP_CAUSE_MISSING_PARAM:
		case SCTP_CAUSE_INVALID_PARAM:
		case SCTP_CAUSE_NO_USER_DATA:
			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
			    error_type);
			break;
		case SCTP_CAUSE_NAT_COLLIDING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    ch->chunk_flags);
			if (sctp_handle_nat_colliding_state(stcb)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_NAT_MISSING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    ch->chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_STALE_COOKIE:
			/*
			 * We only act if we have echoed a cookie and are
			 * waiting.
1215 */ 1216 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 1217 int *p; 1218 1219 p = (int *)((caddr_t)phdr + sizeof(*phdr)); 1220 /* Save the time doubled */ 1221 asoc->cookie_preserve_req = ntohl(*p) << 1; 1222 asoc->stale_cookie_count++; 1223 if (asoc->stale_cookie_count > 1224 asoc->max_init_times) { 1225 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 1226 /* now free the asoc */ 1227 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1228 so = SCTP_INP_SO(stcb->sctp_ep); 1229 atomic_add_int(&stcb->asoc.refcnt, 1); 1230 SCTP_TCB_UNLOCK(stcb); 1231 SCTP_SOCKET_LOCK(so, 1); 1232 SCTP_TCB_LOCK(stcb); 1233 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1234 #endif 1235 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1236 SCTP_FROM_SCTP_INPUT + SCTP_LOC_12); 1237 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1238 SCTP_SOCKET_UNLOCK(so, 1); 1239 #endif 1240 return (-1); 1241 } 1242 /* blast back to INIT state */ 1243 sctp_toss_old_cookies(stcb, &stcb->asoc); 1244 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED; 1245 asoc->state |= SCTP_STATE_COOKIE_WAIT; 1246 sctp_stop_all_cookie_timers(stcb); 1247 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1248 } 1249 break; 1250 case SCTP_CAUSE_UNRESOLVABLE_ADDR: 1251 /* 1252 * Nothing we can do here, we don't do hostname 1253 * addresses so if the peer does not like my IPv6 1254 * (or IPv4 for that matter) it does not matter. If 1255 * they don't support that type of address, they can 1256 * NOT possibly get that packet type... i.e. with no 1257 * IPv6 you can't recieve a IPv6 packet. so we can 1258 * safely ignore this one. If we ever added support 1259 * for HOSTNAME Addresses, then we would need to do 1260 * something here. 
			 */
			break;
		case SCTP_CAUSE_UNRECOG_CHUNK:
			/* Peer rejected a chunk type we sent; disable that feature. */
			sctp_process_unrecog_chunk(stcb, phdr, net);
			break;
		case SCTP_CAUSE_UNRECOG_PARAM:
			/* Peer rejected a parameter we sent; disable that feature. */
			sctp_process_unrecog_param(stcb, phdr);
			break;
		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
			/*
			 * We ignore this since the timer will drive out a
			 * new cookie anyway and there timer will drive us
			 * to send a SHUTDOWN_COMPLETE. We can't send one
			 * here since we don't have their tag.
			 */
			break;
		case SCTP_CAUSE_DELETING_LAST_ADDR:
		case SCTP_CAUSE_RESOURCE_SHORTAGE:
		case SCTP_CAUSE_DELETING_SRC_ADDR:
			/*
			 * We should NOT get these here, but in a
			 * ASCONF-ACK.
			 */
			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
			    error_type);
			break;
		case SCTP_CAUSE_OUT_OF_RESC:
			/*
			 * And what, pray tell do we do with the fact that
			 * the peer is out of resources? Not really sure we
			 * could do anything but abort. I suspect this
			 * should have came WITH an abort instead of in a
			 * OP-ERROR.
1294 */ 1295 break; 1296 default: 1297 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n", 1298 error_type); 1299 break; 1300 } 1301 adjust = SCTP_SIZE32(error_len); 1302 chklen -= adjust; 1303 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust); 1304 } 1305 sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, error, ch, SCTP_SO_NOT_LOCKED); 1306 return (0); 1307 } 1308 1309 static int 1310 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, 1311 struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, 1312 struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 1313 struct sctp_nets *net, int *abort_no_unlock, 1314 uint8_t mflowtype, uint32_t mflowid, 1315 uint32_t vrf_id) 1316 { 1317 struct sctp_init_ack *init_ack; 1318 struct mbuf *op_err; 1319 1320 SCTPDBG(SCTP_DEBUG_INPUT2, 1321 "sctp_handle_init_ack: handling INIT-ACK\n"); 1322 1323 if (stcb == NULL) { 1324 SCTPDBG(SCTP_DEBUG_INPUT2, 1325 "sctp_handle_init_ack: TCB is null\n"); 1326 return (-1); 1327 } 1328 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) { 1329 /* Invalid length */ 1330 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 1331 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1332 src, dst, sh, op_err, 1333 mflowtype, mflowid, 1334 vrf_id, net->port); 1335 *abort_no_unlock = 1; 1336 return (-1); 1337 } 1338 init_ack = &cp->init; 1339 /* validate parameters */ 1340 if (init_ack->initiate_tag == 0) { 1341 /* protocol error... send an abort */ 1342 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 1343 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1344 src, dst, sh, op_err, 1345 mflowtype, mflowid, 1346 vrf_id, net->port); 1347 *abort_no_unlock = 1; 1348 return (-1); 1349 } 1350 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) { 1351 /* protocol error... 
send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_inbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_outbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* process according to association state... */
	switch (stcb->asoc.state & SCTP_STATE_MASK) {
	case SCTP_STATE_COOKIE_WAIT:
		/* this is the expected state for this chunk */
		/* process the INIT-ACK parameters */
		if (stcb->asoc.primary_destination->dest_state &
		    SCTP_ADDR_UNCONFIRMED) {
			/*
			 * The primary is where we sent the INIT, we can
			 * always consider it confirmed when the INIT-ACK is
			 * returned. Do this before we load addresses
			 * though.
			 */
			stcb->asoc.primary_destination->dest_state &=
			    ~SCTP_ADDR_UNCONFIRMED;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
		}
		if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
		    net, abort_no_unlock,
		    mflowtype, mflowid,
		    vrf_id) < 0) {
			/* error in parsing parameters */
			return (-1);
		}
		/* update our state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);

		/* reset the RTO calc */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		/* A valid INIT-ACK clears the error-threshold accounting. */
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		/*
		 * collapse the init timer back in case of a exponential
		 * backoff
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
		    stcb, net);
		/*
		 * the send at the end of the inbound data processing will
		 * cause the cookie to be sent
		 */
		break;
	case SCTP_STATE_SHUTDOWN_SENT:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_COOKIE_ECHOED:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_OPEN:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_EMPTY:
	case SCTP_STATE_INUSE:
	default:
		/* incorrect state...
discard */ 1443 return (-1); 1444 break; 1445 } 1446 SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n"); 1447 return (0); 1448 } 1449 1450 static struct sctp_tcb * 1451 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, 1452 struct sockaddr *src, struct sockaddr *dst, 1453 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1454 struct sctp_inpcb *inp, struct sctp_nets **netp, 1455 struct sockaddr *init_src, int *notification, 1456 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1457 uint8_t mflowtype, uint32_t mflowid, 1458 uint32_t vrf_id, uint16_t port); 1459 1460 1461 /* 1462 * handle a state cookie for an existing association m: input packet mbuf 1463 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a 1464 * "split" mbuf and the cookie signature does not exist offset: offset into 1465 * mbuf to the cookie-echo chunk 1466 */ 1467 static struct sctp_tcb * 1468 sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, 1469 struct sockaddr *src, struct sockaddr *dst, 1470 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1471 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp, 1472 struct sockaddr *init_src, int *notification, 1473 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1474 uint8_t mflowtype, uint32_t mflowid, 1475 uint32_t vrf_id, uint16_t port) 1476 { 1477 struct sctp_association *asoc; 1478 struct sctp_init_chunk *init_cp, init_buf; 1479 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1480 struct sctp_nets *net; 1481 struct mbuf *op_err; 1482 int init_offset, initack_offset, i; 1483 int retval; 1484 int spec_flag = 0; 1485 uint32_t how_indx; 1486 1487 #if defined(SCTP_DETAILED_STR_STATS) 1488 int j; 1489 1490 #endif 1491 1492 net = *netp; 1493 /* I know that the TCB is non-NULL from the caller */ 1494 asoc = &stcb->asoc; 1495 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) { 1496 if 
(asoc->cookie_how[how_indx] == 0) 1497 break; 1498 } 1499 if (how_indx < sizeof(asoc->cookie_how)) { 1500 asoc->cookie_how[how_indx] = 1; 1501 } 1502 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 1503 /* SHUTDOWN came in after sending INIT-ACK */ 1504 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 1505 op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, ""); 1506 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err, 1507 mflowtype, mflowid, inp->fibnum, 1508 vrf_id, net->port); 1509 if (how_indx < sizeof(asoc->cookie_how)) 1510 asoc->cookie_how[how_indx] = 2; 1511 return (NULL); 1512 } 1513 /* 1514 * find and validate the INIT chunk in the cookie (peer's info) the 1515 * INIT should start after the cookie-echo header struct (chunk 1516 * header, state cookie header struct) 1517 */ 1518 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk); 1519 1520 init_cp = (struct sctp_init_chunk *) 1521 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1522 (uint8_t *) & init_buf); 1523 if (init_cp == NULL) { 1524 /* could not pull a INIT chunk in cookie */ 1525 return (NULL); 1526 } 1527 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1528 return (NULL); 1529 } 1530 /* 1531 * find and validate the INIT-ACK chunk in the cookie (my info) the 1532 * INIT-ACK follows the INIT chunk 1533 */ 1534 initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length)); 1535 initack_cp = (struct sctp_init_ack_chunk *) 1536 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1537 (uint8_t *) & initack_buf); 1538 if (initack_cp == NULL) { 1539 /* could not pull INIT-ACK chunk in cookie */ 1540 return (NULL); 1541 } 1542 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1543 return (NULL); 1544 } 1545 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1546 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) { 1547 /* 1548 * case D in Section 5.2.4 Table 2: MMAA process accordingly 
1549 * to get into the OPEN state 1550 */ 1551 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1552 /*- 1553 * Opps, this means that we somehow generated two vtag's 1554 * the same. I.e. we did: 1555 * Us Peer 1556 * <---INIT(tag=a)------ 1557 * ----INIT-ACK(tag=t)--> 1558 * ----INIT(tag=t)------> *1 1559 * <---INIT-ACK(tag=a)--- 1560 * <----CE(tag=t)------------- *2 1561 * 1562 * At point *1 we should be generating a different 1563 * tag t'. Which means we would throw away the CE and send 1564 * ours instead. Basically this is case C (throw away side). 1565 */ 1566 if (how_indx < sizeof(asoc->cookie_how)) 1567 asoc->cookie_how[how_indx] = 17; 1568 return (NULL); 1569 1570 } 1571 switch (SCTP_GET_STATE(asoc)) { 1572 case SCTP_STATE_COOKIE_WAIT: 1573 case SCTP_STATE_COOKIE_ECHOED: 1574 /* 1575 * INIT was sent but got a COOKIE_ECHO with the 1576 * correct tags... just accept it...but we must 1577 * process the init so that we can make sure we have 1578 * the right seq no's. 1579 */ 1580 /* First we must process the INIT !! 
*/ 1581 retval = sctp_process_init(init_cp, stcb); 1582 if (retval < 0) { 1583 if (how_indx < sizeof(asoc->cookie_how)) 1584 asoc->cookie_how[how_indx] = 3; 1585 return (NULL); 1586 } 1587 /* we have already processed the INIT so no problem */ 1588 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, 1589 stcb, net, 1590 SCTP_FROM_SCTP_INPUT + SCTP_LOC_13); 1591 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, 1592 stcb, net, 1593 SCTP_FROM_SCTP_INPUT + SCTP_LOC_14); 1594 /* update current state */ 1595 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1596 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1597 else 1598 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1599 1600 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1601 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1602 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1603 stcb->sctp_ep, stcb, asoc->primary_destination); 1604 } 1605 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1606 sctp_stop_all_cookie_timers(stcb); 1607 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1608 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1609 (inp->sctp_socket->so_qlimit == 0) 1610 ) { 1611 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1612 struct socket *so; 1613 1614 #endif 1615 /* 1616 * Here is where collision would go if we 1617 * did a connect() and instead got a 1618 * init/init-ack/cookie done before the 1619 * init-ack came back.. 
1620 */ 1621 stcb->sctp_ep->sctp_flags |= 1622 SCTP_PCB_FLAGS_CONNECTED; 1623 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1624 so = SCTP_INP_SO(stcb->sctp_ep); 1625 atomic_add_int(&stcb->asoc.refcnt, 1); 1626 SCTP_TCB_UNLOCK(stcb); 1627 SCTP_SOCKET_LOCK(so, 1); 1628 SCTP_TCB_LOCK(stcb); 1629 atomic_add_int(&stcb->asoc.refcnt, -1); 1630 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1631 SCTP_SOCKET_UNLOCK(so, 1); 1632 return (NULL); 1633 } 1634 #endif 1635 soisconnected(stcb->sctp_socket); 1636 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1637 SCTP_SOCKET_UNLOCK(so, 1); 1638 #endif 1639 } 1640 /* notify upper layer */ 1641 *notification = SCTP_NOTIFY_ASSOC_UP; 1642 /* 1643 * since we did not send a HB make sure we don't 1644 * double things 1645 */ 1646 net->hb_responded = 1; 1647 net->RTO = sctp_calculate_rto(stcb, asoc, net, 1648 &cookie->time_entered, 1649 sctp_align_unsafe_makecopy, 1650 SCTP_RTT_FROM_NON_DATA); 1651 1652 if (stcb->asoc.sctp_autoclose_ticks && 1653 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) { 1654 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 1655 inp, stcb, NULL); 1656 } 1657 break; 1658 default: 1659 /* 1660 * we're in the OPEN state (or beyond), so peer must 1661 * have simply lost the COOKIE-ACK 1662 */ 1663 break; 1664 } /* end switch */ 1665 sctp_stop_all_cookie_timers(stcb); 1666 /* 1667 * We ignore the return code here.. not sure if we should 1668 * somehow abort.. but we do have an existing asoc. This 1669 * really should not fail. 
1670 */ 1671 if (sctp_load_addresses_from_init(stcb, m, 1672 init_offset + sizeof(struct sctp_init_chunk), 1673 initack_offset, src, dst, init_src)) { 1674 if (how_indx < sizeof(asoc->cookie_how)) 1675 asoc->cookie_how[how_indx] = 4; 1676 return (NULL); 1677 } 1678 /* respond with a COOKIE-ACK */ 1679 sctp_toss_old_cookies(stcb, asoc); 1680 sctp_send_cookie_ack(stcb); 1681 if (how_indx < sizeof(asoc->cookie_how)) 1682 asoc->cookie_how[how_indx] = 5; 1683 return (stcb); 1684 } 1685 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1686 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag && 1687 cookie->tie_tag_my_vtag == 0 && 1688 cookie->tie_tag_peer_vtag == 0) { 1689 /* 1690 * case C in Section 5.2.4 Table 2: XMOO silently discard 1691 */ 1692 if (how_indx < sizeof(asoc->cookie_how)) 1693 asoc->cookie_how[how_indx] = 6; 1694 return (NULL); 1695 } 1696 /* 1697 * If nat support, and the below and stcb is established, send back 1698 * a ABORT(colliding state) if we are established. 1699 */ 1700 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) && 1701 (asoc->peer_supports_nat) && 1702 ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1703 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) || 1704 (asoc->peer_vtag == 0)))) { 1705 /* 1706 * Special case - Peer's support nat. We may have two init's 1707 * that we gave out the same tag on since one was not 1708 * established.. i.e. we get INIT from host-1 behind the nat 1709 * and we respond tag-a, we get a INIT from host-2 behind 1710 * the nat and we get tag-a again. Then we bring up host-1 1711 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1). 1712 * Now we have colliding state. We must send an abort here 1713 * with colliding state indication. 
1714 */ 1715 op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, ""); 1716 sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, 1717 mflowtype, mflowid, inp->fibnum, 1718 vrf_id, port); 1719 return (NULL); 1720 } 1721 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1722 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) || 1723 (asoc->peer_vtag == 0))) { 1724 /* 1725 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info 1726 * should be ok, re-accept peer info 1727 */ 1728 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1729 /* 1730 * Extension of case C. If we hit this, then the 1731 * random number generator returned the same vtag 1732 * when we first sent our INIT-ACK and when we later 1733 * sent our INIT. The side with the seq numbers that 1734 * are different will be the one that normnally 1735 * would have hit case C. This in effect "extends" 1736 * our vtags in this collision case to be 64 bits. 1737 * The same collision could occur aka you get both 1738 * vtag and seq number the same twice in a row.. but 1739 * is much less likely. If it did happen then we 1740 * would proceed through and bring up the assoc.. we 1741 * may end up with the wrong stream setup however.. 1742 * which would be bad.. but there is no way to 1743 * tell.. 
until we send on a stream that does not 1744 * exist :-) 1745 */ 1746 if (how_indx < sizeof(asoc->cookie_how)) 1747 asoc->cookie_how[how_indx] = 7; 1748 1749 return (NULL); 1750 } 1751 if (how_indx < sizeof(asoc->cookie_how)) 1752 asoc->cookie_how[how_indx] = 8; 1753 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 1754 SCTP_FROM_SCTP_INPUT + SCTP_LOC_15); 1755 sctp_stop_all_cookie_timers(stcb); 1756 /* 1757 * since we did not send a HB make sure we don't double 1758 * things 1759 */ 1760 net->hb_responded = 1; 1761 if (stcb->asoc.sctp_autoclose_ticks && 1762 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1763 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1764 NULL); 1765 } 1766 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1767 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1768 1769 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) { 1770 /* 1771 * Ok the peer probably discarded our data (if we 1772 * echoed a cookie+data). So anything on the 1773 * sent_queue should be marked for retransmit, we 1774 * may not get something to kick us so it COULD 1775 * still take a timeout to move these.. but it can't 1776 * hurt to mark them. 
1777 */ 1778 struct sctp_tmit_chunk *chk; 1779 1780 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1781 if (chk->sent < SCTP_DATAGRAM_RESEND) { 1782 chk->sent = SCTP_DATAGRAM_RESEND; 1783 sctp_flight_size_decrease(chk); 1784 sctp_total_flight_decrease(stcb, chk); 1785 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1786 spec_flag++; 1787 } 1788 } 1789 1790 } 1791 /* process the INIT info (peer's info) */ 1792 retval = sctp_process_init(init_cp, stcb); 1793 if (retval < 0) { 1794 if (how_indx < sizeof(asoc->cookie_how)) 1795 asoc->cookie_how[how_indx] = 9; 1796 return (NULL); 1797 } 1798 if (sctp_load_addresses_from_init(stcb, m, 1799 init_offset + sizeof(struct sctp_init_chunk), 1800 initack_offset, src, dst, init_src)) { 1801 if (how_indx < sizeof(asoc->cookie_how)) 1802 asoc->cookie_how[how_indx] = 10; 1803 return (NULL); 1804 } 1805 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) || 1806 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) { 1807 *notification = SCTP_NOTIFY_ASSOC_UP; 1808 1809 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1810 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1811 (inp->sctp_socket->so_qlimit == 0)) { 1812 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1813 struct socket *so; 1814 1815 #endif 1816 stcb->sctp_ep->sctp_flags |= 1817 SCTP_PCB_FLAGS_CONNECTED; 1818 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1819 so = SCTP_INP_SO(stcb->sctp_ep); 1820 atomic_add_int(&stcb->asoc.refcnt, 1); 1821 SCTP_TCB_UNLOCK(stcb); 1822 SCTP_SOCKET_LOCK(so, 1); 1823 SCTP_TCB_LOCK(stcb); 1824 atomic_add_int(&stcb->asoc.refcnt, -1); 1825 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1826 SCTP_SOCKET_UNLOCK(so, 1); 1827 return (NULL); 1828 } 1829 #endif 1830 soisconnected(stcb->sctp_socket); 1831 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1832 SCTP_SOCKET_UNLOCK(so, 1); 1833 #endif 1834 } 1835 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1836 
SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1837 else 1838 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1839 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1840 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1841 SCTP_STAT_INCR_COUNTER32(sctps_restartestab); 1842 } else { 1843 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1844 } 1845 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1846 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1847 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1848 stcb->sctp_ep, stcb, asoc->primary_destination); 1849 } 1850 sctp_stop_all_cookie_timers(stcb); 1851 sctp_toss_old_cookies(stcb, asoc); 1852 sctp_send_cookie_ack(stcb); 1853 if (spec_flag) { 1854 /* 1855 * only if we have retrans set do we do this. What 1856 * this call does is get only the COOKIE-ACK out and 1857 * then when we return the normal call to 1858 * sctp_chunk_output will get the retrans out behind 1859 * this. 1860 */ 1861 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED); 1862 } 1863 if (how_indx < sizeof(asoc->cookie_how)) 1864 asoc->cookie_how[how_indx] = 11; 1865 1866 return (stcb); 1867 } 1868 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1869 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) && 1870 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce && 1871 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce && 1872 cookie->tie_tag_peer_vtag != 0) { 1873 struct sctpasochead *head; 1874 1875 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1876 struct socket *so; 1877 1878 #endif 1879 1880 if (asoc->peer_supports_nat) { 1881 /* 1882 * This is a gross gross hack. Just call the 1883 * cookie_new code since we are allowing a duplicate 1884 * association. I hope this works... 
1885 */ 1886 return (sctp_process_cookie_new(m, iphlen, offset, src, dst, 1887 sh, cookie, cookie_len, 1888 inp, netp, init_src, notification, 1889 auth_skipped, auth_offset, auth_len, 1890 mflowtype, mflowid, 1891 vrf_id, port)); 1892 } 1893 /* 1894 * case A in Section 5.2.4 Table 2: XXMM (peer restarted) 1895 */ 1896 /* temp code */ 1897 if (how_indx < sizeof(asoc->cookie_how)) 1898 asoc->cookie_how[how_indx] = 12; 1899 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, 1900 SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1901 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 1902 SCTP_FROM_SCTP_INPUT + SCTP_LOC_17); 1903 1904 /* notify upper layer */ 1905 *notification = SCTP_NOTIFY_ASSOC_RESTART; 1906 atomic_add_int(&stcb->asoc.refcnt, 1); 1907 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) && 1908 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1909 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 1910 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1911 } 1912 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1913 SCTP_STAT_INCR_GAUGE32(sctps_restartestab); 1914 } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1915 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab); 1916 } 1917 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1918 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1919 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1920 stcb->sctp_ep, stcb, asoc->primary_destination); 1921 1922 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) { 1923 /* move to OPEN state, if not in SHUTDOWN_SENT */ 1924 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1925 } 1926 asoc->pre_open_streams = 1927 ntohs(initack_cp->init.num_outbound_streams); 1928 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1929 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 1930 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 1931 1932 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1933 1934 
asoc->str_reset_seq_in = asoc->init_seq_number; 1935 1936 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1937 if (asoc->mapping_array) { 1938 memset(asoc->mapping_array, 0, 1939 asoc->mapping_array_size); 1940 } 1941 if (asoc->nr_mapping_array) { 1942 memset(asoc->nr_mapping_array, 0, 1943 asoc->mapping_array_size); 1944 } 1945 SCTP_TCB_UNLOCK(stcb); 1946 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1947 so = SCTP_INP_SO(stcb->sctp_ep); 1948 SCTP_SOCKET_LOCK(so, 1); 1949 #endif 1950 SCTP_INP_INFO_WLOCK(); 1951 SCTP_INP_WLOCK(stcb->sctp_ep); 1952 SCTP_TCB_LOCK(stcb); 1953 atomic_add_int(&stcb->asoc.refcnt, -1); 1954 /* send up all the data */ 1955 SCTP_TCB_SEND_LOCK(stcb); 1956 1957 sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED); 1958 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1959 stcb->asoc.strmout[i].chunks_on_queues = 0; 1960 #if defined(SCTP_DETAILED_STR_STATS) 1961 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { 1962 asoc->strmout[i].abandoned_sent[j] = 0; 1963 asoc->strmout[i].abandoned_unsent[j] = 0; 1964 } 1965 #else 1966 asoc->strmout[i].abandoned_sent[0] = 0; 1967 asoc->strmout[i].abandoned_unsent[0] = 0; 1968 #endif 1969 stcb->asoc.strmout[i].stream_no = i; 1970 stcb->asoc.strmout[i].next_sequence_send = 0; 1971 stcb->asoc.strmout[i].last_msg_incomplete = 0; 1972 } 1973 /* process the INIT-ACK info (my info) */ 1974 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1975 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1976 1977 /* pull from vtag hash */ 1978 LIST_REMOVE(stcb, sctp_asocs); 1979 /* re-insert to new vtag position */ 1980 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, 1981 SCTP_BASE_INFO(hashasocmark))]; 1982 /* 1983 * put it in the bucket in the vtag hash of assoc's for the 1984 * system 1985 */ 1986 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 1987 1988 SCTP_TCB_SEND_UNLOCK(stcb); 1989 SCTP_INP_WUNLOCK(stcb->sctp_ep); 1990 SCTP_INP_INFO_WUNLOCK(); 1991 #if defined(__APPLE__) || 
defined(SCTP_SO_LOCK_TESTING) 1992 SCTP_SOCKET_UNLOCK(so, 1); 1993 #endif 1994 asoc->total_flight = 0; 1995 asoc->total_flight_count = 0; 1996 /* process the INIT info (peer's info) */ 1997 retval = sctp_process_init(init_cp, stcb); 1998 if (retval < 0) { 1999 if (how_indx < sizeof(asoc->cookie_how)) 2000 asoc->cookie_how[how_indx] = 13; 2001 2002 return (NULL); 2003 } 2004 /* 2005 * since we did not send a HB make sure we don't double 2006 * things 2007 */ 2008 net->hb_responded = 1; 2009 2010 if (sctp_load_addresses_from_init(stcb, m, 2011 init_offset + sizeof(struct sctp_init_chunk), 2012 initack_offset, src, dst, init_src)) { 2013 if (how_indx < sizeof(asoc->cookie_how)) 2014 asoc->cookie_how[how_indx] = 14; 2015 2016 return (NULL); 2017 } 2018 /* respond with a COOKIE-ACK */ 2019 sctp_stop_all_cookie_timers(stcb); 2020 sctp_toss_old_cookies(stcb, asoc); 2021 sctp_send_cookie_ack(stcb); 2022 if (how_indx < sizeof(asoc->cookie_how)) 2023 asoc->cookie_how[how_indx] = 15; 2024 2025 return (stcb); 2026 } 2027 if (how_indx < sizeof(asoc->cookie_how)) 2028 asoc->cookie_how[how_indx] = 16; 2029 /* all other cases... 
*/
	return (NULL);
}


/*
 * Handle a state cookie for a NEW association (no existing TCB matched).
 *
 *   m          - input packet mbuf chain; assumes a pullup on the
 *                IP/SCTP/COOKIE-ECHO chunk.  Note: this is a "split" mbuf
 *                and the cookie signature does not exist in it.
 *   offset     - offset into the mbuf of the cookie-echo chunk.
 *   cookie_len - length of the cookie chunk.
 *   init_src   - where the INIT was from.
 *
 * Returns a new TCB on success, or NULL on failure; on several error
 * paths an ABORT is sent back via sctp_abort_association() first.
 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	union sctp_sockstore store;
	struct sctp_association *asoc;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate
	 */

	/*
	 * Here we do a trick, we set in NULL for the proc/thread argument.
	 * We do this since in effect we only use the p argument when the
	 * socket is unbound and we must do an implicit bind. Since we are
	 * getting a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    ntohs(initack_cp->init.num_outbound_streams),
	    (struct thread *)NULL
	    );
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
		return (NULL);
	}
	/* get the correct sctp_nets */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
	asoc->scope.site_scope = cookie->site_scope;
	asoc->scope.local_scope = cookie->local_scope;
	asoc->scope.loopback_scope = cookie->loopback_scope;

	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston we have a problem. The EP changed while the
		 * cookie was in flight. Only recourse is to abort the
		 * association.
		 */
		/*
		 * NOTE(review): on this and every later error path the
		 * refcnt is bumped before sctp_free_assoc() and dropped
		 * after, so the TCB stays pinned across the (conditional)
		 * socket-lock dance -- keep the pairing intact.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, port);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/* process the INIT info (peer's info) */
	if (netp)
		retval = sctp_process_init(init_cp, stcb);
	else
		retval = 0;
	if (retval < 0) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset,
	    src, dst, init_src)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct sctp_auth_chunk *)
		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
			    "COOKIE-ECHO: AUTH failed\n");
			atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}
	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
		    stcb->sctp_ep, stcb, asoc->primary_destination);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight).  This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing. else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	switch (cookie->laddr_type) {
#ifdef INET
	case SCTP_IPV4_ADDRESS:
		/* source addr is IPv4 */
		memset(&store.sin, 0, sizeof(struct sockaddr_in));
		store.sin.sin_family = AF_INET;
		store.sin.sin_len = sizeof(struct sockaddr_in);
		store.sin.sin_addr.s_addr = cookie->laddress[0];
		break;
#endif
#ifdef INET6
	case SCTP_IPV6_ADDRESS:
		/* source addr is IPv6 */
		memset(&store.sin6, 0, sizeof(struct sockaddr_in6));
		store.sin6.sin6_family = AF_INET6;
		store.sin6.sin6_len = sizeof(struct sockaddr_in6);
		store.sin6.sin6_scope_id = cookie->scope_id;
		memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr));
		break;
#endif
	default:
		/* unknown/unsupported address family in the cookie */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect() how it got a
		 * cookie that is NEW is a bit of a mystery. It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having..
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/*
		 * We don't want to do anything with this one. Since it is
		 * the listening guy. The timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	/* since we did not send a HB make sure we don't double things */
	if ((netp) && (*netp))
		(*netp)->hb_responded = 1;

	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	if ((netp != NULL) && (*netp != NULL)) {
		/* calculate the RTT and set the encaps port */
		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
		    &cookie->time_entered, sctp_align_unsafe_makecopy,
		    SCTP_RTT_FROM_NON_DATA);
#if defined(INET) || defined(INET6)
		if (((*netp)->port == 0) && (port != 0)) {
			sctp_pathmtu_adjustment(stcb, (uint16_t) ((*netp)->mtu - sizeof(struct udphdr)));
		}
		(*netp)->port = port;
#endif
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    &store.sa, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}

/*
 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e
 * we NEED to make sure we are not already using the vtag. If so we
 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit!
2400 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag, 2401 SCTP_BASE_INFO(hashasocmark))]; 2402 LIST_FOREACH(stcb, head, sctp_asocs) { 2403 if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) { 2404 -- SEND ABORT - TRY AGAIN -- 2405 } 2406 } 2407 */ 2408 2409 /* 2410 * handles a COOKIE-ECHO message stcb: modified to either a new or left as 2411 * existing (non-NULL) TCB 2412 */ 2413 static struct mbuf * 2414 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2415 struct sockaddr *src, struct sockaddr *dst, 2416 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2417 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2418 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2419 struct sctp_tcb **locked_tcb, 2420 uint8_t mflowtype, uint32_t mflowid, 2421 uint32_t vrf_id, uint16_t port) 2422 { 2423 struct sctp_state_cookie *cookie; 2424 struct sctp_tcb *l_stcb = *stcb; 2425 struct sctp_inpcb *l_inp; 2426 struct sockaddr *to; 2427 struct sctp_pcb *ep; 2428 struct mbuf *m_sig; 2429 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2430 uint8_t *sig; 2431 uint8_t cookie_ok = 0; 2432 unsigned int sig_offset, cookie_offset; 2433 unsigned int cookie_len; 2434 struct timeval now; 2435 struct timeval time_expires; 2436 int notification = 0; 2437 struct sctp_nets *netl; 2438 int had_a_existing_tcb = 0; 2439 int send_int_conf = 0; 2440 2441 #ifdef INET 2442 struct sockaddr_in sin; 2443 2444 #endif 2445 #ifdef INET6 2446 struct sockaddr_in6 sin6; 2447 2448 #endif 2449 2450 SCTPDBG(SCTP_DEBUG_INPUT2, 2451 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 2452 2453 if (inp_p == NULL) { 2454 return (NULL); 2455 } 2456 cookie = &cp->cookie; 2457 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2458 cookie_len = ntohs(cp->ch.chunk_length); 2459 2460 if ((cookie->peerport != sh->src_port) || 2461 (cookie->myport != sh->dest_port) || 2462 (cookie->my_vtag != sh->v_tag)) { 
2463 /* 2464 * invalid ports or bad tag. Note that we always leave the 2465 * v_tag in the header in network order and when we stored 2466 * it in the my_vtag slot we also left it in network order. 2467 * This maintains the match even though it may be in the 2468 * opposite byte order of the machine :-> 2469 */ 2470 return (NULL); 2471 } 2472 if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2473 sizeof(struct sctp_init_chunk) + 2474 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 2475 /* cookie too small */ 2476 return (NULL); 2477 } 2478 /* 2479 * split off the signature into its own mbuf (since it should not be 2480 * calculated in the sctp_hmac_m() call). 2481 */ 2482 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2483 m_sig = m_split(m, sig_offset, M_NOWAIT); 2484 if (m_sig == NULL) { 2485 /* out of memory or ?? */ 2486 return (NULL); 2487 } 2488 #ifdef SCTP_MBUF_LOGGING 2489 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2490 sctp_log_mbc(m_sig, SCTP_MBUF_SPLIT); 2491 } 2492 #endif 2493 2494 /* 2495 * compute the signature/digest for the cookie 2496 */ 2497 ep = &(*inp_p)->sctp_ep; 2498 l_inp = *inp_p; 2499 if (l_stcb) { 2500 SCTP_TCB_UNLOCK(l_stcb); 2501 } 2502 SCTP_INP_RLOCK(l_inp); 2503 if (l_stcb) { 2504 SCTP_TCB_LOCK(l_stcb); 2505 } 2506 /* which cookie is it? 
*/ 2507 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 2508 (ep->current_secret_number != ep->last_secret_number)) { 2509 /* it's the old cookie */ 2510 (void)sctp_hmac_m(SCTP_HMAC, 2511 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2512 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2513 } else { 2514 /* it's the current cookie */ 2515 (void)sctp_hmac_m(SCTP_HMAC, 2516 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 2517 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2518 } 2519 /* get the signature */ 2520 SCTP_INP_RUNLOCK(l_inp); 2521 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 2522 if (sig == NULL) { 2523 /* couldn't find signature */ 2524 sctp_m_freem(m_sig); 2525 return (NULL); 2526 } 2527 /* compare the received digest with the computed digest */ 2528 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2529 /* try the old cookie? */ 2530 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 2531 (ep->current_secret_number != ep->last_secret_number)) { 2532 /* compute digest with old */ 2533 (void)sctp_hmac_m(SCTP_HMAC, 2534 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2535 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2536 /* compare */ 2537 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2538 cookie_ok = 1; 2539 } 2540 } else { 2541 cookie_ok = 1; 2542 } 2543 2544 /* 2545 * Now before we continue we must reconstruct our mbuf so that 2546 * normal processing of any other chunks will work. 
2547 */ 2548 { 2549 struct mbuf *m_at; 2550 2551 m_at = m; 2552 while (SCTP_BUF_NEXT(m_at) != NULL) { 2553 m_at = SCTP_BUF_NEXT(m_at); 2554 } 2555 SCTP_BUF_NEXT(m_at) = m_sig; 2556 } 2557 2558 if (cookie_ok == 0) { 2559 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2560 SCTPDBG(SCTP_DEBUG_INPUT2, 2561 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2562 (uint32_t) offset, cookie_offset, sig_offset); 2563 return (NULL); 2564 } 2565 /* 2566 * check the cookie timestamps to be sure it's not stale 2567 */ 2568 (void)SCTP_GETTIME_TIMEVAL(&now); 2569 /* Expire time is in Ticks, so we convert to seconds */ 2570 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 2571 time_expires.tv_usec = cookie->time_entered.tv_usec; 2572 /* 2573 * TODO sctp_constants.h needs alternative time macros when _KERNEL 2574 * is undefined. 2575 */ 2576 if (timevalcmp(&now, &time_expires, >)) { 2577 /* cookie is stale! */ 2578 struct mbuf *op_err; 2579 struct sctp_error_stale_cookie *cause; 2580 uint32_t tim; 2581 2582 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_stale_cookie), 2583 0, M_NOWAIT, 1, MT_DATA); 2584 if (op_err == NULL) { 2585 /* FOOBAR */ 2586 return (NULL); 2587 } 2588 /* Set the len */ 2589 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie); 2590 cause = mtod(op_err, struct sctp_error_stale_cookie *); 2591 cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE); 2592 cause->cause.length = htons((sizeof(struct sctp_paramhdr) + 2593 (sizeof(uint32_t)))); 2594 /* seconds to usec */ 2595 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 2596 /* add in usec */ 2597 if (tim == 0) 2598 tim = now.tv_usec - cookie->time_entered.tv_usec; 2599 cause->stale_time = htonl(tim); 2600 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err, 2601 mflowtype, mflowid, l_inp->fibnum, 2602 vrf_id, port); 2603 return (NULL); 2604 } 2605 /* 2606 * Now we must see with the lookup address if we 
have an existing 2607 * asoc. This will only happen if we were in the COOKIE-WAIT state 2608 * and a INIT collided with us and somewhere the peer sent the 2609 * cookie on another address besides the single address our assoc 2610 * had for him. In this case we will have one of the tie-tags set at 2611 * least AND the address field in the cookie can be used to look it 2612 * up. 2613 */ 2614 to = NULL; 2615 switch (cookie->addr_type) { 2616 #ifdef INET6 2617 case SCTP_IPV6_ADDRESS: 2618 memset(&sin6, 0, sizeof(sin6)); 2619 sin6.sin6_family = AF_INET6; 2620 sin6.sin6_len = sizeof(sin6); 2621 sin6.sin6_port = sh->src_port; 2622 sin6.sin6_scope_id = cookie->scope_id; 2623 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2624 sizeof(sin6.sin6_addr.s6_addr)); 2625 to = (struct sockaddr *)&sin6; 2626 break; 2627 #endif 2628 #ifdef INET 2629 case SCTP_IPV4_ADDRESS: 2630 memset(&sin, 0, sizeof(sin)); 2631 sin.sin_family = AF_INET; 2632 sin.sin_len = sizeof(sin); 2633 sin.sin_port = sh->src_port; 2634 sin.sin_addr.s_addr = cookie->address[0]; 2635 to = (struct sockaddr *)&sin; 2636 break; 2637 #endif 2638 default: 2639 /* This should not happen */ 2640 return (NULL); 2641 } 2642 if (*stcb == NULL) { 2643 /* Yep, lets check */ 2644 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL); 2645 if (*stcb == NULL) { 2646 /* 2647 * We should have only got back the same inp. If we 2648 * got back a different ep we have a problem. The 2649 * original findep got back l_inp and now 2650 */ 2651 if (l_inp != *inp_p) { 2652 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2653 } 2654 } else { 2655 if (*locked_tcb == NULL) { 2656 /* 2657 * In this case we found the assoc only 2658 * after we locked the create lock. This 2659 * means we are in a colliding case and we 2660 * must make sure that we unlock the tcb if 2661 * its one of the cases where we throw away 2662 * the incoming packets. 
2663 */ 2664 *locked_tcb = *stcb; 2665 2666 /* 2667 * We must also increment the inp ref count 2668 * since the ref_count flags was set when we 2669 * did not find the TCB, now we found it 2670 * which reduces the refcount.. we must 2671 * raise it back out to balance it all :-) 2672 */ 2673 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2674 if ((*stcb)->sctp_ep != l_inp) { 2675 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n", 2676 (void *)(*stcb)->sctp_ep, (void *)l_inp); 2677 } 2678 } 2679 } 2680 } 2681 cookie_len -= SCTP_SIGNATURE_SIZE; 2682 if (*stcb == NULL) { 2683 /* this is the "normal" case... get a new TCB */ 2684 *stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh, 2685 cookie, cookie_len, *inp_p, 2686 netp, to, ¬ification, 2687 auth_skipped, auth_offset, auth_len, 2688 mflowtype, mflowid, 2689 vrf_id, port); 2690 } else { 2691 /* this is abnormal... cookie-echo on existing TCB */ 2692 had_a_existing_tcb = 1; 2693 *stcb = sctp_process_cookie_existing(m, iphlen, offset, 2694 src, dst, sh, 2695 cookie, cookie_len, *inp_p, *stcb, netp, to, 2696 ¬ification, auth_skipped, auth_offset, auth_len, 2697 mflowtype, mflowid, 2698 vrf_id, port); 2699 } 2700 2701 if (*stcb == NULL) { 2702 /* still no TCB... must be bad cookie-echo */ 2703 return (NULL); 2704 } 2705 if (*netp != NULL) { 2706 (*netp)->flowtype = mflowtype; 2707 (*netp)->flowid = mflowid; 2708 } 2709 /* 2710 * Ok, we built an association so confirm the address we sent the 2711 * INIT-ACK to. 2712 */ 2713 netl = sctp_findnet(*stcb, to); 2714 /* 2715 * This code should in theory NOT run but 2716 */ 2717 if (netl == NULL) { 2718 /* TSNH! Huh, why do I need to add this address here? 
*/ 2719 if (sctp_add_remote_addr(*stcb, to, NULL, SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) { 2720 return (NULL); 2721 } 2722 netl = sctp_findnet(*stcb, to); 2723 } 2724 if (netl) { 2725 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2726 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2727 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2728 netl); 2729 send_int_conf = 1; 2730 } 2731 } 2732 sctp_start_net_timers(*stcb); 2733 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2734 if (!had_a_existing_tcb || 2735 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2736 /* 2737 * If we have a NEW cookie or the connect never 2738 * reached the connected state during collision we 2739 * must do the TCP accept thing. 2740 */ 2741 struct socket *so, *oso; 2742 struct sctp_inpcb *inp; 2743 2744 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2745 /* 2746 * For a restart we will keep the same 2747 * socket, no need to do anything. I THINK!! 2748 */ 2749 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2750 if (send_int_conf) { 2751 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2752 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2753 } 2754 return (m); 2755 } 2756 oso = (*inp_p)->sctp_socket; 2757 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2758 SCTP_TCB_UNLOCK((*stcb)); 2759 CURVNET_SET(oso->so_vnet); 2760 so = sonewconn(oso, 0 2761 ); 2762 CURVNET_RESTORE(); 2763 SCTP_TCB_LOCK((*stcb)); 2764 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2765 2766 if (so == NULL) { 2767 struct mbuf *op_err; 2768 2769 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2770 struct socket *pcb_so; 2771 2772 #endif 2773 /* Too many sockets */ 2774 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2775 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 2776 sctp_abort_association(*inp_p, NULL, m, iphlen, 2777 src, dst, sh, op_err, 2778 mflowtype, mflowid, 2779 vrf_id, port); 2780 #if defined(__APPLE__) || 
defined(SCTP_SO_LOCK_TESTING) 2781 pcb_so = SCTP_INP_SO(*inp_p); 2782 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2783 SCTP_TCB_UNLOCK((*stcb)); 2784 SCTP_SOCKET_LOCK(pcb_so, 1); 2785 SCTP_TCB_LOCK((*stcb)); 2786 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2787 #endif 2788 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, 2789 SCTP_FROM_SCTP_INPUT + SCTP_LOC_23); 2790 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2791 SCTP_SOCKET_UNLOCK(pcb_so, 1); 2792 #endif 2793 return (NULL); 2794 } 2795 inp = (struct sctp_inpcb *)so->so_pcb; 2796 SCTP_INP_INCR_REF(inp); 2797 /* 2798 * We add the unbound flag here so that if we get an 2799 * soabort() before we get the move_pcb done, we 2800 * will properly cleanup. 2801 */ 2802 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2803 SCTP_PCB_FLAGS_CONNECTED | 2804 SCTP_PCB_FLAGS_IN_TCPPOOL | 2805 SCTP_PCB_FLAGS_UNBOUND | 2806 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2807 SCTP_PCB_FLAGS_DONT_WAKE); 2808 inp->sctp_features = (*inp_p)->sctp_features; 2809 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; 2810 inp->sctp_socket = so; 2811 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2812 inp->max_cwnd = (*inp_p)->max_cwnd; 2813 inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off; 2814 inp->ecn_supported = (*inp_p)->ecn_supported; 2815 inp->prsctp_supported = (*inp_p)->prsctp_supported; 2816 inp->auth_supported = (*inp_p)->auth_supported; 2817 inp->asconf_supported = (*inp_p)->asconf_supported; 2818 inp->reconfig_supported = (*inp_p)->reconfig_supported; 2819 inp->nrsack_supported = (*inp_p)->nrsack_supported; 2820 inp->pktdrop_supported = (*inp_p)->pktdrop_supported; 2821 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2822 inp->sctp_context = (*inp_p)->sctp_context; 2823 inp->local_strreset_support = (*inp_p)->local_strreset_support; 2824 inp->fibnum = (*inp_p)->fibnum; 2825 inp->inp_starting_point_for_iterator = NULL; 2826 /* 2827 * copy in the authentication parameters from the 2828 
* original endpoint 2829 */ 2830 if (inp->sctp_ep.local_hmacs) 2831 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2832 inp->sctp_ep.local_hmacs = 2833 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2834 if (inp->sctp_ep.local_auth_chunks) 2835 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2836 inp->sctp_ep.local_auth_chunks = 2837 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2838 2839 /* 2840 * Now we must move it from one hash table to 2841 * another and get the tcb in the right place. 2842 */ 2843 2844 /* 2845 * This is where the one-2-one socket is put into 2846 * the accept state waiting for the accept! 2847 */ 2848 if (*stcb) { 2849 (*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE; 2850 } 2851 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2852 2853 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2854 SCTP_TCB_UNLOCK((*stcb)); 2855 2856 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, 2857 0); 2858 SCTP_TCB_LOCK((*stcb)); 2859 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2860 2861 2862 /* 2863 * now we must check to see if we were aborted while 2864 * the move was going on and the lock/unlock 2865 * happened. 2866 */ 2867 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2868 /* 2869 * yep it was, we leave the assoc attached 2870 * to the socket since the sctp_inpcb_free() 2871 * call will send an abort for us. 
/*
 * Handle a COOKIE-ACK chunk: the peer accepted our COOKIE-ECHO, so the
 * association moves to the OPEN state (when we are in COOKIE-ECHOED).
 * Also updates the RTO estimate, notifies the ULP, starts HB/autoclose
 * timers and kicks any pending ASCONFs.  Called with the TCB lock held.
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if ((stcb == NULL) || (net == NULL)) {
		return;
	}
	asoc = &stcb->asoc;

	/* also stops any INIT timers (collision cases) */
	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		sctp_start_net_timers(stcb);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			/* a shutdown was requested while handshaking */
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);
		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		if (asoc->overall_error_count == 0) {
			/* no retransmits: time_entered gives a clean RTT sample */
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy,
			    SCTP_RTT_FROM_NON_DATA);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Lock-order dance: take a refcnt so the TCB cannot
			 * be freed, drop the TCB lock to acquire the socket
			 * lock, then re-take the TCB lock.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
				soisconnected(stcb->sctp_socket);
			}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/*
			 * We don't need to do the asconf thing, nor hb or
			 * autoclose if the socket is closed.
			 */
			goto closed_socket;
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);

		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.asconf_supported == 1) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
closed_socket:
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
	}
}
/*
 * Handle an ECN-Echo (ECNE) chunk: the peer observed a CE mark on one of
 * our packets.  Reduce cwnd via the pluggable CC module (at most once per
 * window of data) and always answer with a CWR chunk so the peer stops
 * echoing.  Supports both the current and the old (no packet count) ECNE
 * chunk formats.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	struct sctp_ecne_chunk bkup;
	uint8_t override_bit;
	uint32_t tsn, window_data_tsn;
	int len;
	unsigned int pkt_cnt;

	len = ntohs(cp->ch.chunk_length);
	if ((len != sizeof(struct sctp_ecne_chunk)) &&
	    (len != sizeof(struct old_sctp_ecne_chunk))) {
		/* malformed length: ignore the chunk */
		return;
	}
	if (len == sizeof(struct old_sctp_ecne_chunk)) {
		/* Its the old format */
		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
		/* old format carries no count; treat it as one packet */
		bkup.num_pkts_since_cwr = htonl(1);
		cp = &bkup;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
	/* highest TSN handed out so far bounds the current "window" */
	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
	if (lchk == NULL) {
		window_data_tsn = stcb->asoc.sending_seq - 1;
	} else {
		window_data_tsn = lchk->rec.data.TSN_seq;
	}

	/* Find where it was sent to if possible. */
	net = NULL;
	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
		if (lchk->rec.data.TSN_seq == tsn) {
			net = lchk->whoTo;
			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
			break;
		}
		if (SCTP_TSN_GT(lchk->rec.data.TSN_seq, tsn)) {
			/* sent_queue is TSN-ordered: it is not here */
			break;
		}
	}
	if (net == NULL) {
		/*
		 * What to do. A previous send of a CWR was possibly lost.
		 * See how old it is, we may have it marked on the actual
		 * net.
		 */
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if (tsn == net->last_cwr_tsn) {
				/* Found him, send it off */
				break;
			}
		}
		if (net == NULL) {
			/*
			 * If we reach here, we need to send a special CWR
			 * that says hey, we did this a long time ago and
			 * you lost the response.
			 */
			net = TAILQ_FIRST(&stcb->asoc.nets);
			if (net == NULL) {
				/* TSNH */
				return;
			}
			override_bit = SCTP_CWR_REDUCE_OVERRIDE;
		} else {
			override_bit = 0;
		}
	} else {
		override_bit = 0;
	}
	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
	    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
		/*
		 * We reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the window_data_tsn
		 */
		net->cwr_window_tsn = window_data_tsn;
		net->ecn_ce_pkt_cnt += pkt_cnt;
		net->lost_cnt = pkt_cnt;
		net->last_cwr_tsn = tsn;
	} else {
		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
		    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
			/*
			 * Another loss in the same window update how many
			 * marks/packets lost we have had.
			 */
			int cnt = 1;

			if (pkt_cnt > net->lost_cnt) {
				/* Should be the case */
				cnt = (pkt_cnt - net->lost_cnt);
				net->ecn_ce_pkt_cnt += cnt;
			}
			net->lost_cnt = pkt_cnt;
			net->last_cwr_tsn = tsn;
			/*
			 * Most CC functions will ignore this call, since we
			 * are in-window yet of the initial CE the peer saw.
			 */
			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
		}
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer. Note we set the override when we
	 * could not find the TSN on the chunk or the destination network.
	 */
	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
}
the window_data_tsn 3107 */ 3108 net->cwr_window_tsn = window_data_tsn; 3109 net->ecn_ce_pkt_cnt += pkt_cnt; 3110 net->lost_cnt = pkt_cnt; 3111 net->last_cwr_tsn = tsn; 3112 } else { 3113 override_bit |= SCTP_CWR_IN_SAME_WINDOW; 3114 if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) && 3115 ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) { 3116 /* 3117 * Another loss in the same window update how many 3118 * marks/packets lost we have had. 3119 */ 3120 int cnt = 1; 3121 3122 if (pkt_cnt > net->lost_cnt) { 3123 /* Should be the case */ 3124 cnt = (pkt_cnt - net->lost_cnt); 3125 net->ecn_ce_pkt_cnt += cnt; 3126 } 3127 net->lost_cnt = pkt_cnt; 3128 net->last_cwr_tsn = tsn; 3129 /* 3130 * Most CC functions will ignore this call, since we 3131 * are in-window yet of the initial CE the peer saw. 3132 */ 3133 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt); 3134 } 3135 } 3136 /* 3137 * We always send a CWR this way if our previous one was lost our 3138 * peer will get an update, or if it is not time again to reduce we 3139 * still get the cwr to the peer. Note we set the override when we 3140 * could not find the TSN on the chunk or the destination network. 3141 */ 3142 sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit); 3143 } 3144 3145 static void 3146 sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net) 3147 { 3148 /* 3149 * Here we get a CWR from the peer. We must look in the outqueue and 3150 * make sure that we have a covered ECNE in the control chunk part. 3151 * If so remove it. 
3152 */ 3153 struct sctp_tmit_chunk *chk; 3154 struct sctp_ecne_chunk *ecne; 3155 int override; 3156 uint32_t cwr_tsn; 3157 3158 cwr_tsn = ntohl(cp->tsn); 3159 override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE; 3160 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 3161 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) { 3162 continue; 3163 } 3164 if ((override == 0) && (chk->whoTo != net)) { 3165 /* Must be from the right src unless override is set */ 3166 continue; 3167 } 3168 ecne = mtod(chk->data, struct sctp_ecne_chunk *); 3169 if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) { 3170 /* this covers this ECNE, we can remove it */ 3171 stcb->asoc.ecn_echo_cnt_onq--; 3172 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, 3173 sctp_next); 3174 sctp_m_freem(chk->data); 3175 chk->data = NULL; 3176 stcb->asoc.ctrl_queue_cnt--; 3177 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 3178 if (override == 0) { 3179 break; 3180 } 3181 } 3182 } 3183 } 3184 3185 static void 3186 sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED, 3187 struct sctp_tcb *stcb, struct sctp_nets *net) 3188 { 3189 struct sctp_association *asoc; 3190 3191 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3192 struct socket *so; 3193 3194 #endif 3195 3196 SCTPDBG(SCTP_DEBUG_INPUT2, 3197 "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n"); 3198 if (stcb == NULL) 3199 return; 3200 3201 asoc = &stcb->asoc; 3202 /* process according to association state */ 3203 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) { 3204 /* unexpected SHUTDOWN-COMPLETE... so ignore... 
*/ 3205 SCTPDBG(SCTP_DEBUG_INPUT2, 3206 "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n"); 3207 SCTP_TCB_UNLOCK(stcb); 3208 return; 3209 } 3210 /* notify upper layer protocol */ 3211 if (stcb->sctp_socket) { 3212 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 3213 } 3214 #ifdef INVARIANTS 3215 if (!TAILQ_EMPTY(&asoc->send_queue) || 3216 !TAILQ_EMPTY(&asoc->sent_queue) || 3217 !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { 3218 panic("Queues are not empty when handling SHUTDOWN-COMPLETE"); 3219 } 3220 #endif 3221 /* stop the timer */ 3222 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, 3223 SCTP_FROM_SCTP_INPUT + SCTP_LOC_24); 3224 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 3225 /* free the TCB */ 3226 SCTPDBG(SCTP_DEBUG_INPUT2, 3227 "sctp_handle_shutdown_complete: calls free-asoc\n"); 3228 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3229 so = SCTP_INP_SO(stcb->sctp_ep); 3230 atomic_add_int(&stcb->asoc.refcnt, 1); 3231 SCTP_TCB_UNLOCK(stcb); 3232 SCTP_SOCKET_LOCK(so, 1); 3233 SCTP_TCB_LOCK(stcb); 3234 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3235 #endif 3236 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 3237 SCTP_FROM_SCTP_INPUT + SCTP_LOC_25); 3238 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3239 SCTP_SOCKET_UNLOCK(so, 1); 3240 #endif 3241 return; 3242 } 3243 3244 static int 3245 process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc, 3246 struct sctp_nets *net, uint8_t flg) 3247 { 3248 switch (desc->chunk_type) { 3249 case SCTP_DATA: 3250 /* find the tsn to resend (possibly */ 3251 { 3252 uint32_t tsn; 3253 struct sctp_tmit_chunk *tp1; 3254 3255 tsn = ntohl(desc->tsn_ifany); 3256 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 3257 if (tp1->rec.data.TSN_seq == tsn) { 3258 /* found it */ 3259 break; 3260 } 3261 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, tsn)) { 3262 /* not found */ 3263 tp1 = NULL; 3264 break; 
/*
 * Process one chunk descriptor from a PACKET-DROPPED report: arrange for
 * the described chunk to be retransmitted (DATA is marked for fast
 * retransmit; control chunks are re-queued or re-sent directly).
 * Returns 0 normally, -1 when the reflected DATA bytes do not match what
 * we sent (report is bogus and should not be trusted further).
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			/* first pass: rely on the TSN ordering of sent_queue */
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, tsn)) {
					/* not found */
					tp1 = NULL;
					break;
				}
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->rec.data.TSN_seq == tsn) {
						/* found it */
						break;
					}
				}
			}
			if (tp1 == NULL) {
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				if (((flg & SCTP_BADCRC) == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					/* report from the peer itself with
					 * good CRC: nothing to react to */
					return (0);
				}
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/* compare the reflected payload prefix to
				 * what we actually sent */
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					if (tp1->whoTo->rto_needed == 0) {
						tp1->whoTo->rto_needed = 1;
					}
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo,
				    SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uint32_t) (uintptr_t) stcb,
					    tp1->rec.data.TSN_seq);
				}
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
				tp1->sent = SCTP_DATAGRAM_RESEND;
			}
			{
				/* audit code */
				unsigned int audit;

				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					/* self-heal the counter when auditing
					 * support is not compiled in */
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			struct sctp_tmit_chunk *asconf;

			/* only the first queued ASCONF is in flight */
			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				/* undo the send-count bump: this resend was
				 * not a timeout */
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			struct sctp_tmit_chunk *cookie;

			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_IFORWARD_CUM_TSN:
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}
case SCTP_HEARTBEAT_ACK: 3474 case SCTP_ABORT_ASSOCIATION: 3475 case SCTP_OPERATION_ERROR: 3476 case SCTP_SHUTDOWN_COMPLETE: 3477 case SCTP_ECN_ECHO: 3478 case SCTP_ECN_CWR: 3479 default: 3480 break; 3481 } 3482 return (0); 3483 } 3484 3485 void 3486 sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t * list) 3487 { 3488 uint32_t i; 3489 uint16_t temp; 3490 3491 /* 3492 * We set things to 0xffffffff since this is the last delivered 3493 * sequence and we will be sending in 0 after the reset. 3494 */ 3495 3496 if (number_entries) { 3497 for (i = 0; i < number_entries; i++) { 3498 temp = ntohs(list[i]); 3499 if (temp >= stcb->asoc.streamincnt) { 3500 continue; 3501 } 3502 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffffffff; 3503 } 3504 } else { 3505 list = NULL; 3506 for (i = 0; i < stcb->asoc.streamincnt; i++) { 3507 stcb->asoc.strmin[i].last_sequence_delivered = 0xffffffff; 3508 } 3509 } 3510 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3511 } 3512 3513 static void 3514 sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t * list) 3515 { 3516 uint32_t i; 3517 uint16_t temp; 3518 3519 if (number_entries > 0) { 3520 for (i = 0; i < number_entries; i++) { 3521 temp = ntohs(list[i]); 3522 if (temp >= stcb->asoc.streamoutcnt) { 3523 /* no such stream */ 3524 continue; 3525 } 3526 stcb->asoc.strmout[temp].next_sequence_send = 0; 3527 } 3528 } else { 3529 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3530 stcb->asoc.strmout[i].next_sequence_send = 0; 3531 } 3532 } 3533 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3534 } 3535 3536 static void 3537 sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t * list) 3538 { 3539 uint32_t i; 3540 uint16_t temp; 3541 3542 if (number_entries > 0) { 3543 for (i = 0; i < number_entries; i++) { 3544 temp = ntohs(list[i]); 3545 if (temp >= 
stcb->asoc.streamoutcnt) { 3546 /* no such stream */ 3547 continue; 3548 } 3549 stcb->asoc.strmout[temp].state = SCTP_STREAM_OPEN; 3550 } 3551 } else { 3552 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3553 stcb->asoc.strmout[i].state = SCTP_STREAM_OPEN; 3554 } 3555 } 3556 } 3557 3558 3559 struct sctp_stream_reset_request * 3560 sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk) 3561 { 3562 struct sctp_association *asoc; 3563 struct sctp_chunkhdr *ch; 3564 struct sctp_stream_reset_request *r; 3565 struct sctp_tmit_chunk *chk; 3566 int len, clen; 3567 3568 asoc = &stcb->asoc; 3569 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 3570 asoc->stream_reset_outstanding = 0; 3571 return (NULL); 3572 } 3573 if (stcb->asoc.str_reset == NULL) { 3574 asoc->stream_reset_outstanding = 0; 3575 return (NULL); 3576 } 3577 chk = stcb->asoc.str_reset; 3578 if (chk->data == NULL) { 3579 return (NULL); 3580 } 3581 if (bchk) { 3582 /* he wants a copy of the chk pointer */ 3583 *bchk = chk; 3584 } 3585 clen = chk->send_size; 3586 ch = mtod(chk->data, struct sctp_chunkhdr *); 3587 r = (struct sctp_stream_reset_request *)(ch + 1); 3588 if (ntohl(r->request_seq) == seq) { 3589 /* found it */ 3590 return (r); 3591 } 3592 len = SCTP_SIZE32(ntohs(r->ph.param_length)); 3593 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) { 3594 /* move to the next one, there can only be a max of two */ 3595 r = (struct sctp_stream_reset_request *)((caddr_t)r + len); 3596 if (ntohl(r->request_seq) == seq) { 3597 return (r); 3598 } 3599 } 3600 /* that seq is not here */ 3601 return (NULL); 3602 } 3603 3604 static void 3605 sctp_clean_up_stream_reset(struct sctp_tcb *stcb) 3606 { 3607 struct sctp_association *asoc; 3608 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset; 3609 3610 if (stcb->asoc.str_reset == NULL) { 3611 return; 3612 } 3613 asoc = &stcb->asoc; 3614 3615 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, 3616 chk->whoTo, 
/*
 * Process the peer's response to one of our outstanding stream-reset
 * (RE-CONFIG) requests.  Matches the response sequence number against
 * the pending request, applies the outcome (performed/denied/failed/
 * in-progress) per request type, notifies the ULP and cleans up the
 * pending chunk once nothing is outstanding.  Returns 1 if processing a
 * TSN reset caused the association to be aborted, 0 otherwise.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_request *req_param;
	struct sctp_stream_reset_out_request *req_out_param;
	struct sctp_stream_reset_in_request *req_in_param;
	uint32_t number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		req_param = sctp_find_stream_reset(stcb, seq, &chk);
		if (req_param != NULL) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(req_param->ph.param_type);
			lparm_len = ntohs(req_param->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				int no_clear = 0;

				req_out_param = (struct sctp_stream_reset_out_request *)req_param;
				/* stream ids trail the fixed part of the param */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, req_out_param->list_of_streams);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				} else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) {
					/*
					 * Set it up so we don't stop
					 * retransmitting
					 */
					asoc->stream_reset_outstanding++;
					stcb->asoc.str_reset_seq_out--;
					asoc->stream_reset_out_is_outstanding = 1;
					no_clear = 1;
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
				if (no_clear == 0) {
					/* streams leave the RESET_PENDING state */
					sctp_reset_clear_pending(stcb, number_entries, req_out_param->list_of_streams);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				req_in_param = (struct sctp_stream_reset_in_request *)req_param;
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
					    number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
					    number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
				/* PERFORMED needs no action here: the peer
				 * will send us an OUT request */
			} else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
				/* Ok we now may have more streams */
				int num_stream;

				num_stream = stcb->asoc.strm_pending_add_size;
				if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
					/* TSNH */
					num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
				}
				stcb->asoc.strm_pending_add_size = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					/* Put the new streams into effect */
					int i;

					for (i = asoc->streamoutcnt; i < (asoc->streamoutcnt + num_stream); i++) {
						asoc->strmout[i].state = SCTP_STREAM_OPEN;
					}
					asoc->streamoutcnt += num_stream;
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_DENIED);
				} else {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_FAILED);
				}
			} else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_DENIED);
				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_FAILED);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;

				if (respin == NULL) {
					/* huh ? */
					return (0);
				}
				if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) {
					/* too short to carry the TSN fields */
					return (0);
				}
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					/* flush reordering state up to the
					 * new cumulative TSN via a synthetic
					 * FORWARD-TSN */
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						return (1);
					}
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
					}
					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
					    SCTP_ASSOC_RESET_DENIED);
				} else {
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
					    SCTP_ASSOC_RESET_FAILED);
				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	if (asoc->stream_reset_outstanding == 0) {
		sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
	}
	return (0);
}
request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	if (asoc->stream_reset_outstanding == 0) {
		sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
	}
	return (0);
}

/*
 * Handle a stream reset IN-request: the peer asks us to reset some (or
 * all) of OUR outgoing streams.  Open streams named in the request are
 * marked SCTP_STREAM_RESET_PENDING and a result is appended to the
 * response chunk 'chk'.  A request one or two sequence numbers behind
 * the expected one just gets the previously saved action echoed back
 * (our earlier response was presumably lost).
 */
static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* shift the action history before recording the new result */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			/* feature not enabled locally */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* Can't do it, since they exceeded our buffer size */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			if (number_entries) {
				/* validate every listed stream before touching any state */
				for (i = 0; i < number_entries; i++) {
					temp = ntohs(req->list_of_streams[i]);
					if (temp >= stcb->asoc.streamoutcnt) {
						asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
						goto bad_boy;
					}
					/* store back in host byte order for the loop below */
					req->list_of_streams[i] = temp;
				}
				for (i = 0; i < number_entries; i++) {
					if (stcb->asoc.strmout[req->list_of_streams[i]].state == SCTP_STREAM_OPEN) {
						stcb->asoc.strmout[req->list_of_streams[i]].state = SCTP_STREAM_RESET_PENDING;
					}
				}
			} else {
				/* It's all of them */
				for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
					if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN)
						stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
				}
			}
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		}
bad_boy:
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* one behind: re-echo the last action */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* two behind: re-echo the action before that */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
}

/*
 * Handle a TSN reset request: reset stream sequence numbers in both
 * directions and jump the TSN.  Returns 1 if processing the implied
 * FORWARD-TSN aborted the association, 0 otherwise.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/*
			 * Run the TSN jump through the regular FORWARD-TSN
			 * machinery using a locally built chunk.
			 */
			fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
			fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
			fwdtsn.ch.chunk_flags = 0;
			fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
			sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
			if (abort_flag) {
				/* association was aborted while consuming the fwd-tsn */
				return (1);
			}
			asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
			}
			/* slide the delivery state and wipe both mapping arrays */
			asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
			asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
			atomic_add_int(&asoc->sending_seq, 1);
			/* save off historical data for retrans */
			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
			asoc->last_sending_seq[0] = asoc->sending_seq;
			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
			sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
			sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
		}
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* one behind: re-echo the last TSN result */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		/* two behind: re-echo the TSN result before that */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	return (0);
}

/*
 * Handle an OUT-request: the peer is resetting its outgoing (our
 * incoming) streams.  If every TSN up to the requested reset point has
 * already arrived we reset the incoming streams immediately; otherwise
 * the request is queued on asoc->resetHead until the missing TSNs show
 * up.  Duplicate sequence numbers echo the saved action.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			/* feature not enabled locally */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* request was truncated into our buffer; can't trust the list */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
				return;
			}
			liste->seq = seq;
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_IN_PROGRESS;
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}

/*
 * Peer requests additional streams in its outgoing direction, which are
 * OUR incoming streams: grow the strmin array if the new total stays
 * within max_inbound_streams and 0xffff, migrating any queued data from
 * the old array.
 */
static void
sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 */
	uint32_t num_stream, i;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_queued_to_read *ctl, *nctl;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		num_stream += stcb->asoc.streamincnt;
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
		    (num_stream > 0xffff)) {
			/* We must reject it, they ask for too many */
	denied:
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Ok, we can do that :-) */
			struct sctp_stream_in *oldstrm;

			/* save off the old */
			oldstrm = stcb->asoc.strmin;
			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
			    (num_stream * sizeof(struct sctp_stream_in)),
			    SCTP_M_STRMI);
			if (stcb->asoc.strmin == NULL) {
				/* allocation failed: restore old array and deny */
				stcb->asoc.strmin = oldstrm;
				goto denied;
			}
			/* copy off the old data */
			for (i = 0; i < stcb->asoc.streamincnt; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
				stcb->asoc.strmin[i].stream_no = i;
				stcb->asoc.strmin[i].last_sequence_delivered =
oldstrm[i].last_sequence_delivered;
				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
				stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started;
				/* now anything on those queues? */
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next_instrm, nctl) {
					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next_instrm);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next_instrm);
				}
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].uno_inqueue, next_instrm, nctl) {
					TAILQ_REMOVE(&oldstrm[i].uno_inqueue, ctl, next_instrm);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].uno_inqueue, ctl, next_instrm);
				}
			}
			/* Init the new streams */
			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
				stcb->asoc.strmin[i].stream_no = i;
				/* 0xffffffff: nothing delivered yet on a fresh stream */
				stcb->asoc.strmin[i].last_sequence_delivered = 0xffffffff;
				stcb->asoc.strmin[i].pd_api_started = 0;
				stcb->asoc.strmin[i].delivery_started = 0;
			}
			SCTP_FREE(oldstrm, SCTP_M_STRMI);
			/* update the size */
			stcb->asoc.streamincnt = num_stream;
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);

	}
}

/*
 * Peer asks us to add streams in OUR outgoing direction: if nothing
 * else is outstanding and the total stays below 0x10000, issue our own
 * add-stream request via sctp_send_str_reset_req().
 */
static void
sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 */
	uint16_t num_stream;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_outstanding) {
			/* We must reject it, we have something pending */
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		} else {
			/* Ok, we can do that :-) */
			int mychk;

			mychk = stcb->asoc.streamoutcnt;
			mychk += num_stream;
			if (mychk < 0x10000) {
				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
				if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 1, num_stream, 0, 1)) {
					stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				}
			} else {
				/* would overflow the 16-bit stream id space */
				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
			}
		}
		sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
4142 */ 4143 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 4144 } else if ((asoc->str_reset_seq_in - 2) == seq) { 4145 /* 4146 * two seq back, just echo back last action since my 4147 * response was lost. 4148 */ 4149 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 4150 } else { 4151 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO); 4152 } 4153 } 4154 4155 #ifdef __GNUC__ 4156 __attribute__((noinline)) 4157 #endif 4158 static int 4159 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset, 4160 struct sctp_chunkhdr *ch_req) 4161 { 4162 uint16_t remaining_length, param_len, ptype; 4163 struct sctp_paramhdr pstore; 4164 uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE]; 4165 uint32_t seq = 0; 4166 int num_req = 0; 4167 int trunc = 0; 4168 struct sctp_tmit_chunk *chk; 4169 struct sctp_chunkhdr *ch; 4170 struct sctp_paramhdr *ph; 4171 int ret_code = 0; 4172 int num_param = 0; 4173 4174 /* now it may be a reset or a reset-response */ 4175 remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr); 4176 4177 /* setup for adding the response */ 4178 sctp_alloc_a_chunk(stcb, chk); 4179 if (chk == NULL) { 4180 return (ret_code); 4181 } 4182 chk->copy_by_ref = 0; 4183 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 4184 chk->rec.chunk_id.can_take_data = 0; 4185 chk->flags = 0; 4186 chk->asoc = &stcb->asoc; 4187 chk->no_fr_allowed = 0; 4188 chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr); 4189 chk->book_size_scale = 0; 4190 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 4191 if (chk->data == NULL) { 4192 strres_nochunk: 4193 if (chk->data) { 4194 sctp_m_freem(chk->data); 4195 chk->data = NULL; 4196 } 4197 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 4198 return (ret_code); 4199 } 4200 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 4201 4202 /* setup chunk parameters */ 4203 chk->sent = SCTP_DATAGRAM_UNSENT; 4204 chk->snd_count = 0; 4205 
chk->whoTo = NULL; 4206 4207 ch = mtod(chk->data, struct sctp_chunkhdr *); 4208 ch->chunk_type = SCTP_STREAM_RESET; 4209 ch->chunk_flags = 0; 4210 ch->chunk_length = htons(chk->send_size); 4211 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 4212 offset += sizeof(struct sctp_chunkhdr); 4213 while (remaining_length >= sizeof(struct sctp_paramhdr)) { 4214 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore); 4215 if (ph == NULL) { 4216 /* TSNH */ 4217 break; 4218 } 4219 param_len = ntohs(ph->param_length); 4220 if ((param_len > remaining_length) || 4221 (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) { 4222 /* bad parameter length */ 4223 break; 4224 } 4225 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)), 4226 (uint8_t *) & cstore); 4227 if (ph == NULL) { 4228 /* TSNH */ 4229 break; 4230 } 4231 ptype = ntohs(ph->param_type); 4232 num_param++; 4233 if (param_len > sizeof(cstore)) { 4234 trunc = 1; 4235 } else { 4236 trunc = 0; 4237 } 4238 if (num_param > SCTP_MAX_RESET_PARAMS) { 4239 /* hit the max of parameters already sorry.. 
			 */
			break;
		}
		/*
		 * Note the cross-over: the peer adding streams in ITS
		 * outgoing direction (ADD_OUT) grows OUR incoming side and
		 * vice versa, hence the seemingly swapped handler names.
		 */
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			if (param_len < sizeof(struct sctp_stream_reset_out_request)) {
				break;
			}
			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;
			req_in = (struct sctp_stream_reset_in_request *)ph;
			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* association aborted during the TSN reset */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			if (param_len < sizeof(struct sctp_stream_reset_response)) {
				break;
			}
			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* unknown parameter type: stop parsing */
			break;
		}
		offset += SCTP_SIZE32(param_len);
		if (remaining_length >= SCTP_SIZE32(param_len)) {
			remaining_length -= SCTP_SIZE32(param_len);
		} else {
			remaining_length = 0;
		}
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}

/*
 * Handle a router or endpoints report of a packet loss, there are two ways
 * to handle this, either we get the whole packet and must dissect it
 * ourselves (possibly with truncation and or corruption) or it is a summary
 * from a middle box that did the dissecting for us.
4334 */ 4335 static void 4336 sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, 4337 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit) 4338 { 4339 uint32_t bottle_bw, on_queue; 4340 uint16_t trunc_len; 4341 unsigned int chlen; 4342 unsigned int at; 4343 struct sctp_chunk_desc desc; 4344 struct sctp_chunkhdr *ch; 4345 4346 chlen = ntohs(cp->ch.chunk_length); 4347 chlen -= sizeof(struct sctp_pktdrop_chunk); 4348 /* XXX possible chlen underflow */ 4349 if (chlen == 0) { 4350 ch = NULL; 4351 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) 4352 SCTP_STAT_INCR(sctps_pdrpbwrpt); 4353 } else { 4354 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr)); 4355 chlen -= sizeof(struct sctphdr); 4356 /* XXX possible chlen underflow */ 4357 memset(&desc, 0, sizeof(desc)); 4358 } 4359 trunc_len = (uint16_t) ntohs(cp->trunc_len); 4360 if (trunc_len > limit) { 4361 trunc_len = limit; 4362 } 4363 /* now the chunks themselves */ 4364 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) { 4365 desc.chunk_type = ch->chunk_type; 4366 /* get amount we need to move */ 4367 at = ntohs(ch->chunk_length); 4368 if (at < sizeof(struct sctp_chunkhdr)) { 4369 /* corrupt chunk, maybe at the end? */ 4370 SCTP_STAT_INCR(sctps_pdrpcrupt); 4371 break; 4372 } 4373 if (trunc_len == 0) { 4374 /* we are supposed to have all of it */ 4375 if (at > chlen) { 4376 /* corrupt skip it */ 4377 SCTP_STAT_INCR(sctps_pdrpcrupt); 4378 break; 4379 } 4380 } else { 4381 /* is there enough of it left ? */ 4382 if (desc.chunk_type == SCTP_DATA) { 4383 if (chlen < (sizeof(struct sctp_data_chunk) + 4384 sizeof(desc.data_bytes))) { 4385 break; 4386 } 4387 } else { 4388 if (chlen < sizeof(struct sctp_chunkhdr)) { 4389 break; 4390 } 4391 } 4392 } 4393 if (desc.chunk_type == SCTP_DATA) { 4394 /* can we get out the tsn? 
*/ 4395 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 4396 SCTP_STAT_INCR(sctps_pdrpmbda); 4397 4398 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) { 4399 /* yep */ 4400 struct sctp_data_chunk *dcp; 4401 uint8_t *ddp; 4402 unsigned int iii; 4403 4404 dcp = (struct sctp_data_chunk *)ch; 4405 ddp = (uint8_t *) (dcp + 1); 4406 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) { 4407 desc.data_bytes[iii] = ddp[iii]; 4408 } 4409 desc.tsn_ifany = dcp->dp.tsn; 4410 } else { 4411 /* nope we are done. */ 4412 SCTP_STAT_INCR(sctps_pdrpnedat); 4413 break; 4414 } 4415 } else { 4416 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 4417 SCTP_STAT_INCR(sctps_pdrpmbct); 4418 } 4419 4420 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) { 4421 SCTP_STAT_INCR(sctps_pdrppdbrk); 4422 break; 4423 } 4424 if (SCTP_SIZE32(at) > chlen) { 4425 break; 4426 } 4427 chlen -= SCTP_SIZE32(at); 4428 if (chlen < sizeof(struct sctp_chunkhdr)) { 4429 /* done, none left */ 4430 break; 4431 } 4432 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at)); 4433 } 4434 /* Now update any rwnd --- possibly */ 4435 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) { 4436 /* From a peer, we get a rwnd report */ 4437 uint32_t a_rwnd; 4438 4439 SCTP_STAT_INCR(sctps_pdrpfehos); 4440 4441 bottle_bw = ntohl(cp->bottle_bw); 4442 on_queue = ntohl(cp->current_onq); 4443 if (bottle_bw && on_queue) { 4444 /* a rwnd report is in here */ 4445 if (bottle_bw > on_queue) 4446 a_rwnd = bottle_bw - on_queue; 4447 else 4448 a_rwnd = 0; 4449 4450 if (a_rwnd == 0) 4451 stcb->asoc.peers_rwnd = 0; 4452 else { 4453 if (a_rwnd > stcb->asoc.total_flight) { 4454 stcb->asoc.peers_rwnd = 4455 a_rwnd - stcb->asoc.total_flight; 4456 } else { 4457 stcb->asoc.peers_rwnd = 0; 4458 } 4459 if (stcb->asoc.peers_rwnd < 4460 stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4461 /* SWS sender side engages */ 4462 stcb->asoc.peers_rwnd = 0; 4463 } 4464 } 4465 } 4466 } else { 4467 SCTP_STAT_INCR(sctps_pdrpfmbox); 
4468 } 4469 4470 /* now middle boxes in sat networks get a cwnd bump */ 4471 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) && 4472 (stcb->asoc.sat_t3_loss_recovery == 0) && 4473 (stcb->asoc.sat_network)) { 4474 /* 4475 * This is debateable but for sat networks it makes sense 4476 * Note if a T3 timer has went off, we will prohibit any 4477 * changes to cwnd until we exit the t3 loss recovery. 4478 */ 4479 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb, 4480 net, cp, &bottle_bw, &on_queue); 4481 } 4482 } 4483 4484 /* 4485 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to 4486 * still contain IP/SCTP header - stcb: is the tcb found for this packet - 4487 * offset: offset into the mbuf chain to first chunkhdr - length: is the 4488 * length of the complete packet outputs: - length: modified to remaining 4489 * length after control processing - netp: modified to new sctp_nets after 4490 * cookie-echo processing - return NULL to discard the packet (ie. no asoc, 4491 * bad packet,...) otherwise return the tcb for this packet 4492 */ 4493 #ifdef __GNUC__ 4494 __attribute__((noinline)) 4495 #endif 4496 static struct sctp_tcb * 4497 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, 4498 struct sockaddr *src, struct sockaddr *dst, 4499 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp, 4500 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen, 4501 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 4502 uint32_t vrf_id, uint16_t port) 4503 { 4504 struct sctp_association *asoc; 4505 struct mbuf *op_err; 4506 char msg[SCTP_DIAG_INFO_LEN]; 4507 uint32_t vtag_in; 4508 int num_chunks = 0; /* number of control chunks processed */ 4509 uint32_t chk_length; 4510 int ret; 4511 int abort_no_unlock = 0; 4512 int ecne_seen = 0; 4513 4514 /* 4515 * How big should this be, and should it be alloc'd? Lets try the 4516 * d-mtu-ceiling for now (2k) and that should hopefully work ... 
4517 * until we get into jumbo grams and such.. 4518 */ 4519 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE]; 4520 struct sctp_tcb *locked_tcb = stcb; 4521 int got_auth = 0; 4522 uint32_t auth_offset = 0, auth_len = 0; 4523 int auth_skipped = 0; 4524 int asconf_cnt = 0; 4525 4526 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4527 struct socket *so; 4528 4529 #endif 4530 4531 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n", 4532 iphlen, *offset, length, (void *)stcb); 4533 4534 /* validate chunk header length... */ 4535 if (ntohs(ch->chunk_length) < sizeof(*ch)) { 4536 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n", 4537 ntohs(ch->chunk_length)); 4538 if (locked_tcb) { 4539 SCTP_TCB_UNLOCK(locked_tcb); 4540 } 4541 return (NULL); 4542 } 4543 /* 4544 * validate the verification tag 4545 */ 4546 vtag_in = ntohl(sh->v_tag); 4547 4548 if (locked_tcb) { 4549 SCTP_TCB_LOCK_ASSERT(locked_tcb); 4550 } 4551 if (ch->chunk_type == SCTP_INITIATION) { 4552 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n", 4553 ntohs(ch->chunk_length), vtag_in); 4554 if (vtag_in != 0) { 4555 /* protocol error- silently discard... */ 4556 SCTP_STAT_INCR(sctps_badvtag); 4557 if (locked_tcb) { 4558 SCTP_TCB_UNLOCK(locked_tcb); 4559 } 4560 return (NULL); 4561 } 4562 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) { 4563 /* 4564 * If there is no stcb, skip the AUTH chunk and process 4565 * later after a stcb is found (to validate the lookup was 4566 * valid. 
4567 */ 4568 if ((ch->chunk_type == SCTP_AUTHENTICATION) && 4569 (stcb == NULL) && 4570 (inp->auth_supported == 1)) { 4571 /* save this chunk for later processing */ 4572 auth_skipped = 1; 4573 auth_offset = *offset; 4574 auth_len = ntohs(ch->chunk_length); 4575 4576 /* (temporarily) move past this chunk */ 4577 *offset += SCTP_SIZE32(auth_len); 4578 if (*offset >= length) { 4579 /* no more data left in the mbuf chain */ 4580 *offset = length; 4581 if (locked_tcb) { 4582 SCTP_TCB_UNLOCK(locked_tcb); 4583 } 4584 return (NULL); 4585 } 4586 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4587 sizeof(struct sctp_chunkhdr), chunk_buf); 4588 } 4589 if (ch == NULL) { 4590 /* Help */ 4591 *offset = length; 4592 if (locked_tcb) { 4593 SCTP_TCB_UNLOCK(locked_tcb); 4594 } 4595 return (NULL); 4596 } 4597 if (ch->chunk_type == SCTP_COOKIE_ECHO) { 4598 goto process_control_chunks; 4599 } 4600 /* 4601 * first check if it's an ASCONF with an unknown src addr we 4602 * need to look inside to find the association 4603 */ 4604 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) { 4605 struct sctp_chunkhdr *asconf_ch = ch; 4606 uint32_t asconf_offset = 0, asconf_len = 0; 4607 4608 /* inp's refcount may be reduced */ 4609 SCTP_INP_INCR_REF(inp); 4610 4611 asconf_offset = *offset; 4612 do { 4613 asconf_len = ntohs(asconf_ch->chunk_length); 4614 if (asconf_len < sizeof(struct sctp_asconf_paramhdr)) 4615 break; 4616 stcb = sctp_findassociation_ep_asconf(m, 4617 *offset, 4618 dst, 4619 sh, &inp, netp, vrf_id); 4620 if (stcb != NULL) 4621 break; 4622 asconf_offset += SCTP_SIZE32(asconf_len); 4623 asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset, 4624 sizeof(struct sctp_chunkhdr), chunk_buf); 4625 } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF); 4626 if (stcb == NULL) { 4627 /* 4628 * reduce inp's refcount if not reduced in 4629 * sctp_findassociation_ep_asconf(). 
4630 */ 4631 SCTP_INP_DECR_REF(inp); 4632 } else { 4633 locked_tcb = stcb; 4634 } 4635 4636 /* now go back and verify any auth chunk to be sure */ 4637 if (auth_skipped && (stcb != NULL)) { 4638 struct sctp_auth_chunk *auth; 4639 4640 auth = (struct sctp_auth_chunk *) 4641 sctp_m_getptr(m, auth_offset, 4642 auth_len, chunk_buf); 4643 got_auth = 1; 4644 auth_skipped = 0; 4645 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, 4646 auth_offset)) { 4647 /* auth HMAC failed so dump it */ 4648 *offset = length; 4649 if (locked_tcb) { 4650 SCTP_TCB_UNLOCK(locked_tcb); 4651 } 4652 return (NULL); 4653 } else { 4654 /* remaining chunks are HMAC checked */ 4655 stcb->asoc.authenticated = 1; 4656 } 4657 } 4658 } 4659 if (stcb == NULL) { 4660 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); 4661 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 4662 msg); 4663 /* no association, so it's out of the blue... */ 4664 sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err, 4665 mflowtype, mflowid, inp->fibnum, 4666 vrf_id, port); 4667 *offset = length; 4668 if (locked_tcb) { 4669 SCTP_TCB_UNLOCK(locked_tcb); 4670 } 4671 return (NULL); 4672 } 4673 asoc = &stcb->asoc; 4674 /* ABORT and SHUTDOWN can use either v_tag... */ 4675 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) || 4676 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) || 4677 (ch->chunk_type == SCTP_PACKET_DROPPED)) { 4678 /* Take the T-bit always into account. */ 4679 if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) && 4680 (vtag_in == asoc->my_vtag)) || 4681 (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) && 4682 (vtag_in == asoc->peer_vtag))) { 4683 /* this is valid */ 4684 } else { 4685 /* drop this packet... 
*/ 4686 SCTP_STAT_INCR(sctps_badvtag); 4687 if (locked_tcb) { 4688 SCTP_TCB_UNLOCK(locked_tcb); 4689 } 4690 return (NULL); 4691 } 4692 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 4693 if (vtag_in != asoc->my_vtag) { 4694 /* 4695 * this could be a stale SHUTDOWN-ACK or the 4696 * peer never got the SHUTDOWN-COMPLETE and 4697 * is still hung; we have started a new asoc 4698 * but it won't complete until the shutdown 4699 * is completed 4700 */ 4701 if (locked_tcb) { 4702 SCTP_TCB_UNLOCK(locked_tcb); 4703 } 4704 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); 4705 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 4706 msg); 4707 sctp_handle_ootb(m, iphlen, *offset, src, dst, 4708 sh, inp, op_err, 4709 mflowtype, mflowid, fibnum, 4710 vrf_id, port); 4711 return (NULL); 4712 } 4713 } else { 4714 /* for all other chunks, vtag must match */ 4715 if (vtag_in != asoc->my_vtag) { 4716 /* invalid vtag... */ 4717 SCTPDBG(SCTP_DEBUG_INPUT3, 4718 "invalid vtag: %xh, expect %xh\n", 4719 vtag_in, asoc->my_vtag); 4720 SCTP_STAT_INCR(sctps_badvtag); 4721 if (locked_tcb) { 4722 SCTP_TCB_UNLOCK(locked_tcb); 4723 } 4724 *offset = length; 4725 return (NULL); 4726 } 4727 } 4728 } /* end if !SCTP_COOKIE_ECHO */ 4729 /* 4730 * process all control chunks... 4731 */ 4732 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) || 4733 (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) || 4734 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) && 4735 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) { 4736 /* implied cookie-ack.. 
we must have lost the ack */ 4737 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4738 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4739 stcb->asoc.overall_error_count, 4740 0, 4741 SCTP_FROM_SCTP_INPUT, 4742 __LINE__); 4743 } 4744 stcb->asoc.overall_error_count = 0; 4745 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, 4746 *netp); 4747 } 4748 process_control_chunks: 4749 while (IS_SCTP_CONTROL(ch)) { 4750 /* validate chunk length */ 4751 chk_length = ntohs(ch->chunk_length); 4752 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n", 4753 ch->chunk_type, chk_length); 4754 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length); 4755 if (chk_length < sizeof(*ch) || 4756 (*offset + (int)chk_length) > length) { 4757 *offset = length; 4758 if (locked_tcb) { 4759 SCTP_TCB_UNLOCK(locked_tcb); 4760 } 4761 return (NULL); 4762 } 4763 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks); 4764 /* 4765 * INIT-ACK only gets the init ack "header" portion only 4766 * because we don't have to process the peer's COOKIE. All 4767 * others get a complete chunk. 4768 */ 4769 if ((ch->chunk_type == SCTP_INITIATION_ACK) || 4770 (ch->chunk_type == SCTP_INITIATION)) { 4771 /* get an init-ack chunk */ 4772 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4773 sizeof(struct sctp_init_ack_chunk), chunk_buf); 4774 if (ch == NULL) { 4775 *offset = length; 4776 if (locked_tcb) { 4777 SCTP_TCB_UNLOCK(locked_tcb); 4778 } 4779 return (NULL); 4780 } 4781 } else { 4782 /* For cookies and all other chunks. */ 4783 if (chk_length > sizeof(chunk_buf)) { 4784 /* 4785 * use just the size of the chunk buffer so 4786 * the front part of our chunks fit in 4787 * contiguous space up to the chunk buffer 4788 * size (508 bytes). For chunks that need to 4789 * get more than that they must use the 4790 * sctp_m_getptr() function or other means 4791 * (e.g. know how to parse mbuf chains). 4792 * Cookies do this already. 
4793 */ 4794 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4795 (sizeof(chunk_buf) - 4), 4796 chunk_buf); 4797 if (ch == NULL) { 4798 *offset = length; 4799 if (locked_tcb) { 4800 SCTP_TCB_UNLOCK(locked_tcb); 4801 } 4802 return (NULL); 4803 } 4804 } else { 4805 /* We can fit it all */ 4806 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4807 chk_length, chunk_buf); 4808 if (ch == NULL) { 4809 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n"); 4810 *offset = length; 4811 if (locked_tcb) { 4812 SCTP_TCB_UNLOCK(locked_tcb); 4813 } 4814 return (NULL); 4815 } 4816 } 4817 } 4818 num_chunks++; 4819 /* Save off the last place we got a control from */ 4820 if (stcb != NULL) { 4821 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) { 4822 /* 4823 * allow last_control to be NULL if 4824 * ASCONF... ASCONF processing will find the 4825 * right net later 4826 */ 4827 if ((netp != NULL) && (*netp != NULL)) 4828 stcb->asoc.last_control_chunk_from = *netp; 4829 } 4830 } 4831 #ifdef SCTP_AUDITING_ENABLED 4832 sctp_audit_log(0xB0, ch->chunk_type); 4833 #endif 4834 4835 /* check to see if this chunk required auth, but isn't */ 4836 if ((stcb != NULL) && 4837 (stcb->asoc.auth_supported == 1) && 4838 sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) && 4839 !stcb->asoc.authenticated) { 4840 /* "silently" ignore */ 4841 SCTP_STAT_INCR(sctps_recvauthmissing); 4842 goto next_chunk; 4843 } 4844 switch (ch->chunk_type) { 4845 case SCTP_INITIATION: 4846 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n"); 4847 /* The INIT chunk must be the only chunk. */ 4848 if ((num_chunks > 1) || 4849 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 4850 /* RFC 4960 requires that no ABORT is sent */ 4851 *offset = length; 4852 if (locked_tcb) { 4853 SCTP_TCB_UNLOCK(locked_tcb); 4854 } 4855 return (NULL); 4856 } 4857 /* Honor our resource limit. 
*/ 4858 if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) { 4859 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 4860 sctp_abort_association(inp, stcb, m, iphlen, 4861 src, dst, sh, op_err, 4862 mflowtype, mflowid, 4863 vrf_id, port); 4864 *offset = length; 4865 return (NULL); 4866 } 4867 sctp_handle_init(m, iphlen, *offset, src, dst, sh, 4868 (struct sctp_init_chunk *)ch, inp, 4869 stcb, *netp, &abort_no_unlock, 4870 mflowtype, mflowid, 4871 vrf_id, port); 4872 *offset = length; 4873 if ((!abort_no_unlock) && (locked_tcb)) { 4874 SCTP_TCB_UNLOCK(locked_tcb); 4875 } 4876 return (NULL); 4877 break; 4878 case SCTP_PAD_CHUNK: 4879 break; 4880 case SCTP_INITIATION_ACK: 4881 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n"); 4882 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4883 /* We are not interested anymore */ 4884 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4885 ; 4886 } else { 4887 if ((locked_tcb != NULL) && (locked_tcb != stcb)) { 4888 /* Very unlikely */ 4889 SCTP_TCB_UNLOCK(locked_tcb); 4890 } 4891 *offset = length; 4892 if (stcb) { 4893 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4894 so = SCTP_INP_SO(inp); 4895 atomic_add_int(&stcb->asoc.refcnt, 1); 4896 SCTP_TCB_UNLOCK(stcb); 4897 SCTP_SOCKET_LOCK(so, 1); 4898 SCTP_TCB_LOCK(stcb); 4899 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4900 #endif 4901 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4902 SCTP_FROM_SCTP_INPUT + SCTP_LOC_29); 4903 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4904 SCTP_SOCKET_UNLOCK(so, 1); 4905 #endif 4906 } 4907 return (NULL); 4908 } 4909 } 4910 /* The INIT-ACK chunk must be the only chunk. 
*/ 4911 if ((num_chunks > 1) || 4912 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 4913 *offset = length; 4914 if (locked_tcb) { 4915 SCTP_TCB_UNLOCK(locked_tcb); 4916 } 4917 return (NULL); 4918 } 4919 if ((netp) && (*netp)) { 4920 ret = sctp_handle_init_ack(m, iphlen, *offset, 4921 src, dst, sh, 4922 (struct sctp_init_ack_chunk *)ch, 4923 stcb, *netp, 4924 &abort_no_unlock, 4925 mflowtype, mflowid, 4926 vrf_id); 4927 } else { 4928 ret = -1; 4929 } 4930 *offset = length; 4931 if (abort_no_unlock) { 4932 return (NULL); 4933 } 4934 /* 4935 * Special case, I must call the output routine to 4936 * get the cookie echoed 4937 */ 4938 if ((stcb != NULL) && (ret == 0)) { 4939 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 4940 } 4941 if (locked_tcb) { 4942 SCTP_TCB_UNLOCK(locked_tcb); 4943 } 4944 return (NULL); 4945 break; 4946 case SCTP_SELECTIVE_ACK: 4947 { 4948 struct sctp_sack_chunk *sack; 4949 int abort_now = 0; 4950 uint32_t a_rwnd, cum_ack; 4951 uint16_t num_seg, num_dup; 4952 uint8_t flags; 4953 int offset_seg, offset_dup; 4954 4955 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n"); 4956 SCTP_STAT_INCR(sctps_recvsacks); 4957 if (stcb == NULL) { 4958 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n"); 4959 break; 4960 } 4961 if (chk_length < sizeof(struct sctp_sack_chunk)) { 4962 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n"); 4963 break; 4964 } 4965 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 4966 /*- 4967 * If we have sent a shutdown-ack, we will pay no 4968 * attention to a sack sent in to us since 4969 * we don't care anymore. 
4970 */ 4971 break; 4972 } 4973 sack = (struct sctp_sack_chunk *)ch; 4974 flags = ch->chunk_flags; 4975 cum_ack = ntohl(sack->sack.cum_tsn_ack); 4976 num_seg = ntohs(sack->sack.num_gap_ack_blks); 4977 num_dup = ntohs(sack->sack.num_dup_tsns); 4978 a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd); 4979 if (sizeof(struct sctp_sack_chunk) + 4980 num_seg * sizeof(struct sctp_gap_ack_block) + 4981 num_dup * sizeof(uint32_t) != chk_length) { 4982 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n"); 4983 break; 4984 } 4985 offset_seg = *offset + sizeof(struct sctp_sack_chunk); 4986 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 4987 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 4988 cum_ack, num_seg, a_rwnd); 4989 stcb->asoc.seen_a_sack_this_pkt = 1; 4990 if ((stcb->asoc.pr_sctp_cnt == 0) && 4991 (num_seg == 0) && 4992 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) && 4993 (stcb->asoc.saw_sack_with_frags == 0) && 4994 (stcb->asoc.saw_sack_with_nr_frags == 0) && 4995 (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) 4996 ) { 4997 /* 4998 * We have a SIMPLE sack having no 4999 * prior segments and data on sent 5000 * queue to be acked.. Use the 5001 * faster path sack processing. We 5002 * also allow window update sacks 5003 * with no missing segments to go 5004 * this way too. 
5005 */ 5006 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now, ecne_seen); 5007 } else { 5008 if (netp && *netp) 5009 sctp_handle_sack(m, offset_seg, offset_dup, stcb, 5010 num_seg, 0, num_dup, &abort_now, flags, 5011 cum_ack, a_rwnd, ecne_seen); 5012 } 5013 if (abort_now) { 5014 /* ABORT signal from sack processing */ 5015 *offset = length; 5016 return (NULL); 5017 } 5018 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 5019 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 5020 (stcb->asoc.stream_queue_cnt == 0)) { 5021 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 5022 } 5023 } 5024 break; 5025 /* 5026 * EY - nr_sack: If the received chunk is an 5027 * nr_sack chunk 5028 */ 5029 case SCTP_NR_SELECTIVE_ACK: 5030 { 5031 struct sctp_nr_sack_chunk *nr_sack; 5032 int abort_now = 0; 5033 uint32_t a_rwnd, cum_ack; 5034 uint16_t num_seg, num_nr_seg, num_dup; 5035 uint8_t flags; 5036 int offset_seg, offset_dup; 5037 5038 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n"); 5039 SCTP_STAT_INCR(sctps_recvsacks); 5040 if (stcb == NULL) { 5041 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n"); 5042 break; 5043 } 5044 if (stcb->asoc.nrsack_supported == 0) { 5045 goto unknown_chunk; 5046 } 5047 if (chk_length < sizeof(struct sctp_nr_sack_chunk)) { 5048 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n"); 5049 break; 5050 } 5051 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 5052 /*- 5053 * If we have sent a shutdown-ack, we will pay no 5054 * attention to a sack sent in to us since 5055 * we don't care anymore. 
5056 */ 5057 break; 5058 } 5059 nr_sack = (struct sctp_nr_sack_chunk *)ch; 5060 flags = ch->chunk_flags; 5061 cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack); 5062 num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks); 5063 num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks); 5064 num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns); 5065 a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd); 5066 if (sizeof(struct sctp_nr_sack_chunk) + 5067 (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) + 5068 num_dup * sizeof(uint32_t) != chk_length) { 5069 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n"); 5070 break; 5071 } 5072 offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk); 5073 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 5074 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 5075 cum_ack, num_seg, a_rwnd); 5076 stcb->asoc.seen_a_sack_this_pkt = 1; 5077 if ((stcb->asoc.pr_sctp_cnt == 0) && 5078 (num_seg == 0) && (num_nr_seg == 0) && 5079 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) && 5080 (stcb->asoc.saw_sack_with_frags == 0) && 5081 (stcb->asoc.saw_sack_with_nr_frags == 0) && 5082 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 5083 /* 5084 * We have a SIMPLE sack having no 5085 * prior segments and data on sent 5086 * queue to be acked. Use the faster 5087 * path sack processing. We also 5088 * allow window update sacks with no 5089 * missing segments to go this way 5090 * too. 
5091 */ 5092 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 5093 &abort_now, ecne_seen); 5094 } else { 5095 if (netp && *netp) 5096 sctp_handle_sack(m, offset_seg, offset_dup, stcb, 5097 num_seg, num_nr_seg, num_dup, &abort_now, flags, 5098 cum_ack, a_rwnd, ecne_seen); 5099 } 5100 if (abort_now) { 5101 /* ABORT signal from sack processing */ 5102 *offset = length; 5103 return (NULL); 5104 } 5105 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 5106 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 5107 (stcb->asoc.stream_queue_cnt == 0)) { 5108 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 5109 } 5110 } 5111 break; 5112 5113 case SCTP_HEARTBEAT_REQUEST: 5114 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n"); 5115 if ((stcb) && netp && *netp) { 5116 SCTP_STAT_INCR(sctps_recvheartbeat); 5117 sctp_send_heartbeat_ack(stcb, m, *offset, 5118 chk_length, *netp); 5119 5120 /* He's alive so give him credit */ 5121 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5122 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5123 stcb->asoc.overall_error_count, 5124 0, 5125 SCTP_FROM_SCTP_INPUT, 5126 __LINE__); 5127 } 5128 stcb->asoc.overall_error_count = 0; 5129 } 5130 break; 5131 case SCTP_HEARTBEAT_ACK: 5132 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n"); 5133 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) { 5134 /* Its not ours */ 5135 *offset = length; 5136 if (locked_tcb) { 5137 SCTP_TCB_UNLOCK(locked_tcb); 5138 } 5139 return (NULL); 5140 } 5141 /* He's alive so give him credit */ 5142 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5143 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5144 stcb->asoc.overall_error_count, 5145 0, 5146 SCTP_FROM_SCTP_INPUT, 5147 __LINE__); 5148 } 5149 stcb->asoc.overall_error_count = 0; 5150 SCTP_STAT_INCR(sctps_recvheartbeatack); 5151 if (netp && *netp) 5152 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch, 5153 stcb, *netp); 5154 break; 5155 case SCTP_ABORT_ASSOCIATION: 
5156 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n", 5157 (void *)stcb); 5158 if ((stcb) && netp && *netp) 5159 sctp_handle_abort((struct sctp_abort_chunk *)ch, 5160 stcb, *netp); 5161 *offset = length; 5162 return (NULL); 5163 break; 5164 case SCTP_SHUTDOWN: 5165 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n", 5166 (void *)stcb); 5167 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) { 5168 *offset = length; 5169 if (locked_tcb) { 5170 SCTP_TCB_UNLOCK(locked_tcb); 5171 } 5172 return (NULL); 5173 } 5174 if (netp && *netp) { 5175 int abort_flag = 0; 5176 5177 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch, 5178 stcb, *netp, &abort_flag); 5179 if (abort_flag) { 5180 *offset = length; 5181 return (NULL); 5182 } 5183 } 5184 break; 5185 case SCTP_SHUTDOWN_ACK: 5186 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", (void *)stcb); 5187 if ((stcb) && (netp) && (*netp)) 5188 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp); 5189 *offset = length; 5190 return (NULL); 5191 break; 5192 5193 case SCTP_OPERATION_ERROR: 5194 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n"); 5195 if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) { 5196 *offset = length; 5197 return (NULL); 5198 } 5199 break; 5200 case SCTP_COOKIE_ECHO: 5201 SCTPDBG(SCTP_DEBUG_INPUT3, 5202 "SCTP_COOKIE-ECHO, stcb %p\n", (void *)stcb); 5203 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 5204 ; 5205 } else { 5206 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5207 /* We are not interested anymore */ 5208 abend: 5209 if (stcb) { 5210 SCTP_TCB_UNLOCK(stcb); 5211 } 5212 *offset = length; 5213 return (NULL); 5214 } 5215 } 5216 /* 5217 * First are we accepting? We do this again here 5218 * since it is possible that a previous endpoint WAS 5219 * listening responded to a INIT-ACK and then 5220 * closed. We opened and bound.. and are now no 5221 * longer listening. 
5222 */ 5223 5224 if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) { 5225 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 5226 (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) { 5227 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 5228 sctp_abort_association(inp, stcb, m, iphlen, 5229 src, dst, sh, op_err, 5230 mflowtype, mflowid, 5231 vrf_id, port); 5232 } 5233 *offset = length; 5234 return (NULL); 5235 } else { 5236 struct mbuf *ret_buf; 5237 struct sctp_inpcb *linp; 5238 5239 if (stcb) { 5240 linp = NULL; 5241 } else { 5242 linp = inp; 5243 } 5244 5245 if (linp) { 5246 SCTP_ASOC_CREATE_LOCK(linp); 5247 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5248 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5249 SCTP_ASOC_CREATE_UNLOCK(linp); 5250 goto abend; 5251 } 5252 } 5253 if (netp) { 5254 ret_buf = 5255 sctp_handle_cookie_echo(m, iphlen, 5256 *offset, 5257 src, dst, 5258 sh, 5259 (struct sctp_cookie_echo_chunk *)ch, 5260 &inp, &stcb, netp, 5261 auth_skipped, 5262 auth_offset, 5263 auth_len, 5264 &locked_tcb, 5265 mflowtype, 5266 mflowid, 5267 vrf_id, 5268 port); 5269 } else { 5270 ret_buf = NULL; 5271 } 5272 if (linp) { 5273 SCTP_ASOC_CREATE_UNLOCK(linp); 5274 } 5275 if (ret_buf == NULL) { 5276 if (locked_tcb) { 5277 SCTP_TCB_UNLOCK(locked_tcb); 5278 } 5279 SCTPDBG(SCTP_DEBUG_INPUT3, 5280 "GAK, null buffer\n"); 5281 *offset = length; 5282 return (NULL); 5283 } 5284 /* if AUTH skipped, see if it verified... 
*/ 5285 if (auth_skipped) { 5286 got_auth = 1; 5287 auth_skipped = 0; 5288 } 5289 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) { 5290 /* 5291 * Restart the timer if we have 5292 * pending data 5293 */ 5294 struct sctp_tmit_chunk *chk; 5295 5296 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 5297 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 5298 } 5299 } 5300 break; 5301 case SCTP_COOKIE_ACK: 5302 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", (void *)stcb); 5303 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) { 5304 if (locked_tcb) { 5305 SCTP_TCB_UNLOCK(locked_tcb); 5306 } 5307 return (NULL); 5308 } 5309 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5310 /* We are not interested anymore */ 5311 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 5312 ; 5313 } else if (stcb) { 5314 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5315 so = SCTP_INP_SO(inp); 5316 atomic_add_int(&stcb->asoc.refcnt, 1); 5317 SCTP_TCB_UNLOCK(stcb); 5318 SCTP_SOCKET_LOCK(so, 1); 5319 SCTP_TCB_LOCK(stcb); 5320 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5321 #endif 5322 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 5323 SCTP_FROM_SCTP_INPUT + SCTP_LOC_30); 5324 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5325 SCTP_SOCKET_UNLOCK(so, 1); 5326 #endif 5327 *offset = length; 5328 return (NULL); 5329 } 5330 } 5331 /* He's alive so give him credit */ 5332 if ((stcb) && netp && *netp) { 5333 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5334 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5335 stcb->asoc.overall_error_count, 5336 0, 5337 SCTP_FROM_SCTP_INPUT, 5338 __LINE__); 5339 } 5340 stcb->asoc.overall_error_count = 0; 5341 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp); 5342 } 5343 break; 5344 case SCTP_ECN_ECHO: 5345 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n"); 5346 /* He's alive so give him credit */ 5347 if ((stcb == NULL) || (chk_length != sizeof(struct 
sctp_ecne_chunk))) { 5348 /* Its not ours */ 5349 if (locked_tcb) { 5350 SCTP_TCB_UNLOCK(locked_tcb); 5351 } 5352 *offset = length; 5353 return (NULL); 5354 } 5355 if (stcb) { 5356 if (stcb->asoc.ecn_supported == 0) { 5357 goto unknown_chunk; 5358 } 5359 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5360 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5361 stcb->asoc.overall_error_count, 5362 0, 5363 SCTP_FROM_SCTP_INPUT, 5364 __LINE__); 5365 } 5366 stcb->asoc.overall_error_count = 0; 5367 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, 5368 stcb); 5369 ecne_seen = 1; 5370 } 5371 break; 5372 case SCTP_ECN_CWR: 5373 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n"); 5374 /* He's alive so give him credit */ 5375 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) { 5376 /* Its not ours */ 5377 if (locked_tcb) { 5378 SCTP_TCB_UNLOCK(locked_tcb); 5379 } 5380 *offset = length; 5381 return (NULL); 5382 } 5383 if (stcb) { 5384 if (stcb->asoc.ecn_supported == 0) { 5385 goto unknown_chunk; 5386 } 5387 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5388 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5389 stcb->asoc.overall_error_count, 5390 0, 5391 SCTP_FROM_SCTP_INPUT, 5392 __LINE__); 5393 } 5394 stcb->asoc.overall_error_count = 0; 5395 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp); 5396 } 5397 break; 5398 case SCTP_SHUTDOWN_COMPLETE: 5399 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", (void *)stcb); 5400 /* must be first and only chunk */ 5401 if ((num_chunks > 1) || 5402 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 5403 *offset = length; 5404 if (locked_tcb) { 5405 SCTP_TCB_UNLOCK(locked_tcb); 5406 } 5407 return (NULL); 5408 } 5409 if ((stcb) && netp && *netp) { 5410 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch, 5411 stcb, *netp); 5412 } 5413 *offset = length; 5414 return (NULL); 5415 break; 5416 case SCTP_ASCONF: 5417 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n"); 
5418 /* He's alive so give him credit */ 5419 if (stcb) { 5420 if (stcb->asoc.asconf_supported == 0) { 5421 goto unknown_chunk; 5422 } 5423 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5424 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5425 stcb->asoc.overall_error_count, 5426 0, 5427 SCTP_FROM_SCTP_INPUT, 5428 __LINE__); 5429 } 5430 stcb->asoc.overall_error_count = 0; 5431 sctp_handle_asconf(m, *offset, src, 5432 (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0); 5433 asconf_cnt++; 5434 } 5435 break; 5436 case SCTP_ASCONF_ACK: 5437 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n"); 5438 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) { 5439 /* Its not ours */ 5440 if (locked_tcb) { 5441 SCTP_TCB_UNLOCK(locked_tcb); 5442 } 5443 *offset = length; 5444 return (NULL); 5445 } 5446 if ((stcb) && netp && *netp) { 5447 if (stcb->asoc.asconf_supported == 0) { 5448 goto unknown_chunk; 5449 } 5450 /* He's alive so give him credit */ 5451 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5452 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5453 stcb->asoc.overall_error_count, 5454 0, 5455 SCTP_FROM_SCTP_INPUT, 5456 __LINE__); 5457 } 5458 stcb->asoc.overall_error_count = 0; 5459 sctp_handle_asconf_ack(m, *offset, 5460 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock); 5461 if (abort_no_unlock) 5462 return (NULL); 5463 } 5464 break; 5465 case SCTP_FORWARD_CUM_TSN: 5466 case SCTP_IFORWARD_CUM_TSN: 5467 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n"); 5468 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) { 5469 /* Its not ours */ 5470 if (locked_tcb) { 5471 SCTP_TCB_UNLOCK(locked_tcb); 5472 } 5473 *offset = length; 5474 return (NULL); 5475 } 5476 /* He's alive so give him credit */ 5477 if (stcb) { 5478 int abort_flag = 0; 5479 5480 if (stcb->asoc.prsctp_supported == 0) { 5481 goto unknown_chunk; 5482 } 5483 stcb->asoc.overall_error_count = 0; 5484 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5485 
sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5486 stcb->asoc.overall_error_count, 5487 0, 5488 SCTP_FROM_SCTP_INPUT, 5489 __LINE__); 5490 } 5491 *fwd_tsn_seen = 1; 5492 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5493 /* We are not interested anymore */ 5494 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5495 so = SCTP_INP_SO(inp); 5496 atomic_add_int(&stcb->asoc.refcnt, 1); 5497 SCTP_TCB_UNLOCK(stcb); 5498 SCTP_SOCKET_LOCK(so, 1); 5499 SCTP_TCB_LOCK(stcb); 5500 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5501 #endif 5502 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 5503 SCTP_FROM_SCTP_INPUT + SCTP_LOC_31); 5504 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5505 SCTP_SOCKET_UNLOCK(so, 1); 5506 #endif 5507 *offset = length; 5508 return (NULL); 5509 } 5510 sctp_handle_forward_tsn(stcb, 5511 (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset); 5512 if (abort_flag) { 5513 *offset = length; 5514 return (NULL); 5515 } else { 5516 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5517 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5518 stcb->asoc.overall_error_count, 5519 0, 5520 SCTP_FROM_SCTP_INPUT, 5521 __LINE__); 5522 } 5523 stcb->asoc.overall_error_count = 0; 5524 } 5525 5526 } 5527 break; 5528 case SCTP_STREAM_RESET: 5529 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n"); 5530 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) { 5531 /* Its not ours */ 5532 if (locked_tcb) { 5533 SCTP_TCB_UNLOCK(locked_tcb); 5534 } 5535 *offset = length; 5536 return (NULL); 5537 } 5538 if (stcb->asoc.reconfig_supported == 0) { 5539 goto unknown_chunk; 5540 } 5541 if (sctp_handle_stream_reset(stcb, m, *offset, ch)) { 5542 /* stop processing */ 5543 *offset = length; 5544 return (NULL); 5545 } 5546 break; 5547 case SCTP_PACKET_DROPPED: 5548 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n"); 5549 /* re-get it all please */ 5550 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) { 5551 /* Its 
not ours */ 5552 if (locked_tcb) { 5553 SCTP_TCB_UNLOCK(locked_tcb); 5554 } 5555 *offset = length; 5556 return (NULL); 5557 } 5558 if (ch && (stcb) && netp && (*netp)) { 5559 if (stcb->asoc.pktdrop_supported == 0) { 5560 goto unknown_chunk; 5561 } 5562 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch, 5563 stcb, *netp, 5564 min(chk_length, (sizeof(chunk_buf) - 4))); 5565 5566 } 5567 break; 5568 case SCTP_AUTHENTICATION: 5569 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n"); 5570 if (stcb == NULL) { 5571 /* save the first AUTH for later processing */ 5572 if (auth_skipped == 0) { 5573 auth_offset = *offset; 5574 auth_len = chk_length; 5575 auth_skipped = 1; 5576 } 5577 /* skip this chunk (temporarily) */ 5578 goto next_chunk; 5579 } 5580 if (stcb->asoc.auth_supported == 0) { 5581 goto unknown_chunk; 5582 } 5583 if ((chk_length < (sizeof(struct sctp_auth_chunk))) || 5584 (chk_length > (sizeof(struct sctp_auth_chunk) + 5585 SCTP_AUTH_DIGEST_LEN_MAX))) { 5586 /* Its not ours */ 5587 if (locked_tcb) { 5588 SCTP_TCB_UNLOCK(locked_tcb); 5589 } 5590 *offset = length; 5591 return (NULL); 5592 } 5593 if (got_auth == 1) { 5594 /* skip this chunk... it's already auth'd */ 5595 goto next_chunk; 5596 } 5597 got_auth = 1; 5598 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, 5599 m, *offset)) { 5600 /* auth HMAC failed so dump the packet */ 5601 *offset = length; 5602 return (stcb); 5603 } else { 5604 /* remaining chunks are HMAC checked */ 5605 stcb->asoc.authenticated = 1; 5606 } 5607 break; 5608 5609 default: 5610 unknown_chunk: 5611 /* it's an unknown chunk! 
*/ 5612 if ((ch->chunk_type & 0x40) && (stcb != NULL)) { 5613 struct sctp_gen_error_cause *cause; 5614 int len; 5615 5616 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 5617 0, M_NOWAIT, 1, MT_DATA); 5618 if (op_err != NULL) { 5619 len = min(SCTP_SIZE32(chk_length), (uint32_t) (length - *offset)); 5620 cause = mtod(op_err, struct sctp_gen_error_cause *); 5621 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 5622 cause->length = htons((uint16_t) (len + sizeof(struct sctp_gen_error_cause))); 5623 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 5624 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT); 5625 if (SCTP_BUF_NEXT(op_err) != NULL) { 5626 #ifdef SCTP_MBUF_LOGGING 5627 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 5628 sctp_log_mbc(SCTP_BUF_NEXT(op_err), SCTP_MBUF_ICOPY); 5629 } 5630 #endif 5631 sctp_queue_op_err(stcb, op_err); 5632 } else { 5633 sctp_m_freem(op_err); 5634 } 5635 } 5636 } 5637 if ((ch->chunk_type & 0x80) == 0) { 5638 /* discard this packet */ 5639 *offset = length; 5640 return (stcb); 5641 } /* else skip this bad chunk and continue... 
*/ 5642 break; 5643 } /* switch (ch->chunk_type) */ 5644 5645 5646 next_chunk: 5647 /* get the next chunk */ 5648 *offset += SCTP_SIZE32(chk_length); 5649 if (*offset >= length) { 5650 /* no more data left in the mbuf chain */ 5651 break; 5652 } 5653 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 5654 sizeof(struct sctp_chunkhdr), chunk_buf); 5655 if (ch == NULL) { 5656 if (locked_tcb) { 5657 SCTP_TCB_UNLOCK(locked_tcb); 5658 } 5659 *offset = length; 5660 return (NULL); 5661 } 5662 } /* while */ 5663 5664 if (asconf_cnt > 0 && stcb != NULL) { 5665 sctp_send_asconf_ack(stcb); 5666 } 5667 return (stcb); 5668 } 5669 5670 5671 /* 5672 * common input chunk processing (v4 and v6) 5673 */ 5674 void 5675 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length, 5676 struct sockaddr *src, struct sockaddr *dst, 5677 struct sctphdr *sh, struct sctp_chunkhdr *ch, 5678 #if !defined(SCTP_WITH_NO_CSUM) 5679 uint8_t compute_crc, 5680 #endif 5681 uint8_t ecn_bits, 5682 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 5683 uint32_t vrf_id, uint16_t port) 5684 { 5685 uint32_t high_tsn; 5686 int fwd_tsn_seen = 0, data_processed = 0; 5687 struct mbuf *m = *mm, *op_err; 5688 char msg[SCTP_DIAG_INFO_LEN]; 5689 int un_sent; 5690 int cnt_ctrl_ready = 0; 5691 struct sctp_inpcb *inp = NULL, *inp_decr = NULL; 5692 struct sctp_tcb *stcb = NULL; 5693 struct sctp_nets *net = NULL; 5694 5695 SCTP_STAT_INCR(sctps_recvdatagrams); 5696 #ifdef SCTP_AUDITING_ENABLED 5697 sctp_audit_log(0xE0, 1); 5698 sctp_auditing(0, inp, stcb, net); 5699 #endif 5700 #if !defined(SCTP_WITH_NO_CSUM) 5701 if (compute_crc != 0) { 5702 uint32_t check, calc_check; 5703 5704 check = sh->checksum; 5705 sh->checksum = 0; 5706 calc_check = sctp_calculate_cksum(m, iphlen); 5707 sh->checksum = check; 5708 if (calc_check != check) { 5709 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n", 5710 calc_check, check, (void *)m, length, iphlen); 
5711 stcb = sctp_findassociation_addr(m, offset, src, dst, 5712 sh, ch, &inp, &net, vrf_id); 5713 #if defined(INET) || defined(INET6) 5714 if ((ch->chunk_type != SCTP_INITIATION) && 5715 (net != NULL) && (net->port != port)) { 5716 if (net->port == 0) { 5717 /* UDP encapsulation turned on. */ 5718 net->mtu -= sizeof(struct udphdr); 5719 if (stcb->asoc.smallest_mtu > net->mtu) { 5720 sctp_pathmtu_adjustment(stcb, net->mtu); 5721 } 5722 } else if (port == 0) { 5723 /* UDP encapsulation turned off. */ 5724 net->mtu += sizeof(struct udphdr); 5725 /* XXX Update smallest_mtu */ 5726 } 5727 net->port = port; 5728 } 5729 #endif 5730 if (net != NULL) { 5731 net->flowtype = mflowtype; 5732 net->flowid = mflowid; 5733 } 5734 if ((inp != NULL) && (stcb != NULL)) { 5735 sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1); 5736 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED); 5737 } else if ((inp != NULL) && (stcb == NULL)) { 5738 inp_decr = inp; 5739 } 5740 SCTP_STAT_INCR(sctps_badsum); 5741 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors); 5742 goto out; 5743 } 5744 } 5745 #endif 5746 /* Destination port of 0 is illegal, based on RFC4960. */ 5747 if (sh->dest_port == 0) { 5748 SCTP_STAT_INCR(sctps_hdrops); 5749 goto out; 5750 } 5751 stcb = sctp_findassociation_addr(m, offset, src, dst, 5752 sh, ch, &inp, &net, vrf_id); 5753 #if defined(INET) || defined(INET6) 5754 if ((ch->chunk_type != SCTP_INITIATION) && 5755 (net != NULL) && (net->port != port)) { 5756 if (net->port == 0) { 5757 /* UDP encapsulation turned on. */ 5758 net->mtu -= sizeof(struct udphdr); 5759 if (stcb->asoc.smallest_mtu > net->mtu) { 5760 sctp_pathmtu_adjustment(stcb, net->mtu); 5761 } 5762 } else if (port == 0) { 5763 /* UDP encapsulation turned off. 
*/ 5764 net->mtu += sizeof(struct udphdr); 5765 /* XXX Update smallest_mtu */ 5766 } 5767 net->port = port; 5768 } 5769 #endif 5770 if (net != NULL) { 5771 net->flowtype = mflowtype; 5772 net->flowid = mflowid; 5773 } 5774 if (inp == NULL) { 5775 SCTP_STAT_INCR(sctps_noport); 5776 if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) { 5777 goto out; 5778 } 5779 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 5780 sctp_send_shutdown_complete2(src, dst, sh, 5781 mflowtype, mflowid, fibnum, 5782 vrf_id, port); 5783 goto out; 5784 } 5785 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) { 5786 goto out; 5787 } 5788 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) { 5789 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 5790 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 5791 (ch->chunk_type != SCTP_INIT))) { 5792 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 5793 "Out of the blue"); 5794 sctp_send_abort(m, iphlen, src, dst, 5795 sh, 0, op_err, 5796 mflowtype, mflowid, fibnum, 5797 vrf_id, port); 5798 } 5799 } 5800 goto out; 5801 } else if (stcb == NULL) { 5802 inp_decr = inp; 5803 } 5804 #ifdef IPSEC 5805 /*- 5806 * I very much doubt any of the IPSEC stuff will work but I have no 5807 * idea, so I will leave it in place. 
5808 */ 5809 if (inp != NULL) { 5810 switch (dst->sa_family) { 5811 #ifdef INET 5812 case AF_INET: 5813 if (ipsec4_in_reject(m, &inp->ip_inp.inp)) { 5814 SCTP_STAT_INCR(sctps_hdrops); 5815 goto out; 5816 } 5817 break; 5818 #endif 5819 #ifdef INET6 5820 case AF_INET6: 5821 if (ipsec6_in_reject(m, &inp->ip_inp.inp)) { 5822 SCTP_STAT_INCR(sctps_hdrops); 5823 goto out; 5824 } 5825 break; 5826 #endif 5827 default: 5828 break; 5829 } 5830 } 5831 #endif 5832 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n", 5833 (void *)m, iphlen, offset, length, (void *)stcb); 5834 if (stcb) { 5835 /* always clear this before beginning a packet */ 5836 stcb->asoc.authenticated = 0; 5837 stcb->asoc.seen_a_sack_this_pkt = 0; 5838 SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n", 5839 (void *)stcb, stcb->asoc.state); 5840 5841 if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) || 5842 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5843 /*- 5844 * If we hit here, we had a ref count 5845 * up when the assoc was aborted and the 5846 * timer is clearing out the assoc, we should 5847 * NOT respond to any packet.. its OOTB. 5848 */ 5849 SCTP_TCB_UNLOCK(stcb); 5850 stcb = NULL; 5851 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); 5852 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 5853 msg); 5854 sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err, 5855 mflowtype, mflowid, inp->fibnum, 5856 vrf_id, port); 5857 goto out; 5858 } 5859 } 5860 if (IS_SCTP_CONTROL(ch)) { 5861 /* process the control portion of the SCTP packet */ 5862 /* sa_ignore NO_NULL_CHK */ 5863 stcb = sctp_process_control(m, iphlen, &offset, length, 5864 src, dst, sh, ch, 5865 inp, stcb, &net, &fwd_tsn_seen, 5866 mflowtype, mflowid, fibnum, 5867 vrf_id, port); 5868 if (stcb) { 5869 /* 5870 * This covers us if the cookie-echo was there and 5871 * it changes our INP. 
5872 */ 5873 inp = stcb->sctp_ep; 5874 #if defined(INET) || defined(INET6) 5875 if ((ch->chunk_type != SCTP_INITIATION) && 5876 (net != NULL) && (net->port != port)) { 5877 if (net->port == 0) { 5878 /* UDP encapsulation turned on. */ 5879 net->mtu -= sizeof(struct udphdr); 5880 if (stcb->asoc.smallest_mtu > net->mtu) { 5881 sctp_pathmtu_adjustment(stcb, net->mtu); 5882 } 5883 } else if (port == 0) { 5884 /* UDP encapsulation turned off. */ 5885 net->mtu += sizeof(struct udphdr); 5886 /* XXX Update smallest_mtu */ 5887 } 5888 net->port = port; 5889 } 5890 #endif 5891 } 5892 } else { 5893 /* 5894 * no control chunks, so pre-process DATA chunks (these 5895 * checks are taken care of by control processing) 5896 */ 5897 5898 /* 5899 * if DATA only packet, and auth is required, then punt... 5900 * can't have authenticated without any AUTH (control) 5901 * chunks 5902 */ 5903 if ((stcb != NULL) && 5904 (stcb->asoc.auth_supported == 1) && 5905 sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) { 5906 /* "silently" ignore */ 5907 SCTP_STAT_INCR(sctps_recvauthmissing); 5908 goto out; 5909 } 5910 if (stcb == NULL) { 5911 /* out of the blue DATA chunk */ 5912 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); 5913 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 5914 msg); 5915 sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err, 5916 mflowtype, mflowid, fibnum, 5917 vrf_id, port); 5918 goto out; 5919 } 5920 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) { 5921 /* v_tag mismatch! */ 5922 SCTP_STAT_INCR(sctps_badvtag); 5923 goto out; 5924 } 5925 } 5926 5927 if (stcb == NULL) { 5928 /* 5929 * no valid TCB for this packet, or we found it's a bad 5930 * packet while processing control, or we're done with this 5931 * packet (done or skip rest of data), so we drop it... 
5932 */ 5933 goto out; 5934 } 5935 /* 5936 * DATA chunk processing 5937 */ 5938 /* plow through the data chunks while length > offset */ 5939 5940 /* 5941 * Rest should be DATA only. Check authentication state if AUTH for 5942 * DATA is required. 5943 */ 5944 if ((length > offset) && 5945 (stcb != NULL) && 5946 (stcb->asoc.auth_supported == 1) && 5947 sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) && 5948 !stcb->asoc.authenticated) { 5949 /* "silently" ignore */ 5950 SCTP_STAT_INCR(sctps_recvauthmissing); 5951 SCTPDBG(SCTP_DEBUG_AUTH1, 5952 "Data chunk requires AUTH, skipped\n"); 5953 goto trigger_send; 5954 } 5955 if (length > offset) { 5956 int retval; 5957 5958 /* 5959 * First check to make sure our state is correct. We would 5960 * not get here unless we really did have a tag, so we don't 5961 * abort if this happens, just dump the chunk silently. 5962 */ 5963 switch (SCTP_GET_STATE(&stcb->asoc)) { 5964 case SCTP_STATE_COOKIE_ECHOED: 5965 /* 5966 * we consider data with valid tags in this state 5967 * shows us the cookie-ack was lost. Imply it was 5968 * there. 5969 */ 5970 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5971 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5972 stcb->asoc.overall_error_count, 5973 0, 5974 SCTP_FROM_SCTP_INPUT, 5975 __LINE__); 5976 } 5977 stcb->asoc.overall_error_count = 0; 5978 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net); 5979 break; 5980 case SCTP_STATE_COOKIE_WAIT: 5981 /* 5982 * We consider OOTB any data sent during asoc setup. 
5983 */ 5984 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); 5985 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 5986 msg); 5987 sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err, 5988 mflowtype, mflowid, inp->fibnum, 5989 vrf_id, port); 5990 goto out; 5991 /* sa_ignore NOTREACHED */ 5992 break; 5993 case SCTP_STATE_EMPTY: /* should not happen */ 5994 case SCTP_STATE_INUSE: /* should not happen */ 5995 case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */ 5996 case SCTP_STATE_SHUTDOWN_ACK_SENT: 5997 default: 5998 goto out; 5999 /* sa_ignore NOTREACHED */ 6000 break; 6001 case SCTP_STATE_OPEN: 6002 case SCTP_STATE_SHUTDOWN_SENT: 6003 break; 6004 } 6005 /* plow through the data chunks while length > offset */ 6006 retval = sctp_process_data(mm, iphlen, &offset, length, 6007 inp, stcb, net, &high_tsn); 6008 if (retval == 2) { 6009 /* 6010 * The association aborted, NO UNLOCK needed since 6011 * the association is destroyed. 
6012 */ 6013 stcb = NULL; 6014 goto out; 6015 } 6016 data_processed = 1; 6017 /* 6018 * Anything important needs to have been m_copy'ed in 6019 * process_data 6020 */ 6021 } 6022 /* take care of ecn */ 6023 if ((data_processed == 1) && 6024 (stcb->asoc.ecn_supported == 1) && 6025 ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) { 6026 /* Yep, we need to add a ECNE */ 6027 sctp_send_ecn_echo(stcb, net, high_tsn); 6028 } 6029 if ((data_processed == 0) && (fwd_tsn_seen)) { 6030 int was_a_gap; 6031 uint32_t highest_tsn; 6032 6033 if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) { 6034 highest_tsn = stcb->asoc.highest_tsn_inside_nr_map; 6035 } else { 6036 highest_tsn = stcb->asoc.highest_tsn_inside_map; 6037 } 6038 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 6039 stcb->asoc.send_sack = 1; 6040 sctp_sack_check(stcb, was_a_gap); 6041 } else if (fwd_tsn_seen) { 6042 stcb->asoc.send_sack = 1; 6043 } 6044 /* trigger send of any chunks in queue... 
 */
trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
	    stcb->asoc.peers_rwnd,
	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	    stcb->asoc.total_flight);
	/* Bytes queued by the user but not yet in flight. */
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		/* ECN-echo chunks on the queue are not counted as "ready". */
		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
	}
	/*
	 * Kick the output path if anything is pending: queued ASCONFs,
	 * ready control chunks, a pending stream reset, or unsent user
	 * data (the rwnd <= 0 && total_flight == 0 case permits a
	 * zero-window probe).
	 */
	if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) ||
	    cnt_ctrl_ready ||
	    stcb->asoc.trigger_reset ||
	    ((un_sent) &&
	    (stcb->asoc.peers_rwnd > 0 ||
	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
out:
	/* Common exit: release the TCB lock and any extra inp reference. */
	if (stcb != NULL) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if (inp_decr != NULL) {
		/* reduce ref-count */
		SCTP_INP_WLOCK(inp_decr);
		SCTP_INP_DECR_REF(inp_decr);
		SCTP_INP_WUNLOCK(inp_decr);
	}
	return;
}

#ifdef INET
/*
 * IPv4 input path for SCTP.  "off" is the length of the IP header in the
 * mbuf chain; "port" is the UDP encapsulation port (0 if the packet was not
 * UDP-encapsulated).  Pulls the IP, SCTP common, and first chunk header into
 * the first mbuf, validates the packet length against the IP payload length,
 * drops broadcast/multicast destinations, records whether the CRC32c was
 * already verified by hardware offload, and hands the packet to
 * sctp_common_input_processing().  The mbuf chain is freed here on all
 * error paths reached via "out".
 */
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
{
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct sockaddr_in src, dst;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_chunkhdr *ch;
	int length, offset;

#if !defined(SCTP_WITH_NO_CSUM)
	uint8_t compute_crc;

#endif
	uint32_t mflowid;
	uint8_t mflowtype;
	uint16_t fibnum;

	iphlen = off;
	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	m = SCTP_HEADER_TO_CHAIN(i_pak);
#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mbc(m, SCTP_MBUF_INPUT);
	}
#endif
#ifdef SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
		sctp_packet_log(m);
	}
#endif
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	mflowid = m->m_pkthdr.flowid;
	mflowtype = M_HASHTYPE_GET(m);
	fibnum = M_GETFIB(m);
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	if (SCTP_BUF_LEN(m) < offset) {
		if ((m = m_pullup(m, offset)) == NULL) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
	}
	ip = mtod(m, struct ip *);
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
	/* Leave "offset" pointing at the first chunk header. */
	offset -= sizeof(struct sctp_chunkhdr);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = ip->ip_dst;
	length = ntohs(ip->ip_len);
	/* Validate mbuf chain length with IP payload length. */
	if (SCTP_HEADER_LEN(m) != length) {
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
		goto out;
	}
	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
		goto out;
	}
	ecn_bits = ip->ip_tos;
#if defined(SCTP_WITH_NO_CSUM)
	SCTP_STAT_INCR(sctps_recvnocrc);
#else
	/* Compute the CRC32c in software only if hardware did not verify it. */
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		SCTP_STAT_INCR(sctps_recvhwcrc);
		compute_crc = 0;
	} else {
		SCTP_STAT_INCR(sctps_recvswcrc);
		compute_crc = 1;
	}
#endif
	sctp_common_input_processing(&m, iphlen, offset, length,
	    (struct sockaddr *)&src,
	    (struct sockaddr *)&dst,
	    sh, ch,
#if !defined(SCTP_WITH_NO_CSUM)
	    compute_crc,
#endif
	    ecn_bits,
	    mflowtype, mflowid, fibnum,
	    vrf_id, port);
out:
	if (m) {
		sctp_m_freem(m);
	}
	return;
}

#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
extern int *sctp_cpuarry;

#endif

/*
 * Protocol-switch entry point for SCTP over IPv4.  With multi-core input
 * enabled and more than one CPU, derives a flow id (reusing the one set by
 * lower layers when present, otherwise mixing v_tag and the port pair) and
 * queues the packet to the CPU selected by flowid % mp_ncpus, so packets
 * with the same flow id are handled on the same core.  Otherwise processes
 * the packet inline with no UDP encapsulation port.
 */
int
sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
{
	struct mbuf *m;
	int off;

	m = *mp;
	off = *offp;
#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
	if (mp_ncpus > 1) {
		struct ip *ip;
		struct sctphdr *sh;
		int offset;
		int cpu_to_use;
		uint32_t flowid, tag;

		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			flowid = m->m_pkthdr.flowid;
		} else {
			/*
			 * No flow id built by lower layers fix it so we
			 * create one.
			 */
			offset = off + sizeof(struct sctphdr);
			if (SCTP_BUF_LEN(m) < offset) {
				if ((m = m_pullup(m, offset)) == NULL) {
					SCTP_STAT_INCR(sctps_hdrops);
					return (IPPROTO_DONE);
				}
			}
			ip = mtod(m, struct ip *);
			sh = (struct sctphdr *)((caddr_t)ip + off);
			tag = htonl(sh->v_tag);
			/* Mix v_tag with both ports to spread associations. */
			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
			m->m_pkthdr.flowid = flowid;
			M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
		}
		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
		sctp_queue_to_mcore(m, off, cpu_to_use);
		return (IPPROTO_DONE);
	}
#endif
	sctp_input_with_port(m, off, 0);
	return (IPPROTO_DONE);
}

#endif