1 /*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <netinet/sctp_os.h> 37 #include <netinet/sctp_var.h> 38 #include <netinet/sctp_sysctl.h> 39 #include <netinet/sctp_pcb.h> 40 #include <netinet/sctp_header.h> 41 #include <netinet/sctputil.h> 42 #include <netinet/sctp_output.h> 43 #include <netinet/sctp_input.h> 44 #include <netinet/sctp_auth.h> 45 #include <netinet/sctp_indata.h> 46 #include <netinet/sctp_asconf.h> 47 #include <netinet/sctp_bsd_addr.h> 48 #include <netinet/sctp_timer.h> 49 #include <netinet/sctp_crc32.h> 50 #if defined(INET) || defined(INET6) 51 #include <netinet/udp.h> 52 #endif 53 #include <sys/smp.h> 54 55 56 57 static void 58 sctp_stop_all_cookie_timers(struct sctp_tcb *stcb) 59 { 60 struct sctp_nets *net; 61 62 /* 63 * This now not only stops all cookie timers it also stops any INIT 64 * timers as well. This will make sure that the timers are stopped 65 * in all collision cases. 66 */ 67 SCTP_TCB_LOCK_ASSERT(stcb); 68 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 69 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) { 70 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, 71 stcb->sctp_ep, 72 stcb, 73 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1); 74 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) { 75 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, 76 stcb->sctp_ep, 77 stcb, 78 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2); 79 } 80 } 81 } 82 83 /* INIT handler */ 84 static void 85 sctp_handle_init(struct mbuf *m, int iphlen, int offset, 86 struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, 87 struct sctp_init_chunk *cp, struct sctp_inpcb *inp, 88 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock, 89 uint8_t mflowtype, uint32_t mflowid, 90 uint32_t vrf_id, uint16_t port) 91 { 92 struct sctp_init *init; 93 struct mbuf *op_err; 94 95 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n", 96 (void *)stcb); 97 if (stcb == NULL) { 98 SCTP_INP_RLOCK(inp); 99 } 100 /* validate length */ 101 if 
(ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) { 102 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 103 sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, 104 mflowtype, mflowid, 105 vrf_id, port); 106 if (stcb) 107 *abort_no_unlock = 1; 108 goto outnow; 109 } 110 /* validate parameters */ 111 init = &cp->init; 112 if (init->initiate_tag == 0) { 113 /* protocol error... send abort */ 114 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 115 sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, 116 mflowtype, mflowid, 117 vrf_id, port); 118 if (stcb) 119 *abort_no_unlock = 1; 120 goto outnow; 121 } 122 if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) { 123 /* invalid parameter... send abort */ 124 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 125 sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, 126 mflowtype, mflowid, 127 vrf_id, port); 128 if (stcb) 129 *abort_no_unlock = 1; 130 goto outnow; 131 } 132 if (init->num_inbound_streams == 0) { 133 /* protocol error... send abort */ 134 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 135 sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, 136 mflowtype, mflowid, 137 vrf_id, port); 138 if (stcb) 139 *abort_no_unlock = 1; 140 goto outnow; 141 } 142 if (init->num_outbound_streams == 0) { 143 /* protocol error... send abort */ 144 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 145 sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, 146 mflowtype, mflowid, 147 vrf_id, port); 148 if (stcb) 149 *abort_no_unlock = 1; 150 goto outnow; 151 } 152 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp), 153 offset + ntohs(cp->ch.chunk_length))) { 154 /* auth parameter(s) error... 
send abort */ 155 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 156 "Problem with AUTH parameters"); 157 sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err, 158 mflowtype, mflowid, 159 vrf_id, port); 160 if (stcb) 161 *abort_no_unlock = 1; 162 goto outnow; 163 } 164 /* We are only accepting if we have a listening socket. */ 165 if ((stcb == NULL) && 166 ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 167 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 168 (!SCTP_IS_LISTENING(inp)))) { 169 /* 170 * FIX ME ?? What about TCP model and we have a 171 * match/restart case? Actually no fix is needed. the lookup 172 * will always find the existing assoc so stcb would not be 173 * NULL. It may be questionable to do this since we COULD 174 * just send back the INIT-ACK and hope that the app did 175 * accept()'s by the time the COOKIE was sent. But there is 176 * a price to pay for COOKIE generation and I don't want to 177 * pay it on the chance that the app will actually do some 178 * accepts(). The App just looses and should NOT be in this 179 * state :-) 180 */ 181 if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) { 182 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 183 "No listener"); 184 sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, 185 mflowtype, mflowid, inp->fibnum, 186 vrf_id, port); 187 } 188 goto outnow; 189 } 190 if ((stcb != NULL) && 191 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT)) { 192 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n"); 193 sctp_send_shutdown_ack(stcb, NULL); 194 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 195 } else { 196 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n"); 197 sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset, 198 src, dst, sh, cp, 199 mflowtype, mflowid, 200 vrf_id, port, 201 ((stcb == NULL) ? 
SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED)); 202 } 203 outnow: 204 if (stcb == NULL) { 205 SCTP_INP_RUNLOCK(inp); 206 } 207 } 208 209 /* 210 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error 211 */ 212 213 int 214 sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked 215 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 216 SCTP_UNUSED 217 #endif 218 ) 219 { 220 int unsent_data; 221 unsigned int i; 222 struct sctp_stream_queue_pending *sp; 223 struct sctp_association *asoc; 224 225 /* 226 * This function returns if any stream has true unsent data on it. 227 * Note that as it looks through it will clean up any places that 228 * have old data that has been sent but left at top of stream queue. 229 */ 230 asoc = &stcb->asoc; 231 unsent_data = 0; 232 SCTP_TCB_SEND_LOCK(stcb); 233 if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { 234 /* Check to see if some data queued */ 235 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 236 /* sa_ignore FREED_MEMORY */ 237 sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue); 238 if (sp == NULL) { 239 continue; 240 } 241 if ((sp->msg_is_complete) && 242 (sp->length == 0) && 243 (sp->sender_all_done)) { 244 /* 245 * We are doing differed cleanup. Last time 246 * through when we took all the data the 247 * sender_all_done was not set. 
				 */
				if (sp->put_last_out == 0) {
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					    sp->sender_all_done,
					    sp->length,
					    sp->msg_is_complete,
					    sp->put_last_out);
				}
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
				stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, &asoc->strmout[i], sp, 1);
				if (sp->net) {
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
				}
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp, so_locked);
				/* The stream may still hold more messages. */
				if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
					unsent_data++;
				}
			} else {
				unsent_data++;
			}
			/* One pending message is enough to answer "yes". */
			if (unsent_data > 0) {
				break;
			}
		}
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	return (unsent_data);
}

/*
 * Absorb the parameters of a peer INIT (or INIT-ACK) chunk into the
 * association: record the peer vtag and rwnd, seed the TSN tracking
 * state, clamp our outbound stream count to what the peer can take in
 * (abandoning queued data on the dropped streams), and (re)build the
 * inbound stream array.  Returns 0 on success, -1 if memory for the
 * inbound streams could not be allocated.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* First purge already-built chunks destined for dropped sids. */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.sid >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
#ifdef INVARIANTS
				} else {
					panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
#endif
				}
				if (chk->data != NULL) {
					sctp_free_bufspace(stcb, asoc, chk, 1);
					/* Tell the ULP its datagram was never sent. */
					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
					    0, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				/* sa_ignore FREED_MEMORY */
			}
		}
		/* Then drain the per-stream pending queues of the dropped streams. */
		if (asoc->strmout) {
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, 0, sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
					/* sa_ignore FREED_MEMORY */
				}
				outs->state = SCTP_STREAM_CLOSED;
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->streamoutcnt = asoc->pre_open_streams;
	if (asoc->strmout) {
		for (i = 0; i < asoc->streamoutcnt; i++) {
			asoc->strmout[i].state = SCTP_STREAM_OPEN;
		}
	}
	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		for (i = 0; i < asoc->streamincnt; i++) {
			sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
			sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* Inbound stream count = min(our limit, peer's outbound count). */
	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
		asoc->streamincnt = ntohs(init->num_outbound_streams);
	} else {
		asoc->streamincnt = asoc->max_inbound_streams;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].sid = i;
		asoc->strmin[i].last_mid_delivered = 0xffffffff;
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
		asoc->strmin[i].pd_api_started = 0;
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}

/*
 * INIT-ACK message processing/consumption returns value < 0 on error
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;
	int nat_friendly = 0;

	/* First verify that we have no illegal param's */
	abort_flag = 0;

	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t)nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
	if (retval < 0) {
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit,
	    src, dst, NULL, stcb->asoc.port))) {
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Problem with address parameters");
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->asconf_supported == 0) {
		struct sctp_asconf_addr *param, *nparam;

		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
			SCTP_FREE(param, SCTP_M_ASC_ADDR);
		}
	}
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	/* A valid INIT-ACK resets the error counters. */
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assue that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy,
	    SCTP_RTT_FROM_NON_DATA);
	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send a op error. But in any
		 * case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, its broke.
		 */
		if (retval == -3) {
			uint16_t len;

			len = (uint16_t)(sizeof(struct sctp_error_missing_param) + sizeof(uint16_t));
			/* We abort with an error of missing mandatory param */
			op_err = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
			if (op_err != NULL) {
				struct sctp_error_missing_param *cause;

				SCTP_BUF_LEN(op_err) = len;
				cause = mtod(op_err, struct sctp_error_missing_param *);
				/* Subtract the reserved param */
				cause->cause.code = htons(SCTP_CAUSE_MISSING_PARAM);
				cause->cause.length = htons(len);
				cause->num_missing_params = htonl(1);
				cause->type[0] = htons(SCTP_STATE_COOKIE);
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    src, dst, sh, op_err,
			    mflowtype, mflowid,
			    vrf_id, net->port);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}

/*
 * HEARTBEAT-ACK handler: confirm the echoed destination address,
 * clear its error count, update its RTO from the echoed timestamp and,
 * if it was a requested primary, promote it.
 */
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	union sctp_sockstore store;
	struct sctp_nets *r_net, *f_net;
	struct timeval tv;
	int req_prim = 0;
	uint16_t old_error_counter;

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}
	/* Rebuild the sockaddr the heartbeat was sent to. */
	memset(&store, 0, sizeof(store));
	switch (cp->heartbeat.hb_info.addr_family) {
#ifdef INET
	case AF_INET:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
			store.sin.sin_family = cp->heartbeat.hb_info.addr_family;
			store.sin.sin_len = cp->heartbeat.hb_info.addr_len;
			store.sin.sin_port = stcb->rport;
			memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address,
			    sizeof(store.sin.sin_addr));
		} else {
			return;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
			store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family;
			store.sin6.sin6_len =
cp->heartbeat.hb_info.addr_len; 592 store.sin6.sin6_port = stcb->rport; 593 memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr)); 594 } else { 595 return; 596 } 597 break; 598 #endif 599 default: 600 return; 601 } 602 r_net = sctp_findnet(stcb, &store.sa); 603 if (r_net == NULL) { 604 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n"); 605 return; 606 } 607 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) && 608 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) && 609 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) { 610 /* 611 * If the its a HB and it's random value is correct when can 612 * confirm the destination. 613 */ 614 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 615 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) { 616 stcb->asoc.primary_destination = r_net; 617 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 618 f_net = TAILQ_FIRST(&stcb->asoc.nets); 619 if (f_net != r_net) { 620 /* 621 * first one on the list is NOT the primary 622 * sctp_cmpaddr() is much more efficient if 623 * the primary is the first on the list, 624 * make it so. 
625 */ 626 TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next); 627 TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next); 628 } 629 req_prim = 1; 630 } 631 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 632 stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED); 633 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, 634 r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4); 635 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net); 636 } 637 old_error_counter = r_net->error_count; 638 r_net->error_count = 0; 639 r_net->hb_responded = 1; 640 tv.tv_sec = cp->heartbeat.hb_info.time_value_1; 641 tv.tv_usec = cp->heartbeat.hb_info.time_value_2; 642 /* Now lets do a RTO with this */ 643 r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy, 644 SCTP_RTT_FROM_NON_DATA); 645 if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) { 646 r_net->dest_state |= SCTP_ADDR_REACHABLE; 647 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 648 0, (void *)r_net, SCTP_SO_NOT_LOCKED); 649 } 650 if (r_net->dest_state & SCTP_ADDR_PF) { 651 r_net->dest_state &= ~SCTP_ADDR_PF; 652 stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 653 } 654 if (old_error_counter > 0) { 655 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 656 stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5); 657 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net); 658 } 659 if (r_net == stcb->asoc.primary_destination) { 660 if (stcb->asoc.alternate) { 661 /* release the alternate, primary is good */ 662 sctp_free_remote_addr(stcb->asoc.alternate); 663 stcb->asoc.alternate = NULL; 664 } 665 } 666 /* Mobility adaptation */ 667 if (req_prim) { 668 if ((sctp_is_mobility_feature_on(stcb->sctp_ep, 669 SCTP_MOBILITY_BASE) || 670 sctp_is_mobility_feature_on(stcb->sctp_ep, 671 SCTP_MOBILITY_FASTHANDOFF)) && 672 sctp_is_mobility_feature_on(stcb->sctp_ep, 673 SCTP_MOBILITY_PRIM_DELETED)) { 674 675 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, 676 stcb->sctp_ep, stcb, 
NULL, 677 SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 678 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 679 SCTP_MOBILITY_FASTHANDOFF)) { 680 sctp_assoc_immediate_retrans(stcb, 681 stcb->asoc.primary_destination); 682 } 683 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 684 SCTP_MOBILITY_BASE)) { 685 sctp_move_chunks_from_net(stcb, 686 stcb->asoc.deleted_primary); 687 } 688 sctp_delete_prim_timer(stcb->sctp_ep, stcb, 689 stcb->asoc.deleted_primary); 690 } 691 } 692 } 693 694 static int 695 sctp_handle_nat_colliding_state(struct sctp_tcb *stcb) 696 { 697 /* 698 * return 0 means we want you to proceed with the abort non-zero 699 * means no abort processing 700 */ 701 struct sctpasochead *head; 702 703 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) { 704 /* generate a new vtag and send init */ 705 LIST_REMOVE(stcb, sctp_asocs); 706 stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 707 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; 708 /* 709 * put it in the bucket in the vtag hash of assoc's for the 710 * system 711 */ 712 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 713 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 714 return (1); 715 } 716 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 717 /* 718 * treat like a case where the cookie expired i.e.: - dump 719 * current cookie. - generate a new vtag. - resend init. 
		 */
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		/* Fall back from COOKIE-ECHOED to COOKIE-WAIT. */
		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, &stcb->asoc);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	return (0);
}

/*
 * Handle the "NAT missing state" magic abort cause: try to make the
 * peer rebuild its state via an ASCONF, which requires AUTH support.
 */
static int
sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*
	 * return 0 means we want you to proceed with the abort non-zero
	 * means no abort processing
	 */
	if (stcb->asoc.auth_supported == 0) {
		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
		return (0);
	}
	sctp_asconf_send_nat_state_update(stcb, net);
	return (1);
}


/*
 * ABORT handler: unless the abort carries one of the two magic NAT
 * causes (which may be recoverable), notify the ULP and free the
 * association.  On return the TCB is gone (and its lock released).
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *abort,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif
	uint16_t len;
	uint16_t error;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	len = ntohs(abort->ch.chunk_length);
	if (len > sizeof(struct sctp_chunkhdr)) {
		/*
		 * Need to check the cause codes for our two magic nat
		 * aborts which don't kill the assoc necessarily.
		 */
		struct sctp_gen_error_cause *cause;

		cause = (struct sctp_gen_error_cause *)(abort + 1);
		error = ntohs(cause->code);
		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    abort->ch.chunk_flags);
			if (sctp_handle_nat_colliding_state(stcb)) {
				return;
			}
		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    abort->ch.chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return;
			}
		}
	} else {
		error = 0;
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: the socket lock must be taken before the TCB
	 * lock, so drop and reacquire around it while holding a refcount.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}

/*
 * Start PMTU and heartbeat timers on every destination, sending
 * immediate heartbeats (up to sctp_hb_maxburst) to unconfirmed ones.
 */
static void
sctp_start_net_timers(struct sctp_tcb *stcb)
{
	uint32_t cnt_hb_sent;
	struct sctp_nets *net;

	cnt_hb_sent = 0;
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		/*
		 * For each network start: 1) A pmtu timer. 2) A HB timer 3)
		 * If the dest in unconfirmed send a hb as well if under
		 * max_hb_burst have been sent.
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
		    (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
			cnt_hb_sent++;
		}
	}
	if (cnt_hb_sent) {
		/* Push the queued heartbeats onto the wire. */
		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_COOKIE_ACK,
		    SCTP_SO_NOT_LOCKED);
	}
}


/*
 * SHUTDOWN handler: process the cumulative ack it carries, terminate
 * any partial-delivery in progress, move to SHUTDOWN-RECEIVED, and if
 * nothing is left to send, answer with SHUTDOWN-ACK and move to
 * SHUTDOWN-ACK-SENT.  Sets *abort_flag if the ack processing aborts
 * the association.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;
	int old_state;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	}
	old_state = SCTP_GET_STATE(asoc);
	sctp_update_acked(stcb, cp, abort_flag);
	if (*abort_flag) {
		return;
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->control_pdapi->on_strm_q) {
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[asoc->control_pdapi->sinfo_stream];
			if (asoc->control_pdapi->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, asoc->control_pdapi, next_instrm);
				asoc->control_pdapi->on_strm_q = 0;
			} else if (asoc->control_pdapi->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, asoc->control_pdapi, next_instrm);
				asoc->control_pdapi->on_strm_q = 0;
#ifdef INVARIANTS
			} else {
				panic("Unknown state on ctrl:%p on_strm_q:%d",
				    asoc->control_pdapi,
				    asoc->control_pdapi->on_strm_q);
#endif
			}
		}
		/* Mark the partial delivery as terminated by the shutdown. */
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Socket lock before TCB lock; hold a ref across the gap. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		if (stcb->sctp_socket) {
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
		    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
			sctp_stop_timers_for_shutdown(stcb);
			sctp_send_shutdown_ack(stcb, net);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
			    stcb->sctp_ep, stcb, net);
		} else if (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) {
			/* Retransmitted SHUTDOWN: re-send our SHUTDOWN-ACK. */
			sctp_send_shutdown_ack(stcb, net);
		}
	}
}

static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_association *asoc;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(stcb->sctp_ep);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
	if (stcb == NULL)
		return;

	asoc =
&stcb->asoc; 1006 /* process according to association state */ 1007 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 1008 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 1009 /* unexpected SHUTDOWN-ACK... do OOTB handling... */ 1010 sctp_send_shutdown_complete(stcb, net, 1); 1011 SCTP_TCB_UNLOCK(stcb); 1012 return; 1013 } 1014 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 1015 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 1016 /* unexpected SHUTDOWN-ACK... so ignore... */ 1017 SCTP_TCB_UNLOCK(stcb); 1018 return; 1019 } 1020 if (asoc->control_pdapi) { 1021 /* 1022 * With a normal shutdown we assume the end of last record. 1023 */ 1024 SCTP_INP_READ_LOCK(stcb->sctp_ep); 1025 asoc->control_pdapi->end_added = 1; 1026 asoc->control_pdapi->pdapi_aborted = 1; 1027 asoc->control_pdapi = NULL; 1028 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 1029 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1030 atomic_add_int(&stcb->asoc.refcnt, 1); 1031 SCTP_TCB_UNLOCK(stcb); 1032 SCTP_SOCKET_LOCK(so, 1); 1033 SCTP_TCB_LOCK(stcb); 1034 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1035 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1036 /* assoc was freed while we were unlocked */ 1037 SCTP_SOCKET_UNLOCK(so, 1); 1038 return; 1039 } 1040 #endif 1041 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1042 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1043 SCTP_SOCKET_UNLOCK(so, 1); 1044 #endif 1045 } 1046 #ifdef INVARIANTS 1047 if (!TAILQ_EMPTY(&asoc->send_queue) || 1048 !TAILQ_EMPTY(&asoc->sent_queue) || 1049 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) { 1050 panic("Queues are not empty when handling SHUTDOWN-ACK"); 1051 } 1052 #endif 1053 /* stop the timer */ 1054 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, 1055 SCTP_FROM_SCTP_INPUT + SCTP_LOC_10); 1056 /* send SHUTDOWN-COMPLETE */ 1057 sctp_send_shutdown_complete(stcb, net, 0); 1058 /* notify upper layer protocol */ 1059 if (stcb->sctp_socket) { 1060 if 
((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1061 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 1062 stcb->sctp_socket->so_snd.sb_cc = 0; 1063 } 1064 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 1065 } 1066 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 1067 /* free the TCB but first save off the ep */ 1068 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1069 atomic_add_int(&stcb->asoc.refcnt, 1); 1070 SCTP_TCB_UNLOCK(stcb); 1071 SCTP_SOCKET_LOCK(so, 1); 1072 SCTP_TCB_LOCK(stcb); 1073 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1074 #endif 1075 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1076 SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); 1077 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1078 SCTP_SOCKET_UNLOCK(so, 1); 1079 #endif 1080 } 1081 1082 /* 1083 * Skip past the param header and then we will find the chunk that caused the 1084 * problem. There are two possibilities ASCONF or FWD-TSN other than that and 1085 * our peer must be broken. 1086 */ 1087 static void 1088 sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr, 1089 struct sctp_nets *net) 1090 { 1091 struct sctp_chunkhdr *chk; 1092 1093 chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr)); 1094 switch (chk->chunk_type) { 1095 case SCTP_ASCONF_ACK: 1096 case SCTP_ASCONF: 1097 sctp_asconf_cleanup(stcb, net); 1098 break; 1099 case SCTP_IFORWARD_CUM_TSN: 1100 case SCTP_FORWARD_CUM_TSN: 1101 stcb->asoc.prsctp_supported = 0; 1102 break; 1103 default: 1104 SCTPDBG(SCTP_DEBUG_INPUT2, 1105 "Peer does not support chunk type %d(%x)??\n", 1106 chk->chunk_type, (uint32_t)chk->chunk_type); 1107 break; 1108 } 1109 } 1110 1111 /* 1112 * Skip past the param header and then we will find the param that caused the 1113 * problem. There are a number of param's in a ASCONF OR the prsctp param 1114 * these will turn of specific features. 1115 * XXX: Is this the right thing to do? 
1116 */ 1117 static void 1118 sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr) 1119 { 1120 struct sctp_paramhdr *pbad; 1121 1122 pbad = phdr + 1; 1123 switch (ntohs(pbad->param_type)) { 1124 /* pr-sctp draft */ 1125 case SCTP_PRSCTP_SUPPORTED: 1126 stcb->asoc.prsctp_supported = 0; 1127 break; 1128 case SCTP_SUPPORTED_CHUNK_EXT: 1129 break; 1130 /* draft-ietf-tsvwg-addip-sctp */ 1131 case SCTP_HAS_NAT_SUPPORT: 1132 stcb->asoc.peer_supports_nat = 0; 1133 break; 1134 case SCTP_ADD_IP_ADDRESS: 1135 case SCTP_DEL_IP_ADDRESS: 1136 case SCTP_SET_PRIM_ADDR: 1137 stcb->asoc.asconf_supported = 0; 1138 break; 1139 case SCTP_SUCCESS_REPORT: 1140 case SCTP_ERROR_CAUSE_IND: 1141 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n"); 1142 SCTPDBG(SCTP_DEBUG_INPUT2, 1143 "Turning off ASCONF to this strange peer\n"); 1144 stcb->asoc.asconf_supported = 0; 1145 break; 1146 default: 1147 SCTPDBG(SCTP_DEBUG_INPUT2, 1148 "Peer does not support param type %d(%x)??\n", 1149 pbad->param_type, (uint32_t)pbad->param_type); 1150 break; 1151 } 1152 } 1153 1154 static int 1155 sctp_handle_error(struct sctp_chunkhdr *ch, 1156 struct sctp_tcb *stcb, struct sctp_nets *net) 1157 { 1158 int chklen; 1159 struct sctp_paramhdr *phdr; 1160 uint16_t error, error_type; 1161 uint16_t error_len; 1162 struct sctp_association *asoc; 1163 int adjust; 1164 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1165 struct socket *so; 1166 #endif 1167 1168 /* parse through all of the errors and process */ 1169 asoc = &stcb->asoc; 1170 phdr = (struct sctp_paramhdr *)((caddr_t)ch + 1171 sizeof(struct sctp_chunkhdr)); 1172 chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr); 1173 error = 0; 1174 while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) { 1175 /* Process an Error Cause */ 1176 error_type = ntohs(phdr->param_type); 1177 error_len = ntohs(phdr->param_length); 1178 if ((error_len > chklen) || (error_len == 0)) { 1179 /* invalid 
param length for this param */ 1180 SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n", 1181 chklen, error_len); 1182 return (0); 1183 } 1184 if (error == 0) { 1185 /* report the first error cause */ 1186 error = error_type; 1187 } 1188 switch (error_type) { 1189 case SCTP_CAUSE_INVALID_STREAM: 1190 case SCTP_CAUSE_MISSING_PARAM: 1191 case SCTP_CAUSE_INVALID_PARAM: 1192 case SCTP_CAUSE_NO_USER_DATA: 1193 SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n", 1194 error_type); 1195 break; 1196 case SCTP_CAUSE_NAT_COLLIDING_STATE: 1197 SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n", 1198 ch->chunk_flags); 1199 if (sctp_handle_nat_colliding_state(stcb)) { 1200 return (0); 1201 } 1202 break; 1203 case SCTP_CAUSE_NAT_MISSING_STATE: 1204 SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n", 1205 ch->chunk_flags); 1206 if (sctp_handle_nat_missing_state(stcb, net)) { 1207 return (0); 1208 } 1209 break; 1210 case SCTP_CAUSE_STALE_COOKIE: 1211 /* 1212 * We only act if we have echoed a cookie and are 1213 * waiting. 
1214 */ 1215 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 1216 int *p; 1217 1218 p = (int *)((caddr_t)phdr + sizeof(*phdr)); 1219 /* Save the time doubled */ 1220 asoc->cookie_preserve_req = ntohl(*p) << 1; 1221 asoc->stale_cookie_count++; 1222 if (asoc->stale_cookie_count > 1223 asoc->max_init_times) { 1224 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 1225 /* now free the asoc */ 1226 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1227 so = SCTP_INP_SO(stcb->sctp_ep); 1228 atomic_add_int(&stcb->asoc.refcnt, 1); 1229 SCTP_TCB_UNLOCK(stcb); 1230 SCTP_SOCKET_LOCK(so, 1); 1231 SCTP_TCB_LOCK(stcb); 1232 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1233 #endif 1234 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1235 SCTP_FROM_SCTP_INPUT + SCTP_LOC_12); 1236 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1237 SCTP_SOCKET_UNLOCK(so, 1); 1238 #endif 1239 return (-1); 1240 } 1241 /* blast back to INIT state */ 1242 sctp_toss_old_cookies(stcb, &stcb->asoc); 1243 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED; 1244 asoc->state |= SCTP_STATE_COOKIE_WAIT; 1245 sctp_stop_all_cookie_timers(stcb); 1246 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1247 } 1248 break; 1249 case SCTP_CAUSE_UNRESOLVABLE_ADDR: 1250 /* 1251 * Nothing we can do here, we don't do hostname 1252 * addresses so if the peer does not like my IPv6 1253 * (or IPv4 for that matter) it does not matter. If 1254 * they don't support that type of address, they can 1255 * NOT possibly get that packet type... i.e. with no 1256 * IPv6 you can't receive a IPv6 packet. so we can 1257 * safely ignore this one. If we ever added support 1258 * for HOSTNAME Addresses, then we would need to do 1259 * something here. 
1260 */ 1261 break; 1262 case SCTP_CAUSE_UNRECOG_CHUNK: 1263 sctp_process_unrecog_chunk(stcb, phdr, net); 1264 break; 1265 case SCTP_CAUSE_UNRECOG_PARAM: 1266 sctp_process_unrecog_param(stcb, phdr); 1267 break; 1268 case SCTP_CAUSE_COOKIE_IN_SHUTDOWN: 1269 /* 1270 * We ignore this since the timer will drive out a 1271 * new cookie anyway and there timer will drive us 1272 * to send a SHUTDOWN_COMPLETE. We can't send one 1273 * here since we don't have their tag. 1274 */ 1275 break; 1276 case SCTP_CAUSE_DELETING_LAST_ADDR: 1277 case SCTP_CAUSE_RESOURCE_SHORTAGE: 1278 case SCTP_CAUSE_DELETING_SRC_ADDR: 1279 /* 1280 * We should NOT get these here, but in a 1281 * ASCONF-ACK. 1282 */ 1283 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n", 1284 error_type); 1285 break; 1286 case SCTP_CAUSE_OUT_OF_RESC: 1287 /* 1288 * And what, pray tell do we do with the fact that 1289 * the peer is out of resources? Not really sure we 1290 * could do anything but abort. I suspect this 1291 * should have came WITH an abort instead of in a 1292 * OP-ERROR. 
1293 */ 1294 break; 1295 default: 1296 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n", 1297 error_type); 1298 break; 1299 } 1300 adjust = SCTP_SIZE32(error_len); 1301 chklen -= adjust; 1302 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust); 1303 } 1304 sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, error, ch, SCTP_SO_NOT_LOCKED); 1305 return (0); 1306 } 1307 1308 static int 1309 sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, 1310 struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, 1311 struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 1312 struct sctp_nets *net, int *abort_no_unlock, 1313 uint8_t mflowtype, uint32_t mflowid, 1314 uint32_t vrf_id) 1315 { 1316 struct sctp_init_ack *init_ack; 1317 struct mbuf *op_err; 1318 1319 SCTPDBG(SCTP_DEBUG_INPUT2, 1320 "sctp_handle_init_ack: handling INIT-ACK\n"); 1321 1322 if (stcb == NULL) { 1323 SCTPDBG(SCTP_DEBUG_INPUT2, 1324 "sctp_handle_init_ack: TCB is null\n"); 1325 return (-1); 1326 } 1327 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) { 1328 /* Invalid length */ 1329 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 1330 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1331 src, dst, sh, op_err, 1332 mflowtype, mflowid, 1333 vrf_id, net->port); 1334 *abort_no_unlock = 1; 1335 return (-1); 1336 } 1337 init_ack = &cp->init; 1338 /* validate parameters */ 1339 if (init_ack->initiate_tag == 0) { 1340 /* protocol error... send an abort */ 1341 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 1342 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1343 src, dst, sh, op_err, 1344 mflowtype, mflowid, 1345 vrf_id, net->port); 1346 *abort_no_unlock = 1; 1347 return (-1); 1348 } 1349 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) { 1350 /* protocol error... 
		   send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_inbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_outbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
		    mflowtype, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* process according to association state... */
	switch (stcb->asoc.state & SCTP_STATE_MASK) {
	case SCTP_STATE_COOKIE_WAIT:
		/* this is the expected state for this chunk */
		/* process the INIT-ACK parameters */
		if (stcb->asoc.primary_destination->dest_state &
		    SCTP_ADDR_UNCONFIRMED) {
			/*
			 * The primary is where we sent the INIT, we can
			 * always consider it confirmed when the INIT-ACK is
			 * returned. Do this before we load addresses
			 * though.
			 */
			stcb->asoc.primary_destination->dest_state &=
			    ~SCTP_ADDR_UNCONFIRMED;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
		}
		if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
		    net, abort_no_unlock,
		    mflowtype, mflowid,
		    vrf_id) < 0) {
			/* error in parsing parameters */
			return (-1);
		}
		/* update our state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);

		/* reset the RTO calc */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		/*
		 * collapse the init timer back in case of a exponential
		 * backoff
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
		    stcb, net);
		/*
		 * the send at the end of the inbound data processing will
		 * cause the cookie to be sent
		 */
		break;
	case SCTP_STATE_SHUTDOWN_SENT:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_COOKIE_ECHOED:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_OPEN:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_EMPTY:
	case SCTP_STATE_INUSE:
	default:
		/* incorrect state... discard */
		return (-1);
		break;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
	return (0);
}

/* Forward declaration; needed by sctp_process_cookie_existing() for the
 * NAT "duplicate association" hack. */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port);


/*
 * handle a state cookie for an existing association m: input packet mbuf
 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
 * "split" mbuf and the cookie signature does not exist offset: offset into
 * mbuf to the cookie-echo chunk
 */
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_association *asoc;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sctp_nets *net;
	struct mbuf *op_err;
	int init_offset, initack_offset, i;
	int retval;
	int spec_flag = 0;
	uint32_t how_indx;
#if defined(SCTP_DETAILED_STR_STATS)
	int j;
#endif

	net = *netp;
	/* I know that the TCB is non-NULL from the caller */
	asoc = &stcb->asoc;
	/* Record which collision-case path we take (debug aid). */
	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
		if
(asoc->cookie_how[how_indx] == 0) 1494 break; 1495 } 1496 if (how_indx < sizeof(asoc->cookie_how)) { 1497 asoc->cookie_how[how_indx] = 1; 1498 } 1499 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 1500 /* SHUTDOWN came in after sending INIT-ACK */ 1501 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 1502 op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, ""); 1503 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err, 1504 mflowtype, mflowid, inp->fibnum, 1505 vrf_id, net->port); 1506 if (how_indx < sizeof(asoc->cookie_how)) 1507 asoc->cookie_how[how_indx] = 2; 1508 return (NULL); 1509 } 1510 /* 1511 * find and validate the INIT chunk in the cookie (peer's info) the 1512 * INIT should start after the cookie-echo header struct (chunk 1513 * header, state cookie header struct) 1514 */ 1515 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk); 1516 1517 init_cp = (struct sctp_init_chunk *) 1518 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1519 (uint8_t *)&init_buf); 1520 if (init_cp == NULL) { 1521 /* could not pull a INIT chunk in cookie */ 1522 return (NULL); 1523 } 1524 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1525 return (NULL); 1526 } 1527 /* 1528 * find and validate the INIT-ACK chunk in the cookie (my info) the 1529 * INIT-ACK follows the INIT chunk 1530 */ 1531 initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length)); 1532 initack_cp = (struct sctp_init_ack_chunk *) 1533 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1534 (uint8_t *)&initack_buf); 1535 if (initack_cp == NULL) { 1536 /* could not pull INIT-ACK chunk in cookie */ 1537 return (NULL); 1538 } 1539 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1540 return (NULL); 1541 } 1542 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1543 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) { 1544 /* 1545 * case D in Section 5.2.4 Table 2: MMAA process accordingly 1546 
* to get into the OPEN state 1547 */ 1548 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1549 /*- 1550 * Opps, this means that we somehow generated two vtag's 1551 * the same. I.e. we did: 1552 * Us Peer 1553 * <---INIT(tag=a)------ 1554 * ----INIT-ACK(tag=t)--> 1555 * ----INIT(tag=t)------> *1 1556 * <---INIT-ACK(tag=a)--- 1557 * <----CE(tag=t)------------- *2 1558 * 1559 * At point *1 we should be generating a different 1560 * tag t'. Which means we would throw away the CE and send 1561 * ours instead. Basically this is case C (throw away side). 1562 */ 1563 if (how_indx < sizeof(asoc->cookie_how)) 1564 asoc->cookie_how[how_indx] = 17; 1565 return (NULL); 1566 1567 } 1568 switch (SCTP_GET_STATE(asoc)) { 1569 case SCTP_STATE_COOKIE_WAIT: 1570 case SCTP_STATE_COOKIE_ECHOED: 1571 /* 1572 * INIT was sent but got a COOKIE_ECHO with the 1573 * correct tags... just accept it...but we must 1574 * process the init so that we can make sure we have 1575 * the right seq no's. 1576 */ 1577 /* First we must process the INIT !! 
*/ 1578 retval = sctp_process_init(init_cp, stcb); 1579 if (retval < 0) { 1580 if (how_indx < sizeof(asoc->cookie_how)) 1581 asoc->cookie_how[how_indx] = 3; 1582 return (NULL); 1583 } 1584 /* we have already processed the INIT so no problem */ 1585 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, 1586 stcb, net, 1587 SCTP_FROM_SCTP_INPUT + SCTP_LOC_13); 1588 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, 1589 stcb, net, 1590 SCTP_FROM_SCTP_INPUT + SCTP_LOC_14); 1591 /* update current state */ 1592 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1593 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1594 else 1595 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1596 1597 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1598 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1599 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1600 stcb->sctp_ep, stcb, asoc->primary_destination); 1601 } 1602 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1603 sctp_stop_all_cookie_timers(stcb); 1604 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1605 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1606 (!SCTP_IS_LISTENING(inp))) { 1607 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1608 struct socket *so; 1609 #endif 1610 /* 1611 * Here is where collision would go if we 1612 * did a connect() and instead got a 1613 * init/init-ack/cookie done before the 1614 * init-ack came back.. 
1615 */ 1616 stcb->sctp_ep->sctp_flags |= 1617 SCTP_PCB_FLAGS_CONNECTED; 1618 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1619 so = SCTP_INP_SO(stcb->sctp_ep); 1620 atomic_add_int(&stcb->asoc.refcnt, 1); 1621 SCTP_TCB_UNLOCK(stcb); 1622 SCTP_SOCKET_LOCK(so, 1); 1623 SCTP_TCB_LOCK(stcb); 1624 atomic_add_int(&stcb->asoc.refcnt, -1); 1625 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1626 SCTP_SOCKET_UNLOCK(so, 1); 1627 return (NULL); 1628 } 1629 #endif 1630 soisconnected(stcb->sctp_socket); 1631 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1632 SCTP_SOCKET_UNLOCK(so, 1); 1633 #endif 1634 } 1635 /* notify upper layer */ 1636 *notification = SCTP_NOTIFY_ASSOC_UP; 1637 /* 1638 * since we did not send a HB make sure we don't 1639 * double things 1640 */ 1641 net->hb_responded = 1; 1642 net->RTO = sctp_calculate_rto(stcb, asoc, net, 1643 &cookie->time_entered, 1644 sctp_align_unsafe_makecopy, 1645 SCTP_RTT_FROM_NON_DATA); 1646 1647 if (stcb->asoc.sctp_autoclose_ticks && 1648 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) { 1649 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 1650 inp, stcb, NULL); 1651 } 1652 break; 1653 default: 1654 /* 1655 * we're in the OPEN state (or beyond), so peer must 1656 * have simply lost the COOKIE-ACK 1657 */ 1658 break; 1659 } /* end switch */ 1660 sctp_stop_all_cookie_timers(stcb); 1661 /* 1662 * We ignore the return code here.. not sure if we should 1663 * somehow abort.. but we do have an existing asoc. This 1664 * really should not fail. 
1665 */ 1666 if (sctp_load_addresses_from_init(stcb, m, 1667 init_offset + sizeof(struct sctp_init_chunk), 1668 initack_offset, src, dst, init_src, stcb->asoc.port)) { 1669 if (how_indx < sizeof(asoc->cookie_how)) 1670 asoc->cookie_how[how_indx] = 4; 1671 return (NULL); 1672 } 1673 /* respond with a COOKIE-ACK */ 1674 sctp_toss_old_cookies(stcb, asoc); 1675 sctp_send_cookie_ack(stcb); 1676 if (how_indx < sizeof(asoc->cookie_how)) 1677 asoc->cookie_how[how_indx] = 5; 1678 return (stcb); 1679 } 1680 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1681 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag && 1682 cookie->tie_tag_my_vtag == 0 && 1683 cookie->tie_tag_peer_vtag == 0) { 1684 /* 1685 * case C in Section 5.2.4 Table 2: XMOO silently discard 1686 */ 1687 if (how_indx < sizeof(asoc->cookie_how)) 1688 asoc->cookie_how[how_indx] = 6; 1689 return (NULL); 1690 } 1691 /* 1692 * If nat support, and the below and stcb is established, send back 1693 * a ABORT(colliding state) if we are established. 1694 */ 1695 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) && 1696 (asoc->peer_supports_nat) && 1697 ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1698 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) || 1699 (asoc->peer_vtag == 0)))) { 1700 /* 1701 * Special case - Peer's support nat. We may have two init's 1702 * that we gave out the same tag on since one was not 1703 * established.. i.e. we get INIT from host-1 behind the nat 1704 * and we respond tag-a, we get a INIT from host-2 behind 1705 * the nat and we get tag-a again. Then we bring up host-1 1706 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1). 1707 * Now we have colliding state. We must send an abort here 1708 * with colliding state indication. 
1709 */ 1710 op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, ""); 1711 sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, 1712 mflowtype, mflowid, inp->fibnum, 1713 vrf_id, port); 1714 return (NULL); 1715 } 1716 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1717 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) || 1718 (asoc->peer_vtag == 0))) { 1719 /* 1720 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info 1721 * should be ok, re-accept peer info 1722 */ 1723 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1724 /* 1725 * Extension of case C. If we hit this, then the 1726 * random number generator returned the same vtag 1727 * when we first sent our INIT-ACK and when we later 1728 * sent our INIT. The side with the seq numbers that 1729 * are different will be the one that normnally 1730 * would have hit case C. This in effect "extends" 1731 * our vtags in this collision case to be 64 bits. 1732 * The same collision could occur aka you get both 1733 * vtag and seq number the same twice in a row.. but 1734 * is much less likely. If it did happen then we 1735 * would proceed through and bring up the assoc.. we 1736 * may end up with the wrong stream setup however.. 1737 * which would be bad.. but there is no way to 1738 * tell.. 
until we send on a stream that does not 1739 * exist :-) 1740 */ 1741 if (how_indx < sizeof(asoc->cookie_how)) 1742 asoc->cookie_how[how_indx] = 7; 1743 1744 return (NULL); 1745 } 1746 if (how_indx < sizeof(asoc->cookie_how)) 1747 asoc->cookie_how[how_indx] = 8; 1748 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 1749 SCTP_FROM_SCTP_INPUT + SCTP_LOC_15); 1750 sctp_stop_all_cookie_timers(stcb); 1751 /* 1752 * since we did not send a HB make sure we don't double 1753 * things 1754 */ 1755 net->hb_responded = 1; 1756 if (stcb->asoc.sctp_autoclose_ticks && 1757 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1758 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1759 NULL); 1760 } 1761 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1762 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1763 1764 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) { 1765 /* 1766 * Ok the peer probably discarded our data (if we 1767 * echoed a cookie+data). So anything on the 1768 * sent_queue should be marked for retransmit, we 1769 * may not get something to kick us so it COULD 1770 * still take a timeout to move these.. but it can't 1771 * hurt to mark them. 
1772 */ 1773 struct sctp_tmit_chunk *chk; 1774 1775 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1776 if (chk->sent < SCTP_DATAGRAM_RESEND) { 1777 chk->sent = SCTP_DATAGRAM_RESEND; 1778 sctp_flight_size_decrease(chk); 1779 sctp_total_flight_decrease(stcb, chk); 1780 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1781 spec_flag++; 1782 } 1783 } 1784 1785 } 1786 /* process the INIT info (peer's info) */ 1787 retval = sctp_process_init(init_cp, stcb); 1788 if (retval < 0) { 1789 if (how_indx < sizeof(asoc->cookie_how)) 1790 asoc->cookie_how[how_indx] = 9; 1791 return (NULL); 1792 } 1793 if (sctp_load_addresses_from_init(stcb, m, 1794 init_offset + sizeof(struct sctp_init_chunk), 1795 initack_offset, src, dst, init_src, stcb->asoc.port)) { 1796 if (how_indx < sizeof(asoc->cookie_how)) 1797 asoc->cookie_how[how_indx] = 10; 1798 return (NULL); 1799 } 1800 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) || 1801 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) { 1802 *notification = SCTP_NOTIFY_ASSOC_UP; 1803 1804 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1805 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1806 (!SCTP_IS_LISTENING(inp))) { 1807 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1808 struct socket *so; 1809 #endif 1810 stcb->sctp_ep->sctp_flags |= 1811 SCTP_PCB_FLAGS_CONNECTED; 1812 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1813 so = SCTP_INP_SO(stcb->sctp_ep); 1814 atomic_add_int(&stcb->asoc.refcnt, 1); 1815 SCTP_TCB_UNLOCK(stcb); 1816 SCTP_SOCKET_LOCK(so, 1); 1817 SCTP_TCB_LOCK(stcb); 1818 atomic_add_int(&stcb->asoc.refcnt, -1); 1819 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1820 SCTP_SOCKET_UNLOCK(so, 1); 1821 return (NULL); 1822 } 1823 #endif 1824 soisconnected(stcb->sctp_socket); 1825 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1826 SCTP_SOCKET_UNLOCK(so, 1); 1827 #endif 1828 } 1829 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1830 
SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1831 else 1832 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1833 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1834 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1835 SCTP_STAT_INCR_COUNTER32(sctps_restartestab); 1836 } else { 1837 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1838 } 1839 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1840 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1841 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1842 stcb->sctp_ep, stcb, asoc->primary_destination); 1843 } 1844 sctp_stop_all_cookie_timers(stcb); 1845 sctp_toss_old_cookies(stcb, asoc); 1846 sctp_send_cookie_ack(stcb); 1847 if (spec_flag) { 1848 /* 1849 * only if we have retrans set do we do this. What 1850 * this call does is get only the COOKIE-ACK out and 1851 * then when we return the normal call to 1852 * sctp_chunk_output will get the retrans out behind 1853 * this. 1854 */ 1855 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED); 1856 } 1857 if (how_indx < sizeof(asoc->cookie_how)) 1858 asoc->cookie_how[how_indx] = 11; 1859 1860 return (stcb); 1861 } 1862 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1863 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) && 1864 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce && 1865 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce && 1866 cookie->tie_tag_peer_vtag != 0) { 1867 struct sctpasochead *head; 1868 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1869 struct socket *so; 1870 #endif 1871 1872 if (asoc->peer_supports_nat) { 1873 /* 1874 * This is a gross gross hack. Just call the 1875 * cookie_new code since we are allowing a duplicate 1876 * association. I hope this works... 
1877 */ 1878 return (sctp_process_cookie_new(m, iphlen, offset, src, dst, 1879 sh, cookie, cookie_len, 1880 inp, netp, init_src, notification, 1881 auth_skipped, auth_offset, auth_len, 1882 mflowtype, mflowid, 1883 vrf_id, port)); 1884 } 1885 /* 1886 * case A in Section 5.2.4 Table 2: XXMM (peer restarted) 1887 */ 1888 /* temp code */ 1889 if (how_indx < sizeof(asoc->cookie_how)) 1890 asoc->cookie_how[how_indx] = 12; 1891 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, 1892 SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1893 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, 1894 SCTP_FROM_SCTP_INPUT + SCTP_LOC_17); 1895 1896 /* notify upper layer */ 1897 *notification = SCTP_NOTIFY_ASSOC_RESTART; 1898 atomic_add_int(&stcb->asoc.refcnt, 1); 1899 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) && 1900 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1901 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 1902 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1903 } 1904 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1905 SCTP_STAT_INCR_GAUGE32(sctps_restartestab); 1906 } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1907 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab); 1908 } 1909 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1910 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1911 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1912 stcb->sctp_ep, stcb, asoc->primary_destination); 1913 1914 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) { 1915 /* move to OPEN state, if not in SHUTDOWN_SENT */ 1916 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1917 } 1918 asoc->pre_open_streams = 1919 ntohs(initack_cp->init.num_outbound_streams); 1920 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1921 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 1922 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 1923 1924 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1925 1926 
asoc->str_reset_seq_in = asoc->init_seq_number; 1927 1928 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1929 if (asoc->mapping_array) { 1930 memset(asoc->mapping_array, 0, 1931 asoc->mapping_array_size); 1932 } 1933 if (asoc->nr_mapping_array) { 1934 memset(asoc->nr_mapping_array, 0, 1935 asoc->mapping_array_size); 1936 } 1937 SCTP_TCB_UNLOCK(stcb); 1938 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1939 so = SCTP_INP_SO(stcb->sctp_ep); 1940 SCTP_SOCKET_LOCK(so, 1); 1941 #endif 1942 SCTP_INP_INFO_WLOCK(); 1943 SCTP_INP_WLOCK(stcb->sctp_ep); 1944 SCTP_TCB_LOCK(stcb); 1945 atomic_add_int(&stcb->asoc.refcnt, -1); 1946 /* send up all the data */ 1947 SCTP_TCB_SEND_LOCK(stcb); 1948 1949 sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED); 1950 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1951 stcb->asoc.strmout[i].chunks_on_queues = 0; 1952 #if defined(SCTP_DETAILED_STR_STATS) 1953 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) { 1954 asoc->strmout[i].abandoned_sent[j] = 0; 1955 asoc->strmout[i].abandoned_unsent[j] = 0; 1956 } 1957 #else 1958 asoc->strmout[i].abandoned_sent[0] = 0; 1959 asoc->strmout[i].abandoned_unsent[0] = 0; 1960 #endif 1961 stcb->asoc.strmout[i].sid = i; 1962 stcb->asoc.strmout[i].next_mid_ordered = 0; 1963 stcb->asoc.strmout[i].next_mid_unordered = 0; 1964 stcb->asoc.strmout[i].last_msg_incomplete = 0; 1965 } 1966 /* process the INIT-ACK info (my info) */ 1967 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1968 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1969 1970 /* pull from vtag hash */ 1971 LIST_REMOVE(stcb, sctp_asocs); 1972 /* re-insert to new vtag position */ 1973 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, 1974 SCTP_BASE_INFO(hashasocmark))]; 1975 /* 1976 * put it in the bucket in the vtag hash of assoc's for the 1977 * system 1978 */ 1979 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 1980 1981 SCTP_TCB_SEND_UNLOCK(stcb); 1982 SCTP_INP_WUNLOCK(stcb->sctp_ep); 1983 
SCTP_INP_INFO_WUNLOCK(); 1984 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1985 SCTP_SOCKET_UNLOCK(so, 1); 1986 #endif 1987 asoc->total_flight = 0; 1988 asoc->total_flight_count = 0; 1989 /* process the INIT info (peer's info) */ 1990 retval = sctp_process_init(init_cp, stcb); 1991 if (retval < 0) { 1992 if (how_indx < sizeof(asoc->cookie_how)) 1993 asoc->cookie_how[how_indx] = 13; 1994 1995 return (NULL); 1996 } 1997 /* 1998 * since we did not send a HB make sure we don't double 1999 * things 2000 */ 2001 net->hb_responded = 1; 2002 2003 if (sctp_load_addresses_from_init(stcb, m, 2004 init_offset + sizeof(struct sctp_init_chunk), 2005 initack_offset, src, dst, init_src, stcb->asoc.port)) { 2006 if (how_indx < sizeof(asoc->cookie_how)) 2007 asoc->cookie_how[how_indx] = 14; 2008 2009 return (NULL); 2010 } 2011 /* respond with a COOKIE-ACK */ 2012 sctp_stop_all_cookie_timers(stcb); 2013 sctp_toss_old_cookies(stcb, asoc); 2014 sctp_send_cookie_ack(stcb); 2015 if (how_indx < sizeof(asoc->cookie_how)) 2016 asoc->cookie_how[how_indx] = 15; 2017 2018 return (stcb); 2019 } 2020 if (how_indx < sizeof(asoc->cookie_how)) 2021 asoc->cookie_how[how_indx] = 16; 2022 /* all other cases... 
*/ 2023 return (NULL); 2024 } 2025 2026 2027 /* 2028 * handle a state cookie for a new association m: input packet mbuf chain-- 2029 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf 2030 * and the cookie signature does not exist offset: offset into mbuf to the 2031 * cookie-echo chunk length: length of the cookie chunk to: where the init 2032 * was from returns a new TCB 2033 */ 2034 static struct sctp_tcb * 2035 sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, 2036 struct sockaddr *src, struct sockaddr *dst, 2037 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 2038 struct sctp_inpcb *inp, struct sctp_nets **netp, 2039 struct sockaddr *init_src, int *notification, 2040 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2041 uint8_t mflowtype, uint32_t mflowid, 2042 uint32_t vrf_id, uint16_t port) 2043 { 2044 struct sctp_tcb *stcb; 2045 struct sctp_init_chunk *init_cp, init_buf; 2046 struct sctp_init_ack_chunk *initack_cp, initack_buf; 2047 union sctp_sockstore store; 2048 struct sctp_association *asoc; 2049 int init_offset, initack_offset, initack_limit; 2050 int retval; 2051 int error = 0; 2052 uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE]; 2053 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2054 struct socket *so; 2055 2056 so = SCTP_INP_SO(inp); 2057 #endif 2058 2059 /* 2060 * find and validate the INIT chunk in the cookie (peer's info) the 2061 * INIT should start after the cookie-echo header struct (chunk 2062 * header, state cookie header struct) 2063 */ 2064 init_offset = offset + sizeof(struct sctp_cookie_echo_chunk); 2065 init_cp = (struct sctp_init_chunk *) 2066 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 2067 (uint8_t *)&init_buf); 2068 if (init_cp == NULL) { 2069 /* could not pull a INIT chunk in cookie */ 2070 SCTPDBG(SCTP_DEBUG_INPUT1, 2071 "process_cookie_new: could not pull INIT chunk hdr\n"); 2072 return (NULL); 2073 } 2074 if 
(init_cp->ch.chunk_type != SCTP_INITIATION) { 2075 SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n"); 2076 return (NULL); 2077 } 2078 initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length)); 2079 /* 2080 * find and validate the INIT-ACK chunk in the cookie (my info) the 2081 * INIT-ACK follows the INIT chunk 2082 */ 2083 initack_cp = (struct sctp_init_ack_chunk *) 2084 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 2085 (uint8_t *)&initack_buf); 2086 if (initack_cp == NULL) { 2087 /* could not pull INIT-ACK chunk in cookie */ 2088 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n"); 2089 return (NULL); 2090 } 2091 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 2092 return (NULL); 2093 } 2094 /* 2095 * NOTE: We can't use the INIT_ACK's chk_length to determine the 2096 * "initack_limit" value. This is because the chk_length field 2097 * includes the length of the cookie, but the cookie is omitted when 2098 * the INIT and INIT_ACK are tacked onto the cookie... 2099 */ 2100 initack_limit = offset + cookie_len; 2101 2102 /* 2103 * now that we know the INIT/INIT-ACK are in place, create a new TCB 2104 * and popluate 2105 */ 2106 2107 /* 2108 * Here we do a trick, we set in NULL for the proc/thread argument. 2109 * We do this since in effect we only use the p argument when the 2110 * socket is unbound and we must do an implicit bind. Since we are 2111 * getting a cookie, we cannot be unbound. 2112 */ 2113 stcb = sctp_aloc_assoc(inp, init_src, &error, 2114 ntohl(initack_cp->init.initiate_tag), vrf_id, 2115 ntohs(initack_cp->init.num_outbound_streams), 2116 port, 2117 (struct thread *)NULL 2118 ); 2119 if (stcb == NULL) { 2120 struct mbuf *op_err; 2121 2122 /* memory problem? 
*/ 2123 SCTPDBG(SCTP_DEBUG_INPUT1, 2124 "process_cookie_new: no room for another TCB!\n"); 2125 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 2126 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 2127 src, dst, sh, op_err, 2128 mflowtype, mflowid, 2129 vrf_id, port); 2130 return (NULL); 2131 } 2132 /* get the correct sctp_nets */ 2133 if (netp) 2134 *netp = sctp_findnet(stcb, init_src); 2135 2136 asoc = &stcb->asoc; 2137 /* get scope variables out of cookie */ 2138 asoc->scope.ipv4_local_scope = cookie->ipv4_scope; 2139 asoc->scope.site_scope = cookie->site_scope; 2140 asoc->scope.local_scope = cookie->local_scope; 2141 asoc->scope.loopback_scope = cookie->loopback_scope; 2142 2143 if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) || 2144 (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) { 2145 struct mbuf *op_err; 2146 2147 /* 2148 * Houston we have a problem. The EP changed while the 2149 * cookie was in flight. Only recourse is to abort the 2150 * association. 
2151 */ 2152 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 2153 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 2154 src, dst, sh, op_err, 2155 mflowtype, mflowid, 2156 vrf_id, port); 2157 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2158 atomic_add_int(&stcb->asoc.refcnt, 1); 2159 SCTP_TCB_UNLOCK(stcb); 2160 SCTP_SOCKET_LOCK(so, 1); 2161 SCTP_TCB_LOCK(stcb); 2162 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2163 #endif 2164 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2165 SCTP_FROM_SCTP_INPUT + SCTP_LOC_18); 2166 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2167 SCTP_SOCKET_UNLOCK(so, 1); 2168 #endif 2169 return (NULL); 2170 } 2171 /* process the INIT-ACK info (my info) */ 2172 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 2173 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 2174 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 2175 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 2176 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 2177 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 2178 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 2179 asoc->str_reset_seq_in = asoc->init_seq_number; 2180 2181 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 2182 2183 /* process the INIT info (peer's info) */ 2184 if (netp) 2185 retval = sctp_process_init(init_cp, stcb); 2186 else 2187 retval = 0; 2188 if (retval < 0) { 2189 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2190 atomic_add_int(&stcb->asoc.refcnt, 1); 2191 SCTP_TCB_UNLOCK(stcb); 2192 SCTP_SOCKET_LOCK(so, 1); 2193 SCTP_TCB_LOCK(stcb); 2194 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2195 #endif 2196 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2197 SCTP_FROM_SCTP_INPUT + SCTP_LOC_19); 2198 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2199 SCTP_SOCKET_UNLOCK(so, 1); 2200 #endif 2201 return (NULL); 2202 } 2203 /* 
load all addresses */ 2204 if (sctp_load_addresses_from_init(stcb, m, 2205 init_offset + sizeof(struct sctp_init_chunk), initack_offset, 2206 src, dst, init_src, port)) { 2207 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2208 atomic_add_int(&stcb->asoc.refcnt, 1); 2209 SCTP_TCB_UNLOCK(stcb); 2210 SCTP_SOCKET_LOCK(so, 1); 2211 SCTP_TCB_LOCK(stcb); 2212 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2213 #endif 2214 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2215 SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2216 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2217 SCTP_SOCKET_UNLOCK(so, 1); 2218 #endif 2219 return (NULL); 2220 } 2221 /* 2222 * verify any preceding AUTH chunk that was skipped 2223 */ 2224 /* pull the local authentication parameters from the cookie/init-ack */ 2225 sctp_auth_get_cookie_params(stcb, m, 2226 initack_offset + sizeof(struct sctp_init_ack_chunk), 2227 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk))); 2228 if (auth_skipped) { 2229 struct sctp_auth_chunk *auth; 2230 2231 auth = (struct sctp_auth_chunk *) 2232 sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf); 2233 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) { 2234 /* auth HMAC failed, dump the assoc and packet */ 2235 SCTPDBG(SCTP_DEBUG_AUTH1, 2236 "COOKIE-ECHO: AUTH failed\n"); 2237 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2238 atomic_add_int(&stcb->asoc.refcnt, 1); 2239 SCTP_TCB_UNLOCK(stcb); 2240 SCTP_SOCKET_LOCK(so, 1); 2241 SCTP_TCB_LOCK(stcb); 2242 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2243 #endif 2244 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2245 SCTP_FROM_SCTP_INPUT + SCTP_LOC_21); 2246 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2247 SCTP_SOCKET_UNLOCK(so, 1); 2248 #endif 2249 return (NULL); 2250 } else { 2251 /* remaining chunks checked... 
good to go */ 2252 stcb->asoc.authenticated = 1; 2253 } 2254 } 2255 /* 2256 * if we're doing ASCONFs, check to see if we have any new local 2257 * addresses that need to get added to the peer (eg. addresses 2258 * changed while cookie echo in flight). This needs to be done 2259 * after we go to the OPEN state to do the correct asconf 2260 * processing. else, make sure we have the correct addresses in our 2261 * lists 2262 */ 2263 2264 /* warning, we re-use sin, sin6, sa_store here! */ 2265 /* pull in local_address (our "from" address) */ 2266 switch (cookie->laddr_type) { 2267 #ifdef INET 2268 case SCTP_IPV4_ADDRESS: 2269 /* source addr is IPv4 */ 2270 memset(&store.sin, 0, sizeof(struct sockaddr_in)); 2271 store.sin.sin_family = AF_INET; 2272 store.sin.sin_len = sizeof(struct sockaddr_in); 2273 store.sin.sin_addr.s_addr = cookie->laddress[0]; 2274 break; 2275 #endif 2276 #ifdef INET6 2277 case SCTP_IPV6_ADDRESS: 2278 /* source addr is IPv6 */ 2279 memset(&store.sin6, 0, sizeof(struct sockaddr_in6)); 2280 store.sin6.sin6_family = AF_INET6; 2281 store.sin6.sin6_len = sizeof(struct sockaddr_in6); 2282 store.sin6.sin6_scope_id = cookie->scope_id; 2283 memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr)); 2284 break; 2285 #endif 2286 default: 2287 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2288 atomic_add_int(&stcb->asoc.refcnt, 1); 2289 SCTP_TCB_UNLOCK(stcb); 2290 SCTP_SOCKET_LOCK(so, 1); 2291 SCTP_TCB_LOCK(stcb); 2292 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2293 #endif 2294 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 2295 SCTP_FROM_SCTP_INPUT + SCTP_LOC_22); 2296 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2297 SCTP_SOCKET_UNLOCK(so, 1); 2298 #endif 2299 return (NULL); 2300 } 2301 2302 /* update current state */ 2303 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 2304 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 2305 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 2306 
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 2307 stcb->sctp_ep, stcb, asoc->primary_destination); 2308 } 2309 sctp_stop_all_cookie_timers(stcb); 2310 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab); 2311 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 2312 2313 /* set up to notify upper layer */ 2314 *notification = SCTP_NOTIFY_ASSOC_UP; 2315 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2316 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2317 (!SCTP_IS_LISTENING(inp))) { 2318 /* 2319 * This is an endpoint that called connect() how it got a 2320 * cookie that is NEW is a bit of a mystery. It must be that 2321 * the INIT was sent, but before it got there.. a complete 2322 * INIT/INIT-ACK/COOKIE arrived. But of course then it 2323 * should have went to the other code.. not here.. oh well.. 2324 * a bit of protection is worth having.. 2325 */ 2326 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 2327 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2328 atomic_add_int(&stcb->asoc.refcnt, 1); 2329 SCTP_TCB_UNLOCK(stcb); 2330 SCTP_SOCKET_LOCK(so, 1); 2331 SCTP_TCB_LOCK(stcb); 2332 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2333 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2334 SCTP_SOCKET_UNLOCK(so, 1); 2335 return (NULL); 2336 } 2337 #endif 2338 soisconnected(stcb->sctp_socket); 2339 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2340 SCTP_SOCKET_UNLOCK(so, 1); 2341 #endif 2342 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 2343 (SCTP_IS_LISTENING(inp))) { 2344 /* 2345 * We don't want to do anything with this one. Since it is 2346 * the listening guy. The timer will get started for 2347 * accepted connections in the caller. 
2348 */ 2349 ; 2350 } 2351 /* since we did not send a HB make sure we don't double things */ 2352 if ((netp) && (*netp)) 2353 (*netp)->hb_responded = 1; 2354 2355 if (stcb->asoc.sctp_autoclose_ticks && 2356 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2357 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); 2358 } 2359 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 2360 if ((netp != NULL) && (*netp != NULL)) { 2361 /* calculate the RTT and set the encaps port */ 2362 (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp, 2363 &cookie->time_entered, sctp_align_unsafe_makecopy, 2364 SCTP_RTT_FROM_NON_DATA); 2365 } 2366 /* respond with a COOKIE-ACK */ 2367 sctp_send_cookie_ack(stcb); 2368 2369 /* 2370 * check the address lists for any ASCONFs that need to be sent 2371 * AFTER the cookie-ack is sent 2372 */ 2373 sctp_check_address_list(stcb, m, 2374 initack_offset + sizeof(struct sctp_init_ack_chunk), 2375 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)), 2376 &store.sa, cookie->local_scope, cookie->site_scope, 2377 cookie->ipv4_scope, cookie->loopback_scope); 2378 2379 2380 return (stcb); 2381 } 2382 2383 /* 2384 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e 2385 * we NEED to make sure we are not already using the vtag. If so we 2386 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit! 
2387 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag, 2388 SCTP_BASE_INFO(hashasocmark))]; 2389 LIST_FOREACH(stcb, head, sctp_asocs) { 2390 if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) { 2391 -- SEND ABORT - TRY AGAIN -- 2392 } 2393 } 2394 */ 2395 2396 /* 2397 * handles a COOKIE-ECHO message stcb: modified to either a new or left as 2398 * existing (non-NULL) TCB 2399 */ 2400 static struct mbuf * 2401 sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2402 struct sockaddr *src, struct sockaddr *dst, 2403 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2404 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2405 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2406 struct sctp_tcb **locked_tcb, 2407 uint8_t mflowtype, uint32_t mflowid, 2408 uint32_t vrf_id, uint16_t port) 2409 { 2410 struct sctp_state_cookie *cookie; 2411 struct sctp_tcb *l_stcb = *stcb; 2412 struct sctp_inpcb *l_inp; 2413 struct sockaddr *to; 2414 struct sctp_pcb *ep; 2415 struct mbuf *m_sig; 2416 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2417 uint8_t *sig; 2418 uint8_t cookie_ok = 0; 2419 unsigned int sig_offset, cookie_offset; 2420 unsigned int cookie_len; 2421 struct timeval now; 2422 struct timeval time_expires; 2423 int notification = 0; 2424 struct sctp_nets *netl; 2425 int had_a_existing_tcb = 0; 2426 int send_int_conf = 0; 2427 #ifdef INET 2428 struct sockaddr_in sin; 2429 #endif 2430 #ifdef INET6 2431 struct sockaddr_in6 sin6; 2432 #endif 2433 2434 SCTPDBG(SCTP_DEBUG_INPUT2, 2435 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 2436 2437 if (inp_p == NULL) { 2438 return (NULL); 2439 } 2440 cookie = &cp->cookie; 2441 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2442 cookie_len = ntohs(cp->ch.chunk_length); 2443 2444 if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2445 sizeof(struct sctp_init_chunk) + 2446 sizeof(struct sctp_init_ack_chunk) + 
SCTP_SIGNATURE_SIZE) { 2447 /* cookie too small */ 2448 return (NULL); 2449 } 2450 if ((cookie->peerport != sh->src_port) || 2451 (cookie->myport != sh->dest_port) || 2452 (cookie->my_vtag != sh->v_tag)) { 2453 /* 2454 * invalid ports or bad tag. Note that we always leave the 2455 * v_tag in the header in network order and when we stored 2456 * it in the my_vtag slot we also left it in network order. 2457 * This maintains the match even though it may be in the 2458 * opposite byte order of the machine :-> 2459 */ 2460 return (NULL); 2461 } 2462 /* 2463 * split off the signature into its own mbuf (since it should not be 2464 * calculated in the sctp_hmac_m() call). 2465 */ 2466 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2467 m_sig = m_split(m, sig_offset, M_NOWAIT); 2468 if (m_sig == NULL) { 2469 /* out of memory or ?? */ 2470 return (NULL); 2471 } 2472 #ifdef SCTP_MBUF_LOGGING 2473 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2474 sctp_log_mbc(m_sig, SCTP_MBUF_SPLIT); 2475 } 2476 #endif 2477 2478 /* 2479 * compute the signature/digest for the cookie 2480 */ 2481 ep = &(*inp_p)->sctp_ep; 2482 l_inp = *inp_p; 2483 if (l_stcb) { 2484 SCTP_TCB_UNLOCK(l_stcb); 2485 } 2486 SCTP_INP_RLOCK(l_inp); 2487 if (l_stcb) { 2488 SCTP_TCB_LOCK(l_stcb); 2489 } 2490 /* which cookie is it? 
*/ 2491 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 2492 (ep->current_secret_number != ep->last_secret_number)) { 2493 /* it's the old cookie */ 2494 (void)sctp_hmac_m(SCTP_HMAC, 2495 (uint8_t *)ep->secret_key[(int)ep->last_secret_number], 2496 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2497 } else { 2498 /* it's the current cookie */ 2499 (void)sctp_hmac_m(SCTP_HMAC, 2500 (uint8_t *)ep->secret_key[(int)ep->current_secret_number], 2501 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2502 } 2503 /* get the signature */ 2504 SCTP_INP_RUNLOCK(l_inp); 2505 sig = (uint8_t *)sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *)&tmp_sig); 2506 if (sig == NULL) { 2507 /* couldn't find signature */ 2508 sctp_m_freem(m_sig); 2509 return (NULL); 2510 } 2511 /* compare the received digest with the computed digest */ 2512 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2513 /* try the old cookie? */ 2514 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 2515 (ep->current_secret_number != ep->last_secret_number)) { 2516 /* compute digest with old */ 2517 (void)sctp_hmac_m(SCTP_HMAC, 2518 (uint8_t *)ep->secret_key[(int)ep->last_secret_number], 2519 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2520 /* compare */ 2521 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2522 cookie_ok = 1; 2523 } 2524 } else { 2525 cookie_ok = 1; 2526 } 2527 2528 /* 2529 * Now before we continue we must reconstruct our mbuf so that 2530 * normal processing of any other chunks will work. 
2531 */ 2532 { 2533 struct mbuf *m_at; 2534 2535 m_at = m; 2536 while (SCTP_BUF_NEXT(m_at) != NULL) { 2537 m_at = SCTP_BUF_NEXT(m_at); 2538 } 2539 SCTP_BUF_NEXT(m_at) = m_sig; 2540 } 2541 2542 if (cookie_ok == 0) { 2543 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2544 SCTPDBG(SCTP_DEBUG_INPUT2, 2545 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2546 (uint32_t)offset, cookie_offset, sig_offset); 2547 return (NULL); 2548 } 2549 /* 2550 * check the cookie timestamps to be sure it's not stale 2551 */ 2552 (void)SCTP_GETTIME_TIMEVAL(&now); 2553 /* Expire time is in Ticks, so we convert to seconds */ 2554 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 2555 time_expires.tv_usec = cookie->time_entered.tv_usec; 2556 /* 2557 * TODO sctp_constants.h needs alternative time macros when _KERNEL 2558 * is undefined. 2559 */ 2560 if (timevalcmp(&now, &time_expires, >)) { 2561 /* cookie is stale! */ 2562 struct mbuf *op_err; 2563 struct sctp_error_stale_cookie *cause; 2564 uint32_t tim; 2565 2566 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_stale_cookie), 2567 0, M_NOWAIT, 1, MT_DATA); 2568 if (op_err == NULL) { 2569 /* FOOBAR */ 2570 return (NULL); 2571 } 2572 /* Set the len */ 2573 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie); 2574 cause = mtod(op_err, struct sctp_error_stale_cookie *); 2575 cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE); 2576 cause->cause.length = htons((sizeof(struct sctp_paramhdr) + 2577 (sizeof(uint32_t)))); 2578 /* seconds to usec */ 2579 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 2580 /* add in usec */ 2581 if (tim == 0) 2582 tim = now.tv_usec - cookie->time_entered.tv_usec; 2583 cause->stale_time = htonl(tim); 2584 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err, 2585 mflowtype, mflowid, l_inp->fibnum, 2586 vrf_id, port); 2587 return (NULL); 2588 } 2589 /* 2590 * Now we must see with the lookup address if we have 
an existing 2591 * asoc. This will only happen if we were in the COOKIE-WAIT state 2592 * and a INIT collided with us and somewhere the peer sent the 2593 * cookie on another address besides the single address our assoc 2594 * had for him. In this case we will have one of the tie-tags set at 2595 * least AND the address field in the cookie can be used to look it 2596 * up. 2597 */ 2598 to = NULL; 2599 switch (cookie->addr_type) { 2600 #ifdef INET6 2601 case SCTP_IPV6_ADDRESS: 2602 memset(&sin6, 0, sizeof(sin6)); 2603 sin6.sin6_family = AF_INET6; 2604 sin6.sin6_len = sizeof(sin6); 2605 sin6.sin6_port = sh->src_port; 2606 sin6.sin6_scope_id = cookie->scope_id; 2607 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2608 sizeof(sin6.sin6_addr.s6_addr)); 2609 to = (struct sockaddr *)&sin6; 2610 break; 2611 #endif 2612 #ifdef INET 2613 case SCTP_IPV4_ADDRESS: 2614 memset(&sin, 0, sizeof(sin)); 2615 sin.sin_family = AF_INET; 2616 sin.sin_len = sizeof(sin); 2617 sin.sin_port = sh->src_port; 2618 sin.sin_addr.s_addr = cookie->address[0]; 2619 to = (struct sockaddr *)&sin; 2620 break; 2621 #endif 2622 default: 2623 /* This should not happen */ 2624 return (NULL); 2625 } 2626 if (*stcb == NULL) { 2627 /* Yep, lets check */ 2628 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL); 2629 if (*stcb == NULL) { 2630 /* 2631 * We should have only got back the same inp. If we 2632 * got back a different ep we have a problem. The 2633 * original findep got back l_inp and now 2634 */ 2635 if (l_inp != *inp_p) { 2636 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2637 } 2638 } else { 2639 if (*locked_tcb == NULL) { 2640 /* 2641 * In this case we found the assoc only 2642 * after we locked the create lock. This 2643 * means we are in a colliding case and we 2644 * must make sure that we unlock the tcb if 2645 * its one of the cases where we throw away 2646 * the incoming packets. 
2647 */ 2648 *locked_tcb = *stcb; 2649 2650 /* 2651 * We must also increment the inp ref count 2652 * since the ref_count flags was set when we 2653 * did not find the TCB, now we found it 2654 * which reduces the refcount.. we must 2655 * raise it back out to balance it all :-) 2656 */ 2657 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2658 if ((*stcb)->sctp_ep != l_inp) { 2659 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n", 2660 (void *)(*stcb)->sctp_ep, (void *)l_inp); 2661 } 2662 } 2663 } 2664 } 2665 cookie_len -= SCTP_SIGNATURE_SIZE; 2666 if (*stcb == NULL) { 2667 /* this is the "normal" case... get a new TCB */ 2668 *stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh, 2669 cookie, cookie_len, *inp_p, 2670 netp, to, ¬ification, 2671 auth_skipped, auth_offset, auth_len, 2672 mflowtype, mflowid, 2673 vrf_id, port); 2674 } else { 2675 /* this is abnormal... cookie-echo on existing TCB */ 2676 had_a_existing_tcb = 1; 2677 *stcb = sctp_process_cookie_existing(m, iphlen, offset, 2678 src, dst, sh, 2679 cookie, cookie_len, *inp_p, *stcb, netp, to, 2680 ¬ification, auth_skipped, auth_offset, auth_len, 2681 mflowtype, mflowid, 2682 vrf_id, port); 2683 } 2684 2685 if (*stcb == NULL) { 2686 /* still no TCB... must be bad cookie-echo */ 2687 return (NULL); 2688 } 2689 if (*netp != NULL) { 2690 (*netp)->flowtype = mflowtype; 2691 (*netp)->flowid = mflowid; 2692 } 2693 /* 2694 * Ok, we built an association so confirm the address we sent the 2695 * INIT-ACK to. 2696 */ 2697 netl = sctp_findnet(*stcb, to); 2698 /* 2699 * This code should in theory NOT run but 2700 */ 2701 if (netl == NULL) { 2702 /* TSNH! Huh, why do I need to add this address here? 
*/ 2703 if (sctp_add_remote_addr(*stcb, to, NULL, port, 2704 SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) { 2705 return (NULL); 2706 } 2707 netl = sctp_findnet(*stcb, to); 2708 } 2709 if (netl) { 2710 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2711 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2712 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2713 netl); 2714 send_int_conf = 1; 2715 } 2716 } 2717 sctp_start_net_timers(*stcb); 2718 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2719 if (!had_a_existing_tcb || 2720 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2721 /* 2722 * If we have a NEW cookie or the connect never 2723 * reached the connected state during collision we 2724 * must do the TCP accept thing. 2725 */ 2726 struct socket *so, *oso; 2727 struct sctp_inpcb *inp; 2728 2729 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2730 /* 2731 * For a restart we will keep the same 2732 * socket, no need to do anything. I THINK!! 2733 */ 2734 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2735 if (send_int_conf) { 2736 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2737 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2738 } 2739 return (m); 2740 } 2741 oso = (*inp_p)->sctp_socket; 2742 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2743 SCTP_TCB_UNLOCK((*stcb)); 2744 CURVNET_SET(oso->so_vnet); 2745 so = sonewconn(oso, 0 2746 ); 2747 CURVNET_RESTORE(); 2748 SCTP_TCB_LOCK((*stcb)); 2749 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2750 2751 if (so == NULL) { 2752 struct mbuf *op_err; 2753 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2754 struct socket *pcb_so; 2755 #endif 2756 /* Too many sockets */ 2757 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2758 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 2759 sctp_abort_association(*inp_p, NULL, m, iphlen, 2760 src, dst, sh, op_err, 2761 mflowtype, mflowid, 2762 vrf_id, port); 2763 #if defined(__APPLE__) || 
defined(SCTP_SO_LOCK_TESTING) 2764 pcb_so = SCTP_INP_SO(*inp_p); 2765 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2766 SCTP_TCB_UNLOCK((*stcb)); 2767 SCTP_SOCKET_LOCK(pcb_so, 1); 2768 SCTP_TCB_LOCK((*stcb)); 2769 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2770 #endif 2771 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, 2772 SCTP_FROM_SCTP_INPUT + SCTP_LOC_23); 2773 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2774 SCTP_SOCKET_UNLOCK(pcb_so, 1); 2775 #endif 2776 return (NULL); 2777 } 2778 inp = (struct sctp_inpcb *)so->so_pcb; 2779 SCTP_INP_INCR_REF(inp); 2780 /* 2781 * We add the unbound flag here so that if we get an 2782 * soabort() before we get the move_pcb done, we 2783 * will properly cleanup. 2784 */ 2785 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2786 SCTP_PCB_FLAGS_CONNECTED | 2787 SCTP_PCB_FLAGS_IN_TCPPOOL | 2788 SCTP_PCB_FLAGS_UNBOUND | 2789 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2790 SCTP_PCB_FLAGS_DONT_WAKE); 2791 inp->sctp_features = (*inp_p)->sctp_features; 2792 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; 2793 inp->sctp_socket = so; 2794 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2795 inp->max_cwnd = (*inp_p)->max_cwnd; 2796 inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off; 2797 inp->ecn_supported = (*inp_p)->ecn_supported; 2798 inp->prsctp_supported = (*inp_p)->prsctp_supported; 2799 inp->auth_supported = (*inp_p)->auth_supported; 2800 inp->asconf_supported = (*inp_p)->asconf_supported; 2801 inp->reconfig_supported = (*inp_p)->reconfig_supported; 2802 inp->nrsack_supported = (*inp_p)->nrsack_supported; 2803 inp->pktdrop_supported = (*inp_p)->pktdrop_supported; 2804 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2805 inp->sctp_context = (*inp_p)->sctp_context; 2806 inp->local_strreset_support = (*inp_p)->local_strreset_support; 2807 inp->fibnum = (*inp_p)->fibnum; 2808 inp->inp_starting_point_for_iterator = NULL; 2809 /* 2810 * copy in the authentication parameters from the 2811 
* original endpoint 2812 */ 2813 if (inp->sctp_ep.local_hmacs) 2814 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2815 inp->sctp_ep.local_hmacs = 2816 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2817 if (inp->sctp_ep.local_auth_chunks) 2818 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2819 inp->sctp_ep.local_auth_chunks = 2820 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2821 2822 /* 2823 * Now we must move it from one hash table to 2824 * another and get the tcb in the right place. 2825 */ 2826 2827 /* 2828 * This is where the one-2-one socket is put into 2829 * the accept state waiting for the accept! 2830 */ 2831 if (*stcb) { 2832 (*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE; 2833 } 2834 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2835 2836 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2837 SCTP_TCB_UNLOCK((*stcb)); 2838 2839 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, 2840 0); 2841 SCTP_TCB_LOCK((*stcb)); 2842 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2843 2844 2845 /* 2846 * now we must check to see if we were aborted while 2847 * the move was going on and the lock/unlock 2848 * happened. 2849 */ 2850 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2851 /* 2852 * yep it was, we leave the assoc attached 2853 * to the socket since the sctp_inpcb_free() 2854 * call will send an abort for us. 
			 */
				SCTP_INP_DECR_REF(inp);
				return (NULL);
			}
			SCTP_INP_DECR_REF(inp);
			/* Switch over to the new guy */
			*inp_p = inp;
			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
			if (send_int_conf) {
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
				    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
			}
			/*
			 * Pull it from the incomplete queue and wake the
			 * guy
			 */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Socket lock must be taken before the TCB lock;
			 * bump the refcount so the TCB cannot go away while
			 * the TCB lock is dropped.
			 */
			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK((*stcb));
			SCTP_SOCKET_LOCK(so, 1);
#endif
			soisconnected(so);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_LOCK((*stcb));
			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			return (m);
		}
	}
	if (notification) {
		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
	}
	if (send_int_conf) {
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
	}
	return (m);
}

/*
 * COOKIE-ACK chunk handler.  Stops any pending COOKIE/INIT timers, and if
 * the association is in COOKIE_ECHOED state moves it to OPEN: starts the
 * per-net timers, updates statistics and (on the first sample) the RTO,
 * notifies the ULP that the association is up, marks one-to-one style
 * sockets connected, and kicks off heartbeat/autoclose/ASCONF processing
 * as configured.  Finally tosses stored cookies and restarts the send
 * timer if data is still outstanding.
 * NOTE(review): the SCTP_TCB_UNLOCK() in the locking-test branch implies
 * the caller holds the TCB lock — confirm against the dispatch path.
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if ((stcb == NULL) || (net == NULL)) {
		return;
	}
	asoc = &stcb->asoc;

	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		sctp_start_net_timers(stcb);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		if (asoc->overall_error_count == 0) {
			/*
			 * No retransmissions since the COOKIE-ECHO went out,
			 * so the elapsed time is a valid RTT sample.
			 */
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy,
			    SCTP_RTT_FROM_NON_DATA);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Take a TCB reference and drop the TCB lock before
			 * acquiring the socket lock (lock-order rule), then
			 * reacquire and release the reference.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
				soisconnected(stcb->sctp_socket);
			}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/*
			 * We don't need to do the asconf thing, nor hb or
			 * autoclose if the socket is closed.
			 */
			goto closed_socket;
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);


		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.asconf_supported == 1) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
closed_socket:
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
	}
}

/*
 * ECN-Echo (ECNE) chunk handler: reacts to a peer-reported congestion
 * mark by invoking the pluggable congestion-control hook and answering
 * with a CWR chunk.  Accepts both the current and the old (shorter)
 * ECNE chunk format; the old format is widened into a local copy with
 * an implied packet count of 1.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	struct sctp_ecne_chunk bkup;
	uint8_t override_bit;
	uint32_t tsn, window_data_tsn;
	int len;
	unsigned int pkt_cnt;

	len = ntohs(cp->ch.chunk_length);
	if ((len != sizeof(struct sctp_ecne_chunk)) &&
	    (len != sizeof(struct old_sctp_ecne_chunk))) {
		/* malformed length; ignore the chunk */
		return;
	}
	if (len == sizeof(struct old_sctp_ecne_chunk)) {
		/* Its the old format */
		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
		bkup.num_pkts_since_cwr = htonl(1);
		cp = &bkup;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
	/*
	 * window_data_tsn = highest TSN queued for sending (or
	 * sending_seq - 1 when the send queue is empty); used below to
	 * bound one cwnd reduction per RTT.
	 */
	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
	if (lchk == NULL) {
		window_data_tsn = stcb->asoc.sending_seq - 1;
	} else {
		window_data_tsn = lchk->rec.data.tsn;
	}

	/* Find where it was sent to if possible. */
	net = NULL;
	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
		if (lchk->rec.data.tsn == tsn) {
			net = lchk->whoTo;
			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
			break;
		}
		if (SCTP_TSN_GT(lchk->rec.data.tsn, tsn)) {
			/* sent_queue is TSN ordered; past it, stop looking */
			break;
		}
	}
	if (net == NULL) {
		/*
		 * What to do. A previous send of a CWR was possibly lost.
		 * See how old it is, we may have it marked on the actual
		 * net.
		 */
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if (tsn == net->last_cwr_tsn) {
				/* Found him, send it off */
				break;
			}
		}
		if (net == NULL) {
			/*
			 * If we reach here, we need to send a special CWR
			 * that says hey, we did this a long time ago and
			 * you lost the response.
			 */
			net = TAILQ_FIRST(&stcb->asoc.nets);
			if (net == NULL) {
				/* TSNH */
				return;
			}
			override_bit = SCTP_CWR_REDUCE_OVERRIDE;
		} else {
			override_bit = 0;
		}
	} else {
		override_bit = 0;
	}
	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
	    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
		/*
		 * We reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the window_data_tsn
		 */
		net->cwr_window_tsn = window_data_tsn;
		net->ecn_ce_pkt_cnt += pkt_cnt;
		net->lost_cnt = pkt_cnt;
		net->last_cwr_tsn = tsn;
	} else {
		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
		    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
			/*
			 * Another loss in the same window update how many
			 * marks/packets lost we have had.
			 */
			int cnt = 1;

			if (pkt_cnt > net->lost_cnt) {
				/* Should be the case */
				cnt = (pkt_cnt - net->lost_cnt);
				net->ecn_ce_pkt_cnt += cnt;
			}
			net->lost_cnt = pkt_cnt;
			net->last_cwr_tsn = tsn;
			/*
			 * Most CC functions will ignore this call, since we
			 * are in-window yet of the initial CE the peer saw.
			 */
			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
		}
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer. Note we set the override when we
	 * could not find the TSN on the chunk or the destination network.
	 */
	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
}

/*
 * CWR chunk handler.
 * Here we get a CWR from the peer. We must look in the outqueue and
 * make sure that we have a covered ECNE in the control chunk part.
 * If so remove it.  With the REDUCE_OVERRIDE flag the source net check
 * is skipped and every covered ECNE is purged; otherwise only the ECNE
 * queued to the matching net is removed and the scan stops.
 */
static void
sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_ecne_chunk *ecne;
	int override;
	uint32_t cwr_tsn;

	cwr_tsn = ntohl(cp->tsn);
	override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
			continue;
		}
		if ((override == 0) && (chk->whoTo != net)) {
			/* Must be from the right src unless override is set */
			continue;
		}
		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
		if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
			/* this covers this ECNE, we can remove it */
			stcb->asoc.ecn_echo_cnt_onq--;
			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
			    sctp_next);
			sctp_m_freem(chk->data);
			chk->data = NULL;
			stcb->asoc.ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			if (override == 0) {
				break;
			}
		}
	}
}

/*
 * SHUTDOWN-COMPLETE chunk handler: final step of the shutdown sequence.
 * Only acted on in SHUTDOWN_ACK_SENT state; notifies the ULP, stops the
 * SHUTDOWN-ACK timer and frees the association (which consumes the TCB
 * lock — the caller must not touch stcb afterwards).
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;
#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
	}
#ifdef INVARIANTS
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
		panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
	}
#endif
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* socket lock before TCB lock; hold a ref across the gap */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}

/*
 * Handle one chunk description reported inside a PACKET-DROPPED chunk:
 * mark the described chunk (DATA, INIT, COOKIE-ECHO, ASCONF, ...) for
 * retransmission or resend it directly, depending on its type.
 * Returns 0 on success, 1 if an abort occurred while processing, and
 * -1 if the reflected data bytes did not match what we sent.
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->rec.data.tsn == tsn) {
					/* found it */
					break;
				}
				if (SCTP_TSN_GT(tp1->rec.data.tsn, tsn)) {
					/* not found */
					tp1 = NULL;
					break;
				}
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->rec.data.tsn == tsn) {
						/* found it */
						break;
					}
				}
			}
			if (tp1 == NULL) {
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				if (((flg & SCTP_BADCRC) == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					/* end-point report without bad CRC: nothing to do */
					return (0);
				}
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/*
				 * Verify the reflected payload bytes match
				 * what we actually sent before trusting the
				 * report.
				 */
				ddp = (uint8_t *)(mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					if (tp1->whoTo->rto_needed == 0) {
						tp1->whoTo->rto_needed = 1;
					}
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo,
				    SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uint32_t)(uintptr_t)stcb,
					    tp1->rec.data.tsn);
				}
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
				tp1->sent = SCTP_DATAGRAM_RESEND;
			} {
				/*
				 * audit code: recount RESEND-marked chunks
				 * and repair the cached counter if it drifted.
				 */
				unsigned int audit;

				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			struct sctp_tmit_chunk *asconf;

			/* mark the first queued ASCONF for retransmission */
			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			struct sctp_tmit_chunk *cookie;

			/* mark the queued COOKIE-ECHO for retransmission */
			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_IFORWARD_CUM_TSN:
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}

/*
 * Reset the incoming stream state after a (partial or full) stream
 * reset.  For the listed streams (or all of them when number_entries is
 * 0) the last-delivered MID is set to 0xffffffff so the next arriving
 * message restarts at 0, and the ULP is notified.  Out-of-range stream
 * ids in the list are silently skipped.
 */
void
sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
{
	uint32_t i;
	uint16_t temp;

	/*
	 * We set things to 0xffffffff since this is the last delivered
	 * sequence and we will be sending in 0 after the reset.
	 */

	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			temp = ntohs(list[i]);
			if (temp >= stcb->asoc.streamincnt) {
				continue;
			}
			stcb->asoc.strmin[temp].last_mid_delivered = 0xffffffff;
		}
	} else {
		/* all-streams reset: report a NULL list to the ULP */
		list = NULL;
		for (i = 0; i < stcb->asoc.streamincnt; i++) {
			stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
		}
	}
	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
}

/*
 * Reset the outgoing stream state: zero the next ordered/unordered MID
 * for the listed streams (or all streams when number_entries is 0) and
 * notify the ULP.  Out-of-range stream ids are skipped.
 */
static void
sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
{
	uint32_t i;
	uint16_t temp;

	if (number_entries > 0) {
		for (i = 0; i < number_entries; i++) {
			temp = ntohs(list[i]);
			if (temp >= stcb->asoc.streamoutcnt) {
				/* no such stream */
				continue;
			}
			stcb->asoc.strmout[temp].next_mid_ordered = 0;
			stcb->asoc.strmout[temp].next_mid_unordered = 0;
		}
	} else {
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].next_mid_ordered = 0;
			stcb->asoc.strmout[i].next_mid_unordered = 0;
		}
	}
	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
}

/*
 * Return the listed outgoing streams (or all, when number_entries is 0)
 * from a pending-reset state back to SCTP_STREAM_OPEN.
 */
static void
sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
{
	uint32_t i;
	uint16_t temp;

	if (number_entries > 0) {
		for (i = 0; i < number_entries; i++) {
			temp = ntohs(list[i]);
			if (temp >= stcb->asoc.streamoutcnt) {
				/* no such stream */
				continue;
			}
			stcb->asoc.strmout[temp].state = SCTP_STREAM_OPEN;
		}
	} else {
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].state = SCTP_STREAM_OPEN;
		}
	}
}


/*
 * Locate the outstanding stream-reset request parameter whose request
 * sequence number equals 'seq' inside the queued RE-CONFIG chunk
 * (asoc->str_reset).  A chunk can carry at most two request parameters;
 * both are checked.  If 'bchk' is non-NULL the chunk pointer is handed
 * back through it.  Returns NULL (and clears stream_reset_outstanding
 * when no chunk is queued at all) if the sequence number is not found.
 */
struct sctp_stream_reset_request *
sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
{
	struct sctp_association *asoc;
	struct sctp_chunkhdr *ch;
	struct sctp_stream_reset_request *r;
	struct sctp_tmit_chunk *chk;
	int len, clen;

	asoc = &stcb->asoc;
	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	if (stcb->asoc.str_reset == NULL) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	chk = stcb->asoc.str_reset;
	if (chk->data == NULL) {
		return (NULL);
	}
	if (bchk) {
		/* he wants a copy of the chk pointer */
		*bchk = chk;
	}
	clen = chk->send_size;
	ch = mtod(chk->data, struct sctp_chunkhdr *);
	r = (struct sctp_stream_reset_request *)(ch + 1);
	if (ntohl(r->request_seq) == seq) {
		/* found it */
		return (r);
	}
	len = SCTP_SIZE32(ntohs(r->ph.param_length));
	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
		/* move to the next one, there can only be a max of two */
		r = (struct sctp_stream_reset_request *)((caddr_t)r + len);
		if (ntohl(r->request_seq) == seq) {
			return (r);
		}
	}
	/* that seq is not here */
	return (NULL);
}

/*
 * Remove the queued stream-reset (RE-CONFIG) chunk: stop its
 * retransmission timer, unlink it from the control send queue, free its
 * data and clear asoc->str_reset.
 */
static void
sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;

	if (stcb->asoc.str_reset == NULL) {
		return;
	}
	asoc = &stcb->asoc;

	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb,
	    chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
	TAILQ_REMOVE(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	if (chk->data) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
	}
	asoc->ctrl_queue_cnt--;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	/* sa_ignore NO_NULL_CHK */
	stcb->asoc.str_reset = NULL;
}


/*
 * Process a stream-reset RESPONSE parameter from the peer for the
 * request we have outstanding (matched via sctp_find_stream_reset()).
 * Dispatches on the type of the original request (outgoing reset,
 * incoming reset, add-out/add-in streams, TSN reset) and on the result
 * code carried in 'action', updating association state and notifying
 * the ULP accordingly.  Returns 1 if processing triggered an abort,
 * else 0.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparam_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_request *req_param;
	struct sctp_stream_reset_out_request *req_out_param;
	struct sctp_stream_reset_in_request *req_in_param;
	uint32_t number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		req_param = sctp_find_stream_reset(stcb, seq, &chk);
		if (req_param != NULL) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(req_param->ph.param_type);
			lparam_len = ntohs(req_param->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				int no_clear = 0;

				req_out_param = (struct sctp_stream_reset_out_request *)req_param;
				number_entries = (lparam_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, req_out_param->list_of_streams);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams,
					    SCTP_SO_NOT_LOCKED);
				} else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) {
					/*
					 * Set it up so we don't stop
					 * retransmitting
					 */
					asoc->stream_reset_outstanding++;
					stcb->asoc.str_reset_seq_out--;
					asoc->stream_reset_out_is_outstanding = 1;
					no_clear = 1;
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
				if (no_clear == 0) {
					sctp_reset_clear_pending(stcb, number_entries, req_out_param->list_of_streams);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				req_in_param = (struct sctp_stream_reset_in_request *)req_param;
				number_entries = (lparam_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
					    number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
					    number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
				/* Ok we now may have more streams */
				int num_stream;

				num_stream = stcb->asoc.strm_pending_add_size;
				if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
					/* TSNH */
					num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
				}
				stcb->asoc.strm_pending_add_size = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					/* Put the new streams into effect */
					int i;

					for (i = asoc->streamoutcnt; i < (asoc->streamoutcnt + num_stream); i++) {
						asoc->strmout[i].state = SCTP_STREAM_OPEN;
					}
3697 asoc->streamoutcnt += num_stream; 3698 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0); 3699 } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3700 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 3701 SCTP_STREAM_CHANGE_DENIED); 3702 } else { 3703 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 3704 SCTP_STREAM_CHANGE_FAILED); 3705 } 3706 } else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) { 3707 if (asoc->stream_reset_outstanding) 3708 asoc->stream_reset_outstanding--; 3709 if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3710 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 3711 SCTP_STREAM_CHANGE_DENIED); 3712 } else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) { 3713 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 3714 SCTP_STREAM_CHANGE_FAILED); 3715 } 3716 } else if (type == SCTP_STR_RESET_TSN_REQUEST) { 3717 /** 3718 * a) Adopt the new in tsn. 3719 * b) reset the map 3720 * c) Adopt the new out-tsn 3721 */ 3722 struct sctp_stream_reset_response_tsn *resp; 3723 struct sctp_forward_tsn_chunk fwdtsn; 3724 int abort_flag = 0; 3725 3726 if (respin == NULL) { 3727 /* huh ? 
*/ 3728 return (0); 3729 } 3730 if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) { 3731 return (0); 3732 } 3733 if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) { 3734 resp = (struct sctp_stream_reset_response_tsn *)respin; 3735 asoc->stream_reset_outstanding--; 3736 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3737 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3738 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1); 3739 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3740 if (abort_flag) { 3741 return (1); 3742 } 3743 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1); 3744 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 3745 sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 3746 } 3747 stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3748 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn); 3749 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3750 3751 stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map; 3752 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size); 3753 3754 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn); 3755 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn; 3756 3757 sctp_reset_out_streams(stcb, 0, (uint16_t *)NULL); 3758 sctp_reset_in_stream(stcb, 0, (uint16_t *)NULL); 3759 sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0); 3760 } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3761 sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 3762 SCTP_ASSOC_RESET_DENIED); 3763 } else { 3764 sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 3765 SCTP_ASSOC_RESET_FAILED); 3766 } 3767 } 3768 /* get rid of the 
request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	if (asoc->stream_reset_outstanding == 0) {
		/* Nothing pending anymore; kick out any queued request. */
		sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
	}
	return (0);
}

/*
 * Handle an incoming SSN/Stream-Reset IN request: the peer is asking us to
 * reset (some or all of) OUR outgoing streams.  The result of the attempt is
 * recorded in asoc->last_reset_action[0] and echoed back to the peer via
 * sctp_add_stream_reset_result() on the response chunk 'chk'.
 *
 * 'trunc' is set by the caller when the parameter was larger than its parse
 * buffer, in which case the request is denied.  Duplicate requests (one or
 * two sequence numbers back) just re-echo the previously recorded action.
 */
static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		/* New (in-sequence) request: age the action history. */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* Can't do it, since they exceeded our buffer size */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			/*
			 * NOTE(review): assumes param_length was already
			 * bounds-checked by sctp_handle_stream_reset() --
			 * confirm before relying on number_entries here.
			 */
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			if (number_entries) {
				for (i = 0; i < number_entries; i++) {
					temp = ntohs(req->list_of_streams[i]);
					if (temp >= stcb->asoc.streamoutcnt) {
						/* Unknown stream id: deny. */
						asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
						goto bad_boy;
					}
					/*
					 * Converts the list to host byte
					 * order in place for the second pass.
					 */
					req->list_of_streams[i] = temp;
				}
				for (i = 0; i < number_entries; i++) {
					if (stcb->asoc.strmout[req->list_of_streams[i]].state == SCTP_STREAM_OPEN) {
						stcb->asoc.strmout[req->list_of_streams[i]].state = SCTP_STREAM_RESET_PENDING;
					}
				}
			} else {
				/* Its all: empty list means every stream. */
				for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
					if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN)
						stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
				}
			}
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		}
bad_boy:
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* Retransmitted request: echo the last recorded action. */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
}

/*
 * Handle an SSN/TSN reset request: reset every incoming and outgoing stream
 * and resynchronize the TSN space.  Returns 1 if the association was aborted
 * while processing the synthesized FORWARD-TSN, 0 otherwise.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/*
			 * Build a local FORWARD-TSN that moves the cumulative
			 * TSN to the top of the current mapping array, and
			 * process it as if it had arrived from the peer.
			 */
			fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
			fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
			fwdtsn.ch.chunk_flags = 0;
			fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
			sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
			if (abort_flag) {
				/* Association torn down; caller must not touch it. */
				return (1);
			}
			asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
			}
			/* Rebase the receive mapping arrays to the new TSN. */
			asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
			asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
			atomic_add_int(&asoc->sending_seq, 1);
			/* save off historical data for retrans */
			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
			asoc->last_sending_seq[0] = asoc->sending_seq;
			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
			sctp_reset_out_streams(stcb, 0, (uint16_t *)NULL);
			sctp_reset_in_stream(stcb, 0, (uint16_t *)NULL);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
		}
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* Duplicate: re-echo the saved result and TSN history. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	return (0);
}

/*
 * Handle an outgoing SSN reset request from the peer: the peer is resetting
 * ITS outgoing streams, i.e. our incoming streams.  If the cumulative TSN
 * already covers the peer's send_reset_at_tsn we reset immediately;
 * otherwise the request is queued on asoc->resetHead until the data at or
 * before that TSN has arrived.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* Request was larger than our parse buffer: deny. */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
				return;
			}
			liste->seq = seq;
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_IN_PROGRESS;
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}

/*
 * Handle an ADD-OUTGOING-STREAMS request from the peer.  The peer wants to
 * grow ITS outgoing stream count, which means growing OUR incoming side
 * (asoc->strmin).  If the new total fits within max_inbound_streams (and
 * 16 bits) a bigger strmin array is allocated, the old per-stream state and
 * queued data are migrated, and the new streams are initialized.
 */
static void
sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 */
	uint32_t num_stream, i;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_queued_to_read *ctl, *nctl;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		num_stream += stcb->asoc.streamincnt;
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
		    (num_stream > 0xffff)) {
			/* We must reject it they ask for to many */
	denied:
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Ok, we can do that :-) */
			struct sctp_stream_in *oldstrm;

			/* save off the old */
			oldstrm = stcb->asoc.strmin;
			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
			    (num_stream * sizeof(struct sctp_stream_in)),
			    SCTP_M_STRMI);
			if (stcb->asoc.strmin == NULL) {
				/* Allocation failed: restore and deny. */
				stcb->asoc.strmin = oldstrm;
				goto denied;
			}
			/* copy off the old data */
			for (i = 0; i < stcb->asoc.streamincnt; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
				stcb->asoc.strmin[i].sid = i;
				stcb->asoc.strmin[i].last_mid_delivered = oldstrm[i].last_mid_delivered;
				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
				stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started;
				/* now anything on those queues? */
				/* Move queued data over to the new array. */
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next_instrm, nctl) {
					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next_instrm);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next_instrm);
				}
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].uno_inqueue, next_instrm, nctl) {
					TAILQ_REMOVE(&oldstrm[i].uno_inqueue, ctl, next_instrm);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].uno_inqueue, ctl, next_instrm);
				}
			}
			/* Init the new streams */
			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
				stcb->asoc.strmin[i].sid = i;
				/* 0xffffffff: nothing delivered yet on this stream. */
				stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
				stcb->asoc.strmin[i].pd_api_started = 0;
				stcb->asoc.strmin[i].delivery_started = 0;
			}
			SCTP_FREE(oldstrm, SCTP_M_STRMI);
			/* update the size */
			stcb->asoc.streamincnt = num_stream;
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);

	}
}

/*
 * Handle an ADD-INCOMING-STREAMS request from the peer.  The peer wants us
 * to grow OUR outgoing stream count; we answer by issuing our own outgoing
 * add-stream request (sctp_send_str_reset_req) for num_stream streams, if
 * nothing else is outstanding and the new total stays below 0x10000.
 */
static void
sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 */
	uint16_t num_stream;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_outstanding) {
			/* We must reject it we have something pending */
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		} else {
			/* Ok, we can do that :-) */
			int mychk;

			mychk = stcb->asoc.streamoutcnt;
			mychk += num_stream;
			if (mychk < 0x10000) {
				/* Tentatively performed; the send may still fail. */
				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
				if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 1, num_stream, 0, 1)) {
					stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				}
			} else {
				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
			}
		}
		sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}

/*
 * Top-level handler for a STREAM-RESET (RE-CONFIG) chunk.  Walks every
 * parameter in the chunk, dispatching to the per-request-type handlers
 * above, and builds the response chunk as it goes.  Returns non-zero when
 * processing aborted the association (TSN-reset path), 0 otherwise.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static int
sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
    struct sctp_chunkhdr *ch_req)
{
	uint16_t remaining_length, param_len, ptype;
	struct sctp_paramhdr pstore;
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
	uint32_t seq = 0;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		/* Shared cleanup path: frees chk (and its mbuf, if any). */
strres_nochunk:
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = NULL;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	/* Walk each parameter in the RE-CONFIG chunk. */
	while (remaining_length >= sizeof(struct sctp_paramhdr)) {
		/* First fetch just the header to learn the length... */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *)&pstore);
		if (ph == NULL) {
			/* TSNH */
			break;
		}
		param_len = ntohs(ph->param_length);
		if ((param_len > remaining_length) ||
		    (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) {
			/* bad parameter length */
			break;
		}
		/* ...then re-fetch up to cstore's worth of the parameter. */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)),
		    (uint8_t *)&cstore);
		if (ph == NULL) {
			/* TSNH */
			break;
		}
		ptype = ntohs(ph->param_type);
		num_param++;
		if (param_len > sizeof(cstore)) {
			/* Oversized parameter: handlers will deny it. */
			trunc = 1;
		} else {
			trunc = 0;
		}
		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			if (param_len < sizeof(struct sctp_stream_reset_out_request)) {
				break;
			}
			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			/* Peer adds its OUT streams -> grows our IN side. */
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
				break;
			}
			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			/* Peer asks us to add OUT streams on our side. */
			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;
			req_in = (struct sctp_stream_reset_in_request *)ph;
			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* Association was aborted during processing. */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;
			if (param_len < sizeof(struct sctp_stream_reset_response)) {
				break;
			}
			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				/* Association was aborted during processing. */
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* Unknown parameter type: stop parsing. */
			break;
		}
		/* Advance past the (padded) parameter, clamping at zero. */
		offset += SCTP_SIZE32(param_len);
		if (remaining_length >= SCTP_SIZE32(param_len)) {
			remaining_length -= SCTP_SIZE32(param_len);
		} else {
			remaining_length = 0;
		}
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}

/*
 * Handle a router or endpoints report of a packet loss, there are two ways
 * to handle this, either we get the whole packet and must disect it
 * ourselves (possibly with truncation and or corruption) or it is a summary
 * from a middle box that did the disectting for us.
 */
static void
sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
{
	uint32_t bottle_bw, on_queue;
	uint16_t trunc_len;
	unsigned int chlen;
	unsigned int at;
	struct sctp_chunk_desc desc;
	struct sctp_chunkhdr *ch;

	chlen = ntohs(cp->ch.chunk_length);
	chlen -= sizeof(struct sctp_pktdrop_chunk);
	/* XXX possible chlen underflow */
	/*
	 * NOTE(review): chlen is unsigned; a chunk_length smaller than
	 * sizeof(struct sctp_pktdrop_chunk) would wrap to a huge value
	 * here -- confirm the caller validates the chunk length first.
	 */
	if (chlen == 0) {
		ch = NULL;
		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
			SCTP_STAT_INCR(sctps_pdrpbwrpt);
	} else {
		/* The dropped packet's own header follows cp->data. */
		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
		chlen -= sizeof(struct sctphdr);
		/* XXX possible chlen underflow */
		memset(&desc, 0, sizeof(desc));
	}
	trunc_len = (uint16_t)ntohs(cp->trunc_len);
	if (trunc_len > limit) {
		trunc_len = limit;
	}
	/* now the chunks themselves */
	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
		desc.chunk_type = ch->chunk_type;
		/* get amount we need to move */
		at = ntohs(ch->chunk_length);
		if (at < sizeof(struct sctp_chunkhdr)) {
			/* corrupt chunk, maybe at the end? */
			SCTP_STAT_INCR(sctps_pdrpcrupt);
			break;
		}
		if (trunc_len == 0) {
			/* we are supposed to have all of it */
			if (at > chlen) {
				/* corrupt skip it */
				SCTP_STAT_INCR(sctps_pdrpcrupt);
				break;
			}
		} else {
			/* is there enough of it left ? */
			if (desc.chunk_type == SCTP_DATA) {
				if (chlen < (sizeof(struct sctp_data_chunk) +
				    sizeof(desc.data_bytes))) {
					break;
				}
			} else {
				if (chlen < sizeof(struct sctp_chunkhdr)) {
					break;
				}
			}
		}
		if (desc.chunk_type == SCTP_DATA) {
			/* can we get out the tsn? */
			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
				SCTP_STAT_INCR(sctps_pdrpmbda);

			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
				/* yep */
				struct sctp_data_chunk *dcp;
				uint8_t *ddp;
				unsigned int iii;

				dcp = (struct sctp_data_chunk *)ch;
				ddp = (uint8_t *)(dcp + 1);
				/* Snapshot the leading payload bytes for matching. */
				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
					desc.data_bytes[iii] = ddp[iii];
				}
				desc.tsn_ifany = dcp->dp.tsn;
			} else {
				/* nope we are done. */
				SCTP_STAT_INCR(sctps_pdrpnedat);
				break;
			}
		} else {
			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
				SCTP_STAT_INCR(sctps_pdrpmbct);
		}

		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
			SCTP_STAT_INCR(sctps_pdrppdbrk);
			break;
		}
		if (SCTP_SIZE32(at) > chlen) {
			break;
		}
		chlen -= SCTP_SIZE32(at);
		if (chlen < sizeof(struct sctp_chunkhdr)) {
			/* done, none left */
			break;
		}
		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
	}
	/* Now update any rwnd --- possibly */
	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
		/* From a peer, we get a rwnd report */
		uint32_t a_rwnd;

		SCTP_STAT_INCR(sctps_pdrpfehos);

		bottle_bw = ntohl(cp->bottle_bw);
		on_queue = ntohl(cp->current_onq);
		if (bottle_bw && on_queue) {
			/* a rwnd report is in here */
			if (bottle_bw > on_queue)
				a_rwnd = bottle_bw - on_queue;
			else
				a_rwnd = 0;

			if (a_rwnd == 0)
				stcb->asoc.peers_rwnd = 0;
			else {
				/* Discount data still in flight to this peer. */
				if (a_rwnd > stcb->asoc.total_flight) {
					stcb->asoc.peers_rwnd =
					    a_rwnd - stcb->asoc.total_flight;
				} else {
					stcb->asoc.peers_rwnd = 0;
				}
				if (stcb->asoc.peers_rwnd <
				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
					/* SWS sender side engages */
					stcb->asoc.peers_rwnd = 0;
				}
			}
		}
	} else {
		SCTP_STAT_INCR(sctps_pdrpfmbox);
	}

	/* now middle boxes in sat networks get a cwnd bump */
	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
	    (stcb->asoc.sat_network)) {
		/*
		 * This is debatable but for sat networks it makes sense
		 * Note if a T3 timer has went off, we will prohibit any
		 * changes to cwnd until we exit the t3 loss recovery.
		 */
		/*
		 * NOTE(review): in this (middle-box) path bottle_bw and
		 * on_queue were not assigned above; they are passed by
		 * address, so the cc hook is presumably expected to fill
		 * them from 'cp' -- confirm against the cc module.
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
		    net, cp, &bottle_bw, &on_queue);
	}
}

/*
 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
 * still contain IP/SCTP header - stcb: is the tcb found for this packet -
 * offset: offset into the mbuf chain to first chunkhdr - length: is the
 * length of the complete packet outputs: - length: modified to remaining
 * length after control processing - netp: modified to new sctp_nets after
 * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
 * bad packet,...) otherwise return the tcb for this packet
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static struct sctp_tcb *
sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	uint32_t vtag_in;
	int num_chunks = 0;	/* number of control chunks processed */
	uint32_t chk_length;
	int ret;
	int abort_no_unlock = 0;
	int ecne_seen = 0;

	/*
	 * How big should this be, and should it be alloc'd? Lets try the
	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
4500 * until we get into jumbo grams and such.. 4501 */ 4502 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE]; 4503 struct sctp_tcb *locked_tcb = stcb; 4504 int got_auth = 0; 4505 uint32_t auth_offset = 0, auth_len = 0; 4506 int auth_skipped = 0; 4507 int asconf_cnt = 0; 4508 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4509 struct socket *so; 4510 #endif 4511 4512 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n", 4513 iphlen, *offset, length, (void *)stcb); 4514 4515 /* validate chunk header length... */ 4516 if (ntohs(ch->chunk_length) < sizeof(*ch)) { 4517 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n", 4518 ntohs(ch->chunk_length)); 4519 if (locked_tcb) { 4520 SCTP_TCB_UNLOCK(locked_tcb); 4521 } 4522 return (NULL); 4523 } 4524 /* 4525 * validate the verification tag 4526 */ 4527 vtag_in = ntohl(sh->v_tag); 4528 4529 if (locked_tcb) { 4530 SCTP_TCB_LOCK_ASSERT(locked_tcb); 4531 } 4532 if (ch->chunk_type == SCTP_INITIATION) { 4533 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n", 4534 ntohs(ch->chunk_length), vtag_in); 4535 if (vtag_in != 0) { 4536 /* protocol error- silently discard... */ 4537 SCTP_STAT_INCR(sctps_badvtag); 4538 if (locked_tcb) { 4539 SCTP_TCB_UNLOCK(locked_tcb); 4540 } 4541 return (NULL); 4542 } 4543 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) { 4544 /* 4545 * If there is no stcb, skip the AUTH chunk and process 4546 * later after a stcb is found (to validate the lookup was 4547 * valid. 
4548 */ 4549 if ((ch->chunk_type == SCTP_AUTHENTICATION) && 4550 (stcb == NULL) && 4551 (inp->auth_supported == 1)) { 4552 /* save this chunk for later processing */ 4553 auth_skipped = 1; 4554 auth_offset = *offset; 4555 auth_len = ntohs(ch->chunk_length); 4556 4557 /* (temporarily) move past this chunk */ 4558 *offset += SCTP_SIZE32(auth_len); 4559 if (*offset >= length) { 4560 /* no more data left in the mbuf chain */ 4561 *offset = length; 4562 if (locked_tcb) { 4563 SCTP_TCB_UNLOCK(locked_tcb); 4564 } 4565 return (NULL); 4566 } 4567 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4568 sizeof(struct sctp_chunkhdr), chunk_buf); 4569 } 4570 if (ch == NULL) { 4571 /* Help */ 4572 *offset = length; 4573 if (locked_tcb) { 4574 SCTP_TCB_UNLOCK(locked_tcb); 4575 } 4576 return (NULL); 4577 } 4578 if (ch->chunk_type == SCTP_COOKIE_ECHO) { 4579 goto process_control_chunks; 4580 } 4581 /* 4582 * first check if it's an ASCONF with an unknown src addr we 4583 * need to look inside to find the association 4584 */ 4585 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) { 4586 struct sctp_chunkhdr *asconf_ch = ch; 4587 uint32_t asconf_offset = 0, asconf_len = 0; 4588 4589 /* inp's refcount may be reduced */ 4590 SCTP_INP_INCR_REF(inp); 4591 4592 asconf_offset = *offset; 4593 do { 4594 asconf_len = ntohs(asconf_ch->chunk_length); 4595 if (asconf_len < sizeof(struct sctp_asconf_paramhdr)) 4596 break; 4597 stcb = sctp_findassociation_ep_asconf(m, 4598 *offset, 4599 dst, 4600 sh, &inp, netp, vrf_id); 4601 if (stcb != NULL) 4602 break; 4603 asconf_offset += SCTP_SIZE32(asconf_len); 4604 asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset, 4605 sizeof(struct sctp_chunkhdr), chunk_buf); 4606 } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF); 4607 if (stcb == NULL) { 4608 /* 4609 * reduce inp's refcount if not reduced in 4610 * sctp_findassociation_ep_asconf(). 
4611 */ 4612 SCTP_INP_DECR_REF(inp); 4613 } else { 4614 locked_tcb = stcb; 4615 } 4616 4617 /* now go back and verify any auth chunk to be sure */ 4618 if (auth_skipped && (stcb != NULL)) { 4619 struct sctp_auth_chunk *auth; 4620 4621 auth = (struct sctp_auth_chunk *) 4622 sctp_m_getptr(m, auth_offset, 4623 auth_len, chunk_buf); 4624 got_auth = 1; 4625 auth_skipped = 0; 4626 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, 4627 auth_offset)) { 4628 /* auth HMAC failed so dump it */ 4629 *offset = length; 4630 if (locked_tcb) { 4631 SCTP_TCB_UNLOCK(locked_tcb); 4632 } 4633 return (NULL); 4634 } else { 4635 /* remaining chunks are HMAC checked */ 4636 stcb->asoc.authenticated = 1; 4637 } 4638 } 4639 } 4640 if (stcb == NULL) { 4641 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); 4642 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 4643 msg); 4644 /* no association, so it's out of the blue... */ 4645 sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err, 4646 mflowtype, mflowid, inp->fibnum, 4647 vrf_id, port); 4648 *offset = length; 4649 if (locked_tcb) { 4650 SCTP_TCB_UNLOCK(locked_tcb); 4651 } 4652 return (NULL); 4653 } 4654 asoc = &stcb->asoc; 4655 /* ABORT and SHUTDOWN can use either v_tag... */ 4656 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) || 4657 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) || 4658 (ch->chunk_type == SCTP_PACKET_DROPPED)) { 4659 /* Take the T-bit always into account. */ 4660 if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) && 4661 (vtag_in == asoc->my_vtag)) || 4662 (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) && 4663 (vtag_in == asoc->peer_vtag))) { 4664 /* this is valid */ 4665 } else { 4666 /* drop this packet... 
*/ 4667 SCTP_STAT_INCR(sctps_badvtag); 4668 if (locked_tcb) { 4669 SCTP_TCB_UNLOCK(locked_tcb); 4670 } 4671 return (NULL); 4672 } 4673 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 4674 if (vtag_in != asoc->my_vtag) { 4675 /* 4676 * this could be a stale SHUTDOWN-ACK or the 4677 * peer never got the SHUTDOWN-COMPLETE and 4678 * is still hung; we have started a new asoc 4679 * but it won't complete until the shutdown 4680 * is completed 4681 */ 4682 if (locked_tcb) { 4683 SCTP_TCB_UNLOCK(locked_tcb); 4684 } 4685 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__); 4686 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 4687 msg); 4688 sctp_handle_ootb(m, iphlen, *offset, src, dst, 4689 sh, inp, op_err, 4690 mflowtype, mflowid, fibnum, 4691 vrf_id, port); 4692 return (NULL); 4693 } 4694 } else { 4695 /* for all other chunks, vtag must match */ 4696 if (vtag_in != asoc->my_vtag) { 4697 /* invalid vtag... */ 4698 SCTPDBG(SCTP_DEBUG_INPUT3, 4699 "invalid vtag: %xh, expect %xh\n", 4700 vtag_in, asoc->my_vtag); 4701 SCTP_STAT_INCR(sctps_badvtag); 4702 if (locked_tcb) { 4703 SCTP_TCB_UNLOCK(locked_tcb); 4704 } 4705 *offset = length; 4706 return (NULL); 4707 } 4708 } 4709 } /* end if !SCTP_COOKIE_ECHO */ 4710 /* 4711 * process all control chunks... 4712 */ 4713 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) || 4714 (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) || 4715 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) && 4716 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) { 4717 /* implied cookie-ack.. 
we must have lost the ack */ 4718 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4719 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4720 stcb->asoc.overall_error_count, 4721 0, 4722 SCTP_FROM_SCTP_INPUT, 4723 __LINE__); 4724 } 4725 stcb->asoc.overall_error_count = 0; 4726 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, 4727 *netp); 4728 } 4729 process_control_chunks: 4730 while (IS_SCTP_CONTROL(ch)) { 4731 /* validate chunk length */ 4732 chk_length = ntohs(ch->chunk_length); 4733 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n", 4734 ch->chunk_type, chk_length); 4735 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length); 4736 if (chk_length < sizeof(*ch) || 4737 (*offset + (int)chk_length) > length) { 4738 *offset = length; 4739 if (locked_tcb) { 4740 SCTP_TCB_UNLOCK(locked_tcb); 4741 } 4742 return (NULL); 4743 } 4744 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks); 4745 /* 4746 * INIT-ACK only gets the init ack "header" portion only 4747 * because we don't have to process the peer's COOKIE. All 4748 * others get a complete chunk. 4749 */ 4750 if ((ch->chunk_type == SCTP_INITIATION_ACK) || 4751 (ch->chunk_type == SCTP_INITIATION)) { 4752 /* get an init-ack chunk */ 4753 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4754 sizeof(struct sctp_init_ack_chunk), chunk_buf); 4755 if (ch == NULL) { 4756 *offset = length; 4757 if (locked_tcb) { 4758 SCTP_TCB_UNLOCK(locked_tcb); 4759 } 4760 return (NULL); 4761 } 4762 } else { 4763 /* For cookies and all other chunks. */ 4764 if (chk_length > sizeof(chunk_buf)) { 4765 /* 4766 * use just the size of the chunk buffer so 4767 * the front part of our chunks fit in 4768 * contiguous space up to the chunk buffer 4769 * size (508 bytes). For chunks that need to 4770 * get more than that they must use the 4771 * sctp_m_getptr() function or other means 4772 * (e.g. know how to parse mbuf chains). 4773 * Cookies do this already. 
4774 */ 4775 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4776 (sizeof(chunk_buf) - 4), 4777 chunk_buf); 4778 if (ch == NULL) { 4779 *offset = length; 4780 if (locked_tcb) { 4781 SCTP_TCB_UNLOCK(locked_tcb); 4782 } 4783 return (NULL); 4784 } 4785 } else { 4786 /* We can fit it all */ 4787 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4788 chk_length, chunk_buf); 4789 if (ch == NULL) { 4790 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n"); 4791 *offset = length; 4792 if (locked_tcb) { 4793 SCTP_TCB_UNLOCK(locked_tcb); 4794 } 4795 return (NULL); 4796 } 4797 } 4798 } 4799 num_chunks++; 4800 /* Save off the last place we got a control from */ 4801 if (stcb != NULL) { 4802 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) { 4803 /* 4804 * allow last_control to be NULL if 4805 * ASCONF... ASCONF processing will find the 4806 * right net later 4807 */ 4808 if ((netp != NULL) && (*netp != NULL)) 4809 stcb->asoc.last_control_chunk_from = *netp; 4810 } 4811 } 4812 #ifdef SCTP_AUDITING_ENABLED 4813 sctp_audit_log(0xB0, ch->chunk_type); 4814 #endif 4815 4816 /* check to see if this chunk required auth, but isn't */ 4817 if ((stcb != NULL) && 4818 (stcb->asoc.auth_supported == 1) && 4819 sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) && 4820 !stcb->asoc.authenticated) { 4821 /* "silently" ignore */ 4822 SCTP_STAT_INCR(sctps_recvauthmissing); 4823 goto next_chunk; 4824 } 4825 switch (ch->chunk_type) { 4826 case SCTP_INITIATION: 4827 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n"); 4828 /* The INIT chunk must be the only chunk. */ 4829 if ((num_chunks > 1) || 4830 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 4831 /* RFC 4960 requires that no ABORT is sent */ 4832 *offset = length; 4833 if (locked_tcb) { 4834 SCTP_TCB_UNLOCK(locked_tcb); 4835 } 4836 return (NULL); 4837 } 4838 /* Honor our resource limit. 
*/ 4839 if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) { 4840 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 4841 sctp_abort_association(inp, stcb, m, iphlen, 4842 src, dst, sh, op_err, 4843 mflowtype, mflowid, 4844 vrf_id, port); 4845 *offset = length; 4846 return (NULL); 4847 } 4848 sctp_handle_init(m, iphlen, *offset, src, dst, sh, 4849 (struct sctp_init_chunk *)ch, inp, 4850 stcb, *netp, &abort_no_unlock, 4851 mflowtype, mflowid, 4852 vrf_id, port); 4853 *offset = length; 4854 if ((!abort_no_unlock) && (locked_tcb)) { 4855 SCTP_TCB_UNLOCK(locked_tcb); 4856 } 4857 return (NULL); 4858 break; 4859 case SCTP_PAD_CHUNK: 4860 break; 4861 case SCTP_INITIATION_ACK: 4862 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n"); 4863 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4864 /* We are not interested anymore */ 4865 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4866 ; 4867 } else { 4868 if ((locked_tcb != NULL) && (locked_tcb != stcb)) { 4869 /* Very unlikely */ 4870 SCTP_TCB_UNLOCK(locked_tcb); 4871 } 4872 *offset = length; 4873 if (stcb) { 4874 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4875 so = SCTP_INP_SO(inp); 4876 atomic_add_int(&stcb->asoc.refcnt, 1); 4877 SCTP_TCB_UNLOCK(stcb); 4878 SCTP_SOCKET_LOCK(so, 1); 4879 SCTP_TCB_LOCK(stcb); 4880 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4881 #endif 4882 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 4883 SCTP_FROM_SCTP_INPUT + SCTP_LOC_29); 4884 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4885 SCTP_SOCKET_UNLOCK(so, 1); 4886 #endif 4887 } 4888 return (NULL); 4889 } 4890 } 4891 /* The INIT-ACK chunk must be the only chunk. 
*/ 4892 if ((num_chunks > 1) || 4893 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 4894 *offset = length; 4895 if (locked_tcb) { 4896 SCTP_TCB_UNLOCK(locked_tcb); 4897 } 4898 return (NULL); 4899 } 4900 if ((netp) && (*netp)) { 4901 ret = sctp_handle_init_ack(m, iphlen, *offset, 4902 src, dst, sh, 4903 (struct sctp_init_ack_chunk *)ch, 4904 stcb, *netp, 4905 &abort_no_unlock, 4906 mflowtype, mflowid, 4907 vrf_id); 4908 } else { 4909 ret = -1; 4910 } 4911 *offset = length; 4912 if (abort_no_unlock) { 4913 return (NULL); 4914 } 4915 /* 4916 * Special case, I must call the output routine to 4917 * get the cookie echoed 4918 */ 4919 if ((stcb != NULL) && (ret == 0)) { 4920 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 4921 } 4922 if (locked_tcb) { 4923 SCTP_TCB_UNLOCK(locked_tcb); 4924 } 4925 return (NULL); 4926 break; 4927 case SCTP_SELECTIVE_ACK: 4928 { 4929 struct sctp_sack_chunk *sack; 4930 int abort_now = 0; 4931 uint32_t a_rwnd, cum_ack; 4932 uint16_t num_seg, num_dup; 4933 uint8_t flags; 4934 int offset_seg, offset_dup; 4935 4936 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n"); 4937 SCTP_STAT_INCR(sctps_recvsacks); 4938 if (stcb == NULL) { 4939 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n"); 4940 break; 4941 } 4942 if (chk_length < sizeof(struct sctp_sack_chunk)) { 4943 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n"); 4944 break; 4945 } 4946 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 4947 /*- 4948 * If we have sent a shutdown-ack, we will pay no 4949 * attention to a sack sent in to us since 4950 * we don't care anymore. 
4951 */ 4952 break; 4953 } 4954 sack = (struct sctp_sack_chunk *)ch; 4955 flags = ch->chunk_flags; 4956 cum_ack = ntohl(sack->sack.cum_tsn_ack); 4957 num_seg = ntohs(sack->sack.num_gap_ack_blks); 4958 num_dup = ntohs(sack->sack.num_dup_tsns); 4959 a_rwnd = (uint32_t)ntohl(sack->sack.a_rwnd); 4960 if (sizeof(struct sctp_sack_chunk) + 4961 num_seg * sizeof(struct sctp_gap_ack_block) + 4962 num_dup * sizeof(uint32_t) != chk_length) { 4963 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n"); 4964 break; 4965 } 4966 offset_seg = *offset + sizeof(struct sctp_sack_chunk); 4967 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 4968 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 4969 cum_ack, num_seg, a_rwnd); 4970 stcb->asoc.seen_a_sack_this_pkt = 1; 4971 if ((stcb->asoc.pr_sctp_cnt == 0) && 4972 (num_seg == 0) && 4973 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) && 4974 (stcb->asoc.saw_sack_with_frags == 0) && 4975 (stcb->asoc.saw_sack_with_nr_frags == 0) && 4976 (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) 4977 ) { 4978 /* 4979 * We have a SIMPLE sack having no 4980 * prior segments and data on sent 4981 * queue to be acked.. Use the 4982 * faster path sack processing. We 4983 * also allow window update sacks 4984 * with no missing segments to go 4985 * this way too. 
4986 */ 4987 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now, ecne_seen); 4988 } else { 4989 if (netp && *netp) 4990 sctp_handle_sack(m, offset_seg, offset_dup, stcb, 4991 num_seg, 0, num_dup, &abort_now, flags, 4992 cum_ack, a_rwnd, ecne_seen); 4993 } 4994 if (abort_now) { 4995 /* ABORT signal from sack processing */ 4996 *offset = length; 4997 return (NULL); 4998 } 4999 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 5000 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 5001 (stcb->asoc.stream_queue_cnt == 0)) { 5002 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 5003 } 5004 } 5005 break; 5006 /* 5007 * EY - nr_sack: If the received chunk is an 5008 * nr_sack chunk 5009 */ 5010 case SCTP_NR_SELECTIVE_ACK: 5011 { 5012 struct sctp_nr_sack_chunk *nr_sack; 5013 int abort_now = 0; 5014 uint32_t a_rwnd, cum_ack; 5015 uint16_t num_seg, num_nr_seg, num_dup; 5016 uint8_t flags; 5017 int offset_seg, offset_dup; 5018 5019 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n"); 5020 SCTP_STAT_INCR(sctps_recvsacks); 5021 if (stcb == NULL) { 5022 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n"); 5023 break; 5024 } 5025 if (stcb->asoc.nrsack_supported == 0) { 5026 goto unknown_chunk; 5027 } 5028 if (chk_length < sizeof(struct sctp_nr_sack_chunk)) { 5029 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n"); 5030 break; 5031 } 5032 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 5033 /*- 5034 * If we have sent a shutdown-ack, we will pay no 5035 * attention to a sack sent in to us since 5036 * we don't care anymore. 
5037 */ 5038 break; 5039 } 5040 nr_sack = (struct sctp_nr_sack_chunk *)ch; 5041 flags = ch->chunk_flags; 5042 cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack); 5043 num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks); 5044 num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks); 5045 num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns); 5046 a_rwnd = (uint32_t)ntohl(nr_sack->nr_sack.a_rwnd); 5047 if (sizeof(struct sctp_nr_sack_chunk) + 5048 (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) + 5049 num_dup * sizeof(uint32_t) != chk_length) { 5050 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n"); 5051 break; 5052 } 5053 offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk); 5054 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 5055 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 5056 cum_ack, num_seg, a_rwnd); 5057 stcb->asoc.seen_a_sack_this_pkt = 1; 5058 if ((stcb->asoc.pr_sctp_cnt == 0) && 5059 (num_seg == 0) && (num_nr_seg == 0) && 5060 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) && 5061 (stcb->asoc.saw_sack_with_frags == 0) && 5062 (stcb->asoc.saw_sack_with_nr_frags == 0) && 5063 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 5064 /* 5065 * We have a SIMPLE sack having no 5066 * prior segments and data on sent 5067 * queue to be acked. Use the faster 5068 * path sack processing. We also 5069 * allow window update sacks with no 5070 * missing segments to go this way 5071 * too. 
5072 */ 5073 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 5074 &abort_now, ecne_seen); 5075 } else { 5076 if (netp && *netp) 5077 sctp_handle_sack(m, offset_seg, offset_dup, stcb, 5078 num_seg, num_nr_seg, num_dup, &abort_now, flags, 5079 cum_ack, a_rwnd, ecne_seen); 5080 } 5081 if (abort_now) { 5082 /* ABORT signal from sack processing */ 5083 *offset = length; 5084 return (NULL); 5085 } 5086 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 5087 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 5088 (stcb->asoc.stream_queue_cnt == 0)) { 5089 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 5090 } 5091 } 5092 break; 5093 5094 case SCTP_HEARTBEAT_REQUEST: 5095 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n"); 5096 if ((stcb) && netp && *netp) { 5097 SCTP_STAT_INCR(sctps_recvheartbeat); 5098 sctp_send_heartbeat_ack(stcb, m, *offset, 5099 chk_length, *netp); 5100 5101 /* He's alive so give him credit */ 5102 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5103 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5104 stcb->asoc.overall_error_count, 5105 0, 5106 SCTP_FROM_SCTP_INPUT, 5107 __LINE__); 5108 } 5109 stcb->asoc.overall_error_count = 0; 5110 } 5111 break; 5112 case SCTP_HEARTBEAT_ACK: 5113 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n"); 5114 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) { 5115 /* Its not ours */ 5116 *offset = length; 5117 if (locked_tcb) { 5118 SCTP_TCB_UNLOCK(locked_tcb); 5119 } 5120 return (NULL); 5121 } 5122 /* He's alive so give him credit */ 5123 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5124 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5125 stcb->asoc.overall_error_count, 5126 0, 5127 SCTP_FROM_SCTP_INPUT, 5128 __LINE__); 5129 } 5130 stcb->asoc.overall_error_count = 0; 5131 SCTP_STAT_INCR(sctps_recvheartbeatack); 5132 if (netp && *netp) 5133 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch, 5134 stcb, *netp); 5135 break; 5136 case SCTP_ABORT_ASSOCIATION: 
5137 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n", 5138 (void *)stcb); 5139 if ((stcb) && netp && *netp) 5140 sctp_handle_abort((struct sctp_abort_chunk *)ch, 5141 stcb, *netp); 5142 *offset = length; 5143 return (NULL); 5144 break; 5145 case SCTP_SHUTDOWN: 5146 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n", 5147 (void *)stcb); 5148 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) { 5149 *offset = length; 5150 if (locked_tcb) { 5151 SCTP_TCB_UNLOCK(locked_tcb); 5152 } 5153 return (NULL); 5154 } 5155 if (netp && *netp) { 5156 int abort_flag = 0; 5157 5158 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch, 5159 stcb, *netp, &abort_flag); 5160 if (abort_flag) { 5161 *offset = length; 5162 return (NULL); 5163 } 5164 } 5165 break; 5166 case SCTP_SHUTDOWN_ACK: 5167 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", (void *)stcb); 5168 if ((stcb) && (netp) && (*netp)) 5169 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp); 5170 *offset = length; 5171 return (NULL); 5172 break; 5173 5174 case SCTP_OPERATION_ERROR: 5175 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n"); 5176 if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) { 5177 *offset = length; 5178 return (NULL); 5179 } 5180 break; 5181 case SCTP_COOKIE_ECHO: 5182 SCTPDBG(SCTP_DEBUG_INPUT3, 5183 "SCTP_COOKIE-ECHO, stcb %p\n", (void *)stcb); 5184 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 5185 ; 5186 } else { 5187 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5188 /* We are not interested anymore */ 5189 abend: 5190 if (stcb) { 5191 SCTP_TCB_UNLOCK(stcb); 5192 } 5193 *offset = length; 5194 return (NULL); 5195 } 5196 } 5197 /*- 5198 * First are we accepting? We do this again here 5199 * since it is possible that a previous endpoint WAS 5200 * listening responded to a INIT-ACK and then 5201 * closed. We opened and bound.. and are now no 5202 * longer listening. 
5203 * 5204 * XXXGL: notes on checking listen queue length. 5205 * 1) SCTP_IS_LISTENING() doesn't necessarily mean 5206 * SOLISTENING(), because a listening "UDP type" 5207 * socket isn't listening in terms of the socket 5208 * layer. It is a normal data flow socket, that 5209 * can fork off new connections. Thus, we should 5210 * look into sol_qlen only in case we are !UDP. 5211 * 2) Checking sol_qlen in general requires locking 5212 * the socket, and this code lacks that. 5213 */ 5214 if ((stcb == NULL) && 5215 (!SCTP_IS_LISTENING(inp) || 5216 (!(inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) && 5217 inp->sctp_socket->sol_qlen >= inp->sctp_socket->sol_qlimit))) { 5218 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 5219 (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) { 5220 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 5221 sctp_abort_association(inp, stcb, m, iphlen, 5222 src, dst, sh, op_err, 5223 mflowtype, mflowid, 5224 vrf_id, port); 5225 } 5226 *offset = length; 5227 return (NULL); 5228 } else { 5229 struct mbuf *ret_buf; 5230 struct sctp_inpcb *linp; 5231 5232 if (stcb) { 5233 linp = NULL; 5234 } else { 5235 linp = inp; 5236 } 5237 5238 if (linp) { 5239 SCTP_ASOC_CREATE_LOCK(linp); 5240 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5241 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5242 SCTP_ASOC_CREATE_UNLOCK(linp); 5243 goto abend; 5244 } 5245 } 5246 if (netp) { 5247 ret_buf = 5248 sctp_handle_cookie_echo(m, iphlen, 5249 *offset, 5250 src, dst, 5251 sh, 5252 (struct sctp_cookie_echo_chunk *)ch, 5253 &inp, &stcb, netp, 5254 auth_skipped, 5255 auth_offset, 5256 auth_len, 5257 &locked_tcb, 5258 mflowtype, 5259 mflowid, 5260 vrf_id, 5261 port); 5262 } else { 5263 ret_buf = NULL; 5264 } 5265 if (linp) { 5266 SCTP_ASOC_CREATE_UNLOCK(linp); 5267 } 5268 if (ret_buf == NULL) { 5269 if (locked_tcb) { 5270 SCTP_TCB_UNLOCK(locked_tcb); 5271 } 5272 SCTPDBG(SCTP_DEBUG_INPUT3, 5273 "GAK, null buffer\n"); 5274 *offset = length; 5275 
return (NULL); 5276 } 5277 /* if AUTH skipped, see if it verified... */ 5278 if (auth_skipped) { 5279 got_auth = 1; 5280 auth_skipped = 0; 5281 } 5282 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) { 5283 /* 5284 * Restart the timer if we have 5285 * pending data 5286 */ 5287 struct sctp_tmit_chunk *chk; 5288 5289 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 5290 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 5291 } 5292 } 5293 break; 5294 case SCTP_COOKIE_ACK: 5295 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", (void *)stcb); 5296 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) { 5297 if (locked_tcb) { 5298 SCTP_TCB_UNLOCK(locked_tcb); 5299 } 5300 return (NULL); 5301 } 5302 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5303 /* We are not interested anymore */ 5304 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 5305 ; 5306 } else if (stcb) { 5307 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5308 so = SCTP_INP_SO(inp); 5309 atomic_add_int(&stcb->asoc.refcnt, 1); 5310 SCTP_TCB_UNLOCK(stcb); 5311 SCTP_SOCKET_LOCK(so, 1); 5312 SCTP_TCB_LOCK(stcb); 5313 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5314 #endif 5315 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 5316 SCTP_FROM_SCTP_INPUT + SCTP_LOC_30); 5317 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5318 SCTP_SOCKET_UNLOCK(so, 1); 5319 #endif 5320 *offset = length; 5321 return (NULL); 5322 } 5323 } 5324 /* He's alive so give him credit */ 5325 if ((stcb) && netp && *netp) { 5326 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5327 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5328 stcb->asoc.overall_error_count, 5329 0, 5330 SCTP_FROM_SCTP_INPUT, 5331 __LINE__); 5332 } 5333 stcb->asoc.overall_error_count = 0; 5334 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp); 5335 } 5336 break; 5337 case SCTP_ECN_ECHO: 5338 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n"); 5339 /* He's alive so give him 
credit */ 5340 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) { 5341 /* Its not ours */ 5342 if (locked_tcb) { 5343 SCTP_TCB_UNLOCK(locked_tcb); 5344 } 5345 *offset = length; 5346 return (NULL); 5347 } 5348 if (stcb) { 5349 if (stcb->asoc.ecn_supported == 0) { 5350 goto unknown_chunk; 5351 } 5352 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5353 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5354 stcb->asoc.overall_error_count, 5355 0, 5356 SCTP_FROM_SCTP_INPUT, 5357 __LINE__); 5358 } 5359 stcb->asoc.overall_error_count = 0; 5360 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, 5361 stcb); 5362 ecne_seen = 1; 5363 } 5364 break; 5365 case SCTP_ECN_CWR: 5366 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n"); 5367 /* He's alive so give him credit */ 5368 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) { 5369 /* Its not ours */ 5370 if (locked_tcb) { 5371 SCTP_TCB_UNLOCK(locked_tcb); 5372 } 5373 *offset = length; 5374 return (NULL); 5375 } 5376 if (stcb) { 5377 if (stcb->asoc.ecn_supported == 0) { 5378 goto unknown_chunk; 5379 } 5380 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5381 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5382 stcb->asoc.overall_error_count, 5383 0, 5384 SCTP_FROM_SCTP_INPUT, 5385 __LINE__); 5386 } 5387 stcb->asoc.overall_error_count = 0; 5388 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp); 5389 } 5390 break; 5391 case SCTP_SHUTDOWN_COMPLETE: 5392 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", (void *)stcb); 5393 /* must be first and only chunk */ 5394 if ((num_chunks > 1) || 5395 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 5396 *offset = length; 5397 if (locked_tcb) { 5398 SCTP_TCB_UNLOCK(locked_tcb); 5399 } 5400 return (NULL); 5401 } 5402 if ((stcb) && netp && *netp) { 5403 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch, 5404 stcb, *netp); 5405 } 5406 *offset = length; 5407 return (NULL); 5408 break; 5409 case 
SCTP_ASCONF: 5410 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n"); 5411 /* He's alive so give him credit */ 5412 if (stcb) { 5413 if (stcb->asoc.asconf_supported == 0) { 5414 goto unknown_chunk; 5415 } 5416 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5417 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5418 stcb->asoc.overall_error_count, 5419 0, 5420 SCTP_FROM_SCTP_INPUT, 5421 __LINE__); 5422 } 5423 stcb->asoc.overall_error_count = 0; 5424 sctp_handle_asconf(m, *offset, src, 5425 (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0); 5426 asconf_cnt++; 5427 } 5428 break; 5429 case SCTP_ASCONF_ACK: 5430 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n"); 5431 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) { 5432 /* Its not ours */ 5433 if (locked_tcb) { 5434 SCTP_TCB_UNLOCK(locked_tcb); 5435 } 5436 *offset = length; 5437 return (NULL); 5438 } 5439 if ((stcb) && netp && *netp) { 5440 if (stcb->asoc.asconf_supported == 0) { 5441 goto unknown_chunk; 5442 } 5443 /* He's alive so give him credit */ 5444 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5445 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5446 stcb->asoc.overall_error_count, 5447 0, 5448 SCTP_FROM_SCTP_INPUT, 5449 __LINE__); 5450 } 5451 stcb->asoc.overall_error_count = 0; 5452 sctp_handle_asconf_ack(m, *offset, 5453 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock); 5454 if (abort_no_unlock) 5455 return (NULL); 5456 } 5457 break; 5458 case SCTP_FORWARD_CUM_TSN: 5459 case SCTP_IFORWARD_CUM_TSN: 5460 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n"); 5461 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) { 5462 /* Its not ours */ 5463 if (locked_tcb) { 5464 SCTP_TCB_UNLOCK(locked_tcb); 5465 } 5466 *offset = length; 5467 return (NULL); 5468 } 5469 /* He's alive so give him credit */ 5470 if (stcb) { 5471 int abort_flag = 0; 5472 5473 if (stcb->asoc.prsctp_supported == 0) { 5474 goto unknown_chunk; 5475 } 5476 stcb->asoc.overall_error_count = 0; 5477 if 
(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5478 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5479 stcb->asoc.overall_error_count, 5480 0, 5481 SCTP_FROM_SCTP_INPUT, 5482 __LINE__); 5483 } 5484 *fwd_tsn_seen = 1; 5485 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5486 /* We are not interested anymore */ 5487 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5488 so = SCTP_INP_SO(inp); 5489 atomic_add_int(&stcb->asoc.refcnt, 1); 5490 SCTP_TCB_UNLOCK(stcb); 5491 SCTP_SOCKET_LOCK(so, 1); 5492 SCTP_TCB_LOCK(stcb); 5493 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5494 #endif 5495 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 5496 SCTP_FROM_SCTP_INPUT + SCTP_LOC_31); 5497 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5498 SCTP_SOCKET_UNLOCK(so, 1); 5499 #endif 5500 *offset = length; 5501 return (NULL); 5502 } 5503 /* 5504 * For sending a SACK this looks like DATA 5505 * chunks. 5506 */ 5507 stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from; 5508 sctp_handle_forward_tsn(stcb, 5509 (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset); 5510 if (abort_flag) { 5511 *offset = length; 5512 return (NULL); 5513 } else { 5514 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5515 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5516 stcb->asoc.overall_error_count, 5517 0, 5518 SCTP_FROM_SCTP_INPUT, 5519 __LINE__); 5520 } 5521 stcb->asoc.overall_error_count = 0; 5522 } 5523 5524 } 5525 break; 5526 case SCTP_STREAM_RESET: 5527 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n"); 5528 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) { 5529 /* Its not ours */ 5530 if (locked_tcb) { 5531 SCTP_TCB_UNLOCK(locked_tcb); 5532 } 5533 *offset = length; 5534 return (NULL); 5535 } 5536 if (stcb->asoc.reconfig_supported == 0) { 5537 goto unknown_chunk; 5538 } 5539 if (sctp_handle_stream_reset(stcb, m, *offset, ch)) { 5540 /* stop processing */ 5541 *offset = length; 5542 
return (NULL); 5543 } 5544 break; 5545 case SCTP_PACKET_DROPPED: 5546 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n"); 5547 /* re-get it all please */ 5548 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) { 5549 /* Its not ours */ 5550 if (locked_tcb) { 5551 SCTP_TCB_UNLOCK(locked_tcb); 5552 } 5553 *offset = length; 5554 return (NULL); 5555 } 5556 if (ch && (stcb) && netp && (*netp)) { 5557 if (stcb->asoc.pktdrop_supported == 0) { 5558 goto unknown_chunk; 5559 } 5560 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch, 5561 stcb, *netp, 5562 min(chk_length, (sizeof(chunk_buf) - 4))); 5563 5564 } 5565 break; 5566 case SCTP_AUTHENTICATION: 5567 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n"); 5568 if (stcb == NULL) { 5569 /* save the first AUTH for later processing */ 5570 if (auth_skipped == 0) { 5571 auth_offset = *offset; 5572 auth_len = chk_length; 5573 auth_skipped = 1; 5574 } 5575 /* skip this chunk (temporarily) */ 5576 goto next_chunk; 5577 } 5578 if (stcb->asoc.auth_supported == 0) { 5579 goto unknown_chunk; 5580 } 5581 if ((chk_length < (sizeof(struct sctp_auth_chunk))) || 5582 (chk_length > (sizeof(struct sctp_auth_chunk) + 5583 SCTP_AUTH_DIGEST_LEN_MAX))) { 5584 /* Its not ours */ 5585 if (locked_tcb) { 5586 SCTP_TCB_UNLOCK(locked_tcb); 5587 } 5588 *offset = length; 5589 return (NULL); 5590 } 5591 if (got_auth == 1) { 5592 /* skip this chunk... it's already auth'd */ 5593 goto next_chunk; 5594 } 5595 got_auth = 1; 5596 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, 5597 m, *offset)) { 5598 /* auth HMAC failed so dump the packet */ 5599 *offset = length; 5600 return (stcb); 5601 } else { 5602 /* remaining chunks are HMAC checked */ 5603 stcb->asoc.authenticated = 1; 5604 } 5605 break; 5606 5607 default: 5608 unknown_chunk: 5609 /* it's an unknown chunk! 
*/ 5610 if ((ch->chunk_type & 0x40) && (stcb != NULL)) { 5611 struct sctp_gen_error_cause *cause; 5612 int len; 5613 5614 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 5615 0, M_NOWAIT, 1, MT_DATA); 5616 if (op_err != NULL) { 5617 len = min(SCTP_SIZE32(chk_length), (uint32_t)(length - *offset)); 5618 cause = mtod(op_err, struct sctp_gen_error_cause *); 5619 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 5620 cause->length = htons((uint16_t)(len + sizeof(struct sctp_gen_error_cause))); 5621 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 5622 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT); 5623 if (SCTP_BUF_NEXT(op_err) != NULL) { 5624 #ifdef SCTP_MBUF_LOGGING 5625 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 5626 sctp_log_mbc(SCTP_BUF_NEXT(op_err), SCTP_MBUF_ICOPY); 5627 } 5628 #endif 5629 sctp_queue_op_err(stcb, op_err); 5630 } else { 5631 sctp_m_freem(op_err); 5632 } 5633 } 5634 } 5635 if ((ch->chunk_type & 0x80) == 0) { 5636 /* discard this packet */ 5637 *offset = length; 5638 return (stcb); 5639 } /* else skip this bad chunk and continue... 
*/ 5640 break; 5641 } /* switch (ch->chunk_type) */ 5642 5643 5644 next_chunk: 5645 /* get the next chunk */ 5646 *offset += SCTP_SIZE32(chk_length); 5647 if (*offset >= length) { 5648 /* no more data left in the mbuf chain */ 5649 break; 5650 } 5651 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 5652 sizeof(struct sctp_chunkhdr), chunk_buf); 5653 if (ch == NULL) { 5654 if (locked_tcb) { 5655 SCTP_TCB_UNLOCK(locked_tcb); 5656 } 5657 *offset = length; 5658 return (NULL); 5659 } 5660 } /* while */ 5661 5662 if (asconf_cnt > 0 && stcb != NULL) { 5663 sctp_send_asconf_ack(stcb); 5664 } 5665 return (stcb); 5666 } 5667 5668 5669 /* 5670 * common input chunk processing (v4 and v6) 5671 */ 5672 void 5673 sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length, 5674 struct sockaddr *src, struct sockaddr *dst, 5675 struct sctphdr *sh, struct sctp_chunkhdr *ch, 5676 #if !defined(SCTP_WITH_NO_CSUM) 5677 uint8_t compute_crc, 5678 #endif 5679 uint8_t ecn_bits, 5680 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 5681 uint32_t vrf_id, uint16_t port) 5682 { 5683 uint32_t high_tsn; 5684 int fwd_tsn_seen = 0, data_processed = 0; 5685 struct mbuf *m = *mm, *op_err; 5686 char msg[SCTP_DIAG_INFO_LEN]; 5687 int un_sent; 5688 int cnt_ctrl_ready = 0; 5689 struct sctp_inpcb *inp = NULL, *inp_decr = NULL; 5690 struct sctp_tcb *stcb = NULL; 5691 struct sctp_nets *net = NULL; 5692 5693 SCTP_STAT_INCR(sctps_recvdatagrams); 5694 #ifdef SCTP_AUDITING_ENABLED 5695 sctp_audit_log(0xE0, 1); 5696 sctp_auditing(0, inp, stcb, net); 5697 #endif 5698 #if !defined(SCTP_WITH_NO_CSUM) 5699 if (compute_crc != 0) { 5700 uint32_t check, calc_check; 5701 5702 check = sh->checksum; 5703 sh->checksum = 0; 5704 calc_check = sctp_calculate_cksum(m, iphlen); 5705 sh->checksum = check; 5706 if (calc_check != check) { 5707 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n", 5708 calc_check, check, (void *)m, length, iphlen); 
			/*
			 * NOTE(review): this branch increments sctps_badsum /
			 * sctps_checksumerrors below, so it appears to be the
			 * CRC32c-failure path; the head of the enclosing if is
			 * outside this view -- confirm against the full file.
			 * Even though the checksum is bad, look the association
			 * up so the drop can be reported to the peer.
			 */
			stcb = sctp_findassociation_addr(m, offset, src, dst,
			    sh, ch, &inp, &net, vrf_id);
#if defined(INET) || defined(INET6)
			/*
			 * Track a change of the peer's UDP encapsulation port
			 * (RFC 6951 style): the path MTU gains or loses the
			 * 8-byte UDP header accordingly.
			 */
			if ((ch->chunk_type != SCTP_INITIATION) &&
			    (net != NULL) && (net->port != port)) {
				if (net->port == 0) {
					/* UDP encapsulation turned on. */
					net->mtu -= sizeof(struct udphdr);
					if (stcb->asoc.smallest_mtu > net->mtu) {
						sctp_pathmtu_adjustment(stcb, net->mtu);
					}
				} else if (port == 0) {
					/* UDP encapsulation turned off. */
					net->mtu += sizeof(struct udphdr);
					/* XXX Update smallest_mtu */
				}
				net->port = port;
			}
#endif
			if (net != NULL) {
				/* Remember the flow identity for ECMP-consistent output. */
				net->flowtype = mflowtype;
				net->flowid = mflowid;
			}
			if ((inp != NULL) && (stcb != NULL)) {
				/*
				 * Known association: tell the peer we dropped a
				 * packet and give output a chance to run.
				 */
				sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
			} else if ((inp != NULL) && (stcb == NULL)) {
				/* Lookup referenced the inp only; drop the ref at "out". */
				inp_decr = inp;
			}
			SCTP_STAT_INCR(sctps_badsum);
			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
			goto out;
		}
	}
#endif
	/* Destination port of 0 is illegal, based on RFC4960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	/* Checksum is good (or not required): find the owning association. */
	stcb = sctp_findassociation_addr(m, offset, src, dst,
	    sh, ch, &inp, &net, vrf_id);
#if defined(INET) || defined(INET6)
	/* Same UDP-encapsulation port-change handling as in the CRC-error path. */
	if ((ch->chunk_type != SCTP_INITIATION) &&
	    (net != NULL) && (net->port != port)) {
		if (net->port == 0) {
			/* UDP encapsulation turned on. */
			net->mtu -= sizeof(struct udphdr);
			if (stcb->asoc.smallest_mtu > net->mtu) {
				sctp_pathmtu_adjustment(stcb, net->mtu);
			}
		} else if (port == 0) {
			/* UDP encapsulation turned off. */
			net->mtu += sizeof(struct udphdr);
			/* XXX Update smallest_mtu */
		}
		net->port = port;
	}
#endif
	if (net != NULL) {
		net->flowtype = mflowtype;
		net->flowid = mflowid;
	}
	if (inp == NULL) {
		/* No endpoint listens on this port: out-of-the-blue packet. */
		SCTP_STAT_INCR(sctps_noport);
		if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
			/* ICMP-style bandwidth limiting says stay silent. */
			goto out;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			/* Per RFC 4960 OOTB rules, answer with SHUTDOWN-COMPLETE. */
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			goto out;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			goto out;
		}
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
			/*
			 * sctp_blackhole sysctl: 0 = always ABORT OOTB,
			 * 1 = ABORT everything except INIT, 2+ = silent.
			 */
			if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
			    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
			    (ch->chunk_type != SCTP_INIT))) {
				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
				    "Out of the blue");
				sctp_send_abort(m, iphlen, src, dst,
				    sh, 0, op_err,
				    mflowtype, mflowid, fibnum,
				    vrf_id, port);
			}
		}
		goto out;
	} else if (stcb == NULL) {
		/* Endpoint found but no association; drop the inp ref at "out". */
		inp_decr = inp;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
	    (void *)m, iphlen, offset, length, (void *)stcb);
	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
		    (void *)stcb, stcb->asoc.state);

		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/*-
			 * If we hit here, we had a ref count
			 * up when the assoc was aborted and the
			 * timer is clearing out the assoc, we should
			 * NOT respond to any packet.. its OOTB.
			 */
			SCTP_TCB_UNLOCK(stcb);
			stcb = NULL;
			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    msg);
			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
			    mflowtype, mflowid, inp->fibnum,
			    vrf_id, port);
			goto out;
		}
	}
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		/* sa_ignore NO_NULL_CHK */
		stcb = sctp_process_control(m, iphlen, &offset, length,
		    src, dst, sh, ch,
		    inp, stcb, &net, &fwd_tsn_seen,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
		if (stcb) {
			/*
			 * This covers us if the cookie-echo was there and
			 * it changes our INP.
			 */
			inp = stcb->sctp_ep;
#if defined(INET) || defined(INET6)
			/*
			 * Re-check the encapsulation port: control processing
			 * (e.g. COOKIE-ECHO) may have created net/stcb above.
			 */
			if ((ch->chunk_type != SCTP_INITIATION) &&
			    (net != NULL) && (net->port != port)) {
				if (net->port == 0) {
					/* UDP encapsulation turned on. */
					net->mtu -= sizeof(struct udphdr);
					if (stcb->asoc.smallest_mtu > net->mtu) {
						sctp_pathmtu_adjustment(stcb, net->mtu);
					}
				} else if (port == 0) {
					/* UDP encapsulation turned off. */
					net->mtu += sizeof(struct udphdr);
					/* XXX Update smallest_mtu */
				}
				net->port = port;
			}
#endif
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */

		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) &&
		    (stcb->asoc.auth_supported == 1) &&
		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			goto out;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    msg);
			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			goto out;
		}
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch! */
			SCTP_STAT_INCR(sctps_badvtag);
			goto out;
		}
	}

	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		goto out;
	}
	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */

	/*
	 * Rest should be DATA only. Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) &&
	    (stcb != NULL) &&
	    (stcb->asoc.auth_supported == 1) &&
	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		    "Data chunk requires AUTH, skipped\n");
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
		switch (SCTP_GET_STATE(&stcb->asoc)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * we consider data with valid tags in this state
			 * shows us the cookie-ack was lost. Imply it was
			 * there.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    msg);
			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
			    mflowtype, mflowid, inp->fibnum,
			    vrf_id, port);
			goto out;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			goto out;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			/* DATA is acceptable in these two states only. */
			break;
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length,
		    inp, stcb, net, &high_tsn);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			stcb = NULL;
			goto out;
		}
		data_processed = 1;
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}
	/* take care of ecn */
	if ((data_processed == 1) &&
	    (stcb->asoc.ecn_supported == 1) &&
	    ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
		/* Yep, we need to add a ECNE */
		sctp_send_ecn_echo(stcb, net, high_tsn);
	}
	if ((data_processed == 0) && (fwd_tsn_seen)) {
		/*
		 * FORWARD-TSN without new DATA: decide whether an immediate
		 * SACK is needed based on whether a gap exists above the
		 * cumulative TSN.
		 */
		int was_a_gap;
		uint32_t highest_tsn;

		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
		} else {
			highest_tsn = stcb->asoc.highest_tsn_inside_map;
		}
		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
		stcb->asoc.send_sack = 1;
		sctp_sack_check(stcb, was_a_gap);
	} else if (fwd_tsn_seen) {
		stcb->asoc.send_sack = 1;
	}
	/* trigger send of any chunks in queue... */
trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
	    stcb->asoc.peers_rwnd,
	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	    stcb->asoc.total_flight);
	/* Bytes queued by the application but not yet in flight. */
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		/* Pending control chunks, not counting queued ECN-echoes. */
		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
	}
	/*
	 * Kick the output path if anything is sendable: queued ASCONFs,
	 * ready control chunks, a pending stream reset, or user data that
	 * fits the peer's window (or a zero-window probe when nothing is
	 * in flight).
	 */
	if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) ||
	    cnt_ctrl_ready ||
	    stcb->asoc.trigger_reset ||
	    ((un_sent) &&
	    (stcb->asoc.peers_rwnd > 0 ||
	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
out:
	/* Common exit: release the tcb lock and any extra inp reference. */
	if (stcb != NULL) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if (inp_decr != NULL) {
		/* reduce ref-count */
		SCTP_INP_WLOCK(inp_decr);
		SCTP_INP_DECR_REF(inp_decr);
		SCTP_INP_WUNLOCK(inp_decr);
	}
	return;
}

#ifdef INET
/*
 * IPv4 input entry for SCTP (optionally UDP-encapsulated when port != 0).
 * Pulls up the IP + SCTP + first chunk headers, builds sockaddr_in
 * src/dst from the IP header, validates the total length and rejects
 * broadcast/multicast destinations, decides whether software CRC32c
 * verification is needed (skipped when the NIC set CSUM_SCTP_VALID),
 * and hands the packet to sctp_common_input_processing().
 * The mbuf chain is consumed: any chain still present on return is
 * freed here (common processing presumably NULLs *m when it keeps or
 * frees the chain itself -- verify against its definition).
 */
void
sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
{
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct sockaddr_in src, dst;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_chunkhdr *ch;
	int length, offset;
#if !defined(SCTP_WITH_NO_CSUM)
	uint8_t compute_crc;
#endif
	uint32_t mflowid;
	uint8_t mflowtype;
	uint16_t fibnum;

	iphlen = off;
	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	m = SCTP_HEADER_TO_CHAIN(i_pak);
#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mbc(m, SCTP_MBUF_INPUT);
	}
#endif
#ifdef SCTP_PACKET_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
		sctp_packet_log(m);
	}
#endif
	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
	    "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
	    m->m_pkthdr.len,
	    if_name(m->m_pkthdr.rcvif),
	    (int)m->m_pkthdr.csum_flags, CSUM_BITS);
	mflowid = m->m_pkthdr.flowid;
	mflowtype = M_HASHTYPE_GET(m);
	fibnum = M_GETFIB(m);
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	if (SCTP_BUF_LEN(m) < offset) {
		if ((m = m_pullup(m, offset)) == NULL) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
	}
	ip = mtod(m, struct ip *);
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
	/* offset now points at the first chunk header. */
	offset -= sizeof(struct sctp_chunkhdr);
	memset(&src, 0, sizeof(struct sockaddr_in));
	src.sin_family = AF_INET;
	src.sin_len = sizeof(struct sockaddr_in);
	src.sin_port = sh->src_port;
	src.sin_addr = ip->ip_src;
	memset(&dst, 0, sizeof(struct sockaddr_in));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(struct sockaddr_in);
	dst.sin_port = sh->dest_port;
	dst.sin_addr = ip->ip_dst;
	length = ntohs(ip->ip_len);
	/* Validate mbuf chain length with IP payload length. */
	if (SCTP_HEADER_LEN(m) != length) {
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
		SCTP_STAT_INCR(sctps_hdrops);
		goto out;
	}
	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
		goto out;
	}
	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
		goto out;
	}
	ecn_bits = ip->ip_tos;
#if defined(SCTP_WITH_NO_CSUM)
	SCTP_STAT_INCR(sctps_recvnocrc);
#else
	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
		/* NIC already verified the CRC32c; skip the software check. */
		SCTP_STAT_INCR(sctps_recvhwcrc);
		compute_crc = 0;
	} else {
		SCTP_STAT_INCR(sctps_recvswcrc);
		compute_crc = 1;
	}
#endif
	sctp_common_input_processing(&m, iphlen, offset, length,
	    (struct sockaddr *)&src,
	    (struct sockaddr *)&dst,
	    sh, ch,
#if !defined(SCTP_WITH_NO_CSUM)
	    compute_crc,
#endif
	    ecn_bits,
	    mflowtype, mflowid, fibnum,
	    vrf_id, port);
out:
	if (m) {
		sctp_m_freem(m);
	}
	return;
}

#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
extern int *sctp_cpuarry;
#endif

/*
 * Protocol-switch entry point (pr_input) for native (non-encapsulated)
 * SCTP over IPv4.  With multi-core input enabled on SMP, it derives a
 * flow id (from the lower layers, or hashed here from v_tag and ports)
 * and queues the packet to a per-flow CPU; otherwise it processes the
 * packet inline via sctp_input_with_port() with port 0 (no UDP
 * encapsulation).  Always returns IPPROTO_DONE.
 */
int
sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
{
	struct mbuf *m;
	int off;

	m = *mp;
	off = *offp;
#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
	if (mp_ncpus > 1) {
		struct ip *ip;
		struct sctphdr *sh;
		int offset;
		int cpu_to_use;
		uint32_t flowid, tag;

		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
			flowid = m->m_pkthdr.flowid;
		} else {
			/*
			 * No flow id built by lower layers fix it so we
			 * create one.
			 */
			offset = off + sizeof(struct sctphdr);
			if (SCTP_BUF_LEN(m) < offset) {
				if ((m = m_pullup(m, offset)) == NULL) {
					SCTP_STAT_INCR(sctps_hdrops);
					return (IPPROTO_DONE);
				}
			}
			ip = mtod(m, struct ip *);
			sh = (struct sctphdr *)((caddr_t)ip + off);
			/*
			 * Byte order is irrelevant here: the value is only
			 * used to spread flows across CPUs deterministically.
			 */
			tag = htonl(sh->v_tag);
			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
			m->m_pkthdr.flowid = flowid;
			M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE_HASH);
		}
		/* Same flow always maps to the same CPU (keeps ordering). */
		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
		sctp_queue_to_mcore(m, off, cpu_to_use);
		return (IPPROTO_DONE);
	}
#endif
	sctp_input_with_port(m, off, 0);
	return (IPPROTO_DONE);
}
#endif