1 /*- 2 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 /* $KAME: sctp_pcb.c,v 1.38 2005/03/06 16:04:18 itojun Exp $ */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include "opt_ipsec.h" 37 #include "opt_compat.h" 38 #include "opt_inet6.h" 39 #include "opt_inet.h" 40 #include "opt_sctp.h" 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/malloc.h> 45 #include <sys/mbuf.h> 46 #include <sys/domain.h> 47 #include <sys/protosw.h> 48 #include <sys/socket.h> 49 #include <sys/socketvar.h> 50 #include <sys/priv.h> 51 #include <sys/proc.h> 52 #include <sys/kernel.h> 53 #include <sys/sysctl.h> 54 55 #include <sys/limits.h> 56 #include <machine/cpu.h> 57 58 #include <net/if.h> 59 #include <net/if_types.h> 60 #include <net/route.h> 61 #include <netinet/in.h> 62 #include <netinet/in_systm.h> 63 #include <netinet/ip.h> 64 #include <netinet/in_pcb.h> 65 #include <netinet/in_var.h> 66 #include <netinet/ip_var.h> 67 68 #ifdef INET6 69 #include <netinet/ip6.h> 70 #include <netinet6/ip6_var.h> 71 #include <netinet6/scope6_var.h> 72 #include <netinet6/in6_pcb.h> 73 #endif /* INET6 */ 74 75 #ifdef IPSEC 76 #include <netinet6/ipsec.h> 77 #include <netkey/key.h> 78 #endif /* IPSEC */ 79 80 #include <netinet/sctp_os.h> 81 #include <netinet/sctp_var.h> 82 #include <netinet/sctp_pcb.h> 83 #include <netinet/sctputil.h> 84 #include <netinet/sctp.h> 85 #include <netinet/sctp_header.h> 86 #include <netinet/sctp_asconf.h> 87 #include <netinet/sctp_output.h> 88 #include <netinet/sctp_timer.h> 89 90 91 #ifdef SCTP_DEBUG 92 uint32_t sctp_debug_on = 0; 93 94 #endif /* SCTP_DEBUG */ 95 96 97 extern int sctp_pcbtblsize; 98 extern int sctp_hashtblsize; 99 extern int sctp_chunkscale; 100 101 struct sctp_epinfo sctppcbinfo; 102 103 /* FIX: we don't handle multiple link local scopes */ 104 /* "scopeless" replacement IN6_ARE_ADDR_EQUAL */ 105 int 106 SCTP6_ARE_ADDR_EQUAL(struct in6_addr *a, struct in6_addr *b) 107 { 108 struct in6_addr tmp_a, 
	    tmp_b;

	/* use a copy of a and b */
	tmp_a = *a;
	tmp_b = *b;
	in6_clearscope(&tmp_a);
	in6_clearscope(&tmp_b);
	return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
}


void
sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
{
	/*
	 * We really don't need to lock this, but I will just because it
	 * does not hurt.
	 */
	SCTP_INP_INFO_RLOCK();
	spcb->ep_count = sctppcbinfo.ipi_count_ep;
	spcb->asoc_count = sctppcbinfo.ipi_count_asoc;
	spcb->laddr_count = sctppcbinfo.ipi_count_laddr;
	spcb->raddr_count = sctppcbinfo.ipi_count_raddr;
	spcb->chk_count = sctppcbinfo.ipi_count_chunk;
	spcb->readq_count = sctppcbinfo.ipi_count_readq;
	spcb->stream_oque = sctppcbinfo.ipi_count_strmoq;
	spcb->free_chunks = sctppcbinfo.ipi_free_chunks;

	SCTP_INP_INFO_RUNLOCK();
}


/*
 * Notes on locks for FreeBSD 5 and up. For any association lookup that has
 * a definite ep, the INP structure is assumed to be locked for reading. If
 * we need to go find the INP (usually when a **inp is passed) then we must
 * lock the INFO structure first and, if needed, lock the INP too. Note that
 * if we lock it we must
 *
 */


/*
 * Given an endpoint, look and find in its association list any association
 * with the "to" address given. This can be a "from" address, too, for
 * inbound packets. For outbound packets it is a true "to" address.
 */

static struct sctp_tcb *
sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
    struct sockaddr *to, struct sctp_nets **netp)
{
	/**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */

	/*
	 * Note for this module: care must be taken when observing what "to"
	 * is used for. In most of the rest of the code the TO field
	 * represents my peer and the FROM field represents my address. For
	 * this module it is the reverse of that.
	 */
	/*
	 * If we support the TCP model, then we must now dig through to see
	 * if we can find our endpoint in the list of tcp ep's.
	 */
	uint16_t lport, rport;
	struct sctppcbhead *ephead;
	struct sctp_inpcb *inp;
	struct sctp_laddr *laddr;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;

	if ((to == NULL) || (from == NULL)) {
		return (NULL);
	}
	if (to->sa_family == AF_INET && from->sa_family == AF_INET) {
		lport = ((struct sockaddr_in *)to)->sin_port;
		rport = ((struct sockaddr_in *)from)->sin_port;
	} else if (to->sa_family == AF_INET6 && from->sa_family == AF_INET6) {
		lport = ((struct sockaddr_in6 *)to)->sin6_port;
		rport = ((struct sockaddr_in6 *)from)->sin6_port;
	} else {
		return NULL;
	}
	ephead = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR(
	    (lport + rport), sctppcbinfo.hashtcpmark)];
	/*
	 * Ok now for each of the guys in this bucket we must look and see:
	 * - Does the remote port match?
	 * - Do the single association's addresses match this address (to)?
	 * If so we update inp_p to point to this ep and return the tcb from
	 * it.
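	 *
	 * Descriptive note (a summary of the code below, not new behaviour):
	 * 1-to-1 style endpoints are hashed on the sum of the two ports, so
	 * the bucket walked here is roughly
	 *
	 *	&sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR(
	 *	    (lport + rport), sctppcbinfo.hashtcpmark)]
	 *
	 * and each candidate endpoint is expected to hold exactly one
	 * association (LIST_FIRST of its sctp_asoc_list), whose nets are
	 * then compared against "from", ignoring the IPv6 scope via
	 * SCTP6_ARE_ADDR_EQUAL().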
198 */ 199 LIST_FOREACH(inp, ephead, sctp_hash) { 200 if (lport != inp->sctp_lport) { 201 continue; 202 } 203 SCTP_INP_RLOCK(inp); 204 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 205 SCTP_INP_RUNLOCK(inp); 206 continue; 207 } 208 /* check to see if the ep has one of the addresses */ 209 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 210 /* We are NOT bound all, so look further */ 211 int match = 0; 212 213 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 214 215 if (laddr->ifa == NULL) { 216 #ifdef SCTP_DEBUG 217 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 218 printf("An ounce of prevention is worth a pound of cure\n"); 219 } 220 #endif 221 continue; 222 } 223 if (laddr->ifa->ifa_addr == NULL) { 224 #ifdef SCTP_DEBUG 225 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 226 printf("ifa with a NULL address\n"); 227 } 228 #endif 229 continue; 230 } 231 if (laddr->ifa->ifa_addr->sa_family == 232 to->sa_family) { 233 /* see if it matches */ 234 struct sockaddr_in *intf_addr, *sin; 235 236 intf_addr = (struct sockaddr_in *) 237 laddr->ifa->ifa_addr; 238 sin = (struct sockaddr_in *)to; 239 if (from->sa_family == AF_INET) { 240 if (sin->sin_addr.s_addr == 241 intf_addr->sin_addr.s_addr) { 242 match = 1; 243 break; 244 } 245 } else { 246 struct sockaddr_in6 *intf_addr6; 247 struct sockaddr_in6 *sin6; 248 249 sin6 = (struct sockaddr_in6 *) 250 to; 251 intf_addr6 = (struct sockaddr_in6 *) 252 laddr->ifa->ifa_addr; 253 254 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr, 255 &intf_addr6->sin6_addr)) { 256 match = 1; 257 break; 258 } 259 } 260 } 261 } 262 if (match == 0) { 263 /* This endpoint does not have this address */ 264 SCTP_INP_RUNLOCK(inp); 265 continue; 266 } 267 } 268 /* 269 * Ok if we hit here the ep has the address, does it hold 270 * the tcb? 271 */ 272 273 stcb = LIST_FIRST(&inp->sctp_asoc_list); 274 if (stcb == NULL) { 275 SCTP_INP_RUNLOCK(inp); 276 continue; 277 } 278 SCTP_TCB_LOCK(stcb); 279 if (stcb->rport != rport) { 280 /* remote port does not match. */ 281 SCTP_TCB_UNLOCK(stcb); 282 SCTP_INP_RUNLOCK(inp); 283 continue; 284 } 285 /* Does this TCB have a matching address? */ 286 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 287 288 if (net->ro._l_addr.sa.sa_family != from->sa_family) { 289 /* not the same family, can't be a match */ 290 continue; 291 } 292 if (from->sa_family == AF_INET) { 293 struct sockaddr_in *sin, *rsin; 294 295 sin = (struct sockaddr_in *)&net->ro._l_addr; 296 rsin = (struct sockaddr_in *)from; 297 if (sin->sin_addr.s_addr == 298 rsin->sin_addr.s_addr) { 299 /* found it */ 300 if (netp != NULL) { 301 *netp = net; 302 } 303 /* Update the endpoint pointer */ 304 *inp_p = inp; 305 SCTP_INP_RUNLOCK(inp); 306 return (stcb); 307 } 308 } else { 309 struct sockaddr_in6 *sin6, *rsin6; 310 311 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 312 rsin6 = (struct sockaddr_in6 *)from; 313 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr, 314 &rsin6->sin6_addr)) { 315 /* found it */ 316 if (netp != NULL) { 317 *netp = net; 318 } 319 /* Update the endpoint pointer */ 320 *inp_p = inp; 321 SCTP_INP_RUNLOCK(inp); 322 return (stcb); 323 } 324 } 325 } 326 SCTP_TCB_UNLOCK(stcb); 327 SCTP_INP_RUNLOCK(inp); 328 } 329 return (NULL); 330 } 331 332 /* 333 * rules for use 334 * 335 * 1) If I return a NULL you must decrement any INP ref cnt. 2) If I find an 336 * stcb, both will be locked (locked_tcb and stcb) but decrement will be done 337 * (if locked == NULL). 3) Decrement happens on return ONLY if locked == 338 * NULL. 
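 *
 * A caller-side sketch of those rules (illustrative only; it assumes the
 * caller already holds a reference on the inp, e.g. one taken by
 * sctp_pcb_findep()):
 *
 *	stcb = sctp_findassociation_ep_addr(&inp, remote, &net, local, NULL);
 *	if (stcb == NULL)
 *		SCTP_INP_DECR_REF(inp);	   rule 1: caller drops its ref
 *	else
 *		...;			   rules 2/3: stcb comes back TCB-locked
 *					   and the inp ref was dropped for us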
339 */ 340 341 struct sctp_tcb * 342 sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote, 343 struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb) 344 { 345 struct sctpasochead *head; 346 struct sctp_inpcb *inp; 347 struct sctp_tcb *stcb; 348 struct sctp_nets *net; 349 uint16_t rport; 350 351 inp = *inp_p; 352 if (remote->sa_family == AF_INET) { 353 rport = (((struct sockaddr_in *)remote)->sin_port); 354 } else if (remote->sa_family == AF_INET6) { 355 rport = (((struct sockaddr_in6 *)remote)->sin6_port); 356 } else { 357 return (NULL); 358 } 359 if (locked_tcb) { 360 /* 361 * UN-lock so we can do proper locking here this occurs when 362 * called from load_addresses_from_init. 363 */ 364 SCTP_TCB_UNLOCK(locked_tcb); 365 } 366 SCTP_INP_INFO_RLOCK(); 367 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 368 /* 369 * Now either this guy is our listener or it's the 370 * connector. If it is the one that issued the connect, then 371 * it's only chance is to be the first TCB in the list. If 372 * it is the acceptor, then do the special_lookup to hash 373 * and find the real inp. 374 */ 375 if ((inp->sctp_socket) && (inp->sctp_socket->so_qlimit)) { 376 /* to is peer addr, from is my addr */ 377 stcb = sctp_tcb_special_locate(inp_p, remote, local, 378 netp); 379 if ((stcb != NULL) && (locked_tcb == NULL)) { 380 /* we have a locked tcb, lower refcount */ 381 SCTP_INP_WLOCK(inp); 382 SCTP_INP_DECR_REF(inp); 383 SCTP_INP_WUNLOCK(inp); 384 } 385 if ((locked_tcb != NULL) && (locked_tcb != stcb)) { 386 SCTP_INP_RLOCK(locked_tcb->sctp_ep); 387 SCTP_TCB_LOCK(locked_tcb); 388 SCTP_INP_RUNLOCK(locked_tcb->sctp_ep); 389 } 390 SCTP_INP_INFO_RUNLOCK(); 391 return (stcb); 392 } else { 393 SCTP_INP_WLOCK(inp); 394 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 395 goto null_return; 396 } 397 stcb = LIST_FIRST(&inp->sctp_asoc_list); 398 if (stcb == NULL) { 399 goto null_return; 400 } 401 SCTP_TCB_LOCK(stcb); 402 if (stcb->rport != rport) { 403 /* remote port does not match. 
*/ 404 SCTP_TCB_UNLOCK(stcb); 405 goto null_return; 406 } 407 /* now look at the list of remote addresses */ 408 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 409 #ifdef INVARIANTS 410 if (net == (TAILQ_NEXT(net, sctp_next))) { 411 panic("Corrupt net list"); 412 } 413 #endif 414 if (net->ro._l_addr.sa.sa_family != 415 remote->sa_family) { 416 /* not the same family */ 417 continue; 418 } 419 if (remote->sa_family == AF_INET) { 420 struct sockaddr_in *sin, *rsin; 421 422 sin = (struct sockaddr_in *) 423 &net->ro._l_addr; 424 rsin = (struct sockaddr_in *)remote; 425 if (sin->sin_addr.s_addr == 426 rsin->sin_addr.s_addr) { 427 /* found it */ 428 if (netp != NULL) { 429 *netp = net; 430 } 431 if (locked_tcb == NULL) { 432 SCTP_INP_DECR_REF(inp); 433 } else if (locked_tcb != stcb) { 434 SCTP_TCB_LOCK(locked_tcb); 435 } 436 SCTP_INP_WUNLOCK(inp); 437 SCTP_INP_INFO_RUNLOCK(); 438 return (stcb); 439 } 440 } else if (remote->sa_family == AF_INET6) { 441 struct sockaddr_in6 *sin6, *rsin6; 442 443 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 444 rsin6 = (struct sockaddr_in6 *)remote; 445 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr, 446 &rsin6->sin6_addr)) { 447 /* found it */ 448 if (netp != NULL) { 449 *netp = net; 450 } 451 if (locked_tcb == NULL) { 452 SCTP_INP_DECR_REF(inp); 453 } else if (locked_tcb != stcb) { 454 SCTP_TCB_LOCK(locked_tcb); 455 } 456 SCTP_INP_WUNLOCK(inp); 457 SCTP_INP_INFO_RUNLOCK(); 458 return (stcb); 459 } 460 } 461 } 462 SCTP_TCB_UNLOCK(stcb); 463 } 464 } else { 465 SCTP_INP_WLOCK(inp); 466 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 467 goto null_return; 468 } 469 head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport, 470 inp->sctp_hashmark)]; 471 if (head == NULL) { 472 goto null_return; 473 } 474 LIST_FOREACH(stcb, head, sctp_tcbhash) { 475 if (stcb->rport != rport) { 476 /* remote port does not match */ 477 continue; 478 } 479 /* now look at the list of remote addresses */ 480 SCTP_TCB_LOCK(stcb); 481 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 482 #ifdef INVARIANTS 483 if (net == (TAILQ_NEXT(net, sctp_next))) { 484 panic("Corrupt net list"); 485 } 486 #endif 487 if (net->ro._l_addr.sa.sa_family != 488 remote->sa_family) { 489 /* not the same family */ 490 continue; 491 } 492 if (remote->sa_family == AF_INET) { 493 struct sockaddr_in *sin, *rsin; 494 495 sin = (struct sockaddr_in *) 496 &net->ro._l_addr; 497 rsin = (struct sockaddr_in *)remote; 498 if (sin->sin_addr.s_addr == 499 rsin->sin_addr.s_addr) { 500 /* found it */ 501 if (netp != NULL) { 502 *netp = net; 503 } 504 if (locked_tcb == NULL) { 505 SCTP_INP_DECR_REF(inp); 506 } else if (locked_tcb != stcb) { 507 SCTP_TCB_LOCK(locked_tcb); 508 } 509 SCTP_INP_WUNLOCK(inp); 510 SCTP_INP_INFO_RUNLOCK(); 511 return (stcb); 512 } 513 } else if (remote->sa_family == AF_INET6) { 514 struct sockaddr_in6 *sin6, *rsin6; 515 516 sin6 = (struct sockaddr_in6 *) 517 &net->ro._l_addr; 518 rsin6 = (struct sockaddr_in6 *)remote; 519 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr, 520 &rsin6->sin6_addr)) { 521 /* found it */ 522 if (netp != NULL) { 523 *netp = net; 524 } 525 if (locked_tcb == NULL) { 526 SCTP_INP_DECR_REF(inp); 527 } else if (locked_tcb != stcb) { 528 SCTP_TCB_LOCK(locked_tcb); 529 } 530 SCTP_INP_WUNLOCK(inp); 531 SCTP_INP_INFO_RUNLOCK(); 532 return (stcb); 533 } 534 } 535 } 536 SCTP_TCB_UNLOCK(stcb); 537 } 538 } 539 null_return: 540 /* clean up for returning null */ 541 if (locked_tcb) { 542 SCTP_TCB_LOCK(locked_tcb); 543 } 544 SCTP_INP_WUNLOCK(inp); 545 SCTP_INP_INFO_RUNLOCK(); 546 /* not found */ 547 
return (NULL); 548 } 549 550 /* 551 * Find an association for a specific endpoint using the association id given 552 * out in the COMM_UP notification 553 */ 554 555 struct sctp_tcb * 556 sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock) 557 { 558 /* 559 * Use my the assoc_id to find a endpoint 560 */ 561 struct sctpasochead *head; 562 struct sctp_tcb *stcb; 563 uint32_t id; 564 565 if (asoc_id == 0 || inp == NULL) { 566 return (NULL); 567 } 568 SCTP_INP_INFO_RLOCK(); 569 id = (uint32_t) asoc_id; 570 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(id, 571 sctppcbinfo.hashasocmark)]; 572 if (head == NULL) { 573 /* invalid id TSNH */ 574 SCTP_INP_INFO_RUNLOCK(); 575 return (NULL); 576 } 577 LIST_FOREACH(stcb, head, sctp_asocs) { 578 SCTP_INP_RLOCK(stcb->sctp_ep); 579 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 580 SCTP_INP_RUNLOCK(stcb->sctp_ep); 581 SCTP_INP_INFO_RUNLOCK(); 582 return (NULL); 583 } 584 if (stcb->asoc.assoc_id == id) { 585 /* candidate */ 586 if (inp != stcb->sctp_ep) { 587 /* 588 * some other guy has the same id active (id 589 * collision ??). 590 */ 591 SCTP_INP_RUNLOCK(stcb->sctp_ep); 592 continue; 593 } 594 if (want_lock) { 595 SCTP_TCB_LOCK(stcb); 596 } 597 SCTP_INP_RUNLOCK(stcb->sctp_ep); 598 SCTP_INP_INFO_RUNLOCK(); 599 return (stcb); 600 } 601 SCTP_INP_RUNLOCK(stcb->sctp_ep); 602 } 603 /* Ok if we missed here, lets try the restart hash */ 604 head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(id, sctppcbinfo.hashrestartmark)]; 605 if (head == NULL) { 606 /* invalid id TSNH */ 607 SCTP_INP_INFO_RUNLOCK(); 608 return (NULL); 609 } 610 LIST_FOREACH(stcb, head, sctp_tcbrestarhash) { 611 SCTP_INP_RLOCK(stcb->sctp_ep); 612 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 613 SCTP_INP_RUNLOCK(stcb->sctp_ep); 614 SCTP_INP_INFO_RUNLOCK(); 615 return (NULL); 616 } 617 SCTP_TCB_LOCK(stcb); 618 SCTP_INP_RUNLOCK(stcb->sctp_ep); 619 if (stcb->asoc.assoc_id == id) { 620 /* candidate */ 621 if (inp != stcb->sctp_ep) { 622 /* 623 * some other guy has the same id active (id 624 * collision ??). 625 */ 626 SCTP_TCB_UNLOCK(stcb); 627 continue; 628 } 629 SCTP_INP_INFO_RUNLOCK(); 630 return (stcb); 631 } 632 SCTP_TCB_UNLOCK(stcb); 633 } 634 SCTP_INP_INFO_RUNLOCK(); 635 return (NULL); 636 } 637 638 639 static struct sctp_inpcb * 640 sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head, 641 uint16_t lport) 642 { 643 struct sctp_inpcb *inp; 644 struct sockaddr_in *sin; 645 struct sockaddr_in6 *sin6; 646 struct sctp_laddr *laddr; 647 648 /* 649 * Endpoing probe expects that the INP_INFO is locked. 
650 */ 651 if (nam->sa_family == AF_INET) { 652 sin = (struct sockaddr_in *)nam; 653 sin6 = NULL; 654 } else if (nam->sa_family == AF_INET6) { 655 sin6 = (struct sockaddr_in6 *)nam; 656 sin = NULL; 657 } else { 658 /* unsupported family */ 659 return (NULL); 660 } 661 if (head == NULL) 662 return (NULL); 663 LIST_FOREACH(inp, head, sctp_hash) { 664 SCTP_INP_RLOCK(inp); 665 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 666 SCTP_INP_RUNLOCK(inp); 667 continue; 668 } 669 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) && 670 (inp->sctp_lport == lport)) { 671 /* got it */ 672 if ((nam->sa_family == AF_INET) && 673 (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 674 (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY) 675 ) { 676 /* IPv4 on a IPv6 socket with ONLY IPv6 set */ 677 SCTP_INP_RUNLOCK(inp); 678 continue; 679 } 680 /* A V6 address and the endpoint is NOT bound V6 */ 681 if (nam->sa_family == AF_INET6 && 682 (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 683 SCTP_INP_RUNLOCK(inp); 684 continue; 685 } 686 SCTP_INP_RUNLOCK(inp); 687 return (inp); 688 } 689 SCTP_INP_RUNLOCK(inp); 690 } 691 692 if ((nam->sa_family == AF_INET) && 693 (sin->sin_addr.s_addr == INADDR_ANY)) { 694 /* Can't hunt for one that has no address specified */ 695 return (NULL); 696 } else if ((nam->sa_family == AF_INET6) && 697 (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) { 698 /* Can't hunt for one that has no address specified */ 699 return (NULL); 700 } 701 /* 702 * ok, not bound to all so see if we can find a EP bound to this 703 * address. 704 */ 705 LIST_FOREACH(inp, head, sctp_hash) { 706 SCTP_INP_RLOCK(inp); 707 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 708 SCTP_INP_RUNLOCK(inp); 709 continue; 710 } 711 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) { 712 SCTP_INP_RUNLOCK(inp); 713 continue; 714 } 715 /* 716 * Ok this could be a likely candidate, look at all of its 717 * addresses 718 */ 719 if (inp->sctp_lport != lport) { 720 SCTP_INP_RUNLOCK(inp); 721 continue; 722 } 723 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 724 if (laddr->ifa == NULL) { 725 #ifdef SCTP_DEBUG 726 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 727 printf("An ounce of prevention is worth a pound of cure\n"); 728 } 729 #endif 730 continue; 731 } 732 #ifdef SCTP_DEBUG 733 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 734 printf("Ok laddr->ifa:%p is possible, ", 735 laddr->ifa); 736 } 737 #endif 738 if (laddr->ifa->ifa_addr == NULL) { 739 #ifdef SCTP_DEBUG 740 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 741 printf("Huh IFA as an ifa_addr=NULL, "); 742 } 743 #endif 744 continue; 745 } 746 if (laddr->ifa->ifa_addr->sa_family == nam->sa_family) { 747 /* possible, see if it matches */ 748 struct sockaddr_in *intf_addr; 749 750 intf_addr = (struct sockaddr_in *) 751 laddr->ifa->ifa_addr; 752 if (nam->sa_family == AF_INET) { 753 if (sin->sin_addr.s_addr == 754 intf_addr->sin_addr.s_addr) { 755 SCTP_INP_RUNLOCK(inp); 756 return (inp); 757 } 758 } else if (nam->sa_family == AF_INET6) { 759 struct sockaddr_in6 *intf_addr6; 760 761 intf_addr6 = (struct sockaddr_in6 *) 762 laddr->ifa->ifa_addr; 763 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr, 764 &intf_addr6->sin6_addr)) { 765 SCTP_INP_RUNLOCK(inp); 766 return (inp); 767 } 768 } 769 } 770 } 771 SCTP_INP_RUNLOCK(inp); 772 } 773 return (NULL); 774 } 775 776 777 struct sctp_inpcb * 778 sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock) 779 { 780 /* 781 * First we check the hash table to see if someone has this port 782 * bound with just the port. 
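	 *
	 * Descriptive note: sctp_endpoint_probe() below makes two passes over
	 * the selected bucket. The first pass only accepts endpoints that are
	 * bound to all addresses (SCTP_PCB_FLAGS_BOUNDALL) with a matching
	 * local port; the second pass, skipped when the address is INADDR_ANY
	 * or the unspecified v6 address, walks each remaining endpoint's
	 * sctp_addr_list looking for an exact address match.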
783 */ 784 struct sctp_inpcb *inp; 785 struct sctppcbhead *head; 786 struct sockaddr_in *sin; 787 struct sockaddr_in6 *sin6; 788 int lport; 789 790 if (nam->sa_family == AF_INET) { 791 sin = (struct sockaddr_in *)nam; 792 lport = ((struct sockaddr_in *)nam)->sin_port; 793 } else if (nam->sa_family == AF_INET6) { 794 sin6 = (struct sockaddr_in6 *)nam; 795 lport = ((struct sockaddr_in6 *)nam)->sin6_port; 796 } else { 797 /* unsupported family */ 798 return (NULL); 799 } 800 /* 801 * I could cheat here and just cast to one of the types but we will 802 * do it right. It also provides the check against an Unsupported 803 * type too. 804 */ 805 /* Find the head of the ALLADDR chain */ 806 if (have_lock == 0) { 807 SCTP_INP_INFO_RLOCK(); 808 809 } 810 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport, 811 sctppcbinfo.hashmark)]; 812 inp = sctp_endpoint_probe(nam, head, lport); 813 814 /* 815 * If the TCP model exists it could be that the main listening 816 * endpoint is gone but there exists a connected socket for this guy 817 * yet. If so we can return the first one that we find. This may NOT 818 * be the correct one but the sctp_findassociation_ep_addr has 819 * further code to look at all TCP models. 820 */ 821 if (inp == NULL && find_tcp_pool) { 822 unsigned int i; 823 824 for (i = 0; i < sctppcbinfo.hashtblsize; i++) { 825 /* 826 * This is real gross, but we do NOT have a remote 827 * port at this point depending on who is calling. 828 * We must therefore look for ANY one that matches 829 * our local port :/ 830 */ 831 head = &sctppcbinfo.sctp_tcpephash[i]; 832 if (LIST_FIRST(head)) { 833 inp = sctp_endpoint_probe(nam, head, lport); 834 if (inp) { 835 /* Found one */ 836 break; 837 } 838 } 839 } 840 } 841 if (inp) { 842 SCTP_INP_INCR_REF(inp); 843 } 844 if (have_lock == 0) { 845 SCTP_INP_INFO_RUNLOCK(); 846 } 847 return (inp); 848 } 849 850 /* 851 * Find an association for an endpoint with the pointer to whom you want to 852 * send to and the endpoint pointer. The address can be IPv4 or IPv6. We may 853 * need to change the *to to some other struct like a mbuf... 854 */ 855 struct sctp_tcb * 856 sctp_findassociation_addr_sa(struct sockaddr *to, struct sockaddr *from, 857 struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool) 858 { 859 struct sctp_inpcb *inp; 860 struct sctp_tcb *retval; 861 862 SCTP_INP_INFO_RLOCK(); 863 if (find_tcp_pool) { 864 if (inp_p != NULL) { 865 retval = sctp_tcb_special_locate(inp_p, from, to, netp); 866 } else { 867 retval = sctp_tcb_special_locate(&inp, from, to, netp); 868 } 869 if (retval != NULL) { 870 SCTP_INP_INFO_RUNLOCK(); 871 return (retval); 872 } 873 } 874 inp = sctp_pcb_findep(to, 0, 1); 875 if (inp_p != NULL) { 876 *inp_p = inp; 877 } 878 SCTP_INP_INFO_RUNLOCK(); 879 880 if (inp == NULL) { 881 return (NULL); 882 } 883 /* 884 * ok, we have an endpoint, now lets find the assoc for it (if any) 885 * we now place the source address or from in the to of the find 886 * endpoint call. Since in reality this chain is used from the 887 * inbound packet side. 888 */ 889 if (inp_p != NULL) { 890 retval = sctp_findassociation_ep_addr(inp_p, from, netp, to, NULL); 891 } else { 892 retval = sctp_findassociation_ep_addr(&inp, from, netp, to, NULL); 893 } 894 return retval; 895 } 896 897 898 /* 899 * This routine will grub through the mbuf that is a INIT or INIT-ACK and 900 * find all addresses that the sender has specified in any address list. Each 901 * address will be used to lookup the TCB and see if one exits. 
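 *
 * Descriptive note: the parameters are walked as a TLV list with
 * sctp_get_next_param(), roughly
 *
 *	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
 *	while (phdr != NULL) {
 *		...check SCTP_IPV4_ADDRESS / SCTP_IPV6_ADDRESS...
 *		offset += SCTP_SIZE32(plen);	parameters are 4-byte padded
 *		phdr = sctp_get_next_param(m, offset, &parm_buf,
 *		    sizeof(parm_buf));
 *	}
 *
 * Only address parameters of exactly the expected size are tried; a zero
 * parameter length terminates the walk.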
902 */ 903 static struct sctp_tcb * 904 sctp_findassociation_special_addr(struct mbuf *m, int iphlen, int offset, 905 struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp, 906 struct sockaddr *dest) 907 { 908 struct sockaddr_in sin4; 909 struct sockaddr_in6 sin6; 910 struct sctp_paramhdr *phdr, parm_buf; 911 struct sctp_tcb *retval; 912 uint32_t ptype, plen; 913 914 memset(&sin4, 0, sizeof(sin4)); 915 memset(&sin6, 0, sizeof(sin6)); 916 sin4.sin_len = sizeof(sin4); 917 sin4.sin_family = AF_INET; 918 sin4.sin_port = sh->src_port; 919 sin6.sin6_len = sizeof(sin6); 920 sin6.sin6_family = AF_INET6; 921 sin6.sin6_port = sh->src_port; 922 923 retval = NULL; 924 offset += sizeof(struct sctp_init_chunk); 925 926 phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf)); 927 while (phdr != NULL) { 928 /* now we must see if we want the parameter */ 929 ptype = ntohs(phdr->param_type); 930 plen = ntohs(phdr->param_length); 931 if (plen == 0) { 932 break; 933 } 934 if (ptype == SCTP_IPV4_ADDRESS && 935 plen == sizeof(struct sctp_ipv4addr_param)) { 936 /* Get the rest of the address */ 937 struct sctp_ipv4addr_param ip4_parm, *p4; 938 939 phdr = sctp_get_next_param(m, offset, 940 (struct sctp_paramhdr *)&ip4_parm, plen); 941 if (phdr == NULL) { 942 return (NULL); 943 } 944 p4 = (struct sctp_ipv4addr_param *)phdr; 945 memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr)); 946 /* look it up */ 947 retval = sctp_findassociation_ep_addr(inp_p, 948 (struct sockaddr *)&sin4, netp, dest, NULL); 949 if (retval != NULL) { 950 return (retval); 951 } 952 } else if (ptype == SCTP_IPV6_ADDRESS && 953 plen == sizeof(struct sctp_ipv6addr_param)) { 954 /* Get the rest of the address */ 955 struct sctp_ipv6addr_param ip6_parm, *p6; 956 957 phdr = sctp_get_next_param(m, offset, 958 (struct sctp_paramhdr *)&ip6_parm, plen); 959 if (phdr == NULL) { 960 return (NULL); 961 } 962 p6 = (struct sctp_ipv6addr_param *)phdr; 963 memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr)); 964 /* look it up */ 965 retval = sctp_findassociation_ep_addr(inp_p, 966 (struct sockaddr *)&sin6, netp, dest, NULL); 967 if (retval != NULL) { 968 return (retval); 969 } 970 } 971 offset += SCTP_SIZE32(plen); 972 phdr = sctp_get_next_param(m, offset, &parm_buf, 973 sizeof(parm_buf)); 974 } 975 return (NULL); 976 } 977 978 979 static struct sctp_tcb * 980 sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag, 981 struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport, 982 uint16_t lport, int skip_src_check) 983 { 984 /* 985 * Use my vtag to hash. If we find it we then verify the source addr 986 * is in the assoc. If all goes well we save a bit on rec of a 987 * packet. 988 */ 989 struct sctpasochead *head; 990 struct sctp_nets *net; 991 struct sctp_tcb *stcb; 992 993 *netp = NULL; 994 *inp_p = NULL; 995 SCTP_INP_INFO_RLOCK(); 996 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag, 997 sctppcbinfo.hashasocmark)]; 998 if (head == NULL) { 999 /* invalid vtag */ 1000 SCTP_INP_INFO_RUNLOCK(); 1001 return (NULL); 1002 } 1003 LIST_FOREACH(stcb, head, sctp_asocs) { 1004 SCTP_INP_RLOCK(stcb->sctp_ep); 1005 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1006 SCTP_INP_RUNLOCK(stcb->sctp_ep); 1007 SCTP_INP_INFO_RUNLOCK(); 1008 return (NULL); 1009 } 1010 SCTP_TCB_LOCK(stcb); 1011 SCTP_INP_RUNLOCK(stcb->sctp_ep); 1012 if (stcb->asoc.my_vtag == vtag) { 1013 /* candidate */ 1014 if (stcb->rport != rport) { 1015 /* 1016 * we could remove this if vtags are unique 1017 * across the system. 
1018 */ 1019 SCTP_TCB_UNLOCK(stcb); 1020 continue; 1021 } 1022 if (stcb->sctp_ep->sctp_lport != lport) { 1023 /* 1024 * we could remove this if vtags are unique 1025 * across the system. 1026 */ 1027 SCTP_TCB_UNLOCK(stcb); 1028 continue; 1029 } 1030 if (skip_src_check) { 1031 *netp = NULL; /* unknown */ 1032 *inp_p = stcb->sctp_ep; 1033 SCTP_INP_INFO_RUNLOCK(); 1034 return (stcb); 1035 } 1036 net = sctp_findnet(stcb, from); 1037 if (net) { 1038 /* yep its him. */ 1039 *netp = net; 1040 SCTP_STAT_INCR(sctps_vtagexpress); 1041 *inp_p = stcb->sctp_ep; 1042 SCTP_INP_INFO_RUNLOCK(); 1043 return (stcb); 1044 } else { 1045 /* 1046 * not him, this should only happen in rare 1047 * cases so I peg it. 1048 */ 1049 SCTP_STAT_INCR(sctps_vtagbogus); 1050 } 1051 } 1052 SCTP_TCB_UNLOCK(stcb); 1053 } 1054 SCTP_INP_INFO_RUNLOCK(); 1055 return (NULL); 1056 } 1057 1058 /* 1059 * Find an association with the pointer to the inbound IP packet. This can be 1060 * a IPv4 or IPv6 packet. 1061 */ 1062 struct sctp_tcb * 1063 sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset, 1064 struct sctphdr *sh, struct sctp_chunkhdr *ch, 1065 struct sctp_inpcb **inp_p, struct sctp_nets **netp) 1066 { 1067 int find_tcp_pool; 1068 struct ip *iph; 1069 struct sctp_tcb *retval; 1070 struct sockaddr_storage to_store, from_store; 1071 struct sockaddr *to = (struct sockaddr *)&to_store; 1072 struct sockaddr *from = (struct sockaddr *)&from_store; 1073 struct sctp_inpcb *inp; 1074 1075 1076 iph = mtod(m, struct ip *); 1077 if (iph->ip_v == IPVERSION) { 1078 /* its IPv4 */ 1079 struct sockaddr_in *from4; 1080 1081 from4 = (struct sockaddr_in *)&from_store; 1082 bzero(from4, sizeof(*from4)); 1083 from4->sin_family = AF_INET; 1084 from4->sin_len = sizeof(struct sockaddr_in); 1085 from4->sin_addr.s_addr = iph->ip_src.s_addr; 1086 from4->sin_port = sh->src_port; 1087 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 1088 /* its IPv6 */ 1089 struct ip6_hdr *ip6; 1090 struct sockaddr_in6 *from6; 1091 1092 ip6 = mtod(m, struct ip6_hdr *); 1093 from6 = (struct sockaddr_in6 *)&from_store; 1094 bzero(from6, sizeof(*from6)); 1095 from6->sin6_family = AF_INET6; 1096 from6->sin6_len = sizeof(struct sockaddr_in6); 1097 from6->sin6_addr = ip6->ip6_src; 1098 from6->sin6_port = sh->src_port; 1099 /* Get the scopes in properly to the sin6 addr's */ 1100 /* we probably don't need these operations */ 1101 (void)sa6_recoverscope(from6); 1102 sa6_embedscope(from6, ip6_use_defzone); 1103 } else { 1104 /* Currently not supported. 
*/ 1105 return (NULL); 1106 } 1107 if (sh->v_tag) { 1108 /* we only go down this path if vtag is non-zero */ 1109 retval = sctp_findassoc_by_vtag(from, ntohl(sh->v_tag), 1110 inp_p, netp, sh->src_port, sh->dest_port, 0); 1111 if (retval) { 1112 return (retval); 1113 } 1114 } 1115 if (iph->ip_v == IPVERSION) { 1116 /* its IPv4 */ 1117 struct sockaddr_in *to4; 1118 1119 to4 = (struct sockaddr_in *)&to_store; 1120 bzero(to4, sizeof(*to4)); 1121 to4->sin_family = AF_INET; 1122 to4->sin_len = sizeof(struct sockaddr_in); 1123 to4->sin_addr.s_addr = iph->ip_dst.s_addr; 1124 to4->sin_port = sh->dest_port; 1125 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 1126 /* its IPv6 */ 1127 struct ip6_hdr *ip6; 1128 struct sockaddr_in6 *to6; 1129 1130 ip6 = mtod(m, struct ip6_hdr *); 1131 to6 = (struct sockaddr_in6 *)&to_store; 1132 bzero(to6, sizeof(*to6)); 1133 to6->sin6_family = AF_INET6; 1134 to6->sin6_len = sizeof(struct sockaddr_in6); 1135 to6->sin6_addr = ip6->ip6_dst; 1136 to6->sin6_port = sh->dest_port; 1137 /* Get the scopes in properly to the sin6 addr's */ 1138 /* we probably don't need these operations */ 1139 (void)sa6_recoverscope(to6); 1140 sa6_embedscope(to6, ip6_use_defzone); 1141 } 1142 find_tcp_pool = 0; 1143 /* 1144 * FIX FIX?, I think we only need to look in the TCP pool if its an 1145 * INIT or COOKIE-ECHO, We really don't need to find it that way if 1146 * its a INIT-ACK or COOKIE_ACK since these in bot one-2-one and 1147 * one-2-N would be in the main pool anyway. 1148 */ 1149 if ((ch->chunk_type != SCTP_INITIATION) && 1150 (ch->chunk_type != SCTP_INITIATION_ACK) && 1151 (ch->chunk_type != SCTP_COOKIE_ACK) && 1152 (ch->chunk_type != SCTP_COOKIE_ECHO)) { 1153 /* Other chunk types go to the tcp pool. */ 1154 find_tcp_pool = 1; 1155 } 1156 if (inp_p) { 1157 retval = sctp_findassociation_addr_sa(to, from, inp_p, netp, 1158 find_tcp_pool); 1159 inp = *inp_p; 1160 } else { 1161 retval = sctp_findassociation_addr_sa(to, from, &inp, netp, 1162 find_tcp_pool); 1163 } 1164 #ifdef SCTP_DEBUG 1165 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 1166 printf("retval:%p inp:%p\n", retval, inp); 1167 } 1168 #endif 1169 if (retval == NULL && inp) { 1170 /* Found a EP but not this address */ 1171 if ((ch->chunk_type == SCTP_INITIATION) || 1172 (ch->chunk_type == SCTP_INITIATION_ACK)) { 1173 /* 1174 * special hook, we do NOT return linp or an 1175 * association that is linked to an existing 1176 * association that is under the TCP pool (i.e. no 1177 * listener exists). The endpoint finding routine 1178 * will always find a listner before examining the 1179 * TCP pool. 1180 */ 1181 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { 1182 if (inp_p) { 1183 *inp_p = NULL; 1184 } 1185 return (NULL); 1186 } 1187 retval = sctp_findassociation_special_addr(m, iphlen, 1188 offset, sh, &inp, netp, to); 1189 if (inp_p != NULL) { 1190 *inp_p = inp; 1191 } 1192 } 1193 } 1194 #ifdef SCTP_DEBUG 1195 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 1196 printf("retval is %p\n", retval); 1197 } 1198 #endif 1199 return (retval); 1200 } 1201 1202 /* 1203 * lookup an association by an ASCONF lookup address. 
1204 * if the lookup address is 0.0.0.0 or ::0, use the vtag to do the lookup 1205 */ 1206 struct sctp_tcb * 1207 sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset, 1208 struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp) 1209 { 1210 struct sctp_tcb *stcb; 1211 struct sockaddr_in *sin; 1212 struct sockaddr_in6 *sin6; 1213 struct sockaddr_storage local_store, remote_store; 1214 struct ip *iph; 1215 struct sctp_paramhdr parm_buf, *phdr; 1216 int ptype; 1217 int zero_address = 0; 1218 1219 1220 memset(&local_store, 0, sizeof(local_store)); 1221 memset(&remote_store, 0, sizeof(remote_store)); 1222 1223 /* First get the destination address setup too. */ 1224 iph = mtod(m, struct ip *); 1225 if (iph->ip_v == IPVERSION) { 1226 /* its IPv4 */ 1227 sin = (struct sockaddr_in *)&local_store; 1228 sin->sin_family = AF_INET; 1229 sin->sin_len = sizeof(*sin); 1230 sin->sin_port = sh->dest_port; 1231 sin->sin_addr.s_addr = iph->ip_dst.s_addr; 1232 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 1233 /* its IPv6 */ 1234 struct ip6_hdr *ip6; 1235 1236 ip6 = mtod(m, struct ip6_hdr *); 1237 sin6 = (struct sockaddr_in6 *)&local_store; 1238 sin6->sin6_family = AF_INET6; 1239 sin6->sin6_len = sizeof(*sin6); 1240 sin6->sin6_port = sh->dest_port; 1241 sin6->sin6_addr = ip6->ip6_dst; 1242 } else { 1243 return NULL; 1244 } 1245 1246 phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk), 1247 &parm_buf, sizeof(struct sctp_paramhdr)); 1248 if (phdr == NULL) { 1249 #ifdef SCTP_DEBUG 1250 if (sctp_debug_on & SCTP_DEBUG_INPUT3) { 1251 printf("findassociation_ep_asconf: failed to get asconf lookup addr\n"); 1252 } 1253 #endif /* SCTP_DEBUG */ 1254 return NULL; 1255 } 1256 ptype = (int)((uint32_t) ntohs(phdr->param_type)); 1257 /* get the correlation address */ 1258 if (ptype == SCTP_IPV6_ADDRESS) { 1259 /* ipv6 address param */ 1260 struct sctp_ipv6addr_param *p6, p6_buf; 1261 1262 if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) { 1263 return NULL; 1264 } 1265 p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m, 1266 offset + sizeof(struct sctp_asconf_chunk), 1267 &p6_buf.ph, sizeof(*p6)); 1268 if (p6 == NULL) { 1269 #ifdef SCTP_DEBUG 1270 if (sctp_debug_on & SCTP_DEBUG_INPUT3) { 1271 printf("findassociation_ep_asconf: failed to get asconf v6 lookup addr\n"); 1272 } 1273 #endif /* SCTP_DEBUG */ 1274 return (NULL); 1275 } 1276 sin6 = (struct sockaddr_in6 *)&remote_store; 1277 sin6->sin6_family = AF_INET6; 1278 sin6->sin6_len = sizeof(*sin6); 1279 sin6->sin6_port = sh->src_port; 1280 memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr)); 1281 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) 1282 zero_address = 1; 1283 } else if (ptype == SCTP_IPV4_ADDRESS) { 1284 /* ipv4 address param */ 1285 struct sctp_ipv4addr_param *p4, p4_buf; 1286 1287 if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) { 1288 return NULL; 1289 } 1290 p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m, 1291 offset + sizeof(struct sctp_asconf_chunk), 1292 &p4_buf.ph, sizeof(*p4)); 1293 if (p4 == NULL) { 1294 #ifdef SCTP_DEBUG 1295 if (sctp_debug_on & SCTP_DEBUG_INPUT3) { 1296 printf("findassociation_ep_asconf: failed to get asconf v4 lookup addr\n"); 1297 } 1298 #endif /* SCTP_DEBUG */ 1299 return (NULL); 1300 } 1301 sin = (struct sockaddr_in *)&remote_store; 1302 sin->sin_family = AF_INET; 1303 sin->sin_len = sizeof(*sin); 1304 sin->sin_port = sh->src_port; 1305 memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr)); 1306 if 
	    (sin->sin_addr.s_addr == INADDR_ANY)
		zero_address = 1;
	} else {
		/* invalid address param type */
		return NULL;
	}

	if (zero_address) {
		stcb = sctp_findassoc_by_vtag(NULL, ntohl(sh->v_tag), inp_p,
		    netp, sh->src_port, sh->dest_port, 1);
		/*
		 * printf("findassociation_ep_asconf: zero lookup address
		 * finds stcb 0x%x\n", (uint32_t)stcb);
		 */
	} else {
		stcb = sctp_findassociation_ep_addr(inp_p,
		    (struct sockaddr *)&remote_store, netp,
		    (struct sockaddr *)&local_store, NULL);
	}
	return (stcb);
}


extern int sctp_max_burst_default;

extern unsigned int sctp_delayed_sack_time_default;
extern unsigned int sctp_heartbeat_interval_default;
extern unsigned int sctp_pmtu_raise_time_default;
extern unsigned int sctp_shutdown_guard_time_default;
extern unsigned int sctp_secret_lifetime_default;

extern unsigned int sctp_rto_max_default;
extern unsigned int sctp_rto_min_default;
extern unsigned int sctp_rto_initial_default;
extern unsigned int sctp_init_rto_max_default;
extern unsigned int sctp_valid_cookie_life_default;
extern unsigned int sctp_init_rtx_max_default;
extern unsigned int sctp_assoc_rtx_max_default;
extern unsigned int sctp_path_rtx_max_default;
extern unsigned int sctp_nr_outgoing_streams_default;

/*
 * allocate an sctp_inpcb and set up a temporary binding to a port/all
 * addresses. This way, if we don't get a bind, we by default pick an
 * ephemeral port with all addresses bound.
 */
int
sctp_inpcb_alloc(struct socket *so)
{
	/*
	 * we get called when a new endpoint starts up. We need to allocate
	 * the sctp_inpcb structure from the zone and init it. Mark it as
	 * unbound and find a port that we can use as an ephemeral with
	 * INADDR_ANY. If the user binds later, no problem, we can then add
	 * in the specific addresses. And set up the default parameters for
	 * the EP.
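	 *
	 * In outline (a summary of the code below): grab an inpcb from the
	 * zone and zero it, wire up the socket pointers, choose UDP-style or
	 * TCP-style flags from so->so_type, hashinit() the per-endpoint TCB
	 * hash, then fill in the sctp_ep defaults (timer ticks, RTO bounds,
	 * retransmit limits, max burst, stream counts), seed the random
	 * store used for cookie signatures, and install the default HMAC
	 * list plus the NULL shared key as key id 0.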
1362 */ 1363 int i, error; 1364 struct sctp_inpcb *inp; 1365 1366 struct sctp_pcb *m; 1367 struct timeval time; 1368 sctp_sharedkey_t *null_key; 1369 1370 error = 0; 1371 1372 SCTP_INP_INFO_WLOCK(); 1373 inp = (struct sctp_inpcb *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_ep); 1374 if (inp == NULL) { 1375 printf("Out of SCTP-INPCB structures - no resources\n"); 1376 SCTP_INP_INFO_WUNLOCK(); 1377 return (ENOBUFS); 1378 } 1379 /* zap it */ 1380 bzero(inp, sizeof(*inp)); 1381 1382 /* bump generations */ 1383 /* setup socket pointers */ 1384 inp->sctp_socket = so; 1385 inp->ip_inp.inp.inp_socket = so; 1386 1387 inp->partial_delivery_point = so->so_rcv.sb_hiwat >> SCTP_PARTIAL_DELIVERY_SHIFT; 1388 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 1389 1390 #ifdef IPSEC 1391 { 1392 struct inpcbpolicy *pcb_sp = NULL; 1393 1394 error = ipsec_init_pcbpolicy(so, &pcb_sp); 1395 /* Arrange to share the policy */ 1396 inp->ip_inp.inp.inp_sp = pcb_sp; 1397 ((struct in6pcb *)(&inp->ip_inp.inp))->in6p_sp = pcb_sp; 1398 } 1399 if (error != 0) { 1400 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp); 1401 SCTP_INP_INFO_WUNLOCK(); 1402 return error; 1403 } 1404 #endif /* IPSEC */ 1405 SCTP_INCR_EP_COUNT(); 1406 inp->ip_inp.inp.inp_ip_ttl = ip_defttl; 1407 SCTP_INP_INFO_WUNLOCK(); 1408 1409 so->so_pcb = (caddr_t)inp; 1410 1411 if ((so->so_type == SOCK_DGRAM) || 1412 (so->so_type == SOCK_SEQPACKET)) { 1413 /* UDP style socket */ 1414 inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE | 1415 SCTP_PCB_FLAGS_UNBOUND); 1416 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 1417 /* Be sure it is NON-BLOCKING IO for UDP */ 1418 /* so->so_state |= SS_NBIO; */ 1419 } else if (so->so_type == SOCK_STREAM) { 1420 /* TCP style socket */ 1421 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 1422 SCTP_PCB_FLAGS_UNBOUND); 1423 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT); 1424 /* Be sure we have blocking IO by default */ 1425 so->so_state &= ~SS_NBIO; 1426 } else { 1427 /* 1428 * unsupported socket type (RAW, etc)- in case we missed it 1429 * in protosw 1430 */ 1431 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp); 1432 return (EOPNOTSUPP); 1433 } 1434 inp->sctp_tcbhash = hashinit(sctp_pcbtblsize, 1435 M_PCB, 1436 &inp->sctp_hashmark); 1437 if (inp->sctp_tcbhash == NULL) { 1438 printf("Out of SCTP-INPCB->hashinit - no resources\n"); 1439 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp); 1440 return (ENOBUFS); 1441 } 1442 SCTP_INP_INFO_WLOCK(); 1443 SCTP_INP_LOCK_INIT(inp); 1444 SCTP_INP_READ_INIT(inp); 1445 SCTP_ASOC_CREATE_LOCK_INIT(inp); 1446 /* lock the new ep */ 1447 SCTP_INP_WLOCK(inp); 1448 1449 /* add it to the info area */ 1450 LIST_INSERT_HEAD(&sctppcbinfo.listhead, inp, sctp_list); 1451 SCTP_INP_INFO_WUNLOCK(); 1452 1453 TAILQ_INIT(&inp->read_queue); 1454 LIST_INIT(&inp->sctp_addr_list); 1455 LIST_INIT(&inp->sctp_asoc_list); 1456 1457 #ifdef SCTP_TRACK_FREED_ASOCS 1458 /* TEMP CODE */ 1459 LIST_INIT(&inp->sctp_asoc_free_list); 1460 #endif 1461 /* Init the timer structure for signature change */ 1462 SCTP_OS_TIMER_INIT(&inp->sctp_ep.signature_change.timer); 1463 inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE; 1464 1465 /* now init the actual endpoint default data */ 1466 m = &inp->sctp_ep; 1467 1468 /* setup the base timeout information */ 1469 m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */ 1470 m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? 
	 */
	m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sctp_delayed_sack_time_default);
	m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(sctp_heartbeat_interval_default);
	m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(sctp_pmtu_raise_time_default);
	m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(sctp_shutdown_guard_time_default);
	m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(sctp_secret_lifetime_default);
	/* all RTO max/min/initial values are in ms */
	m->sctp_maxrto = sctp_rto_max_default;
	m->sctp_minrto = sctp_rto_min_default;
	m->initial_rto = sctp_rto_initial_default;
	m->initial_init_rto_max = sctp_init_rto_max_default;

	m->max_open_streams_intome = MAX_SCTP_STREAMS;

	m->max_init_times = sctp_init_rtx_max_default;
	m->max_send_times = sctp_assoc_rtx_max_default;
	m->def_net_failure = sctp_path_rtx_max_default;
	m->sctp_sws_sender = SCTP_SWS_SENDER_DEF;
	m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF;
	m->max_burst = sctp_max_burst_default;
	/* number of streams to pre-open on an association */
	m->pre_open_stream_count = sctp_nr_outgoing_streams_default;

	/* Add adaptation cookie */
	m->adaptation_layer_indicator = 0x504C5253;

	/* seed random number generator */
	m->random_counter = 1;
	m->store_at = SCTP_SIGNATURE_SIZE;
	SCTP_READ_RANDOM(m->random_numbers, sizeof(m->random_numbers));
	sctp_fill_random_store(m);

	/* Minimum cookie size */
	m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) +
	    sizeof(struct sctp_state_cookie);
	m->size_of_a_cookie += SCTP_SIGNATURE_SIZE;

	/* Setup the initial secret */
	SCTP_GETTIME_TIMEVAL(&time);
	m->time_of_secret_change = time.tv_sec;

	for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
		m->secret_key[0][i] = sctp_select_initial_TSN(m);
	}
	sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);

	/* How long is a cookie good for ?
*/ 1517 m->def_cookie_life = sctp_valid_cookie_life_default; 1518 1519 /* 1520 * Initialize authentication parameters 1521 */ 1522 m->local_hmacs = sctp_default_supported_hmaclist(); 1523 m->local_auth_chunks = sctp_alloc_chunklist(); 1524 sctp_auth_set_default_chunks(m->local_auth_chunks); 1525 LIST_INIT(&m->shared_keys); 1526 /* add default NULL key as key id 0 */ 1527 null_key = sctp_alloc_sharedkey(); 1528 sctp_insert_sharedkey(&m->shared_keys, null_key); 1529 SCTP_INP_WUNLOCK(inp); 1530 #ifdef SCTP_LOG_CLOSING 1531 sctp_log_closing(inp, NULL, 12); 1532 #endif 1533 return (error); 1534 } 1535 1536 1537 void 1538 sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp, 1539 struct sctp_tcb *stcb) 1540 { 1541 struct sctp_nets *net; 1542 uint16_t lport, rport; 1543 struct sctppcbhead *head; 1544 struct sctp_laddr *laddr, *oladdr; 1545 1546 SCTP_TCB_UNLOCK(stcb); 1547 SCTP_INP_INFO_WLOCK(); 1548 SCTP_INP_WLOCK(old_inp); 1549 SCTP_INP_WLOCK(new_inp); 1550 SCTP_TCB_LOCK(stcb); 1551 1552 new_inp->sctp_ep.time_of_secret_change = 1553 old_inp->sctp_ep.time_of_secret_change; 1554 memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key, 1555 sizeof(old_inp->sctp_ep.secret_key)); 1556 new_inp->sctp_ep.current_secret_number = 1557 old_inp->sctp_ep.current_secret_number; 1558 new_inp->sctp_ep.last_secret_number = 1559 old_inp->sctp_ep.last_secret_number; 1560 new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie; 1561 1562 /* make it so new data pours into the new socket */ 1563 stcb->sctp_socket = new_inp->sctp_socket; 1564 stcb->sctp_ep = new_inp; 1565 1566 /* Copy the port across */ 1567 lport = new_inp->sctp_lport = old_inp->sctp_lport; 1568 rport = stcb->rport; 1569 /* Pull the tcb from the old association */ 1570 LIST_REMOVE(stcb, sctp_tcbhash); 1571 LIST_REMOVE(stcb, sctp_tcblist); 1572 1573 /* Now insert the new_inp into the TCP connected hash */ 1574 head = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR((lport + rport), 1575 sctppcbinfo.hashtcpmark)]; 1576 1577 LIST_INSERT_HEAD(head, new_inp, sctp_hash); 1578 1579 /* Now move the tcb into the endpoint list */ 1580 LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist); 1581 /* 1582 * Question, do we even need to worry about the ep-hash since we 1583 * only have one connection? Probably not :> so lets get rid of it 1584 * and not suck up any kernel memory in that. 1585 */ 1586 1587 /* Ok. Let's restart timer. */ 1588 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1589 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, new_inp, 1590 stcb, net); 1591 } 1592 1593 SCTP_INP_INFO_WUNLOCK(); 1594 if (new_inp->sctp_tcbhash != NULL) { 1595 SCTP_FREE(new_inp->sctp_tcbhash); 1596 new_inp->sctp_tcbhash = NULL; 1597 } 1598 if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 1599 /* Subset bound, so copy in the laddr list from the old_inp */ 1600 LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) { 1601 laddr = (struct sctp_laddr *)SCTP_ZONE_GET( 1602 sctppcbinfo.ipi_zone_laddr); 1603 if (laddr == NULL) { 1604 /* 1605 * Gak, what can we do? This assoc is really 1606 * HOSED. We probably should send an abort 1607 * here. 
1608 */ 1609 #ifdef SCTP_DEBUG 1610 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 1611 printf("Association hosed in TCP model, out of laddr memory\n"); 1612 } 1613 #endif /* SCTP_DEBUG */ 1614 continue; 1615 } 1616 SCTP_INCR_LADDR_COUNT(); 1617 bzero(laddr, sizeof(*laddr)); 1618 laddr->ifa = oladdr->ifa; 1619 LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr, 1620 sctp_nxt_addr); 1621 new_inp->laddr_count++; 1622 } 1623 } 1624 /* 1625 * Now any running timers need to be adjusted since we really don't 1626 * care if they are running or not just blast in the new_inp into 1627 * all of them. 1628 */ 1629 1630 stcb->asoc.hb_timer.ep = (void *)new_inp; 1631 stcb->asoc.dack_timer.ep = (void *)new_inp; 1632 stcb->asoc.asconf_timer.ep = (void *)new_inp; 1633 stcb->asoc.strreset_timer.ep = (void *)new_inp; 1634 stcb->asoc.shut_guard_timer.ep = (void *)new_inp; 1635 stcb->asoc.autoclose_timer.ep = (void *)new_inp; 1636 stcb->asoc.delayed_event_timer.ep = (void *)new_inp; 1637 /* now what about the nets? */ 1638 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1639 net->pmtu_timer.ep = (void *)new_inp; 1640 net->rxt_timer.ep = (void *)new_inp; 1641 net->fr_timer.ep = (void *)new_inp; 1642 } 1643 SCTP_INP_WUNLOCK(new_inp); 1644 SCTP_INP_WUNLOCK(old_inp); 1645 } 1646 1647 static int 1648 sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport) 1649 { 1650 struct sctppcbhead *head; 1651 struct sctp_inpcb *t_inp; 1652 1653 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport, 1654 sctppcbinfo.hashmark)]; 1655 1656 LIST_FOREACH(t_inp, head, sctp_hash) { 1657 if (t_inp->sctp_lport != lport) { 1658 continue; 1659 } 1660 /* This one is in use. */ 1661 /* check the v6/v4 binding issue */ 1662 if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 1663 (((struct inpcb *)t_inp)->inp_flags & IN6P_IPV6_V6ONLY) 1664 ) { 1665 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1666 /* collision in V6 space */ 1667 return (1); 1668 } else { 1669 /* inp is BOUND_V4 no conflict */ 1670 continue; 1671 } 1672 } else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1673 /* t_inp is bound v4 and v6, conflict always */ 1674 return (1); 1675 } else { 1676 /* t_inp is bound only V4 */ 1677 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 1678 (((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY) 1679 ) { 1680 /* no conflict */ 1681 continue; 1682 } 1683 /* else fall through to conflict */ 1684 } 1685 return (1); 1686 } 1687 return (0); 1688 } 1689 1690 1691 1692 int 1693 sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct thread *p) 1694 { 1695 /* bind a ep to a socket address */ 1696 struct sctppcbhead *head; 1697 struct sctp_inpcb *inp, *inp_tmp; 1698 struct inpcb *ip_inp; 1699 int bindall; 1700 uint16_t lport; 1701 int error; 1702 1703 lport = 0; 1704 error = 0; 1705 bindall = 1; 1706 inp = (struct sctp_inpcb *)so->so_pcb; 1707 ip_inp = (struct inpcb *)so->so_pcb; 1708 #ifdef SCTP_DEBUG 1709 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 1710 if (addr) { 1711 printf("Bind called port:%d\n", 1712 ntohs(((struct sockaddr_in *)addr)->sin_port)); 1713 printf("Addr :"); 1714 sctp_print_address(addr); 1715 } 1716 } 1717 #endif /* SCTP_DEBUG */ 1718 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) { 1719 /* already did a bind, subsequent binds NOT allowed ! */ 1720 return (EINVAL); 1721 } 1722 if (addr != NULL) { 1723 if (addr->sa_family == AF_INET) { 1724 struct sockaddr_in *sin; 1725 1726 /* IPV6_V6ONLY socket? 
*/ 1727 if ( 1728 (ip_inp->inp_flags & IN6P_IPV6_V6ONLY) 1729 ) { 1730 return (EINVAL); 1731 } 1732 if (addr->sa_len != sizeof(*sin)) 1733 return (EINVAL); 1734 1735 sin = (struct sockaddr_in *)addr; 1736 lport = sin->sin_port; 1737 1738 if (sin->sin_addr.s_addr != INADDR_ANY) { 1739 bindall = 0; 1740 } 1741 } else if (addr->sa_family == AF_INET6) { 1742 /* Only for pure IPv6 Address. (No IPv4 Mapped!) */ 1743 struct sockaddr_in6 *sin6; 1744 1745 sin6 = (struct sockaddr_in6 *)addr; 1746 1747 if (addr->sa_len != sizeof(*sin6)) 1748 return (EINVAL); 1749 1750 lport = sin6->sin6_port; 1751 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1752 bindall = 0; 1753 /* KAME hack: embed scopeid */ 1754 if (sa6_embedscope(sin6, ip6_use_defzone) != 0) 1755 return (EINVAL); 1756 } 1757 /* this must be cleared for ifa_ifwithaddr() */ 1758 sin6->sin6_scope_id = 0; 1759 } else { 1760 return (EAFNOSUPPORT); 1761 } 1762 } 1763 SCTP_INP_INFO_WLOCK(); 1764 SCTP_INP_WLOCK(inp); 1765 /* increase our count due to the unlock we do */ 1766 SCTP_INP_INCR_REF(inp); 1767 if (lport) { 1768 /* 1769 * Did the caller specify a port? if so we must see if a ep 1770 * already has this one bound. 1771 */ 1772 /* got to be root to get at low ports */ 1773 if (ntohs(lport) < IPPORT_RESERVED) { 1774 if (p && (error = 1775 priv_check(p, 1776 PRIV_NETINET_RESERVEDPORT) 1777 )) { 1778 SCTP_INP_DECR_REF(inp); 1779 SCTP_INP_WUNLOCK(inp); 1780 SCTP_INP_INFO_WUNLOCK(); 1781 return (error); 1782 } 1783 } 1784 if (p == NULL) { 1785 SCTP_INP_DECR_REF(inp); 1786 SCTP_INP_WUNLOCK(inp); 1787 SCTP_INP_INFO_WUNLOCK(); 1788 return (error); 1789 } 1790 SCTP_INP_WUNLOCK(inp); 1791 inp_tmp = sctp_pcb_findep(addr, 0, 1); 1792 if (inp_tmp != NULL) { 1793 /* 1794 * lock guy returned and lower count note that we 1795 * are not bound so inp_tmp should NEVER be inp. And 1796 * it is this inp (inp_tmp) that gets the reference 1797 * bump, so we must lower it. 
1798 */ 1799 SCTP_INP_DECR_REF(inp_tmp); 1800 SCTP_INP_DECR_REF(inp); 1801 /* unlock info */ 1802 SCTP_INP_INFO_WUNLOCK(); 1803 return (EADDRNOTAVAIL); 1804 } 1805 SCTP_INP_WLOCK(inp); 1806 if (bindall) { 1807 /* verify that no lport is not used by a singleton */ 1808 if (sctp_isport_inuse(inp, lport)) { 1809 /* Sorry someone already has this one bound */ 1810 SCTP_INP_DECR_REF(inp); 1811 SCTP_INP_WUNLOCK(inp); 1812 SCTP_INP_INFO_WUNLOCK(); 1813 return (EADDRNOTAVAIL); 1814 } 1815 } 1816 } else { 1817 /* 1818 * get any port but lets make sure no one has any address 1819 * with this port bound 1820 */ 1821 1822 /* 1823 * setup the inp to the top (I could use the union but this 1824 * is just as easy 1825 */ 1826 uint32_t port_guess; 1827 uint16_t port_attempt; 1828 int not_done = 1; 1829 1830 while (not_done) { 1831 port_guess = sctp_select_initial_TSN(&inp->sctp_ep); 1832 port_attempt = (port_guess & 0x0000ffff); 1833 if (port_attempt == 0) { 1834 goto next_half; 1835 } 1836 if (port_attempt < IPPORT_RESERVED) { 1837 port_attempt += IPPORT_RESERVED; 1838 } 1839 if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) { 1840 /* got a port we can use */ 1841 not_done = 0; 1842 continue; 1843 } 1844 /* try upper half */ 1845 next_half: 1846 port_attempt = ((port_guess >> 16) & 0x0000ffff); 1847 if (port_attempt == 0) { 1848 goto last_try; 1849 } 1850 if (port_attempt < IPPORT_RESERVED) { 1851 port_attempt += IPPORT_RESERVED; 1852 } 1853 if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) { 1854 /* got a port we can use */ 1855 not_done = 0; 1856 continue; 1857 } 1858 /* try two half's added together */ 1859 last_try: 1860 port_attempt = (((port_guess >> 16) & 0x0000ffff) + 1861 (port_guess & 0x0000ffff)); 1862 if (port_attempt == 0) { 1863 /* get a new random number */ 1864 continue; 1865 } 1866 if (port_attempt < IPPORT_RESERVED) { 1867 port_attempt += IPPORT_RESERVED; 1868 } 1869 if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) { 1870 /* got a port we can use */ 1871 not_done = 0; 1872 continue; 1873 } 1874 } 1875 /* we don't get out of the loop until we have a port */ 1876 lport = htons(port_attempt); 1877 } 1878 SCTP_INP_DECR_REF(inp); 1879 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | 1880 SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 1881 /* 1882 * this really should not happen. The guy did a non-blocking 1883 * bind and then did a close at the same time. 1884 */ 1885 SCTP_INP_WUNLOCK(inp); 1886 SCTP_INP_INFO_WUNLOCK(); 1887 return (EINVAL); 1888 } 1889 /* ok we look clear to give out this port, so lets setup the binding */ 1890 if (bindall) { 1891 /* binding to all addresses, so just set in the proper flags */ 1892 inp->sctp_flags |= SCTP_PCB_FLAGS_BOUNDALL; 1893 sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF); 1894 /* set the automatic addr changes from kernel flag */ 1895 if (sctp_auto_asconf == 0) { 1896 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1897 } else { 1898 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1899 } 1900 } else { 1901 /* 1902 * bind specific, make sure flags is off and add a new 1903 * address structure to the sctp_addr_list inside the ep 1904 * structure. 1905 * 1906 * We will need to allocate one and insert it at the head. The 1907 * socketopt call can just insert new addresses in there as 1908 * well. It will also have to do the embed scope kame hack 1909 * too (before adding). 
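		 *
		 * In outline (a summary of the code below): copy the address
		 * into a local sockaddr_storage with the port zeroed, look up
		 * the matching interface address with
		 * sctp_find_ifa_by_addr(), refuse v6 addresses that are
		 * detached/anycast/not ready, clear SCTP_PCB_FLAGS_BOUNDALL,
		 * and insert the address into the endpoint's sctp_addr_list
		 * via sctp_insert_laddr().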
1910 */ 1911 struct ifaddr *ifa; 1912 struct sockaddr_storage store_sa; 1913 1914 memset(&store_sa, 0, sizeof(store_sa)); 1915 if (addr->sa_family == AF_INET) { 1916 struct sockaddr_in *sin; 1917 1918 sin = (struct sockaddr_in *)&store_sa; 1919 memcpy(sin, addr, sizeof(struct sockaddr_in)); 1920 sin->sin_port = 0; 1921 } else if (addr->sa_family == AF_INET6) { 1922 struct sockaddr_in6 *sin6; 1923 1924 sin6 = (struct sockaddr_in6 *)&store_sa; 1925 memcpy(sin6, addr, sizeof(struct sockaddr_in6)); 1926 sin6->sin6_port = 0; 1927 } 1928 /* 1929 * first find the interface with the bound address need to 1930 * zero out the port to find the address! yuck! can't do 1931 * this earlier since need port for sctp_pcb_findep() 1932 */ 1933 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&store_sa); 1934 if (ifa == NULL) { 1935 /* Can't find an interface with that address */ 1936 SCTP_INP_WUNLOCK(inp); 1937 SCTP_INP_INFO_WUNLOCK(); 1938 return (EADDRNOTAVAIL); 1939 } 1940 if (addr->sa_family == AF_INET6) { 1941 struct in6_ifaddr *ifa6; 1942 1943 ifa6 = (struct in6_ifaddr *)ifa; 1944 /* 1945 * allow binding of deprecated addresses as per RFC 1946 * 2462 and ipng discussion 1947 */ 1948 if (ifa6->ia6_flags & (IN6_IFF_DETACHED | 1949 IN6_IFF_ANYCAST | 1950 IN6_IFF_NOTREADY)) { 1951 /* Can't bind a non-existent addr. */ 1952 SCTP_INP_WUNLOCK(inp); 1953 SCTP_INP_INFO_WUNLOCK(); 1954 return (EINVAL); 1955 } 1956 } 1957 /* we're not bound all */ 1958 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL; 1959 /* set the automatic addr changes from kernel flag */ 1960 if (sctp_auto_asconf == 0) { 1961 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1962 } else { 1963 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1964 } 1965 /* allow bindx() to send ASCONF's for binding changes */ 1966 sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF); 1967 /* add this address to the endpoint list */ 1968 error = sctp_insert_laddr(&inp->sctp_addr_list, ifa); 1969 if (error != 0) { 1970 SCTP_INP_WUNLOCK(inp); 1971 SCTP_INP_INFO_WUNLOCK(); 1972 return (error); 1973 } 1974 inp->laddr_count++; 1975 } 1976 /* find the bucket */ 1977 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport, 1978 sctppcbinfo.hashmark)]; 1979 /* put it in the bucket */ 1980 LIST_INSERT_HEAD(head, inp, sctp_hash); 1981 #ifdef SCTP_DEBUG 1982 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 1983 printf("Main hash to bind at head:%p, bound port:%d\n", head, ntohs(lport)); 1984 } 1985 #endif 1986 /* set in the port */ 1987 inp->sctp_lport = lport; 1988 1989 /* turn off just the unbound flag */ 1990 inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND; 1991 SCTP_INP_WUNLOCK(inp); 1992 SCTP_INP_INFO_WUNLOCK(); 1993 return (0); 1994 } 1995 1996 1997 static void 1998 sctp_iterator_inp_being_freed(struct sctp_inpcb *inp, struct sctp_inpcb *inp_next) 1999 { 2000 struct sctp_iterator *it; 2001 2002 /* 2003 * We enter with the only the ITERATOR_LOCK in place and a write 2004 * lock on the inp_info stuff. 2005 */ 2006 2007 /* 2008 * Go through all iterators, we must do this since it is possible 2009 * that some iterator does NOT have the lock, but is waiting for it. 2010 * And the one that had the lock has either moved in the last 2011 * iteration or we just cleared it above. We need to find all of 2012 * those guys. The list of iterators should never be very big 2013 * though. 
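* For every iterator still pointing at this inp we either clear its inp/stcb (single-inp runs) or advance it to inp_next, so no iterator is left referencing the endpoint that is about to go away.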
2014 */ 2015 LIST_FOREACH(it, &sctppcbinfo.iteratorhead, sctp_nxt_itr) { 2016 if (it == inp->inp_starting_point_for_iterator) 2017 /* skip this guy, he's special */ 2018 continue; 2019 if (it->inp == inp) { 2020 /* 2021 * This is tricky and we DON'T lock the iterator. 2022 * Reason is he's running but waiting for me since 2023 * inp->inp_starting_point_for_iterator has the lock 2024 * on me (the guy above we skipped). This tells us 2025 * its is not running but waiting for 2026 * inp->inp_starting_point_for_iterator to be 2027 * released by the guy that does have our INP in a 2028 * lock. 2029 */ 2030 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 2031 it->inp = NULL; 2032 it->stcb = NULL; 2033 } else { 2034 /* set him up to do the next guy not me */ 2035 it->inp = inp_next; 2036 it->stcb = NULL; 2037 } 2038 } 2039 } 2040 it = inp->inp_starting_point_for_iterator; 2041 if (it) { 2042 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 2043 it->inp = NULL; 2044 } else { 2045 it->inp = inp_next; 2046 } 2047 it->stcb = NULL; 2048 } 2049 } 2050 2051 /* release sctp_inpcb unbind the port */ 2052 void 2053 sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) 2054 { 2055 /* 2056 * Here we free a endpoint. We must find it (if it is in the Hash 2057 * table) and remove it from there. Then we must also find it in the 2058 * overall list and remove it from there. After all removals are 2059 * complete then any timer has to be stopped. Then start the actual 2060 * freeing. a) Any local lists. b) Any associations. c) The hash of 2061 * all associations. d) finally the ep itself. 2062 */ 2063 struct sctp_pcb *m; 2064 struct sctp_inpcb *inp_save; 2065 struct sctp_tcb *asoc, *nasoc; 2066 struct sctp_laddr *laddr, *nladdr; 2067 struct inpcb *ip_pcb; 2068 struct socket *so; 2069 2070 struct sctp_queued_to_read *sq; 2071 2072 int s, cnt; 2073 sctp_sharedkey_t *shared_key; 2074 2075 s = splnet(); 2076 2077 #ifdef SCTP_LOG_CLOSING 2078 sctp_log_closing(inp, NULL, 0); 2079 #endif 2080 2081 SCTP_ITERATOR_LOCK(); 2082 so = inp->sctp_socket; 2083 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 2084 /* been here before.. eeks.. get out of here */ 2085 splx(s); 2086 printf("This conflict in free SHOULD not be happening!\n"); 2087 SCTP_ITERATOR_UNLOCK(); 2088 #ifdef SCTP_LOG_CLOSING 2089 sctp_log_closing(inp, NULL, 1); 2090 #endif 2091 return; 2092 } 2093 SCTP_ASOC_CREATE_LOCK(inp); 2094 SCTP_INP_INFO_WLOCK(); 2095 2096 SCTP_INP_WLOCK(inp); 2097 /* 2098 * First time through we have the socket lock, after that no more. 2099 */ 2100 if (from == 1) { 2101 /* 2102 * Once we are in we can remove the flag from = 1 is only 2103 * passed from the actual closing routines that are called 2104 * via the sockets layer. 2105 */ 2106 inp->sctp_flags &= ~SCTP_PCB_FLAGS_CLOSE_IP; 2107 } 2108 sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL, 2109 SCTP_FROM_SCTP_PCB + SCTP_LOC_1); 2110 2111 if (inp->control) { 2112 sctp_m_freem(inp->control); 2113 inp->control = NULL; 2114 } 2115 if (inp->pkt) { 2116 sctp_m_freem(inp->pkt); 2117 inp->pkt = NULL; 2118 } 2119 m = &inp->sctp_ep; 2120 ip_pcb = &inp->ip_inp.inp; /* we could just cast the main pointer 2121 * here but I will be nice :> (i.e. 
2122 * ip_pcb = ep;) */ 2123 if (immediate == 0) { 2124 int cnt_in_sd; 2125 2126 cnt_in_sd = 0; 2127 for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL; 2128 asoc = nasoc) { 2129 nasoc = LIST_NEXT(asoc, sctp_tcblist); 2130 if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 2131 /* Skip guys being freed */ 2132 asoc->sctp_socket = NULL; 2133 cnt_in_sd++; 2134 continue; 2135 } 2136 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_WAIT) || 2137 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_ECHOED)) { 2138 /* Just abandon things in the front states */ 2139 if (asoc->asoc.total_output_queue_size == 0) { 2140 sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_2); 2141 continue; 2142 } 2143 } 2144 SCTP_TCB_LOCK(asoc); 2145 /* Disconnect the socket please */ 2146 asoc->sctp_socket = NULL; 2147 asoc->asoc.state |= SCTP_STATE_CLOSED_SOCKET; 2148 if ((asoc->asoc.size_on_reasm_queue > 0) || 2149 (asoc->asoc.control_pdapi) || 2150 (asoc->asoc.size_on_all_streams > 0) || 2151 (so && (so->so_rcv.sb_cc > 0)) 2152 ) { 2153 /* Left with Data unread */ 2154 struct mbuf *op_err; 2155 2156 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 2157 0, M_DONTWAIT, 1, MT_DATA); 2158 if (op_err) { 2159 /* Fill in the user initiated abort */ 2160 struct sctp_paramhdr *ph; 2161 uint32_t *ippp; 2162 2163 SCTP_BUF_LEN(op_err) = 2164 sizeof(struct sctp_paramhdr) + sizeof(uint32_t); 2165 ph = mtod(op_err, 2166 struct sctp_paramhdr *); 2167 ph->param_type = htons( 2168 SCTP_CAUSE_USER_INITIATED_ABT); 2169 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 2170 ippp = (uint32_t *) (ph + 1); 2171 *ippp = htonl(SCTP_FROM_SCTP_PCB + SCTP_LOC_3); 2172 } 2173 asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_3; 2174 sctp_send_abort_tcb(asoc, op_err); 2175 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 2176 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) || 2177 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 2178 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 2179 } 2180 sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_4); 2181 continue; 2182 } else if (TAILQ_EMPTY(&asoc->asoc.send_queue) && 2183 TAILQ_EMPTY(&asoc->asoc.sent_queue) && 2184 (asoc->asoc.stream_queue_cnt == 0) 2185 ) { 2186 if (asoc->asoc.locked_on_sending) { 2187 goto abort_anyway; 2188 } 2189 if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_SENT) && 2190 (SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 2191 /* 2192 * there is nothing queued to send, 2193 * so I send shutdown 2194 */ 2195 sctp_send_shutdown(asoc, asoc->asoc.primary_destination); 2196 asoc->asoc.state = SCTP_STATE_SHUTDOWN_SENT; 2197 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 2198 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc, 2199 asoc->asoc.primary_destination); 2200 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, 2201 asoc->asoc.primary_destination); 2202 sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_SHUT_TMR); 2203 } 2204 } else { 2205 /* mark into shutdown pending */ 2206 struct sctp_stream_queue_pending *sp; 2207 2208 asoc->asoc.state |= SCTP_STATE_SHUTDOWN_PENDING; 2209 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, 2210 asoc->asoc.primary_destination); 2211 if (asoc->asoc.locked_on_sending) { 2212 sp = TAILQ_LAST(&((asoc->asoc.locked_on_sending)->outqueue), 2213 sctp_streamhead); 2214 if (sp == NULL) { 2215 printf("Error, sp is NULL, locked on sending is %p strm:%d\n", 2216 
asoc->asoc.locked_on_sending, 2217 asoc->asoc.locked_on_sending->stream_no); 2218 } else { 2219 if ((sp->length == 0) && (sp->msg_is_complete == 0)) 2220 asoc->asoc.state |= SCTP_STATE_PARTIAL_MSG_LEFT; 2221 } 2222 } 2223 if (TAILQ_EMPTY(&asoc->asoc.send_queue) && 2224 TAILQ_EMPTY(&asoc->asoc.sent_queue) && 2225 (asoc->asoc.state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 2226 struct mbuf *op_err; 2227 2228 abort_anyway: 2229 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 2230 0, M_DONTWAIT, 1, MT_DATA); 2231 if (op_err) { 2232 /* 2233 * Fill in the user 2234 * initiated abort 2235 */ 2236 struct sctp_paramhdr *ph; 2237 uint32_t *ippp; 2238 2239 SCTP_BUF_LEN(op_err) = 2240 (sizeof(struct sctp_paramhdr) + 2241 sizeof(uint32_t)); 2242 ph = mtod(op_err, 2243 struct sctp_paramhdr *); 2244 ph->param_type = htons( 2245 SCTP_CAUSE_USER_INITIATED_ABT); 2246 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 2247 ippp = (uint32_t *) (ph + 1); 2248 *ippp = htonl(SCTP_FROM_SCTP_PCB + SCTP_LOC_5); 2249 } 2250 asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_5; 2251 sctp_send_abort_tcb(asoc, op_err); 2252 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 2253 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) || 2254 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 2255 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 2256 } 2257 sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_6); 2258 continue; 2259 } 2260 } 2261 cnt_in_sd++; 2262 SCTP_TCB_UNLOCK(asoc); 2263 } 2264 /* now is there some left in our SHUTDOWN state? */ 2265 if (cnt_in_sd) { 2266 splx(s); 2267 2268 SCTP_INP_WUNLOCK(inp); 2269 SCTP_ASOC_CREATE_UNLOCK(inp); 2270 SCTP_INP_INFO_WUNLOCK(); 2271 SCTP_ITERATOR_UNLOCK(); 2272 #ifdef SCTP_LOG_CLOSING 2273 sctp_log_closing(inp, NULL, 2); 2274 #endif 2275 return; 2276 } 2277 } 2278 inp->sctp_socket = NULL; 2279 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) != 2280 SCTP_PCB_FLAGS_UNBOUND) { 2281 /* 2282 * ok, this guy has been bound. It's port is somewhere in 2283 * the sctppcbinfo hash table. Remove it! 2284 */ 2285 LIST_REMOVE(inp, sctp_hash); 2286 inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND; 2287 } 2288 /* 2289 * If there is a timer running to kill us, forget it, since it may 2290 * have a contest on the INP lock.. which would cause us to die ... 
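* (that kill timer lives in sctp_ep.signature_change and is stopped explicitly further below before we tear the endpoint down).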
2291 */ 2292 cnt = 0; 2293 for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL; 2294 asoc = nasoc) { 2295 nasoc = LIST_NEXT(asoc, sctp_tcblist); 2296 if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 2297 cnt++; 2298 continue; 2299 } 2300 /* Free associations that are NOT killing us */ 2301 SCTP_TCB_LOCK(asoc); 2302 if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_COOKIE_WAIT) && 2303 ((asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) { 2304 struct mbuf *op_err; 2305 uint32_t *ippp; 2306 2307 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 2308 0, M_DONTWAIT, 1, MT_DATA); 2309 if (op_err) { 2310 /* Fill in the user initiated abort */ 2311 struct sctp_paramhdr *ph; 2312 2313 SCTP_BUF_LEN(op_err) = (sizeof(struct sctp_paramhdr) + 2314 sizeof(uint32_t)); 2315 ph = mtod(op_err, struct sctp_paramhdr *); 2316 ph->param_type = htons( 2317 SCTP_CAUSE_USER_INITIATED_ABT); 2318 ph->param_length = htons(SCTP_BUF_LEN(op_err)); 2319 ippp = (uint32_t *) (ph + 1); 2320 *ippp = htonl(SCTP_FROM_SCTP_PCB + SCTP_LOC_7); 2321 2322 } 2323 asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_7; 2324 sctp_send_abort_tcb(asoc, op_err); 2325 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 2326 } else if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 2327 cnt++; 2328 SCTP_TCB_UNLOCK(asoc); 2329 continue; 2330 } 2331 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) || 2332 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 2333 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 2334 } 2335 sctp_free_assoc(inp, asoc, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_8); 2336 } 2337 if (cnt) { 2338 /* Ok we have someone out there that will kill us */ 2339 SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); 2340 SCTP_INP_WUNLOCK(inp); 2341 SCTP_ASOC_CREATE_UNLOCK(inp); 2342 SCTP_INP_INFO_WUNLOCK(); 2343 SCTP_ITERATOR_UNLOCK(); 2344 #ifdef SCTP_LOG_CLOSING 2345 sctp_log_closing(inp, NULL, 3); 2346 #endif 2347 return; 2348 } 2349 if ((inp->refcount) || (inp->sctp_flags & SCTP_PCB_FLAGS_CLOSE_IP)) { 2350 SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); 2351 sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL); 2352 SCTP_INP_WUNLOCK(inp); 2353 SCTP_ASOC_CREATE_UNLOCK(inp); 2354 SCTP_INP_INFO_WUNLOCK(); 2355 SCTP_ITERATOR_UNLOCK(); 2356 #ifdef SCTP_LOG_CLOSING 2357 sctp_log_closing(inp, NULL, 4); 2358 #endif 2359 return; 2360 } 2361 SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); 2362 inp->sctp_ep.signature_change.type = 0; 2363 inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE; 2364 2365 #ifdef SCTP_LOG_CLOSING 2366 sctp_log_closing(inp, NULL, 5); 2367 #endif 2368 2369 SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); 2370 inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NONE; 2371 /* Clear the read queue */ 2372 while ((sq = TAILQ_FIRST(&inp->read_queue)) != NULL) { 2373 TAILQ_REMOVE(&inp->read_queue, sq, next); 2374 sctp_free_remote_addr(sq->whoFrom); 2375 if (so) 2376 so->so_rcv.sb_cc -= sq->length; 2377 if (sq->data) { 2378 sctp_m_freem(sq->data); 2379 sq->data = NULL; 2380 } 2381 /* 2382 * no need to free the net count, since at this point all 2383 * assoc's are gone. 2384 */ 2385 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, sq); 2386 SCTP_DECR_READQ_COUNT(); 2387 } 2388 /* Now the sctp_pcb things */ 2389 /* 2390 * free each asoc if it is not already closed/free. we can't use the 2391 * macro here since le_next will get freed as part of the 2392 * sctp_free_assoc() call. 
2393 */ 2394 cnt = 0; 2395 if (so) { 2396 #ifdef IPSEC 2397 ipsec4_delete_pcbpolicy(ip_pcb); 2398 #endif /* IPSEC */ 2399 2400 /* Unlocks not needed since the socket is gone now */ 2401 } 2402 if (ip_pcb->inp_options) { 2403 (void)sctp_m_free(ip_pcb->inp_options); 2404 ip_pcb->inp_options = 0; 2405 } 2406 if (ip_pcb->inp_moptions) { 2407 ip_freemoptions(ip_pcb->inp_moptions); 2408 ip_pcb->inp_moptions = 0; 2409 } 2410 #ifdef INET6 2411 if (ip_pcb->inp_vflag & INP_IPV6) { 2412 struct in6pcb *in6p; 2413 2414 in6p = (struct in6pcb *)inp; 2415 ip6_freepcbopts(in6p->in6p_outputopts); 2416 } 2417 #endif /* INET6 */ 2418 ip_pcb->inp_vflag = 0; 2419 /* free up authentication fields */ 2420 if (inp->sctp_ep.local_auth_chunks != NULL) 2421 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2422 if (inp->sctp_ep.local_hmacs != NULL) 2423 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2424 2425 shared_key = LIST_FIRST(&inp->sctp_ep.shared_keys); 2426 while (shared_key) { 2427 LIST_REMOVE(shared_key, next); 2428 sctp_free_sharedkey(shared_key); 2429 shared_key = LIST_FIRST(&inp->sctp_ep.shared_keys); 2430 } 2431 2432 inp_save = LIST_NEXT(inp, sctp_list); 2433 LIST_REMOVE(inp, sctp_list); 2434 2435 /* fix any iterators only after out of the list */ 2436 sctp_iterator_inp_being_freed(inp, inp_save); 2437 /* 2438 * if we have an address list the following will free the list of 2439 * ifaddr's that are set into this ep. Again macro limitations here, 2440 * since the LIST_FOREACH could be a bad idea. 2441 */ 2442 for ((laddr = LIST_FIRST(&inp->sctp_addr_list)); laddr != NULL; 2443 laddr = nladdr) { 2444 nladdr = LIST_NEXT(laddr, sctp_nxt_addr); 2445 LIST_REMOVE(laddr, sctp_nxt_addr); 2446 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr); 2447 SCTP_DECR_LADDR_COUNT(); 2448 } 2449 2450 #ifdef SCTP_TRACK_FREED_ASOCS 2451 /* TEMP CODE */ 2452 for ((asoc = LIST_FIRST(&inp->sctp_asoc_free_list)); asoc != NULL; 2453 asoc = nasoc) { 2454 nasoc = LIST_NEXT(asoc, sctp_tcblist); 2455 LIST_REMOVE(asoc, sctp_tcblist); 2456 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, asoc); 2457 SCTP_DECR_ASOC_COUNT(); 2458 } 2459 /* *** END TEMP CODE *** */ 2460 #endif 2461 /* Now lets see about freeing the EP hash table. */ 2462 if (inp->sctp_tcbhash != NULL) { 2463 SCTP_FREE(inp->sctp_tcbhash); 2464 inp->sctp_tcbhash = 0; 2465 } 2466 /* Now we must put the ep memory back into the zone pool */ 2467 SCTP_INP_LOCK_DESTROY(inp); 2468 SCTP_INP_READ_DESTROY(inp); 2469 SCTP_ASOC_CREATE_LOCK_DESTROY(inp); 2470 SCTP_INP_INFO_WUNLOCK(); 2471 2472 SCTP_ITERATOR_UNLOCK(); 2473 2474 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp); 2475 SCTP_DECR_EP_COUNT(); 2476 2477 splx(s); 2478 } 2479 2480 2481 struct sctp_nets * 2482 sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr) 2483 { 2484 struct sctp_nets *net; 2485 2486 /* locate the address */ 2487 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 2488 if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr)) 2489 return (net); 2490 } 2491 return (NULL); 2492 } 2493 2494 2495 /* 2496 * add's a remote endpoint address, done with the INIT/INIT-ACK as well as 2497 * when a ASCONF arrives that adds it. It will also initialize all the cwnd 2498 * stats of stuff. 
2499 */ 2500 int 2501 sctp_is_address_on_local_host(struct sockaddr *addr) 2502 { 2503 struct ifnet *ifn; 2504 struct ifaddr *ifa; 2505 2506 TAILQ_FOREACH(ifn, &ifnet, if_list) { 2507 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) { 2508 if (addr->sa_family == ifa->ifa_addr->sa_family) { 2509 /* same family */ 2510 if (addr->sa_family == AF_INET) { 2511 struct sockaddr_in *sin, *sin_c; 2512 2513 sin = (struct sockaddr_in *)addr; 2514 sin_c = (struct sockaddr_in *) 2515 ifa->ifa_addr; 2516 if (sin->sin_addr.s_addr == 2517 sin_c->sin_addr.s_addr) { 2518 /* 2519 * we are on the same 2520 * machine 2521 */ 2522 return (1); 2523 } 2524 } else if (addr->sa_family == AF_INET6) { 2525 struct sockaddr_in6 *sin6, *sin_c6; 2526 2527 sin6 = (struct sockaddr_in6 *)addr; 2528 sin_c6 = (struct sockaddr_in6 *) 2529 ifa->ifa_addr; 2530 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr, 2531 &sin_c6->sin6_addr)) { 2532 /* 2533 * we are on the same 2534 * machine 2535 */ 2536 return (1); 2537 } 2538 } 2539 } 2540 } 2541 } 2542 return (0); 2543 } 2544 2545 int 2546 sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr, 2547 int set_scope, int from) 2548 { 2549 /* 2550 * The following is redundant to the same lines in the 2551 * sctp_aloc_assoc() but is needed since other's call the add 2552 * address function 2553 */ 2554 struct sctp_nets *net, *netfirst; 2555 int addr_inscope; 2556 2557 #ifdef SCTP_DEBUG 2558 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 2559 printf("Adding an address (from:%d) to the peer: ", from); 2560 sctp_print_address(newaddr); 2561 } 2562 #endif 2563 2564 netfirst = sctp_findnet(stcb, newaddr); 2565 if (netfirst) { 2566 /* 2567 * Lie and return ok, we don't want to make the association 2568 * go away for this behavior. It will happen in the TCP 2569 * model in a connected socket. It does not reach the hash 2570 * table until after the association is built so it can't be 2571 * found. Mark as reachable, since the initial creation will 2572 * have been cleared and the NOT_IN_ASSOC flag will have 2573 * been added... and we don't want to end up removing it 2574 * back out. 
2575 */ 2576 if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) { 2577 netfirst->dest_state = (SCTP_ADDR_REACHABLE | 2578 SCTP_ADDR_UNCONFIRMED); 2579 } else { 2580 netfirst->dest_state = SCTP_ADDR_REACHABLE; 2581 } 2582 2583 return (0); 2584 } 2585 addr_inscope = 1; 2586 if (newaddr->sa_family == AF_INET) { 2587 struct sockaddr_in *sin; 2588 2589 sin = (struct sockaddr_in *)newaddr; 2590 if (sin->sin_addr.s_addr == 0) { 2591 /* Invalid address */ 2592 return (-1); 2593 } 2594 /* zero out the bzero area */ 2595 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); 2596 2597 /* assure len is set */ 2598 sin->sin_len = sizeof(struct sockaddr_in); 2599 if (set_scope) { 2600 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE 2601 stcb->ipv4_local_scope = 1; 2602 #else 2603 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { 2604 stcb->asoc.ipv4_local_scope = 1; 2605 } 2606 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */ 2607 2608 if (sctp_is_address_on_local_host(newaddr)) { 2609 stcb->asoc.loopback_scope = 1; 2610 stcb->asoc.ipv4_local_scope = 1; 2611 stcb->asoc.local_scope = 1; 2612 stcb->asoc.site_scope = 1; 2613 } 2614 } else { 2615 if (from == SCTP_ADDR_IS_CONFIRMED) { 2616 /* From connectx */ 2617 if (sctp_is_address_on_local_host(newaddr)) { 2618 stcb->asoc.loopback_scope = 1; 2619 stcb->asoc.ipv4_local_scope = 1; 2620 stcb->asoc.local_scope = 1; 2621 stcb->asoc.site_scope = 1; 2622 } 2623 } 2624 /* Validate the address is in scope */ 2625 if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) && 2626 (stcb->asoc.ipv4_local_scope == 0)) { 2627 addr_inscope = 0; 2628 } 2629 } 2630 } else if (newaddr->sa_family == AF_INET6) { 2631 struct sockaddr_in6 *sin6; 2632 2633 sin6 = (struct sockaddr_in6 *)newaddr; 2634 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 2635 /* Invalid address */ 2636 return (-1); 2637 } 2638 /* assure len is set */ 2639 sin6->sin6_len = sizeof(struct sockaddr_in6); 2640 if (set_scope) { 2641 if (sctp_is_address_on_local_host(newaddr)) { 2642 stcb->asoc.loopback_scope = 1; 2643 stcb->asoc.local_scope = 1; 2644 stcb->asoc.ipv4_local_scope = 1; 2645 stcb->asoc.site_scope = 1; 2646 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 2647 /* 2648 * If the new destination is a LINK_LOCAL we 2649 * must have common site scope. Don't set 2650 * the local scope since we may not share 2651 * all links, only loopback can do this. 2652 * Links on the local network would also be 2653 * on our private network for v4 too. 2654 */ 2655 stcb->asoc.ipv4_local_scope = 1; 2656 stcb->asoc.site_scope = 1; 2657 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) { 2658 /* 2659 * If the new destination is SITE_LOCAL then 2660 * we must have site scope in common. 2661 */ 2662 stcb->asoc.site_scope = 1; 2663 } 2664 } else { 2665 if (from == SCTP_ADDR_IS_CONFIRMED) { 2666 /* From connectx so we check for localhost. 
*/ 2667 if (sctp_is_address_on_local_host(newaddr)) { 2668 stcb->asoc.loopback_scope = 1; 2669 stcb->asoc.ipv4_local_scope = 1; 2670 stcb->asoc.local_scope = 1; 2671 stcb->asoc.site_scope = 1; 2672 } 2673 } 2674 /* Validate the address is in scope */ 2675 if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) && 2676 (stcb->asoc.loopback_scope == 0)) { 2677 addr_inscope = 0; 2678 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) && 2679 (stcb->asoc.local_scope == 0)) { 2680 addr_inscope = 0; 2681 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) && 2682 (stcb->asoc.site_scope == 0)) { 2683 addr_inscope = 0; 2684 } 2685 } 2686 } else { 2687 /* not supported family type */ 2688 return (-1); 2689 } 2690 net = (struct sctp_nets *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_net); 2691 if (net == NULL) { 2692 return (-1); 2693 } 2694 SCTP_INCR_RADDR_COUNT(); 2695 bzero(net, sizeof(*net)); 2696 memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len); 2697 if (newaddr->sa_family == AF_INET) { 2698 ((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport; 2699 } else if (newaddr->sa_family == AF_INET6) { 2700 ((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport; 2701 } 2702 net->addr_is_local = sctp_is_address_on_local_host(newaddr); 2703 net->failure_threshold = stcb->asoc.def_net_failure; 2704 if (addr_inscope == 0) { 2705 net->dest_state = (SCTP_ADDR_REACHABLE | 2706 SCTP_ADDR_OUT_OF_SCOPE); 2707 } else { 2708 if (from == SCTP_ADDR_IS_CONFIRMED) 2709 /* SCTP_ADDR_IS_CONFIRMED is passed by connect_x */ 2710 net->dest_state = SCTP_ADDR_REACHABLE; 2711 else 2712 net->dest_state = SCTP_ADDR_REACHABLE | 2713 SCTP_ADDR_UNCONFIRMED; 2714 } 2715 net->RTO = stcb->asoc.initial_rto; 2716 stcb->asoc.numnets++; 2717 *(&net->ref_count) = 1; 2718 net->tos_flowlabel = 0; 2719 #ifdef AF_INET 2720 if (newaddr->sa_family == AF_INET) 2721 net->tos_flowlabel = stcb->asoc.default_tos; 2722 #endif 2723 #ifdef AF_INET6 2724 if (newaddr->sa_family == AF_INET6) 2725 net->tos_flowlabel = stcb->asoc.default_flowlabel; 2726 #endif 2727 /* Init the timer structure */ 2728 SCTP_OS_TIMER_INIT(&net->rxt_timer.timer); 2729 SCTP_OS_TIMER_INIT(&net->fr_timer.timer); 2730 SCTP_OS_TIMER_INIT(&net->pmtu_timer.timer); 2731 2732 /* Now generate a route for this guy */ 2733 /* KAME hack: embed scopeid */ 2734 if (newaddr->sa_family == AF_INET6) { 2735 struct sockaddr_in6 *sin6; 2736 2737 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 2738 (void)sa6_embedscope(sin6, ip6_use_defzone); 2739 sin6->sin6_scope_id = 0; 2740 } 2741 rtalloc_ign((struct route *)&net->ro, 0UL); 2742 if (newaddr->sa_family == AF_INET6) { 2743 struct sockaddr_in6 *sin6; 2744 2745 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 2746 (void)sa6_recoverscope(sin6); 2747 } 2748 if ((net->ro.ro_rt) && 2749 (net->ro.ro_rt->rt_ifp)) { 2750 net->mtu = net->ro.ro_rt->rt_ifp->if_mtu; 2751 if (from == SCTP_ALLOC_ASOC) { 2752 stcb->asoc.smallest_mtu = net->mtu; 2753 } 2754 /* start things off to match mtu of interface please. */ 2755 net->ro.ro_rt->rt_rmx.rmx_mtu = net->ro.ro_rt->rt_ifp->if_mtu; 2756 } else { 2757 net->mtu = stcb->asoc.smallest_mtu; 2758 } 2759 2760 if (stcb->asoc.smallest_mtu > net->mtu) { 2761 stcb->asoc.smallest_mtu = net->mtu; 2762 } 2763 /* 2764 * We take the max of the burst limit times a MTU or the 2765 * INITIAL_CWND. We then limit this to 4 MTU's of sending. 
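* In other words the statement below computes cwnd = min(4 * MTU, max(2 * MTU, SCTP_INITIAL_CWND)), and the check right after it enforces a floor of two MTUs.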
2766 */ 2767 net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND)); 2768 2769 /* we always get at LEAST 2 MTU's */ 2770 if (net->cwnd < (2 * net->mtu)) { 2771 net->cwnd = 2 * net->mtu; 2772 } 2773 net->ssthresh = stcb->asoc.peers_rwnd; 2774 2775 #if defined(SCTP_CWND_MONITOR) || defined(SCTP_CWND_LOGGING) 2776 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION); 2777 #endif 2778 2779 /* 2780 * CMT: CUC algo - set find_pseudo_cumack to TRUE (1) at beginning 2781 * of assoc (2005/06/27, iyengar@cis.udel.edu) 2782 */ 2783 net->find_pseudo_cumack = 1; 2784 net->find_rtx_pseudo_cumack = 1; 2785 net->src_addr_selected = 0; 2786 netfirst = TAILQ_FIRST(&stcb->asoc.nets); 2787 if (net->ro.ro_rt == NULL) { 2788 /* Since we have no route put it at the back */ 2789 TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next); 2790 } else if (netfirst == NULL) { 2791 /* We are the first one in the pool. */ 2792 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); 2793 } else if (netfirst->ro.ro_rt == NULL) { 2794 /* 2795 * First one has NO route. Place this one ahead of the first 2796 * one. 2797 */ 2798 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); 2799 } else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) { 2800 /* 2801 * This one has a different interface than the one at the 2802 * top of the list. Place it ahead. 2803 */ 2804 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); 2805 } else { 2806 /* 2807 * Ok we have the same interface as the first one. Move 2808 * forward until we find either a) one with a NULL route... 2809 * insert ahead of that b) one with a different ifp.. insert 2810 * after that. c) end of the list.. insert at the tail. 2811 */ 2812 struct sctp_nets *netlook; 2813 2814 do { 2815 netlook = TAILQ_NEXT(netfirst, sctp_next); 2816 if (netlook == NULL) { 2817 /* End of the list */ 2818 TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, 2819 sctp_next); 2820 break; 2821 } else if (netlook->ro.ro_rt == NULL) { 2822 /* next one has NO route */ 2823 TAILQ_INSERT_BEFORE(netfirst, net, sctp_next); 2824 break; 2825 } else if (netlook->ro.ro_rt->rt_ifp != 2826 net->ro.ro_rt->rt_ifp) { 2827 TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook, 2828 net, sctp_next); 2829 break; 2830 } 2831 /* Shift forward */ 2832 netfirst = netlook; 2833 } while (netlook != NULL); 2834 } 2835 2836 /* got to have a primary set */ 2837 if (stcb->asoc.primary_destination == 0) { 2838 stcb->asoc.primary_destination = net; 2839 } else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) && 2840 (net->ro.ro_rt) && 2841 ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { 2842 /* No route to current primary adopt new primary */ 2843 stcb->asoc.primary_destination = net; 2844 } 2845 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, 2846 net); 2847 /* Validate primary is first */ 2848 net = TAILQ_FIRST(&stcb->asoc.nets); 2849 if ((net != stcb->asoc.primary_destination) && 2850 (stcb->asoc.primary_destination)) { 2851 /* 2852 * first one on the list is NOT the primary sctp_cmpaddr() 2853 * is much more efficent if the primary is the first on the 2854 * list, make it so. 2855 */ 2856 TAILQ_REMOVE(&stcb->asoc.nets, 2857 stcb->asoc.primary_destination, sctp_next); 2858 TAILQ_INSERT_HEAD(&stcb->asoc.nets, 2859 stcb->asoc.primary_destination, sctp_next); 2860 } 2861 return (0); 2862 } 2863 2864 2865 /* 2866 * allocate an association and add it to the endpoint. 
The caller must be 2867 * careful to add all additional addresses once they are known right away or 2868 * else the assoc may experience a blackout scenario. 2869 */ 2870 struct sctp_tcb * 2871 sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, 2872 int for_a_init, int *error, uint32_t override_tag) 2873 { 2874 struct sctp_tcb *stcb; 2875 struct sctp_association *asoc; 2876 struct sctpasochead *head; 2877 uint16_t rport; 2878 int err; 2879 2880 /* 2881 * Assumption made here: Caller has done a 2882 * sctp_findassociation_ep_addr(ep, addr's); to make sure the 2883 * address does not exist already. 2884 */ 2885 if (sctppcbinfo.ipi_count_asoc >= SCTP_MAX_NUM_OF_ASOC) { 2886 /* Hit max assoc, sorry no more */ 2887 *error = ENOBUFS; 2888 return (NULL); 2889 } 2890 SCTP_INP_RLOCK(inp); 2891 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { 2892 /* 2893 * If it's in the TCP pool, it's NOT allowed to create an 2894 * association. The parent listener needs to call 2895 * sctp_aloc_assoc.. or the one-2-many socket. If a peeled 2896 * off, or connected one does this.. it's an error. 2897 */ 2898 SCTP_INP_RUNLOCK(inp); 2899 *error = EINVAL; 2900 return (NULL); 2901 } 2902 #ifdef SCTP_DEBUG 2903 if (sctp_debug_on & SCTP_DEBUG_PCB3) { 2904 printf("Allocate an association for peer:"); 2905 if (firstaddr) 2906 sctp_print_address(firstaddr); 2907 else 2908 printf("None\n"); 2909 printf("Port:%d\n", 2910 ntohs(((struct sockaddr_in *)firstaddr)->sin_port)); 2911 } 2912 #endif /* SCTP_DEBUG */ 2913 if (firstaddr->sa_family == AF_INET) { 2914 struct sockaddr_in *sin; 2915 2916 sin = (struct sockaddr_in *)firstaddr; 2917 if ((sin->sin_port == 0) || (sin->sin_addr.s_addr == 0)) { 2918 /* Invalid address */ 2919 SCTP_INP_RUNLOCK(inp); 2920 *error = EINVAL; 2921 return (NULL); 2922 } 2923 rport = sin->sin_port; 2924 } else if (firstaddr->sa_family == AF_INET6) { 2925 struct sockaddr_in6 *sin6; 2926 2927 sin6 = (struct sockaddr_in6 *)firstaddr; 2928 if ((sin6->sin6_port == 0) || 2929 (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) { 2930 /* Invalid address */ 2931 SCTP_INP_RUNLOCK(inp); 2932 *error = EINVAL; 2933 return (NULL); 2934 } 2935 rport = sin6->sin6_port; 2936 } else { 2937 /* not supported family type */ 2938 SCTP_INP_RUNLOCK(inp); 2939 *error = EINVAL; 2940 return (NULL); 2941 } 2942 SCTP_INP_RUNLOCK(inp); 2943 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 2944 /* 2945 * If you have not performed a bind, then we need to do the 2946 * ephemeral bind for you. 2947 */ 2948 if ((err = sctp_inpcb_bind(inp->sctp_socket, 2949 (struct sockaddr *)NULL, 2950 (struct thread *)NULL 2951 ))) { 2952 /* bind error, probably perm */ 2953 *error = err; 2954 return (NULL); 2955 } 2956 } 2957 stcb = (struct sctp_tcb *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_asoc); 2958 if (stcb == NULL) { 2959 /* out of memory?
*/ 2960 *error = ENOMEM; 2961 return (NULL); 2962 } 2963 SCTP_INCR_ASOC_COUNT(); 2964 2965 bzero(stcb, sizeof(*stcb)); 2966 asoc = &stcb->asoc; 2967 SCTP_TCB_LOCK_INIT(stcb); 2968 SCTP_TCB_SEND_LOCK_INIT(stcb); 2969 /* setup back pointer's */ 2970 stcb->sctp_ep = inp; 2971 stcb->sctp_socket = inp->sctp_socket; 2972 if ((err = sctp_init_asoc(inp, asoc, for_a_init, override_tag))) { 2973 /* failed */ 2974 SCTP_TCB_LOCK_DESTROY(stcb); 2975 SCTP_TCB_SEND_LOCK_DESTROY(stcb); 2976 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb); 2977 SCTP_DECR_ASOC_COUNT(); 2978 *error = err; 2979 return (NULL); 2980 } 2981 /* and the port */ 2982 stcb->rport = rport; 2983 SCTP_INP_INFO_WLOCK(); 2984 SCTP_INP_WLOCK(inp); 2985 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 2986 /* inpcb freed while alloc going on */ 2987 SCTP_TCB_LOCK_DESTROY(stcb); 2988 SCTP_TCB_SEND_LOCK_DESTROY(stcb); 2989 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb); 2990 SCTP_INP_WUNLOCK(inp); 2991 SCTP_INP_INFO_WUNLOCK(); 2992 SCTP_DECR_ASOC_COUNT(); 2993 *error = EINVAL; 2994 return (NULL); 2995 } 2996 SCTP_TCB_LOCK(stcb); 2997 2998 /* now that my_vtag is set, add it to the hash */ 2999 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, 3000 sctppcbinfo.hashasocmark)]; 3001 /* put it in the bucket in the vtag hash of assoc's for the system */ 3002 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 3003 SCTP_INP_INFO_WUNLOCK(); 3004 3005 if ((err = sctp_add_remote_addr(stcb, firstaddr, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC))) { 3006 /* failure.. memory error? */ 3007 if (asoc->strmout) 3008 SCTP_FREE(asoc->strmout); 3009 if (asoc->mapping_array) 3010 SCTP_FREE(asoc->mapping_array); 3011 3012 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb); 3013 SCTP_DECR_ASOC_COUNT(); 3014 SCTP_TCB_LOCK_DESTROY(stcb); 3015 SCTP_TCB_SEND_LOCK_DESTROY(stcb); 3016 *error = ENOBUFS; 3017 return (NULL); 3018 } 3019 /* Init all the timers */ 3020 SCTP_OS_TIMER_INIT(&asoc->hb_timer.timer); 3021 SCTP_OS_TIMER_INIT(&asoc->dack_timer.timer); 3022 SCTP_OS_TIMER_INIT(&asoc->strreset_timer.timer); 3023 SCTP_OS_TIMER_INIT(&asoc->asconf_timer.timer); 3024 SCTP_OS_TIMER_INIT(&asoc->shut_guard_timer.timer); 3025 SCTP_OS_TIMER_INIT(&asoc->autoclose_timer.timer); 3026 SCTP_OS_TIMER_INIT(&asoc->delayed_event_timer.timer); 3027 3028 LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist); 3029 /* now file the port under the hash as well */ 3030 if (inp->sctp_tcbhash != NULL) { 3031 head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport, 3032 inp->sctp_hashmark)]; 3033 LIST_INSERT_HEAD(head, stcb, sctp_tcbhash); 3034 } 3035 SCTP_INP_WUNLOCK(inp); 3036 #ifdef SCTP_DEBUG 3037 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 3038 printf("Association %p now allocated\n", stcb); 3039 } 3040 #endif 3041 return (stcb); 3042 } 3043 3044 3045 void 3046 sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net) 3047 { 3048 struct sctp_association *asoc; 3049 3050 asoc = &stcb->asoc; 3051 asoc->numnets--; 3052 TAILQ_REMOVE(&asoc->nets, net, sctp_next); 3053 sctp_free_remote_addr(net); 3054 if (net == asoc->primary_destination) { 3055 /* Reset primary */ 3056 struct sctp_nets *lnet; 3057 3058 lnet = TAILQ_FIRST(&asoc->nets); 3059 /* Try to find a confirmed primary */ 3060 asoc->primary_destination = sctp_find_alternate_net(stcb, lnet, 3061 0); 3062 } 3063 if (net == asoc->last_data_chunk_from) { 3064 /* Reset primary */ 3065 asoc->last_data_chunk_from = TAILQ_FIRST(&asoc->nets); 3066 } 3067 if (net == asoc->last_control_chunk_from) { 3068 /* 
Clear net */ 3069 asoc->last_control_chunk_from = NULL; 3070 } 3071 /* if (net == asoc->asconf_last_sent_to) {*/ 3072 /* Reset primary */ 3073 /* asoc->asconf_last_sent_to = TAILQ_FIRST(&asoc->nets);*/ 3074 /* }*/ 3075 } 3076 3077 /* 3078 * remove a remote endpoint address from an association, it will fail if the 3079 * address does not exist. 3080 */ 3081 int 3082 sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr) 3083 { 3084 /* 3085 * Here we need to remove a remote address. This is quite simple, we 3086 * first find it in the list of address for the association 3087 * (tasoc->asoc.nets) and then if it is there, we do a LIST_REMOVE 3088 * on that item. Note we do not allow it to be removed if there are 3089 * no other addresses. 3090 */ 3091 struct sctp_association *asoc; 3092 struct sctp_nets *net, *net_tmp; 3093 3094 asoc = &stcb->asoc; 3095 3096 /* locate the address */ 3097 for (net = TAILQ_FIRST(&asoc->nets); net != NULL; net = net_tmp) { 3098 net_tmp = TAILQ_NEXT(net, sctp_next); 3099 if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) { 3100 continue; 3101 } 3102 if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr, 3103 remaddr)) { 3104 /* we found the guy */ 3105 if (asoc->numnets < 2) { 3106 /* Must have at LEAST two remote addresses */ 3107 return (-1); 3108 } else { 3109 sctp_remove_net(stcb, net); 3110 return (0); 3111 } 3112 } 3113 } 3114 /* not found. */ 3115 return (-2); 3116 } 3117 3118 3119 static void 3120 sctp_add_vtag_to_timewait(struct sctp_inpcb *inp, uint32_t tag) 3121 { 3122 struct sctpvtaghead *chain; 3123 struct sctp_tagblock *twait_block; 3124 struct timeval now; 3125 int set, i; 3126 3127 SCTP_GETTIME_TIMEVAL(&now); 3128 chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; 3129 set = 0; 3130 if (!LIST_EMPTY(chain)) { 3131 /* Block(s) present, lets find space, and expire on the fly */ 3132 LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { 3133 for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { 3134 if ((twait_block->vtag_block[i].v_tag == 0) && 3135 !set) { 3136 twait_block->vtag_block[i].tv_sec_at_expire = 3137 now.tv_sec + SCTP_TIME_WAIT; 3138 twait_block->vtag_block[i].v_tag = tag; 3139 set = 1; 3140 } else if ((twait_block->vtag_block[i].v_tag) && 3141 ((long)twait_block->vtag_block[i].tv_sec_at_expire > 3142 now.tv_sec)) { 3143 /* Audit expires this guy */ 3144 twait_block->vtag_block[i].tv_sec_at_expire = 0; 3145 twait_block->vtag_block[i].v_tag = 0; 3146 if (set == 0) { 3147 /* Reuse it for my new tag */ 3148 twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + SCTP_TIME_WAIT; 3149 twait_block->vtag_block[0].v_tag = tag; 3150 set = 1; 3151 } 3152 } 3153 } 3154 if (set) { 3155 /* 3156 * We only do up to the block where we can 3157 * place our tag for audits 3158 */ 3159 break; 3160 } 3161 } 3162 } 3163 /* Need to add a new block to chain */ 3164 if (!set) { 3165 SCTP_MALLOC(twait_block, struct sctp_tagblock *, 3166 sizeof(struct sctp_tagblock), "TimeWait"); 3167 if (twait_block == NULL) { 3168 return; 3169 } 3170 memset(twait_block, 0, sizeof(struct sctp_timewait)); 3171 LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock); 3172 twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + 3173 SCTP_TIME_WAIT; 3174 twait_block->vtag_block[0].v_tag = tag; 3175 } 3176 } 3177 3178 3179 static void 3180 sctp_iterator_asoc_being_freed(struct sctp_inpcb *inp, struct sctp_tcb *stcb) 3181 { 3182 struct sctp_iterator *it; 3183 3184 /* 3185 * Unlock the tcb lock we do this so we avoid a dead lock scenario 3186 * 
where the iterator is waiting on the TCB lock and the TCB lock is 3187 * waiting on the iterator lock. 3188 */ 3189 it = stcb->asoc.stcb_starting_point_for_iterator; 3190 if (it == NULL) { 3191 return; 3192 } 3193 if (it->inp != stcb->sctp_ep) { 3194 /* hmm, focused on the wrong one? */ 3195 return; 3196 } 3197 if (it->stcb != stcb) { 3198 return; 3199 } 3200 it->stcb = LIST_NEXT(stcb, sctp_tcblist); 3201 if (it->stcb == NULL) { 3202 /* done with all asoc's in this assoc */ 3203 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 3204 it->inp = NULL; 3205 } else { 3206 it->inp = LIST_NEXT(inp, sctp_list); 3207 } 3208 } 3209 } 3210 3211 /* 3212 * Free the association after un-hashing the remote port. 3213 */ 3214 int 3215 sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfree, int from_location) 3216 { 3217 int i; 3218 struct sctp_association *asoc; 3219 struct sctp_nets *net, *prev; 3220 struct sctp_laddr *laddr; 3221 struct sctp_tmit_chunk *chk; 3222 struct sctp_asconf_addr *aparam; 3223 struct sctp_stream_reset_list *liste; 3224 struct sctp_queued_to_read *sq; 3225 struct sctp_stream_queue_pending *sp; 3226 sctp_sharedkey_t *shared_key; 3227 struct socket *so; 3228 int ccnt = 0; 3229 int s, cnt = 0; 3230 3231 /* first, lets purge the entry from the hash table. */ 3232 s = splnet(); 3233 3234 #ifdef SCTP_LOG_CLOSING 3235 sctp_log_closing(inp, stcb, 6); 3236 #endif 3237 if (stcb->asoc.state == 0) { 3238 #ifdef SCTP_LOG_CLOSING 3239 sctp_log_closing(inp, NULL, 7); 3240 #endif 3241 splx(s); 3242 /* there is no asoc, really TSNH :-0 */ 3243 return (1); 3244 } 3245 /* TEMP CODE */ 3246 if (stcb->freed_from_where == 0) { 3247 /* Only record the first place free happened from */ 3248 stcb->freed_from_where = from_location; 3249 } 3250 /* TEMP CODE */ 3251 3252 asoc = &stcb->asoc; 3253 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3254 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) 3255 /* nothing around */ 3256 so = NULL; 3257 else 3258 so = inp->sctp_socket; 3259 3260 /* 3261 * We used timer based freeing if a reader or writer is in the way. 3262 * So we first check if we are actually being called from a timer, 3263 * if so we abort early if a reader or writer is still in the way. 3264 */ 3265 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 3266 (from_inpcbfree == SCTP_NORMAL_PROC)) { 3267 /* 3268 * is it the timer driving us? if so are the reader/writers 3269 * gone? 
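* A non-zero refcnt means a reader or writer still holds the assoc, so below we simply re-arm the ASOCKILL timer and return without freeing anything.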
3270 */ 3271 if (stcb->asoc.refcnt) { 3272 /* nope, reader or writer in the way */ 3273 sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); 3274 /* no asoc destroyed */ 3275 SCTP_TCB_UNLOCK(stcb); 3276 splx(s); 3277 #ifdef SCTP_LOG_CLOSING 3278 sctp_log_closing(inp, stcb, 8); 3279 #endif 3280 return (0); 3281 } 3282 } 3283 /* now clean up any other timers */ 3284 SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer); 3285 SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); 3286 SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 3287 SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); 3288 SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); 3289 SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer); 3290 SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); 3291 3292 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3293 SCTP_OS_TIMER_STOP(&net->fr_timer.timer); 3294 SCTP_OS_TIMER_STOP(&net->rxt_timer.timer); 3295 SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); 3296 } 3297 /* Now the read queue needs to be cleaned up (only once) */ 3298 cnt = 0; 3299 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) { 3300 SCTP_INP_READ_LOCK(inp); 3301 TAILQ_FOREACH(sq, &inp->read_queue, next) { 3302 if (sq->stcb == stcb) { 3303 sq->do_not_ref_stcb = 1; 3304 sq->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 3305 /* 3306 * If there is no end, there never will be 3307 * now. 3308 */ 3309 if (sq->end_added == 0) { 3310 /* Held for PD-API clear that. */ 3311 sq->pdapi_aborted = 1; 3312 sq->held_length = 0; 3313 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) { 3314 /* 3315 * Need to add a PD-API 3316 * aborted indication. 3317 * Setting the control_pdapi 3318 * assures that it will be 3319 * added right after this 3320 * msg. 3321 */ 3322 stcb->asoc.control_pdapi = sq; 3323 sctp_notify_partial_delivery_indication(stcb, 3324 SCTP_PARTIAL_DELIVERY_ABORTED, 1); 3325 stcb->asoc.control_pdapi = NULL; 3326 } 3327 } 3328 /* Add an end to wake them */ 3329 sq->end_added = 1; 3330 cnt++; 3331 } 3332 } 3333 SCTP_INP_READ_UNLOCK(inp); 3334 if (stcb->block_entry) { 3335 cnt++; 3336 stcb->block_entry->error = ECONNRESET; 3337 stcb->block_entry = NULL; 3338 } 3339 } 3340 stcb->asoc.state |= SCTP_STATE_ABOUT_TO_BE_FREED; 3341 if ((from_inpcbfree != SCTP_PCBFREE_FORCE) && (stcb->asoc.refcnt)) { 3342 /* 3343 * reader or writer in the way, we have hopefully given him 3344 * something to chew on above. 3345 */ 3346 sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); 3347 SCTP_TCB_UNLOCK(stcb); 3348 if (so) { 3349 SCTP_INP_RLOCK(inp); 3350 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3351 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) 3352 /* nothing around */ 3353 so = NULL; 3354 if (so) { 3355 /* Wake any reader/writers */ 3356 sctp_sorwakeup(inp, so); 3357 sctp_sowwakeup(inp, so); 3358 } 3359 SCTP_INP_RUNLOCK(inp); 3360 3361 } 3362 splx(s); 3363 #ifdef SCTP_LOG_CLOSING 3364 sctp_log_closing(inp, stcb, 9); 3365 #endif 3366 /* no asoc destroyed */ 3367 return (0); 3368 } 3369 #ifdef SCTP_LOG_CLOSING 3370 sctp_log_closing(inp, stcb, 10); 3371 #endif 3372 /* 3373 * When I reach here, no others want to kill the assoc yet.. and I 3374 * own the lock. Now its possible an abort comes in when I do the 3375 * lock exchange below to grab all the locks to do the final take 3376 * out. to prevent this we increment the count, which will start a 3377 * timer and blow out above thus assuring us that we hold exclusive 3378 * killing of the asoc. 
Note that after getting back the TCB lock we 3379 * will go ahead and increment the counter back up and stop any 3380 * timer a passing stranger may have started :-S 3381 */ 3382 if (from_inpcbfree == SCTP_NORMAL_PROC) { 3383 atomic_add_int(&stcb->asoc.refcnt, 1); 3384 3385 SCTP_TCB_UNLOCK(stcb); 3386 3387 SCTP_ITERATOR_LOCK(); 3388 SCTP_INP_INFO_WLOCK(); 3389 SCTP_INP_WLOCK(inp); 3390 SCTP_TCB_LOCK(stcb); 3391 } 3392 /* Double check the GONE flag */ 3393 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3394 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) 3395 /* nothing around */ 3396 so = NULL; 3397 3398 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 3399 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3400 /* 3401 * For TCP type we need special handling when we are 3402 * connected. We also include the peel'ed off ones to. 3403 */ 3404 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 3405 inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED; 3406 inp->sctp_flags |= SCTP_PCB_FLAGS_WAS_CONNECTED; 3407 if (so) { 3408 SOCK_LOCK(so); 3409 if (so->so_rcv.sb_cc == 0) { 3410 so->so_state &= ~(SS_ISCONNECTING | 3411 SS_ISDISCONNECTING | 3412 SS_ISCONFIRMING | 3413 SS_ISCONNECTED); 3414 } 3415 SOCK_UNLOCK(so); 3416 sctp_sowwakeup(inp, so); 3417 sctp_sorwakeup(inp, so); 3418 wakeup(&so->so_timeo); 3419 } 3420 } 3421 } 3422 /* Stop any timer someone may have started */ 3423 SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 3424 /* 3425 * Make it invalid too, that way if its about to run it will abort 3426 * and return. 3427 */ 3428 asoc->strreset_timer.type = SCTP_TIMER_TYPE_NONE; 3429 sctp_iterator_asoc_being_freed(inp, stcb); 3430 /* re-increment the lock */ 3431 if (from_inpcbfree == SCTP_NORMAL_PROC) { 3432 atomic_add_int(&stcb->asoc.refcnt, -1); 3433 } 3434 /* now restop the timers to be sure - this is paranoia at is finest! */ 3435 SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer); 3436 SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); 3437 SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 3438 SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); 3439 SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer); 3440 SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); 3441 SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); 3442 3443 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3444 SCTP_OS_TIMER_STOP(&net->fr_timer.timer); 3445 SCTP_OS_TIMER_STOP(&net->rxt_timer.timer); 3446 SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); 3447 } 3448 asoc->state = 0; 3449 if (inp->sctp_tcbhash) { 3450 LIST_REMOVE(stcb, sctp_tcbhash); 3451 } 3452 if (stcb->asoc.in_restart_hash) { 3453 LIST_REMOVE(stcb, sctp_tcbrestarhash); 3454 } 3455 /* Now lets remove it from the list of ALL associations in the EP */ 3456 LIST_REMOVE(stcb, sctp_tcblist); 3457 if (from_inpcbfree == SCTP_NORMAL_PROC) { 3458 SCTP_INP_INCR_REF(inp); 3459 SCTP_INP_WUNLOCK(inp); 3460 SCTP_ITERATOR_UNLOCK(); 3461 } 3462 /* pull from vtag hash */ 3463 LIST_REMOVE(stcb, sctp_asocs); 3464 sctp_add_vtag_to_timewait(inp, asoc->my_vtag); 3465 3466 prev = NULL; 3467 /* 3468 * The chunk lists and such SHOULD be empty but we check them just 3469 * in case. 
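* The code below drains, in order: the per-stream output queues, the free strmoq pool, the pending stream-reset and reply entries, the free chunk list and then the send, sent, control and reassembly queues.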
3470 */ 3471 /* anything on the wheel needs to be removed */ 3472 for (i = 0; i < asoc->streamoutcnt; i++) { 3473 struct sctp_stream_out *outs; 3474 3475 outs = &asoc->strmout[i]; 3476 /* now clean up any chunks here */ 3477 sp = TAILQ_FIRST(&outs->outqueue); 3478 while (sp) { 3479 TAILQ_REMOVE(&outs->outqueue, sp, next); 3480 if (sp->data) { 3481 sctp_m_freem(sp->data); 3482 sp->data = NULL; 3483 sp->tail_mbuf = NULL; 3484 } 3485 sctp_free_remote_addr(sp->net); 3486 sctp_free_spbufspace(stcb, asoc, sp); 3487 /* Free the zone stuff */ 3488 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, sp); 3489 SCTP_DECR_STRMOQ_COUNT(); 3490 sp = TAILQ_FIRST(&outs->outqueue); 3491 } 3492 } 3493 3494 while ((sp = TAILQ_FIRST(&asoc->free_strmoq)) != NULL) { 3495 TAILQ_REMOVE(&asoc->free_strmoq, sp, next); 3496 if (sp->data) { 3497 sctp_m_freem(sp->data); 3498 sp->data = NULL; 3499 sp->tail_mbuf = NULL; 3500 } 3501 /* Free the zone stuff */ 3502 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, sp); 3503 SCTP_DECR_STRMOQ_COUNT(); 3504 atomic_add_int(&sctppcbinfo.ipi_free_strmoq, -1); 3505 } 3506 3507 while ((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) { 3508 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 3509 SCTP_FREE(liste); 3510 } 3511 3512 sq = TAILQ_FIRST(&asoc->pending_reply_queue); 3513 while (sq) { 3514 TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next); 3515 if (sq->data) { 3516 sctp_m_freem(sq->data); 3517 sq->data = NULL; 3518 } 3519 sctp_free_remote_addr(sq->whoFrom); 3520 sq->whoFrom = NULL; 3521 sq->stcb = NULL; 3522 /* Free the ctl entry */ 3523 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, sq); 3524 SCTP_DECR_READQ_COUNT(); 3525 sq = TAILQ_FIRST(&asoc->pending_reply_queue); 3526 } 3527 3528 chk = TAILQ_FIRST(&asoc->free_chunks); 3529 while (chk) { 3530 TAILQ_REMOVE(&asoc->free_chunks, chk, sctp_next); 3531 if (chk->data) { 3532 sctp_m_freem(chk->data); 3533 chk->data = NULL; 3534 } 3535 ccnt++; 3536 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk); 3537 SCTP_DECR_CHK_COUNT(); 3538 atomic_subtract_int(&sctppcbinfo.ipi_free_chunks, 1); 3539 asoc->free_chunk_cnt--; 3540 chk = TAILQ_FIRST(&asoc->free_chunks); 3541 } 3542 /* pending send queue SHOULD be empty */ 3543 if (!TAILQ_EMPTY(&asoc->send_queue)) { 3544 chk = TAILQ_FIRST(&asoc->send_queue); 3545 while (chk) { 3546 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 3547 if (chk->data) { 3548 sctp_m_freem(chk->data); 3549 chk->data = NULL; 3550 } 3551 ccnt++; 3552 sctp_free_remote_addr(chk->whoTo); 3553 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk); 3554 SCTP_DECR_CHK_COUNT(); 3555 chk = TAILQ_FIRST(&asoc->send_queue); 3556 } 3557 } 3558 /* 3559 if(ccnt) { 3560 printf("Freed %d from send_queue\n", ccnt); 3561 ccnt = 0; 3562 } 3563 */ 3564 /* sent queue SHOULD be empty */ 3565 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3566 chk = TAILQ_FIRST(&asoc->sent_queue); 3567 while (chk) { 3568 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3569 if (chk->data) { 3570 sctp_m_freem(chk->data); 3571 chk->data = NULL; 3572 } 3573 ccnt++; 3574 sctp_free_remote_addr(chk->whoTo); 3575 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk); 3576 SCTP_DECR_CHK_COUNT(); 3577 chk = TAILQ_FIRST(&asoc->sent_queue); 3578 } 3579 } 3580 /* 3581 if(ccnt) { 3582 printf("Freed %d from sent_queue\n", ccnt); 3583 ccnt = 0; 3584 } 3585 */ 3586 /* control queue MAY not be empty */ 3587 if (!TAILQ_EMPTY(&asoc->control_send_queue)) { 3588 chk = TAILQ_FIRST(&asoc->control_send_queue); 3589 while (chk) { 3590 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 3591 if (chk->data) { 3592 
sctp_m_freem(chk->data); 3593 chk->data = NULL; 3594 } 3595 ccnt++; 3596 sctp_free_remote_addr(chk->whoTo); 3597 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk); 3598 SCTP_DECR_CHK_COUNT(); 3599 chk = TAILQ_FIRST(&asoc->control_send_queue); 3600 } 3601 } 3602 /* 3603 if(ccnt) { 3604 printf("Freed %d from ctrl_queue\n", ccnt); 3605 ccnt = 0; 3606 } 3607 */ 3608 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 3609 chk = TAILQ_FIRST(&asoc->reasmqueue); 3610 while (chk) { 3611 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 3612 if (chk->data) { 3613 sctp_m_freem(chk->data); 3614 chk->data = NULL; 3615 } 3616 sctp_free_remote_addr(chk->whoTo); 3617 ccnt++; 3618 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk); 3619 SCTP_DECR_CHK_COUNT(); 3620 chk = TAILQ_FIRST(&asoc->reasmqueue); 3621 } 3622 } 3623 /* 3624 if(ccnt) { 3625 printf("Freed %d from reasm_queue\n", ccnt); 3626 ccnt = 0; 3627 } 3628 */ 3629 if (asoc->mapping_array) { 3630 SCTP_FREE(asoc->mapping_array); 3631 asoc->mapping_array = NULL; 3632 } 3633 /* the stream outs */ 3634 if (asoc->strmout) { 3635 SCTP_FREE(asoc->strmout); 3636 asoc->strmout = NULL; 3637 } 3638 asoc->streamoutcnt = 0; 3639 if (asoc->strmin) { 3640 struct sctp_queued_to_read *ctl; 3641 int i; 3642 3643 for (i = 0; i < asoc->streamincnt; i++) { 3644 if (!TAILQ_EMPTY(&asoc->strmin[i].inqueue)) { 3645 /* We have somethings on the streamin queue */ 3646 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 3647 while (ctl) { 3648 TAILQ_REMOVE(&asoc->strmin[i].inqueue, 3649 ctl, next); 3650 sctp_free_remote_addr(ctl->whoFrom); 3651 if (ctl->data) { 3652 sctp_m_freem(ctl->data); 3653 ctl->data = NULL; 3654 } 3655 /* 3656 * We don't free the address here 3657 * since all the net's were freed 3658 * above. 3659 */ 3660 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, ctl); 3661 SCTP_DECR_READQ_COUNT(); 3662 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 3663 } 3664 } 3665 } 3666 SCTP_FREE(asoc->strmin); 3667 asoc->strmin = NULL; 3668 } 3669 asoc->streamincnt = 0; 3670 while (!TAILQ_EMPTY(&asoc->nets)) { 3671 net = TAILQ_FIRST(&asoc->nets); 3672 /* pull from list */ 3673 if ((sctppcbinfo.ipi_count_raddr == 0) || (prev == net)) { 3674 #ifdef INVARIANTS 3675 panic("no net's left alloc'ed, or list points to itself"); 3676 #endif 3677 break; 3678 } 3679 prev = net; 3680 TAILQ_REMOVE(&asoc->nets, net, sctp_next); 3681 sctp_free_remote_addr(net); 3682 } 3683 3684 /* local addresses, if any */ 3685 while (!LIST_EMPTY(&asoc->sctp_local_addr_list)) { 3686 laddr = LIST_FIRST(&asoc->sctp_local_addr_list); 3687 LIST_REMOVE(laddr, sctp_nxt_addr); 3688 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr); 3689 SCTP_DECR_LADDR_COUNT(); 3690 } 3691 /* pending asconf (address) parameters */ 3692 while (!TAILQ_EMPTY(&asoc->asconf_queue)) { 3693 aparam = TAILQ_FIRST(&asoc->asconf_queue); 3694 TAILQ_REMOVE(&asoc->asconf_queue, aparam, next); 3695 SCTP_FREE(aparam); 3696 } 3697 if (asoc->last_asconf_ack_sent != NULL) { 3698 sctp_m_freem(asoc->last_asconf_ack_sent); 3699 asoc->last_asconf_ack_sent = NULL; 3700 } 3701 /* clean up auth stuff */ 3702 if (asoc->local_hmacs) 3703 sctp_free_hmaclist(asoc->local_hmacs); 3704 if (asoc->peer_hmacs) 3705 sctp_free_hmaclist(asoc->peer_hmacs); 3706 3707 if (asoc->local_auth_chunks) 3708 sctp_free_chunklist(asoc->local_auth_chunks); 3709 if (asoc->peer_auth_chunks) 3710 sctp_free_chunklist(asoc->peer_auth_chunks); 3711 3712 sctp_free_authinfo(&asoc->authinfo); 3713 3714 shared_key = LIST_FIRST(&asoc->shared_keys); 3715 while (shared_key) { 3716 LIST_REMOVE(shared_key, next); 3717 
sctp_free_sharedkey(shared_key); 3718 shared_key = LIST_FIRST(&asoc->shared_keys); 3719 } 3720 3721 /* Insert new items here :> */ 3722 3723 /* Get rid of LOCK */ 3724 SCTP_TCB_LOCK_DESTROY(stcb); 3725 SCTP_TCB_SEND_LOCK_DESTROY(stcb); 3726 if (from_inpcbfree == SCTP_NORMAL_PROC) { 3727 SCTP_INP_INFO_WUNLOCK(); 3728 SCTP_INP_RLOCK(inp); 3729 } 3730 #ifdef SCTP_TRACK_FREED_ASOCS 3731 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3732 /* now clean up the tasoc itself */ 3733 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb); 3734 SCTP_DECR_ASOC_COUNT(); 3735 } else { 3736 LIST_INSERT_HEAD(&inp->sctp_asoc_free_list, stcb, sctp_tcblist); 3737 } 3738 #else 3739 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb); 3740 SCTP_DECR_ASOC_COUNT(); 3741 #endif 3742 if (from_inpcbfree == SCTP_NORMAL_PROC) { 3743 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3744 /* 3745 * If its NOT the inp_free calling us AND sctp_close 3746 * as been called, we call back... 3747 */ 3748 SCTP_INP_RUNLOCK(inp); 3749 /* 3750 * This will start the kill timer (if we are the 3751 * lastone) since we hold an increment yet. But this 3752 * is the only safe way to do this since otherwise 3753 * if the socket closes at the same time we are here 3754 * we might collide in the cleanup. 3755 */ 3756 sctp_inpcb_free(inp, 0, 0); 3757 SCTP_INP_DECR_REF(inp); 3758 goto out_of; 3759 } else { 3760 /* The socket is still open. */ 3761 SCTP_INP_DECR_REF(inp); 3762 } 3763 } 3764 if (from_inpcbfree == SCTP_NORMAL_PROC) { 3765 SCTP_INP_RUNLOCK(inp); 3766 } 3767 out_of: 3768 splx(s); 3769 /* destroyed the asoc */ 3770 #ifdef SCTP_LOG_CLOSING 3771 sctp_log_closing(inp, NULL, 11); 3772 #endif 3773 return (1); 3774 } 3775 3776 3777 3778 /* 3779 * determine if a destination is "reachable" based upon the addresses bound 3780 * to the current endpoint (e.g. only v4 or v6 currently bound) 3781 */ 3782 /* 3783 * FIX: if we allow assoc-level bindx(), then this needs to be fixed to use 3784 * assoc level v4/v6 flags, as the assoc *may* not have the same address 3785 * types bound as its endpoint 3786 */ 3787 int 3788 sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr) 3789 { 3790 struct sctp_inpcb *inp; 3791 int answer; 3792 3793 /* 3794 * No locks here, the TCB, in all cases is already locked and an 3795 * assoc is up. There is either a INP lock by the caller applied (in 3796 * asconf case when deleting an address) or NOT in the HB case, 3797 * however if HB then the INP increment is up and the INP will not 3798 * be removed (on top of the fact that we have a TCB lock). So we 3799 * only want to read the sctp_flags, which is either bound-all or 3800 * not.. no protection needed since once an assoc is up you can't be 3801 * changing your binding. 3802 */ 3803 inp = stcb->sctp_ep; 3804 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 3805 /* if bound all, destination is not restricted */ 3806 /* 3807 * RRS: Question during lock work: Is this correct? If you 3808 * are bound-all you still might need to obey the V4--V6 3809 * flags??? IMO this bound-all stuff needs to be removed! 
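		 * For now, being bound-all just means we report the
		 * destination as reachable below.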
		 */
		return (1);
	}
	/* NOTE: all "scope" checks are done when local addresses are added */
	if (destaddr->sa_family == AF_INET6) {
		answer = inp->ip_inp.inp.inp_vflag & INP_IPV6;
	} else if (destaddr->sa_family == AF_INET) {
		answer = inp->ip_inp.inp.inp_vflag & INP_IPV4;
	} else {
		/* invalid family, so it's unreachable */
		answer = 0;
	}
	return (answer);
}

/*
 * update the inp_vflags on an endpoint
 */
static void
sctp_update_ep_vflag(struct sctp_inpcb *inp)
{
	struct sctp_laddr *laddr;

	/* first clear the flag */
	inp->ip_inp.inp.inp_vflag = 0;
	/* set the flag based on addresses on the ep list */
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_PCB1) {
				printf("An ounce of prevention is worth a pound of cure\n");
			}
#endif				/* SCTP_DEBUG */
			continue;
		}
		if (laddr->ifa->ifa_addr == NULL) {
			continue;
		}
		if (laddr->ifa->ifa_addr->sa_family == AF_INET6) {
			inp->ip_inp.inp.inp_vflag |= INP_IPV6;
		} else if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
			inp->ip_inp.inp.inp_vflag |= INP_IPV4;
		}
	}
}

/*
 * Add the address to the endpoint local address list. There is nothing to be
 * done if we are bound to all addresses.
 */
int
sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
{
	struct sctp_laddr *laddr;
	int fnd, error;

	fnd = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* You are already bound to all. You have it already */
		return (0);
	}
	if (ifa->ifa_addr->sa_family == AF_INET6) {
		struct in6_ifaddr *ifa6;

		ifa6 = (struct in6_ifaddr *)ifa;
		if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
		    IN6_IFF_DEPRECATED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY))
			/* Can't bind a non-existent addr. */
			return (-1);
	}
	/* first, is it already present? */
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == ifa) {
			fnd = 1;
			break;
		}
	}

	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd == 0)) {
		/* Not bound to all */
		error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
		if (error != 0)
			return (error);
		inp->laddr_count++;
		/* update inp_vflag flags */
		if (ifa->ifa_addr->sa_family == AF_INET6) {
			inp->ip_inp.inp.inp_vflag |= INP_IPV6;
		} else if (ifa->ifa_addr->sa_family == AF_INET) {
			inp->ip_inp.inp.inp_vflag |= INP_IPV4;
		}
	}
	return (0);
}


/*
 * select a new (hopefully reachable) destination net (should only be used
 * when we deleted an ep addr that is the only usable source address to reach
 * the destination net)
 */
static void
sctp_select_primary_destination(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		/* for now, we'll just pick a reachable one we find */
		if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
			continue;
		if (sctp_destination_is_reachable(stcb,
		    (struct sockaddr *)&net->ro._l_addr)) {
			/* found a reachable destination */
			stcb->asoc.primary_destination = net;
		}
	}
	/* I can't get there from here! ...we're gonna die shortly...
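	 * If nothing is reachable, the old primary destination is simply
	 * left in place.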
*/ 3927 } 3928 3929 3930 /* 3931 * Delete the address from the endpoint local address list There is nothing 3932 * to be done if we are bound to all addresses 3933 */ 3934 int 3935 sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa) 3936 { 3937 struct sctp_laddr *laddr; 3938 int fnd; 3939 3940 fnd = 0; 3941 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 3942 /* You are already bound to all. You have it already */ 3943 return (EINVAL); 3944 } 3945 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 3946 if (laddr->ifa == ifa) { 3947 fnd = 1; 3948 break; 3949 } 3950 } 3951 if (fnd && (inp->laddr_count < 2)) { 3952 /* can't delete unless there are at LEAST 2 addresses */ 3953 return (-1); 3954 } 3955 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd)) { 3956 /* 3957 * clean up any use of this address go through our 3958 * associations and clear any last_used_address that match 3959 * this one for each assoc, see if a new primary_destination 3960 * is needed 3961 */ 3962 struct sctp_tcb *stcb; 3963 3964 /* clean up "next_addr_touse" */ 3965 if (inp->next_addr_touse == laddr) 3966 /* delete this address */ 3967 inp->next_addr_touse = NULL; 3968 3969 /* clean up "last_used_address" */ 3970 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 3971 if (stcb->asoc.last_used_address == laddr) 3972 /* delete this address */ 3973 stcb->asoc.last_used_address = NULL; 3974 } /* for each tcb */ 3975 3976 /* remove it from the ep list */ 3977 sctp_remove_laddr(laddr); 3978 inp->laddr_count--; 3979 /* update inp_vflag flags */ 3980 sctp_update_ep_vflag(inp); 3981 /* select a new primary destination if needed */ 3982 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 3983 /* 3984 * presume caller (sctp_asconf.c) already owns INP 3985 * lock 3986 */ 3987 SCTP_TCB_LOCK(stcb); 3988 if (sctp_destination_is_reachable(stcb, 3989 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr) == 0) { 3990 sctp_select_primary_destination(stcb); 3991 } 3992 SCTP_TCB_UNLOCK(stcb); 3993 } /* for each tcb */ 3994 } 3995 return (0); 3996 } 3997 3998 /* 3999 * Add the addr to the TCB local address list For the BOUNDALL or dynamic 4000 * case, this is a "pending" address list (eg. addresses waiting for an 4001 * ASCONF-ACK response) For the subset binding, static case, this is a 4002 * "valid" address list 4003 */ 4004 int 4005 sctp_add_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa) 4006 { 4007 struct sctp_inpcb *inp; 4008 struct sctp_laddr *laddr; 4009 int error; 4010 4011 /* 4012 * Assumes TCP is locked.. and possiblye the INP. May need to 4013 * confirm/fix that if we need it and is not the case. 4014 */ 4015 inp = stcb->sctp_ep; 4016 if (ifa->ifa_addr->sa_family == AF_INET6) { 4017 struct in6_ifaddr *ifa6; 4018 4019 ifa6 = (struct in6_ifaddr *)ifa; 4020 if (ifa6->ia6_flags & (IN6_IFF_DETACHED | 4021 /* IN6_IFF_DEPRECATED | */ 4022 IN6_IFF_ANYCAST | 4023 IN6_IFF_NOTREADY)) 4024 /* Can't bind a non-existent addr. */ 4025 return (-1); 4026 } 4027 /* does the address already exist? 
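	   If it is, we return -1 rather than insert a duplicate entry.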
*/ 4028 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) { 4029 if (laddr->ifa == ifa) { 4030 return (-1); 4031 } 4032 } 4033 4034 /* add to the list */ 4035 error = sctp_insert_laddr(&stcb->asoc.sctp_local_addr_list, ifa); 4036 if (error != 0) 4037 return (error); 4038 return (0); 4039 } 4040 4041 /* 4042 * insert an laddr entry with the given ifa for the desired list 4043 */ 4044 int 4045 sctp_insert_laddr(struct sctpladdr *list, struct ifaddr *ifa) 4046 { 4047 struct sctp_laddr *laddr; 4048 int s; 4049 4050 s = splnet(); 4051 4052 laddr = (struct sctp_laddr *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr); 4053 if (laddr == NULL) { 4054 /* out of memory? */ 4055 splx(s); 4056 return (EINVAL); 4057 } 4058 SCTP_INCR_LADDR_COUNT(); 4059 bzero(laddr, sizeof(*laddr)); 4060 laddr->ifa = ifa; 4061 /* insert it */ 4062 LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr); 4063 4064 splx(s); 4065 return (0); 4066 } 4067 4068 /* 4069 * Remove an laddr entry from the local address list (on an assoc) 4070 */ 4071 void 4072 sctp_remove_laddr(struct sctp_laddr *laddr) 4073 { 4074 int s; 4075 4076 s = splnet(); 4077 /* remove from the list */ 4078 LIST_REMOVE(laddr, sctp_nxt_addr); 4079 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr); 4080 SCTP_DECR_LADDR_COUNT(); 4081 splx(s); 4082 } 4083 4084 /* 4085 * Remove an address from the TCB local address list 4086 */ 4087 int 4088 sctp_del_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa) 4089 { 4090 struct sctp_inpcb *inp; 4091 struct sctp_laddr *laddr; 4092 4093 /* 4094 * This is called by asconf work. It is assumed that a) The TCB is 4095 * locked and b) The INP is locked. This is true in as much as I can 4096 * trace through the entry asconf code where I did these locks. 4097 * Again, the ASCONF code is a bit different in that it does lock 4098 * the INP during its work often times. This must be since we don't 4099 * want other proc's looking up things while what they are looking 4100 * up is changing :-D 4101 */ 4102 4103 inp = stcb->sctp_ep; 4104 /* if subset bound and don't allow ASCONF's, can't delete last */ 4105 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && 4106 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF) == 0)) { 4107 if (stcb->asoc.numnets < 2) { 4108 /* can't delete last address */ 4109 return (-1); 4110 } 4111 } 4112 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) { 4113 /* remove the address if it exists */ 4114 if (laddr->ifa == NULL) 4115 continue; 4116 if (laddr->ifa == ifa) { 4117 sctp_remove_laddr(laddr); 4118 return (0); 4119 } 4120 } 4121 4122 /* address not found! */ 4123 return (-1); 4124 } 4125 4126 /* 4127 * Remove an address from the TCB local address list lookup using a sockaddr 4128 * addr 4129 */ 4130 int 4131 sctp_del_local_addr_assoc_sa(struct sctp_tcb *stcb, struct sockaddr *sa) 4132 { 4133 struct sctp_inpcb *inp; 4134 struct sctp_laddr *laddr; 4135 struct sockaddr *l_sa; 4136 4137 /* 4138 * This function I find does not seem to have a caller. As such we 4139 * NEED TO DELETE this code. If we do find a caller, the caller MUST 4140 * have locked the TCB at the least and probably the INP as well. 
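	 * The lookup below matches on address family and address bits only;
	 * the port in the supplied sockaddr is ignored.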
4141 */ 4142 inp = stcb->sctp_ep; 4143 /* if subset bound and don't allow ASCONF's, can't delete last */ 4144 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && 4145 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF) == 0)) { 4146 if (stcb->asoc.numnets < 2) { 4147 /* can't delete last address */ 4148 return (-1); 4149 } 4150 } 4151 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) { 4152 /* make sure the address exists */ 4153 if (laddr->ifa == NULL) 4154 continue; 4155 if (laddr->ifa->ifa_addr == NULL) 4156 continue; 4157 4158 l_sa = laddr->ifa->ifa_addr; 4159 if (l_sa->sa_family == AF_INET6) { 4160 /* IPv6 address */ 4161 struct sockaddr_in6 *sin1, *sin2; 4162 4163 sin1 = (struct sockaddr_in6 *)l_sa; 4164 sin2 = (struct sockaddr_in6 *)sa; 4165 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr, 4166 sizeof(struct in6_addr)) == 0) { 4167 /* matched */ 4168 sctp_remove_laddr(laddr); 4169 return (0); 4170 } 4171 } else if (l_sa->sa_family == AF_INET) { 4172 /* IPv4 address */ 4173 struct sockaddr_in *sin1, *sin2; 4174 4175 sin1 = (struct sockaddr_in *)l_sa; 4176 sin2 = (struct sockaddr_in *)sa; 4177 if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) { 4178 /* matched */ 4179 sctp_remove_laddr(laddr); 4180 return (0); 4181 } 4182 } else { 4183 /* invalid family */ 4184 return (-1); 4185 } 4186 } /* end foreach */ 4187 /* address not found! */ 4188 return (-1); 4189 } 4190 4191 static char sctp_pcb_initialized = 0; 4192 4193 /* 4194 * Temporarily remove for __APPLE__ until we use the Tiger equivalents 4195 */ 4196 /* sysctl */ 4197 static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC; 4198 static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR; 4199 4200 void 4201 sctp_pcb_init() 4202 { 4203 /* 4204 * SCTP initialization for the PCB structures should be called by 4205 * the sctp_init() funciton. 4206 */ 4207 int i; 4208 4209 if (sctp_pcb_initialized != 0) { 4210 /* error I was called twice */ 4211 return; 4212 } 4213 sctp_pcb_initialized = 1; 4214 4215 bzero(&sctpstat, sizeof(struct sctpstat)); 4216 4217 /* init the empty list of (All) Endpoints */ 4218 LIST_INIT(&sctppcbinfo.listhead); 4219 4220 /* init the iterator head */ 4221 LIST_INIT(&sctppcbinfo.iteratorhead); 4222 4223 /* init the hash table of endpoints */ 4224 TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &sctp_hashtblsize); 4225 TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &sctp_pcbtblsize); 4226 TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &sctp_chunkscale); 4227 4228 sctppcbinfo.sctp_asochash = hashinit((sctp_hashtblsize * 31), 4229 M_PCB, 4230 &sctppcbinfo.hashasocmark); 4231 4232 sctppcbinfo.sctp_ephash = hashinit(sctp_hashtblsize, 4233 M_PCB, 4234 &sctppcbinfo.hashmark); 4235 4236 sctppcbinfo.sctp_tcpephash = hashinit(sctp_hashtblsize, 4237 M_PCB, 4238 &sctppcbinfo.hashtcpmark); 4239 4240 sctppcbinfo.hashtblsize = sctp_hashtblsize; 4241 4242 /* 4243 * init the small hash table we use to track restarted asoc's 4244 */ 4245 sctppcbinfo.sctp_restarthash = hashinit(SCTP_STACK_VTAG_HASH_SIZE, 4246 M_PCB, 4247 &sctppcbinfo.hashrestartmark); 4248 4249 /* init the zones */ 4250 /* 4251 * FIX ME: Should check for NULL returns, but if it does fail we are 4252 * doomed to panic anyways... add later maybe. 
4253 */ 4254 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_ep, "sctp_ep", 4255 sizeof(struct sctp_inpcb), maxsockets); 4256 4257 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asoc, "sctp_asoc", 4258 sizeof(struct sctp_tcb), sctp_max_number_of_assoc); 4259 4260 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_laddr, "sctp_laddr", 4261 sizeof(struct sctp_laddr), 4262 (sctp_max_number_of_assoc * sctp_scale_up_for_address)); 4263 4264 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_net, "sctp_raddr", 4265 sizeof(struct sctp_nets), 4266 (sctp_max_number_of_assoc * sctp_scale_up_for_address)); 4267 4268 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_chunk, "sctp_chunk", 4269 sizeof(struct sctp_tmit_chunk), 4270 (sctp_max_number_of_assoc * sctp_chunkscale)); 4271 4272 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_readq, "sctp_readq", 4273 sizeof(struct sctp_queued_to_read), 4274 (sctp_max_number_of_assoc * sctp_chunkscale)); 4275 4276 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_strmoq, "sctp_stream_msg_out", 4277 sizeof(struct sctp_stream_queue_pending), 4278 (sctp_max_number_of_assoc * sctp_chunkscale)); 4279 4280 /* Master Lock INIT for info structure */ 4281 SCTP_INP_INFO_LOCK_INIT(); 4282 SCTP_STATLOG_INIT_LOCK(); 4283 SCTP_ITERATOR_LOCK_INIT(); 4284 4285 SCTP_IPI_COUNT_INIT(); 4286 SCTP_IPI_ADDR_INIT(); 4287 LIST_INIT(&sctppcbinfo.addr_wq); 4288 4289 /* not sure if we need all the counts */ 4290 sctppcbinfo.ipi_count_ep = 0; 4291 /* assoc/tcb zone info */ 4292 sctppcbinfo.ipi_count_asoc = 0; 4293 /* local addrlist zone info */ 4294 sctppcbinfo.ipi_count_laddr = 0; 4295 /* remote addrlist zone info */ 4296 sctppcbinfo.ipi_count_raddr = 0; 4297 /* chunk info */ 4298 sctppcbinfo.ipi_count_chunk = 0; 4299 4300 /* socket queue zone info */ 4301 sctppcbinfo.ipi_count_readq = 0; 4302 4303 /* stream out queue cont */ 4304 sctppcbinfo.ipi_count_strmoq = 0; 4305 4306 sctppcbinfo.ipi_free_strmoq = 0; 4307 sctppcbinfo.ipi_free_chunks = 0; 4308 4309 SCTP_OS_TIMER_INIT(&sctppcbinfo.addr_wq_timer.timer); 4310 4311 /* port stuff */ 4312 sctppcbinfo.lastlow = ipport_firstauto; 4313 /* Init the TIMEWAIT list */ 4314 for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) { 4315 LIST_INIT(&sctppcbinfo.vtag_timewait[i]); 4316 } 4317 4318 } 4319 4320 4321 int 4322 sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, 4323 int iphlen, int offset, int limit, struct sctphdr *sh, 4324 struct sockaddr *altsa) 4325 { 4326 /* 4327 * grub through the INIT pulling addresses and loading them to the 4328 * nets structure in the asoc. The from address in the mbuf should 4329 * also be loaded (if it is not already). This routine can be called 4330 * with either INIT or INIT-ACK's as long as the m points to the IP 4331 * packet and the offset points to the beginning of the parameters. 4332 */ 4333 struct sctp_inpcb *inp, *l_inp; 4334 struct sctp_nets *net, *net_tmp; 4335 struct ip *iph; 4336 struct sctp_paramhdr *phdr, parm_buf; 4337 struct sctp_tcb *stcb_tmp; 4338 uint16_t ptype, plen; 4339 struct sockaddr *sa; 4340 struct sockaddr_storage dest_store; 4341 struct sockaddr *local_sa = (struct sockaddr *)&dest_store; 4342 struct sockaddr_in sin; 4343 struct sockaddr_in6 sin6; 4344 uint8_t store[384]; 4345 struct sctp_auth_random *random = NULL; 4346 uint16_t random_len = 0; 4347 struct sctp_auth_hmac_algo *hmacs = NULL; 4348 uint16_t hmacs_len = 0; 4349 struct sctp_auth_chunk_list *chunks = NULL; 4350 uint16_t num_chunks = 0; 4351 sctp_key_t *new_key; 4352 uint32_t keylen; 4353 int got_random = 0, got_hmacs = 0, got_chklist = 0; 4354 4355 /* First get the destination address setup too. 
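	   We pre-fill a v4 and a v6 sockaddr with the peer's port; whichever
	   matches the packet's IP version gets the packet's source address
	   (or altsa when processing a cookie), while local_sa gets the
	   packet's destination.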
*/ 4356 memset(&sin, 0, sizeof(sin)); 4357 memset(&sin6, 0, sizeof(sin6)); 4358 4359 sin.sin_family = AF_INET; 4360 sin.sin_len = sizeof(sin); 4361 sin.sin_port = stcb->rport; 4362 4363 sin6.sin6_family = AF_INET6; 4364 sin6.sin6_len = sizeof(struct sockaddr_in6); 4365 sin6.sin6_port = stcb->rport; 4366 if (altsa == NULL) { 4367 iph = mtod(m, struct ip *); 4368 if (iph->ip_v == IPVERSION) { 4369 /* its IPv4 */ 4370 struct sockaddr_in *sin_2; 4371 4372 sin_2 = (struct sockaddr_in *)(local_sa); 4373 memset(sin_2, 0, sizeof(sin)); 4374 sin_2->sin_family = AF_INET; 4375 sin_2->sin_len = sizeof(sin); 4376 sin_2->sin_port = sh->dest_port; 4377 sin_2->sin_addr.s_addr = iph->ip_dst.s_addr; 4378 sin.sin_addr = iph->ip_src; 4379 sa = (struct sockaddr *)&sin; 4380 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 4381 /* its IPv6 */ 4382 struct ip6_hdr *ip6; 4383 struct sockaddr_in6 *sin6_2; 4384 4385 ip6 = mtod(m, struct ip6_hdr *); 4386 sin6_2 = (struct sockaddr_in6 *)(local_sa); 4387 memset(sin6_2, 0, sizeof(sin6)); 4388 sin6_2->sin6_family = AF_INET6; 4389 sin6_2->sin6_len = sizeof(struct sockaddr_in6); 4390 sin6_2->sin6_port = sh->dest_port; 4391 sin6.sin6_addr = ip6->ip6_src; 4392 sa = (struct sockaddr *)&sin6; 4393 } else { 4394 sa = NULL; 4395 } 4396 } else { 4397 /* 4398 * For cookies we use the src address NOT from the packet 4399 * but from the original INIT 4400 */ 4401 sa = altsa; 4402 } 4403 /* Turn off ECN until we get through all params */ 4404 stcb->asoc.ecn_allowed = 0; 4405 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 4406 /* mark all addresses that we have currently on the list */ 4407 net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC; 4408 } 4409 /* does the source address already exist? if so skip it */ 4410 l_inp = inp = stcb->sctp_ep; 4411 4412 atomic_add_int(&stcb->asoc.refcnt, 1); 4413 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, local_sa, stcb); 4414 atomic_add_int(&stcb->asoc.refcnt, -1); 4415 4416 if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) { 4417 /* we must add the source address */ 4418 /* no scope set here since we have a tcb already. */ 4419 if ((sa->sa_family == AF_INET) && 4420 (stcb->asoc.ipv4_addr_legal)) { 4421 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_2)) { 4422 return (-1); 4423 } 4424 } else if ((sa->sa_family == AF_INET6) && 4425 (stcb->asoc.ipv6_addr_legal)) { 4426 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) { 4427 return (-2); 4428 } 4429 } 4430 } else { 4431 if (net_tmp != NULL && stcb_tmp == stcb) { 4432 net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC; 4433 } else if (stcb_tmp != stcb) { 4434 /* It belongs to another association? */ 4435 SCTP_TCB_UNLOCK(stcb_tmp); 4436 return (-3); 4437 } 4438 } 4439 if (stcb->asoc.state == 0) { 4440 /* the assoc was freed? */ 4441 return (-4); 4442 } 4443 /* now we must go through each of the params. 
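	   We stop when a parameter would run past the limit, has a zero
	   length, or is an unrecognized parameter whose type says processing
	   must stop.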
*/ 4444 phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf)); 4445 while (phdr) { 4446 ptype = ntohs(phdr->param_type); 4447 plen = ntohs(phdr->param_length); 4448 /* 4449 * printf("ptype => %0x, plen => %d\n", (uint32_t)ptype, 4450 * (int)plen); 4451 */ 4452 if (offset + plen > limit) { 4453 break; 4454 } 4455 if (plen == 0) { 4456 break; 4457 } 4458 if (ptype == SCTP_IPV4_ADDRESS) { 4459 if (stcb->asoc.ipv4_addr_legal) { 4460 struct sctp_ipv4addr_param *p4, p4_buf; 4461 4462 /* ok get the v4 address and check/add */ 4463 phdr = sctp_get_next_param(m, offset, 4464 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); 4465 if (plen != sizeof(struct sctp_ipv4addr_param) || 4466 phdr == NULL) { 4467 return (-5); 4468 } 4469 p4 = (struct sctp_ipv4addr_param *)phdr; 4470 sin.sin_addr.s_addr = p4->addr; 4471 sa = (struct sockaddr *)&sin; 4472 inp = stcb->sctp_ep; 4473 atomic_add_int(&stcb->asoc.refcnt, 1); 4474 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net, 4475 local_sa, stcb); 4476 atomic_add_int(&stcb->asoc.refcnt, -1); 4477 4478 if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || 4479 inp == NULL) { 4480 /* we must add the source address */ 4481 /* 4482 * no scope set since we have a tcb 4483 * already 4484 */ 4485 4486 /* 4487 * we must validate the state again 4488 * here 4489 */ 4490 if (stcb->asoc.state == 0) { 4491 /* the assoc was freed? */ 4492 return (-7); 4493 } 4494 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_4)) { 4495 return (-8); 4496 } 4497 } else if (stcb_tmp == stcb) { 4498 if (stcb->asoc.state == 0) { 4499 /* the assoc was freed? */ 4500 return (-10); 4501 } 4502 if (net != NULL) { 4503 /* clear flag */ 4504 net->dest_state &= 4505 ~SCTP_ADDR_NOT_IN_ASSOC; 4506 } 4507 } else { 4508 /* 4509 * strange, address is in another 4510 * assoc? straighten out locks. 4511 */ 4512 if (stcb->asoc.state == 0) { 4513 /* the assoc was freed? */ 4514 return (-12); 4515 } 4516 return (-13); 4517 } 4518 } 4519 } else if (ptype == SCTP_IPV6_ADDRESS) { 4520 if (stcb->asoc.ipv6_addr_legal) { 4521 /* ok get the v6 address and check/add */ 4522 struct sctp_ipv6addr_param *p6, p6_buf; 4523 4524 phdr = sctp_get_next_param(m, offset, 4525 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); 4526 if (plen != sizeof(struct sctp_ipv6addr_param) || 4527 phdr == NULL) { 4528 return (-14); 4529 } 4530 p6 = (struct sctp_ipv6addr_param *)phdr; 4531 memcpy((caddr_t)&sin6.sin6_addr, p6->addr, 4532 sizeof(p6->addr)); 4533 sa = (struct sockaddr *)&sin6; 4534 inp = stcb->sctp_ep; 4535 atomic_add_int(&stcb->asoc.refcnt, 1); 4536 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net, 4537 local_sa, stcb); 4538 atomic_add_int(&stcb->asoc.refcnt, -1); 4539 if (stcb_tmp == NULL && (inp == stcb->sctp_ep || 4540 inp == NULL)) { 4541 /* 4542 * we must validate the state again 4543 * here 4544 */ 4545 if (stcb->asoc.state == 0) { 4546 /* the assoc was freed? */ 4547 return (-16); 4548 } 4549 /* 4550 * we must add the address, no scope 4551 * set 4552 */ 4553 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_5)) { 4554 return (-17); 4555 } 4556 } else if (stcb_tmp == stcb) { 4557 /* 4558 * we must validate the state again 4559 * here 4560 */ 4561 if (stcb->asoc.state == 0) { 4562 /* the assoc was freed? */ 4563 return (-19); 4564 } 4565 if (net != NULL) { 4566 /* clear flag */ 4567 net->dest_state &= 4568 ~SCTP_ADDR_NOT_IN_ASSOC; 4569 } 4570 } else { 4571 /* 4572 * strange, address is in another 4573 * assoc? straighten out locks. 
4574 */ 4575 if (stcb->asoc.state == 0) { 4576 /* the assoc was freed? */ 4577 return (-21); 4578 } 4579 return (-22); 4580 } 4581 } 4582 } else if (ptype == SCTP_ECN_CAPABLE) { 4583 stcb->asoc.ecn_allowed = 1; 4584 } else if (ptype == SCTP_ULP_ADAPTATION) { 4585 if (stcb->asoc.state != SCTP_STATE_OPEN) { 4586 struct sctp_adaptation_layer_indication ai, 4587 *aip; 4588 4589 phdr = sctp_get_next_param(m, offset, 4590 (struct sctp_paramhdr *)&ai, sizeof(ai)); 4591 aip = (struct sctp_adaptation_layer_indication *)phdr; 4592 sctp_ulp_notify(SCTP_NOTIFY_ADAPTATION_INDICATION, 4593 stcb, ntohl(aip->indication), NULL); 4594 } 4595 } else if (ptype == SCTP_SET_PRIM_ADDR) { 4596 struct sctp_asconf_addr_param lstore, *fee; 4597 struct sctp_asconf_addrv4_param *fii; 4598 int lptype; 4599 struct sockaddr *lsa = NULL; 4600 4601 stcb->asoc.peer_supports_asconf = 1; 4602 if (plen > sizeof(lstore)) { 4603 return (-23); 4604 } 4605 phdr = sctp_get_next_param(m, offset, 4606 (struct sctp_paramhdr *)&lstore, plen); 4607 if (phdr == NULL) { 4608 return (-24); 4609 } 4610 fee = (struct sctp_asconf_addr_param *)phdr; 4611 lptype = ntohs(fee->addrp.ph.param_type); 4612 if (lptype == SCTP_IPV4_ADDRESS) { 4613 if (plen != 4614 sizeof(struct sctp_asconf_addrv4_param)) { 4615 printf("Sizeof setprim in init/init ack not %d but %d - ignored\n", 4616 (int)sizeof(struct sctp_asconf_addrv4_param), 4617 plen); 4618 } else { 4619 fii = (struct sctp_asconf_addrv4_param *)fee; 4620 sin.sin_addr.s_addr = fii->addrp.addr; 4621 lsa = (struct sockaddr *)&sin; 4622 } 4623 } else if (lptype == SCTP_IPV6_ADDRESS) { 4624 if (plen != 4625 sizeof(struct sctp_asconf_addr_param)) { 4626 printf("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n", 4627 (int)sizeof(struct sctp_asconf_addr_param), 4628 plen); 4629 } else { 4630 memcpy(sin6.sin6_addr.s6_addr, 4631 fee->addrp.addr, 4632 sizeof(fee->addrp.addr)); 4633 lsa = (struct sockaddr *)&sin6; 4634 } 4635 } 4636 if (lsa) { 4637 sctp_set_primary_addr(stcb, sa, NULL); 4638 } 4639 } else if (ptype == SCTP_PRSCTP_SUPPORTED) { 4640 /* Peer supports pr-sctp */ 4641 stcb->asoc.peer_supports_prsctp = 1; 4642 } else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) { 4643 /* A supported extension chunk */ 4644 struct sctp_supported_chunk_types_param *pr_supported; 4645 uint8_t local_store[128]; 4646 int num_ent, i; 4647 4648 phdr = sctp_get_next_param(m, offset, 4649 (struct sctp_paramhdr *)&local_store, plen); 4650 if (phdr == NULL) { 4651 return (-25); 4652 } 4653 stcb->asoc.peer_supports_asconf = 0; 4654 stcb->asoc.peer_supports_prsctp = 0; 4655 stcb->asoc.peer_supports_pktdrop = 0; 4656 stcb->asoc.peer_supports_strreset = 0; 4657 stcb->asoc.peer_supports_auth = 0; 4658 pr_supported = (struct sctp_supported_chunk_types_param *)phdr; 4659 num_ent = plen - sizeof(struct sctp_paramhdr); 4660 for (i = 0; i < num_ent; i++) { 4661 switch (pr_supported->chunk_types[i]) { 4662 case SCTP_ASCONF: 4663 case SCTP_ASCONF_ACK: 4664 stcb->asoc.peer_supports_asconf = 1; 4665 break; 4666 case SCTP_FORWARD_CUM_TSN: 4667 stcb->asoc.peer_supports_prsctp = 1; 4668 break; 4669 case SCTP_PACKET_DROPPED: 4670 stcb->asoc.peer_supports_pktdrop = 1; 4671 break; 4672 case SCTP_STREAM_RESET: 4673 stcb->asoc.peer_supports_strreset = 1; 4674 break; 4675 case SCTP_AUTHENTICATION: 4676 stcb->asoc.peer_supports_auth = 1; 4677 break; 4678 default: 4679 /* one I have not learned yet */ 4680 break; 4681 4682 } 4683 } 4684 } else if (ptype == SCTP_ECN_NONCE_SUPPORTED) { 4685 /* Peer supports ECN-nonce */ 4686 
stcb->asoc.peer_supports_ecn_nonce = 1; 4687 stcb->asoc.ecn_nonce_allowed = 1; 4688 } else if (ptype == SCTP_RANDOM) { 4689 if (plen > sizeof(store)) 4690 break; 4691 if (got_random) { 4692 /* already processed a RANDOM */ 4693 goto next_param; 4694 } 4695 phdr = sctp_get_next_param(m, offset, 4696 (struct sctp_paramhdr *)store, 4697 plen); 4698 if (phdr == NULL) 4699 return (-26); 4700 random = (struct sctp_auth_random *)phdr; 4701 random_len = plen - sizeof(*random); 4702 /* enforce the random length */ 4703 if (random_len != SCTP_AUTH_RANDOM_SIZE_REQUIRED) { 4704 #ifdef SCTP_DEBUG 4705 if (sctp_debug_on & SCTP_DEBUG_AUTH1) 4706 printf("SCTP: invalid RANDOM len\n"); 4707 #endif 4708 return (-27); 4709 } 4710 got_random = 1; 4711 } else if (ptype == SCTP_HMAC_LIST) { 4712 int num_hmacs; 4713 int i; 4714 4715 if (plen > sizeof(store)) 4716 break; 4717 if (got_hmacs) { 4718 /* already processed a HMAC list */ 4719 goto next_param; 4720 } 4721 phdr = sctp_get_next_param(m, offset, 4722 (struct sctp_paramhdr *)store, 4723 plen); 4724 if (phdr == NULL) 4725 return (-28); 4726 hmacs = (struct sctp_auth_hmac_algo *)phdr; 4727 hmacs_len = plen - sizeof(*hmacs); 4728 num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]); 4729 /* validate the hmac list */ 4730 if (sctp_verify_hmac_param(hmacs, num_hmacs)) { 4731 return (-29); 4732 } 4733 if (stcb->asoc.peer_hmacs != NULL) 4734 sctp_free_hmaclist(stcb->asoc.peer_hmacs); 4735 stcb->asoc.peer_hmacs = sctp_alloc_hmaclist(num_hmacs); 4736 if (stcb->asoc.peer_hmacs != NULL) { 4737 for (i = 0; i < num_hmacs; i++) { 4738 sctp_auth_add_hmacid(stcb->asoc.peer_hmacs, 4739 ntohs(hmacs->hmac_ids[i])); 4740 } 4741 } 4742 got_hmacs = 1; 4743 } else if (ptype == SCTP_CHUNK_LIST) { 4744 int i; 4745 4746 if (plen > sizeof(store)) 4747 break; 4748 if (got_chklist) { 4749 /* already processed a Chunks list */ 4750 goto next_param; 4751 } 4752 phdr = sctp_get_next_param(m, offset, 4753 (struct sctp_paramhdr *)store, 4754 plen); 4755 if (phdr == NULL) 4756 return (-30); 4757 chunks = (struct sctp_auth_chunk_list *)phdr; 4758 num_chunks = plen - sizeof(*chunks); 4759 if (stcb->asoc.peer_auth_chunks != NULL) 4760 sctp_clear_chunklist(stcb->asoc.peer_auth_chunks); 4761 else 4762 stcb->asoc.peer_auth_chunks = sctp_alloc_chunklist(); 4763 for (i = 0; i < num_chunks; i++) { 4764 sctp_auth_add_chunk(chunks->chunk_types[i], 4765 stcb->asoc.peer_auth_chunks); 4766 } 4767 got_chklist = 1; 4768 } else if ((ptype == SCTP_HEARTBEAT_INFO) || 4769 (ptype == SCTP_STATE_COOKIE) || 4770 (ptype == SCTP_UNRECOG_PARAM) || 4771 (ptype == SCTP_COOKIE_PRESERVE) || 4772 (ptype == SCTP_SUPPORTED_ADDRTYPE) || 4773 (ptype == SCTP_ADD_IP_ADDRESS) || 4774 (ptype == SCTP_DEL_IP_ADDRESS) || 4775 (ptype == SCTP_ERROR_CAUSE_IND) || 4776 (ptype == SCTP_SUCCESS_REPORT)) { 4777 /* don't care */ ; 4778 } else { 4779 if ((ptype & 0x8000) == 0x0000) { 4780 /* 4781 * must stop processing the rest of the 4782 * param's. Any report bits were handled 4783 * with the call to 4784 * sctp_arethere_unrecognized_parameters() 4785 * when the INIT or INIT-ACK was first seen. 
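				 * Parameters whose type has the upper bit set
				 * are simply skipped; the walk continues at
				 * next_param.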
4786 */ 4787 break; 4788 } 4789 } 4790 next_param: 4791 offset += SCTP_SIZE32(plen); 4792 if (offset >= limit) { 4793 break; 4794 } 4795 phdr = sctp_get_next_param(m, offset, &parm_buf, 4796 sizeof(parm_buf)); 4797 } 4798 /* Now check to see if we need to purge any addresses */ 4799 for (net = TAILQ_FIRST(&stcb->asoc.nets); net != NULL; net = net_tmp) { 4800 net_tmp = TAILQ_NEXT(net, sctp_next); 4801 if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) == 4802 SCTP_ADDR_NOT_IN_ASSOC) { 4803 /* This address has been removed from the asoc */ 4804 /* remove and free it */ 4805 stcb->asoc.numnets--; 4806 TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next); 4807 sctp_free_remote_addr(net); 4808 if (net == stcb->asoc.primary_destination) { 4809 stcb->asoc.primary_destination = NULL; 4810 sctp_select_primary_destination(stcb); 4811 } 4812 } 4813 } 4814 /* validate authentication required parameters */ 4815 if (got_random && got_hmacs) { 4816 stcb->asoc.peer_supports_auth = 1; 4817 } else { 4818 stcb->asoc.peer_supports_auth = 0; 4819 } 4820 if (!sctp_asconf_auth_nochk && stcb->asoc.peer_supports_asconf && 4821 !stcb->asoc.peer_supports_auth) { 4822 return (-31); 4823 } 4824 /* concatenate the full random key */ 4825 keylen = random_len + num_chunks + hmacs_len; 4826 new_key = sctp_alloc_key(keylen); 4827 if (new_key != NULL) { 4828 /* copy in the RANDOM */ 4829 if (random != NULL) 4830 bcopy(random->random_data, new_key->key, random_len); 4831 /* append in the AUTH chunks */ 4832 if (chunks != NULL) 4833 bcopy(chunks->chunk_types, new_key->key + random_len, 4834 num_chunks); 4835 /* append in the HMACs */ 4836 if (hmacs != NULL) 4837 bcopy(hmacs->hmac_ids, new_key->key + random_len + num_chunks, 4838 hmacs_len); 4839 } else { 4840 return (-32); 4841 } 4842 if (stcb->asoc.authinfo.peer_random != NULL) 4843 sctp_free_key(stcb->asoc.authinfo.peer_random); 4844 stcb->asoc.authinfo.peer_random = new_key; 4845 #ifdef SCTP_AUTH_DRAFT_04 4846 /* don't include the chunks and hmacs for draft -04 */ 4847 stcb->asoc.authinfo.peer_random->keylen = random_len; 4848 #endif 4849 sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid); 4850 sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid); 4851 4852 return (0); 4853 } 4854 4855 int 4856 sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa, 4857 struct sctp_nets *net) 4858 { 4859 /* make sure the requested primary address exists in the assoc */ 4860 if (net == NULL && sa) 4861 net = sctp_findnet(stcb, sa); 4862 4863 if (net == NULL) { 4864 /* didn't find the requested primary address! */ 4865 return (-1); 4866 } else { 4867 /* set the primary address */ 4868 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 4869 /* Must be confirmed */ 4870 return (-1); 4871 } 4872 stcb->asoc.primary_destination = net; 4873 net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY; 4874 net = TAILQ_FIRST(&stcb->asoc.nets); 4875 if (net != stcb->asoc.primary_destination) { 4876 /* 4877 * first one on the list is NOT the primary 4878 * sctp_cmpaddr() is much more efficent if the 4879 * primary is the first on the list, make it so. 4880 */ 4881 TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 4882 TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 4883 } 4884 return (0); 4885 } 4886 } 4887 4888 4889 int 4890 sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now) 4891 { 4892 /* 4893 * This function serves two purposes. 
It will see if a TAG can be 4894 * re-used and return 1 for yes it is ok and 0 for don't use that 4895 * tag. A secondary function it will do is purge out old tags that 4896 * can be removed. 4897 */ 4898 struct sctpasochead *head; 4899 struct sctpvtaghead *chain; 4900 struct sctp_tagblock *twait_block; 4901 struct sctp_tcb *stcb; 4902 int i; 4903 4904 SCTP_INP_INFO_WLOCK(); 4905 chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; 4906 /* First is the vtag in use ? */ 4907 4908 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(tag, 4909 sctppcbinfo.hashasocmark)]; 4910 if (head == NULL) { 4911 goto check_restart; 4912 } 4913 LIST_FOREACH(stcb, head, sctp_asocs) { 4914 4915 if (stcb->asoc.my_vtag == tag) { 4916 /* 4917 * We should remove this if and return 0 always if 4918 * we want vtags unique across all endpoints. For 4919 * now within a endpoint is ok. 4920 */ 4921 if (inp == stcb->sctp_ep) { 4922 /* bad tag, in use */ 4923 SCTP_INP_INFO_WUNLOCK(); 4924 return (0); 4925 } 4926 } 4927 } 4928 check_restart: 4929 /* Now lets check the restart hash */ 4930 head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(tag, 4931 sctppcbinfo.hashrestartmark)]; 4932 if (head == NULL) { 4933 goto check_time_wait; 4934 } 4935 LIST_FOREACH(stcb, head, sctp_tcbrestarhash) { 4936 if (stcb->asoc.assoc_id == tag) { 4937 /* candidate */ 4938 if (inp == stcb->sctp_ep) { 4939 /* bad tag, in use */ 4940 SCTP_INP_INFO_WUNLOCK(); 4941 return (0); 4942 } 4943 } 4944 } 4945 check_time_wait: 4946 /* Now what about timed wait ? */ 4947 if (!LIST_EMPTY(chain)) { 4948 /* 4949 * Block(s) are present, lets see if we have this tag in the 4950 * list 4951 */ 4952 LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { 4953 for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { 4954 if (twait_block->vtag_block[i].v_tag == 0) { 4955 /* not used */ 4956 continue; 4957 } else if ((long)twait_block->vtag_block[i].tv_sec_at_expire > 4958 now->tv_sec) { 4959 /* Audit expires this guy */ 4960 twait_block->vtag_block[i].tv_sec_at_expire = 0; 4961 twait_block->vtag_block[i].v_tag = 0; 4962 } else if (twait_block->vtag_block[i].v_tag == 4963 tag) { 4964 /* Bad tag, sorry :< */ 4965 SCTP_INP_INFO_WUNLOCK(); 4966 return (0); 4967 } 4968 } 4969 } 4970 } 4971 /* Not found, ok to use the tag */ 4972 SCTP_INP_INFO_WUNLOCK(); 4973 return (1); 4974 } 4975 4976 4977 /* 4978 * Delete the address from the endpoint local address list Lookup using a 4979 * sockaddr address (ie. not an ifaddr) 4980 */ 4981 int 4982 sctp_del_local_addr_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa) 4983 { 4984 struct sctp_laddr *laddr; 4985 struct sockaddr *l_sa; 4986 int found = 0; 4987 4988 /* 4989 * Here is another function I cannot find a caller for. As such we 4990 * SHOULD delete it if we have no users. If we find a user that user 4991 * MUST have the INP locked. 4992 * 4993 */ 4994 4995 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 4996 /* You are already bound to all. 
You have it already */ 4997 return (EINVAL); 4998 } 4999 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 5000 /* make sure the address exists */ 5001 if (laddr->ifa == NULL) 5002 continue; 5003 if (laddr->ifa->ifa_addr == NULL) 5004 continue; 5005 5006 l_sa = laddr->ifa->ifa_addr; 5007 if (l_sa->sa_family == AF_INET6) { 5008 /* IPv6 address */ 5009 struct sockaddr_in6 *sin1, *sin2; 5010 5011 sin1 = (struct sockaddr_in6 *)l_sa; 5012 sin2 = (struct sockaddr_in6 *)sa; 5013 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr, 5014 sizeof(struct in6_addr)) == 0) { 5015 /* matched */ 5016 found = 1; 5017 break; 5018 } 5019 } else if (l_sa->sa_family == AF_INET) { 5020 /* IPv4 address */ 5021 struct sockaddr_in *sin1, *sin2; 5022 5023 sin1 = (struct sockaddr_in *)l_sa; 5024 sin2 = (struct sockaddr_in *)sa; 5025 if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) { 5026 /* matched */ 5027 found = 1; 5028 break; 5029 } 5030 } else { 5031 /* invalid family */ 5032 return (-1); 5033 } 5034 } 5035 5036 if (found && inp->laddr_count < 2) { 5037 /* can't delete unless there are at LEAST 2 addresses */ 5038 return (-1); 5039 } 5040 if (found && (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 5041 /* 5042 * remove it from the ep list, this should NOT be done until 5043 * its really gone from the interface list and we won't be 5044 * receiving more of these. Probably right away. If we do 5045 * allow a removal of an address from an association 5046 * (sub-set bind) than this should NOT be called until the 5047 * all ASCONF come back from this association. 5048 */ 5049 sctp_remove_laddr(laddr); 5050 return (0); 5051 } else { 5052 return (-1); 5053 } 5054 } 5055 5056 static sctp_assoc_t reneged_asoc_ids[256]; 5057 static uint8_t reneged_at = 0; 5058 5059 extern int sctp_do_drain; 5060 5061 static void 5062 sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb) 5063 { 5064 /* 5065 * We must hunt this association for MBUF's past the cumack (i.e. 5066 * out of order data that we can renege on). 5067 */ 5068 struct sctp_association *asoc; 5069 struct sctp_tmit_chunk *chk, *nchk; 5070 uint32_t cumulative_tsn_p1, tsn; 5071 struct sctp_queued_to_read *ctl, *nctl; 5072 int cnt, strmat, gap; 5073 5074 /* We look for anything larger than the cum-ack + 1 */ 5075 5076 SCTP_STAT_INCR(sctps_protocol_drain_calls); 5077 if (sctp_do_drain == 0) { 5078 return; 5079 } 5080 asoc = &stcb->asoc; 5081 if (asoc->cumulative_tsn == asoc->highest_tsn_inside_map) { 5082 /* none we can reneg on. 
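		   Everything we hold is at or below the cumulative ack, so
		   there is no out-of-order data to give back.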
*/ 5083 return; 5084 } 5085 SCTP_STAT_INCR(sctps_protocol_drains_done); 5086 cumulative_tsn_p1 = asoc->cumulative_tsn + 1; 5087 cnt = 0; 5088 /* First look in the re-assembly queue */ 5089 chk = TAILQ_FIRST(&asoc->reasmqueue); 5090 while (chk) { 5091 /* Get the next one */ 5092 nchk = TAILQ_NEXT(chk, sctp_next); 5093 if (compare_with_wrap(chk->rec.data.TSN_seq, 5094 cumulative_tsn_p1, MAX_TSN)) { 5095 /* Yep it is above cum-ack */ 5096 cnt++; 5097 tsn = chk->rec.data.TSN_seq; 5098 if (tsn >= asoc->mapping_array_base_tsn) { 5099 gap = tsn - asoc->mapping_array_base_tsn; 5100 } else { 5101 gap = (MAX_TSN - asoc->mapping_array_base_tsn) + 5102 tsn + 1; 5103 } 5104 asoc->size_on_reasm_queue = sctp_sbspace_sub(asoc->size_on_reasm_queue, chk->send_size); 5105 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 5106 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); 5107 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 5108 if (chk->data) { 5109 sctp_m_freem(chk->data); 5110 chk->data = NULL; 5111 } 5112 sctp_free_remote_addr(chk->whoTo); 5113 sctp_free_a_chunk(stcb, chk); 5114 } 5115 chk = nchk; 5116 } 5117 /* Ok that was fun, now we will drain all the inbound streams? */ 5118 for (strmat = 0; strmat < asoc->streamincnt; strmat++) { 5119 ctl = TAILQ_FIRST(&asoc->strmin[strmat].inqueue); 5120 while (ctl) { 5121 nctl = TAILQ_NEXT(ctl, next); 5122 if (compare_with_wrap(ctl->sinfo_tsn, 5123 cumulative_tsn_p1, MAX_TSN)) { 5124 /* Yep it is above cum-ack */ 5125 cnt++; 5126 tsn = ctl->sinfo_tsn; 5127 if (tsn >= asoc->mapping_array_base_tsn) { 5128 gap = tsn - 5129 asoc->mapping_array_base_tsn; 5130 } else { 5131 gap = (MAX_TSN - 5132 asoc->mapping_array_base_tsn) + 5133 tsn + 1; 5134 } 5135 asoc->size_on_all_streams = sctp_sbspace_sub(asoc->size_on_all_streams, ctl->length); 5136 sctp_ucount_decr(asoc->cnt_on_all_streams); 5137 5138 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, 5139 gap); 5140 TAILQ_REMOVE(&asoc->strmin[strmat].inqueue, 5141 ctl, next); 5142 if (ctl->data) { 5143 sctp_m_freem(ctl->data); 5144 ctl->data = NULL; 5145 } 5146 sctp_free_remote_addr(ctl->whoFrom); 5147 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, ctl); 5148 SCTP_DECR_READQ_COUNT(); 5149 } 5150 ctl = nctl; 5151 } 5152 } 5153 /* 5154 * Question, should we go through the delivery queue? The only 5155 * reason things are on here is the app not reading OR a p-d-api up. 5156 * An attacker COULD send enough in to initiate the PD-API and then 5157 * send a bunch of stuff to other streams... these would wind up on 5158 * the delivery queue.. and then we would not get to them. But in 5159 * order to do this I then have to back-track and un-deliver 5160 * sequence numbers in streams.. el-yucko. I think for now we will 5161 * NOT look at the delivery queue and leave it to be something to 5162 * consider later. An alternative would be to abort the P-D-API with 5163 * a notification and then deliver the data.... Or another method 5164 * might be to keep track of how many times the situation occurs and 5165 * if we see a possible attack underway just abort the association. 5166 */ 5167 #ifdef SCTP_DEBUG 5168 if (sctp_debug_on & SCTP_DEBUG_PCB1) { 5169 if (cnt) { 5170 printf("Freed %d chunks from reneg harvest\n", cnt); 5171 } 5172 } 5173 #endif /* SCTP_DEBUG */ 5174 if (cnt) { 5175 /* 5176 * Now do we need to find a new 5177 * asoc->highest_tsn_inside_map? 
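		 * We scan the mapping array down from the old highest until
		 * we find a TSN that is still marked as present.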
		 */
		if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
			gap = asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn;
		} else {
			gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
			    asoc->highest_tsn_inside_map + 1;
		}
		if (gap >= (asoc->mapping_array_size << 3)) {
			/*
			 * Something bad happened or cum-ack and high were
			 * behind the base, but if so earlier checks should
			 * have found NO data... weird... we will start at
			 * end of mapping array.
			 */
			printf("Gap was larger than array?? %d set to max:%d maparraymax:%x\n",
			    (int)gap,
			    (int)(asoc->mapping_array_size << 3),
			    (int)asoc->highest_tsn_inside_map);
			gap = asoc->mapping_array_size << 3;
		}
		while (gap > 0) {
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				/* found the new highest */
				asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn + gap;
				break;
			}
			gap--;
		}
		if (gap == 0) {
			/* Nothing left in map */
			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
			asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
			asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
		}
		asoc->last_revoke_count = cnt;
		SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
		sctp_send_sack(stcb);
		reneged_asoc_ids[reneged_at] = sctp_get_associd(stcb);
		reneged_at++;
	}
	/*
	 * Another issue: in un-setting the TSNs in the mapping array we DID
	 * NOT adjust the highest_tsn marker. This will cause one of two
	 * things to occur. It may cause us to do extra work in checking for
	 * our mapping array movement. More importantly it may cause us to
	 * SACK every datagram. This may not be a bad thing though, since we
	 * will recover once our cum-ack advances past all the data we just
	 * dumped.
	 */
}

void
sctp_drain()
{
	/*
	 * We must walk the PCB lists for ALL associations here. The system
	 * is LOW on MBUFs and needs help. This is where reneging will
	 * occur. We really hope this does NOT happen!
	 */
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;

	SCTP_INP_INFO_RLOCK();
	LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
		/* For each endpoint */
		SCTP_INP_RLOCK(inp);
		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
			/* For each association */
			SCTP_TCB_LOCK(stcb);
			sctp_drain_mbufs(inp, stcb);
			SCTP_TCB_UNLOCK(stcb);
		}
		SCTP_INP_RUNLOCK(inp);
	}
	SCTP_INP_INFO_RUNLOCK();
}

/*
 * start a new iterator
 * iterates through all endpoints and associations based on the pcb_state
 * flags and asoc_state. "af" (mandatory) is executed for all matching
 * assocs and "ef" (optional) is executed when the iterator completes.
 * "inpf" (optional) is executed for each new endpoint as it is being
 * iterated through.
 */
int
sctp_initiate_iterator(inp_func inpf, asoc_func af, uint32_t pcb_state,
    uint32_t pcb_features, uint32_t asoc_state, void *argp, uint32_t argi,
    end_func ef, struct sctp_inpcb *s_inp, uint8_t chunk_output_off)
{
	struct sctp_iterator *it = NULL;
	int s;

	if (af == NULL) {
		return (-1);
	}
	SCTP_MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator),
	    "Iterator");
	if (it == NULL) {
		return (ENOMEM);
	}
	memset(it, 0, sizeof(*it));
	it->function_assoc = af;
	it->function_inp = inpf;
	it->function_atend = ef;
	it->pointer = argp;
	it->val = argi;
	it->pcb_flags = pcb_state;
	it->pcb_features = pcb_features;
	it->asoc_state = asoc_state;
	it->no_chunk_output = chunk_output_off;
	if (s_inp) {
		it->inp = s_inp;
		it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_FIRST(&sctppcbinfo.listhead);
		SCTP_INP_INFO_RUNLOCK();
		it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
	}
	/* Init the timer */
	SCTP_OS_TIMER_INIT(&it->tmr.timer);
	/* add to the list of all iterators */
	SCTP_INP_INFO_WLOCK();
	LIST_INSERT_HEAD(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
	SCTP_INP_INFO_WUNLOCK();
	s = splnet();
	sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR, (struct sctp_inpcb *)it,
	    NULL, NULL);
	splx(s);
	return (0);
}
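
/*
 * Illustrative only, not compiled: a minimal sketch of how a caller might
 * drive sctp_initiate_iterator() to visit every association.  The callback
 * signatures and the "0 means no filtering" reading below are assumptions
 * based on the asoc_func/end_func typedefs in sctp_pcb.h and should be
 * checked against that header; this is a sketch, not part of the stack.
 */
#if 0
static void
example_per_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
    uint32_t val)
{
	/* per-association work goes here; the iterator supplies the stcb */
}

static void
example_walk_done(void *ptr, uint32_t val)
{
	/* invoked once, after the last matching association was visited */
}

static void
example_start_walk(void)
{
	/*
	 * NULL s_inp asks for a walk of every endpoint; passing 0 for the
	 * pcb_state/pcb_features/asoc_state filters is assumed to mean
	 * "match everything" for the purposes of this sketch.
	 */
	(void)sctp_initiate_iterator(NULL, example_per_assoc, 0, 0, 0,
	    NULL, 0, example_walk_done, NULL, 0);
}
#endif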