/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/kmem.h>
#include <sys/socket.h>
#include <sys/random.h>
#include <sys/tsol/tndb.h>
#include <sys/tsol/tnet.h>

#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/sctp.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <inet/ip_ire.h>
#include <inet/ip_if.h>
#include <inet/ip_ndp.h>
#include <inet/mib2.h>
#include <inet/nd.h>
#include <inet/optcom.h>
#include <inet/sctp_ip.h>
#include <inet/ipclassifier.h>

#include "sctp_impl.h"
#include "sctp_addr.h"
#include "sctp_asconf.h"

static struct kmem_cache *sctp_kmem_faddr_cache;
static void sctp_init_faddr(sctp_t *, sctp_faddr_t *, in6_addr_t *, mblk_t *);

/* Set the source address.  Refer to comments in sctp_get_dest(). */
void
sctp_set_saddr(sctp_t *sctp, sctp_faddr_t *fp)
{
	boolean_t v6 = !fp->sf_isv4;
	boolean_t addr_set;

	fp->sf_saddr = sctp_get_valid_addr(sctp, v6, &addr_set);
	/*
	 * If there is no source address available, mark this peer address
	 * as unreachable for now.  When the heartbeat timer fires, it will
	 * call sctp_get_dest() to re-check if there is any source address
	 * available.
	 */
	if (!addr_set)
		fp->sf_state = SCTP_FADDRS_UNREACH;
}

/*
 * Call this function to get information about a peer addr fp.
 *
 * Uses ip_attr_connect to avoid explicit use of ire and source address
 * selection.
 */
void
sctp_get_dest(sctp_t *sctp, sctp_faddr_t *fp)
{
	in6_addr_t	laddr;
	in6_addr_t	nexthop;
	sctp_saddr_ipif_t *sp;
	int		hdrlen;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	conn_t		*connp = sctp->sctp_connp;
	iulp_t		uinfo;
	uint_t		pmtu;
	int		error;
	uint32_t	flags = IPDF_VERIFY_DST | IPDF_IPSEC |
	    IPDF_SELECT_SRC | IPDF_UNIQUE_DCE;

	/*
	 * Tell sctp_make_mp it needs to call us again should we not
	 * complete and set the saddr.
	 */
	fp->sf_saddr = ipv6_all_zeros;

	/*
	 * If this addr is not reachable, mark it as unconfirmed for now;
	 * the state will be changed back to unreachable later in this
	 * function if that is still the case.
	 */
	if (fp->sf_state == SCTP_FADDRS_UNREACH) {
		fp->sf_state = SCTP_FADDRS_UNCONFIRMED;
	}

	/*
	 * Socket is connected - enable PMTU discovery.
	 */
	if (!sctps->sctps_ignore_path_mtu)
		fp->sf_ixa->ixa_flags |= IXAF_PMTU_DISCOVERY;

	ip_attr_nexthop(&connp->conn_xmit_ipp, fp->sf_ixa, &fp->sf_faddr,
	    &nexthop);

	laddr = fp->sf_saddr;
	error = ip_attr_connect(connp, fp->sf_ixa, &laddr, &fp->sf_faddr,
	    &nexthop, connp->conn_fport, &laddr, &uinfo, flags);

	if (error != 0) {
		dprint(3, ("sctp_get_dest: no ire for %x:%x:%x:%x\n",
		    SCTP_PRINTADDR(fp->sf_faddr)));
		/*
		 * It is tempting to just leave the src addr
		 * unspecified and let IP figure it out, but we
		 * *cannot* do this, since IP may choose a src addr
		 * that is not part of this association... unless
		 * this sctp has bound to all addrs.  So if the dest
		 * lookup fails, try to find one in our src addr
		 * list, unless the sctp has bound to all addrs, in
		 * which case we change the src addr to unspec.
		 *
		 * Note that if this is a v6 endpoint but it does
		 * not have any v4 address at this point (e.g. may
		 * have been deleted), sctp_get_valid_addr() will
		 * return mapped INADDR_ANY.  In this case, this
		 * address should be marked not reachable so that
		 * it won't be used to send data.
		 */
		sctp_set_saddr(sctp, fp);
		if (fp->sf_state == SCTP_FADDRS_UNREACH)
			return;
		goto check_current;
	}
	ASSERT(fp->sf_ixa->ixa_ire != NULL);
	ASSERT(!(fp->sf_ixa->ixa_ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)));

	if (!sctp->sctp_loopback)
		sctp->sctp_loopback = uinfo.iulp_loopback;

	/* Make sure the laddr is part of this association */
	if ((sp = sctp_saddr_lookup(sctp, &laddr, 0)) != NULL &&
	    !sp->saddr_ipif_dontsrc) {
		if (sp->saddr_ipif_unconfirmed == 1)
			sp->saddr_ipif_unconfirmed = 0;
		/* We did IPsec policy lookup for laddr already */
		fp->sf_saddr = laddr;
	} else {
		dprint(2, ("sctp_get_dest: src addr is not part of assoc "
		    "%x:%x:%x:%x\n", SCTP_PRINTADDR(laddr)));

		/*
		 * Set the src to the first saddr and hope for the best.
		 * Note that this case should very seldom happen.  One
		 * scenario where it can happen is when an app explicitly
		 * binds to an address that is not the preferred source
		 * address to use when sending to the peer.
		 */
		sctp_set_saddr(sctp, fp);
		if (fp->sf_state == SCTP_FADDRS_UNREACH) {
			return;
		}
	}

	/*
	 * Pull out RTO information for this faddr and use it if we don't
	 * have any yet.
	 */
	if (fp->sf_srtt == -1 && uinfo.iulp_rtt != 0) {
		/* The cached value is in ms. */
		fp->sf_srtt = MSEC_TO_TICK(uinfo.iulp_rtt);
		fp->sf_rttvar = MSEC_TO_TICK(uinfo.iulp_rtt_sd);
		fp->sf_rto = 3 * fp->sf_srtt;

		/* Bound the RTO by configured min and max values */
		if (fp->sf_rto < sctp->sctp_rto_min) {
			fp->sf_rto = sctp->sctp_rto_min;
		}
		if (fp->sf_rto > sctp->sctp_rto_max) {
			fp->sf_rto = sctp->sctp_rto_max;
		}
		SCTP_MAX_RTO(sctp, fp);
	}
	pmtu = uinfo.iulp_mtu;

	/*
	 * Record the MTU for this faddr.  If the MTU for this faddr has
	 * changed, check if the assoc MTU will also change.
	 */
	if (fp->sf_isv4) {
		hdrlen = sctp->sctp_hdr_len;
	} else {
		hdrlen = sctp->sctp_hdr6_len;
	}
	if ((fp->sf_pmss + hdrlen) != pmtu) {
		/* Make sure that sf_pmss is a multiple of SCTP_ALIGN. */
		fp->sf_pmss = (pmtu - hdrlen) & ~(SCTP_ALIGN - 1);
		if (fp->sf_cwnd < (fp->sf_pmss * 2)) {
			SET_CWND(fp, fp->sf_pmss,
			    sctps->sctps_slow_start_initial);
		}
	}

check_current:
	if (fp == sctp->sctp_current)
		sctp_set_faddr_current(sctp, fp);
}

void
sctp_update_dce(sctp_t *sctp)
{
	sctp_faddr_t	*fp;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	iulp_t		uinfo;
	ip_stack_t	*ipst = sctps->sctps_netstack->netstack_ip;
	uint_t		ifindex;

	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->sf_next) {
		bzero(&uinfo, sizeof (uinfo));
		/*
		 * Only record the PMTU for this faddr if we actually have
		 * done discovery. This prevents initialized default from
		 * clobbering any real info that IP may have.
		 */
		if (fp->sf_pmtu_discovered) {
			if (fp->sf_isv4) {
				uinfo.iulp_mtu = fp->sf_pmss +
				    sctp->sctp_hdr_len;
			} else {
				uinfo.iulp_mtu = fp->sf_pmss +
				    sctp->sctp_hdr6_len;
			}
		}
		if (sctps->sctps_rtt_updates != 0 &&
		    fp->sf_rtt_updates >= sctps->sctps_rtt_updates) {
			/*
			 * dce_update_uinfo() merges these values with the
			 * old values.
			 */
			uinfo.iulp_rtt = TICK_TO_MSEC(fp->sf_srtt);
			uinfo.iulp_rtt_sd = TICK_TO_MSEC(fp->sf_rttvar);
			fp->sf_rtt_updates = 0;
		}
		ifindex = 0;
		if (IN6_IS_ADDR_LINKSCOPE(&fp->sf_faddr)) {
			/*
			 * If we are going to create a DCE we'd better have
			 * an ifindex
			 */
			if (fp->sf_ixa->ixa_nce != NULL) {
				ifindex = fp->sf_ixa->ixa_nce->nce_common->
				    ncec_ill->ill_phyint->phyint_ifindex;
			} else {
				continue;
			}
		}

		(void) dce_update_uinfo(&fp->sf_faddr, ifindex, &uinfo, ipst);
	}
}

/*
 * The sender must later set the total length in the IP header.
 */
mblk_t *
sctp_make_mp(sctp_t *sctp, sctp_faddr_t *fp, int trailer)
{
	mblk_t		*mp;
	size_t		ipsctplen;
	int		isv4;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	boolean_t	src_changed = B_FALSE;

	ASSERT(fp != NULL);
	isv4 = fp->sf_isv4;

	if (SCTP_IS_ADDR_UNSPEC(isv4, fp->sf_saddr) ||
	    (fp->sf_ixa->ixa_ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))) {
		/* Need to pick a source */
		sctp_get_dest(sctp, fp);
		/*
		 * Although we still may not get an IRE, the source address
		 * may be changed in sctp_get_dest().  Set src_changed to
		 * true so that the source address is copied again.
		 */
		src_changed = B_TRUE;
	}

	/* If there is no suitable source address to use, return. */
	if (fp->sf_state == SCTP_FADDRS_UNREACH)
		return (NULL);

	ASSERT(fp->sf_ixa->ixa_ire != NULL);
	ASSERT(!SCTP_IS_ADDR_UNSPEC(isv4, fp->sf_saddr));

	if (isv4) {
		ipsctplen = sctp->sctp_hdr_len;
	} else {
		ipsctplen = sctp->sctp_hdr6_len;
	}

	mp = allocb(ipsctplen + sctps->sctps_wroff_xtra + trailer, BPRI_MED);
	if (mp == NULL) {
		ip1dbg(("sctp_make_mp: error making mp..\n"));
		return (NULL);
	}
	mp->b_rptr += sctps->sctps_wroff_xtra;
	mp->b_wptr = mp->b_rptr + ipsctplen;

	ASSERT(OK_32PTR(mp->b_wptr));

	if (isv4) {
		ipha_t *iph = (ipha_t *)mp->b_rptr;

		bcopy(sctp->sctp_iphc, mp->b_rptr, ipsctplen);
		if (fp != sctp->sctp_current || src_changed) {
			/* Fix the source and destination addresses. */
			IN6_V4MAPPED_TO_IPADDR(&fp->sf_faddr, iph->ipha_dst);
			IN6_V4MAPPED_TO_IPADDR(&fp->sf_saddr, iph->ipha_src);
		}
		/* set or clear the don't fragment bit */
		if (fp->sf_df) {
			iph->ipha_fragment_offset_and_flags = htons(IPH_DF);
		} else {
			iph->ipha_fragment_offset_and_flags = 0;
		}
	} else {
		bcopy(sctp->sctp_iphc6, mp->b_rptr, ipsctplen);
		if (fp != sctp->sctp_current || src_changed) {
			/* Fix the source and destination addresses. */
			((ip6_t *)(mp->b_rptr))->ip6_dst = fp->sf_faddr;
			((ip6_t *)(mp->b_rptr))->ip6_src = fp->sf_saddr;
		}
	}
	ASSERT(sctp->sctp_connp != NULL);
	return (mp);
}

/*
 * Notify upper layers about preferred write offset, write size.
 */
void
sctp_set_ulp_prop(sctp_t *sctp)
{
	int hdrlen;
	struct sock_proto_props sopp;

	sctp_stack_t *sctps = sctp->sctp_sctps;

	if (sctp->sctp_current->sf_isv4) {
		hdrlen = sctp->sctp_hdr_len;
	} else {
		hdrlen = sctp->sctp_hdr6_len;
	}
	ASSERT(sctp->sctp_ulpd);

	sctp->sctp_connp->conn_wroff = sctps->sctps_wroff_xtra + hdrlen +
	    sizeof (sctp_data_hdr_t);

	ASSERT(sctp->sctp_current->sf_pmss == sctp->sctp_mss);
	bzero(&sopp, sizeof (sopp));
	sopp.sopp_flags = SOCKOPT_MAXBLK|SOCKOPT_WROFF;
	sopp.sopp_wroff = sctp->sctp_connp->conn_wroff;
	sopp.sopp_maxblk = sctp->sctp_mss - sizeof (sctp_data_hdr_t);
	sctp->sctp_ulp_prop(sctp->sctp_ulpd, &sopp);
}

/*
 * Set the lengths in the packet and the transmit attributes.
 */
void
sctp_set_iplen(sctp_t *sctp, mblk_t *mp, ip_xmit_attr_t *ixa)
{
	uint16_t	sum = 0;
	ipha_t		*iph;
	ip6_t		*ip6h;
	mblk_t		*pmp = mp;
	boolean_t	isv4;

	isv4 = (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION);
	for (; pmp; pmp = pmp->b_cont)
		sum += pmp->b_wptr - pmp->b_rptr;

	ixa->ixa_pktlen = sum;
	if (isv4) {
		iph = (ipha_t *)mp->b_rptr;
		iph->ipha_length = htons(sum);
		ixa->ixa_ip_hdr_length = sctp->sctp_ip_hdr_len;
	} else {
		ip6h = (ip6_t *)mp->b_rptr;
		ip6h->ip6_plen = htons(sum - IPV6_HDR_LEN);
		ixa->ixa_ip_hdr_length = sctp->sctp_ip_hdr6_len;
	}
}

int
sctp_compare_faddrsets(sctp_faddr_t *a1, sctp_faddr_t *a2)
{
	int na1 = 0;
	int overlap = 0;
	int equal = 1;
	int onematch;
	sctp_faddr_t *fp1, *fp2;

	for (fp1 = a1; fp1; fp1 = fp1->sf_next) {
		onematch = 0;
		for (fp2 = a2; fp2; fp2 = fp2->sf_next) {
			if (IN6_ARE_ADDR_EQUAL(&fp1->sf_faddr,
			    &fp2->sf_faddr)) {
				overlap++;
				onematch = 1;
				break;
			}
			if (!onematch) {
				equal = 0;
			}
		}
		na1++;
	}

	if (equal) {
		return (SCTP_ADDR_EQUAL);
	}
	if (overlap == na1) {
		return (SCTP_ADDR_SUBSET);
	}
	if (overlap) {
		return (SCTP_ADDR_OVERLAP);
	}
	return (SCTP_ADDR_DISJOINT);
}

/*
 * Returns 0 on success, ENOMEM on memory allocation failure, EHOSTUNREACH
 * if the connection credentials fail remote host accreditation or
 * if the new destination does not support the previously established
 * connection security label. If sleep is true, this function should
 * never fail for a memory allocation failure. The boolean parameter
 * "first" decides whether the newly created faddr structure should be
 * added at the beginning of the list or at the end.
 *
 * Note: caller must hold conn fanout lock.
 */
int
sctp_add_faddr(sctp_t *sctp, in6_addr_t *addr, int sleep, boolean_t first)
{
	sctp_faddr_t	*faddr;
	mblk_t		*timer_mp;
	int		err;
	conn_t		*connp = sctp->sctp_connp;

	if (is_system_labeled()) {
		ip_xmit_attr_t	*ixa = connp->conn_ixa;
		ts_label_t	*effective_tsl = NULL;

		ASSERT(ixa->ixa_tsl != NULL);

		/*
		 * Verify the destination is allowed to receive packets
		 * at the security label of the connection we are initiating.
		 *
		 * tsol_check_dest() will create a new effective label for
		 * this connection with a modified label or label flags only
		 * if there are changes from the original label.
		 *
		 * Accept whatever label we get if this is the first
		 * destination address for this connection. The security
		 * label and label flags must match any previous settings
		 * for all subsequent destination addresses.
		 */
		if (IN6_IS_ADDR_V4MAPPED(addr)) {
			uint32_t dst;
			IN6_V4MAPPED_TO_IPADDR(addr, dst);
			err = tsol_check_dest(ixa->ixa_tsl,
			    &dst, IPV4_VERSION, connp->conn_mac_mode,
			    connp->conn_zone_is_global, &effective_tsl);
		} else {
			err = tsol_check_dest(ixa->ixa_tsl,
			    addr, IPV6_VERSION, connp->conn_mac_mode,
			    connp->conn_zone_is_global, &effective_tsl);
		}
		if (err != 0)
			return (err);

		if (sctp->sctp_faddrs == NULL && effective_tsl != NULL) {
			ip_xmit_attr_replace_tsl(ixa, effective_tsl);
		} else if (effective_tsl != NULL) {
			label_rele(effective_tsl);
			return (EHOSTUNREACH);
		}
	}

	if ((faddr = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep)) == NULL)
		return (ENOMEM);
	bzero(faddr, sizeof (*faddr));
	timer_mp = sctp_timer_alloc((sctp), sctp_rexmit_timer, sleep);
	if (timer_mp == NULL) {
		kmem_cache_free(sctp_kmem_faddr_cache, faddr);
		return (ENOMEM);
	}
	((sctpt_t *)(timer_mp->b_rptr))->sctpt_faddr = faddr;

	/* Start with any options set on the conn */
	faddr->sf_ixa = conn_get_ixa_exclusive(connp);
	if (faddr->sf_ixa == NULL) {
		freemsg(timer_mp);
		kmem_cache_free(sctp_kmem_faddr_cache, faddr);
		return (ENOMEM);
	}
	faddr->sf_ixa->ixa_notify_cookie = connp->conn_sctp;

	sctp_init_faddr(sctp, faddr, addr, timer_mp);
	ASSERT(faddr->sf_ixa->ixa_cred != NULL);

	/* ip_attr_connect didn't allow broadcast/multicast dest */
	ASSERT(faddr->sf_next == NULL);

	if (sctp->sctp_faddrs == NULL) {
		ASSERT(sctp->sctp_lastfaddr == NULL);
		/* only element on list; first and last are same */
		sctp->sctp_faddrs = sctp->sctp_lastfaddr = faddr;
	} else if (first) {
		ASSERT(sctp->sctp_lastfaddr != NULL);
		faddr->sf_next = sctp->sctp_faddrs;
		sctp->sctp_faddrs = faddr;
	} else {
		sctp->sctp_lastfaddr->sf_next = faddr;
		sctp->sctp_lastfaddr = faddr;
	}
	sctp->sctp_nfaddrs++;

	return (0);
}

sctp_faddr_t *
sctp_lookup_faddr(sctp_t *sctp, in6_addr_t *addr)
{
	sctp_faddr_t *fp;

	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->sf_next) {
		if (IN6_ARE_ADDR_EQUAL(&fp->sf_faddr, addr))
			break;
	}

	return (fp);
}

sctp_faddr_t *
sctp_lookup_faddr_nosctp(sctp_faddr_t *fp, in6_addr_t *addr)
{
	for (; fp; fp = fp->sf_next) {
		if (IN6_ARE_ADDR_EQUAL(&fp->sf_faddr, addr)) {
			break;
		}
	}

	return (fp);
}

/*
 * To change the currently used peer address to the specified one.
 */
void
sctp_set_faddr_current(sctp_t *sctp, sctp_faddr_t *fp)
{
	/* Now setup the composite header. */
	if (fp->sf_isv4) {
		IN6_V4MAPPED_TO_IPADDR(&fp->sf_faddr,
		    sctp->sctp_ipha->ipha_dst);
		IN6_V4MAPPED_TO_IPADDR(&fp->sf_saddr,
		    sctp->sctp_ipha->ipha_src);
		/* update don't fragment bit */
		if (fp->sf_df) {
			sctp->sctp_ipha->ipha_fragment_offset_and_flags =
			    htons(IPH_DF);
		} else {
			sctp->sctp_ipha->ipha_fragment_offset_and_flags = 0;
		}
	} else {
		sctp->sctp_ip6h->ip6_dst = fp->sf_faddr;
		sctp->sctp_ip6h->ip6_src = fp->sf_saddr;
	}

	sctp->sctp_current = fp;
	sctp->sctp_mss = fp->sf_pmss;

	/* Update the upper layer for the change. */
	if (!SCTP_IS_DETACHED(sctp))
		sctp_set_ulp_prop(sctp);
}

void
sctp_redo_faddr_srcs(sctp_t *sctp)
{
	sctp_faddr_t *fp;

	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->sf_next) {
		sctp_get_dest(sctp, fp);
	}
}

void
sctp_faddr_alive(sctp_t *sctp, sctp_faddr_t *fp)
{
	int64_t now = LBOLT_FASTPATH64;

	/*
	 * If we are under memory pressure, we abort an association that
	 * has been waiting in the zero window probing state for too long.
	 * We do this by not resetting sctp_strikes. So if sctp_zero_win_probe
	 * continues while under memory pressure, this association will
	 * eventually time out.
	 */
	if (!sctp->sctp_zero_win_probe || !sctp->sctp_sctps->sctps_reclaim) {
		sctp->sctp_strikes = 0;
	}
	fp->sf_strikes = 0;
	fp->sf_lastactive = now;
	fp->sf_hb_expiry = now + SET_HB_INTVL(fp);
	fp->sf_hb_pending = B_FALSE;
	if (fp->sf_state != SCTP_FADDRS_ALIVE) {
		fp->sf_state = SCTP_FADDRS_ALIVE;
		sctp_intf_event(sctp, fp->sf_faddr, SCTP_ADDR_AVAILABLE, 0);
		/* Should have a full IRE now */
		sctp_get_dest(sctp, fp);

		/*
		 * If this is the primary, switch back to it now. And
		 * we probably want to reset the source addr used to reach
		 * it.
		 * Note that if we didn't find a source in sctp_get_dest
		 * then we'd be unreachable at this point in time.
		 */
		if (fp == sctp->sctp_primary &&
		    fp->sf_state != SCTP_FADDRS_UNREACH) {
			sctp_set_faddr_current(sctp, fp);
			return;
		}
	}
}

/*
 * Return B_TRUE if there is still an active peer address with zero strikes;
 * otherwise return B_FALSE.
 */
boolean_t
sctp_is_a_faddr_clean(sctp_t *sctp)
{
	sctp_faddr_t *fp;

	for (fp = sctp->sctp_faddrs; fp; fp = fp->sf_next) {
		if (fp->sf_state == SCTP_FADDRS_ALIVE && fp->sf_strikes == 0) {
			return (B_TRUE);
		}
	}

	return (B_FALSE);
}

/*
 * Returns 0 if there is at least one other active faddr, -1 if there
 * are none. If there are none left, faddr_dead() will start killing the
 * association.
 * If the downed faddr was the current faddr, a new current faddr
 * will be chosen.
 */
int
sctp_faddr_dead(sctp_t *sctp, sctp_faddr_t *fp, int newstate)
{
	sctp_faddr_t *ofp;
	sctp_stack_t *sctps = sctp->sctp_sctps;

	if (fp->sf_state == SCTP_FADDRS_ALIVE) {
		sctp_intf_event(sctp, fp->sf_faddr, SCTP_ADDR_UNREACHABLE, 0);
	}
	fp->sf_state = newstate;

	dprint(1, ("sctp_faddr_dead: %x:%x:%x:%x down (state=%d)\n",
	    SCTP_PRINTADDR(fp->sf_faddr), newstate));

	if (fp == sctp->sctp_current) {
		/* Current faddr down; need to switch it */
		sctp->sctp_current = NULL;
	}

	/* Find next alive faddr */
	ofp = fp;
	for (fp = fp->sf_next; fp != NULL; fp = fp->sf_next) {
		if (fp->sf_state == SCTP_FADDRS_ALIVE) {
			break;
		}
	}

	if (fp == NULL) {
		/* Continue from beginning of list */
		for (fp = sctp->sctp_faddrs; fp != ofp; fp = fp->sf_next) {
			if (fp->sf_state == SCTP_FADDRS_ALIVE) {
				break;
			}
		}
	}

	/*
	 * We found a new fp; if the current faddr is dead, use the new fp
	 * as the current one.
	 */
	if (fp != ofp) {
		if (sctp->sctp_current == NULL) {
			dprint(1, ("sctp_faddr_dead: failover->%x:%x:%x:%x\n",
			    SCTP_PRINTADDR(fp->sf_faddr)));
			/*
			 * Note that we don't need to reset the source addr
			 * of the new fp.
			 */
			sctp_set_faddr_current(sctp, fp);
		}
		return (0);
	}

	/* All faddrs are down; kill the association */
	dprint(1, ("sctp_faddr_dead: all faddrs down, killing assoc\n"));
	SCTPS_BUMP_MIB(sctps, sctpAborted);
	sctp_assoc_event(sctp, sctp->sctp_state < SCTPS_ESTABLISHED ?
	    SCTP_CANT_STR_ASSOC : SCTP_COMM_LOST, 0, NULL);
	sctp_clean_death(sctp, sctp->sctp_client_errno ?
	    sctp->sctp_client_errno : ETIMEDOUT);

	return (-1);
}

sctp_faddr_t *
sctp_rotate_faddr(sctp_t *sctp, sctp_faddr_t *ofp)
{
	sctp_faddr_t *nfp = NULL;
	sctp_faddr_t *saved_fp = NULL;
	int min_strikes;

	if (ofp == NULL) {
		ofp = sctp->sctp_current;
	}
	/* Nothing to do */
	if (sctp->sctp_nfaddrs < 2)
		return (ofp);

	/*
	 * Find the next live peer address with zero strikes. In case
	 * there is none, find the one with the lowest number of strikes.
	 */
	min_strikes = ofp->sf_strikes;
	nfp = ofp->sf_next;
	while (nfp != ofp) {
		/* If reached end of list, continue scan from the head */
		if (nfp == NULL) {
			nfp = sctp->sctp_faddrs;
			continue;
		}
		if (nfp->sf_state == SCTP_FADDRS_ALIVE) {
			if (nfp->sf_strikes == 0)
				break;
			if (nfp->sf_strikes < min_strikes) {
				min_strikes = nfp->sf_strikes;
				saved_fp = nfp;
			}
		}
		nfp = nfp->sf_next;
	}
	/* If reached the old address, there is no zero strike path */
	if (nfp == ofp)
		nfp = NULL;

	/*
	 * If there is a peer address with zero strikes we use that; if not,
	 * return a peer address with fewer strikes than the one last used;
	 * if neither exists we may as well stay with the old one.
	 */
	if (nfp != NULL)
		return (nfp);
	if (saved_fp != NULL)
		return (saved_fp);
	return (ofp);
}

void
sctp_unlink_faddr(sctp_t *sctp, sctp_faddr_t *fp)
{
	sctp_faddr_t *fpp;

	fpp = NULL;

	if (!sctp->sctp_faddrs) {
		return;
	}

	if (fp->sf_timer_mp != NULL) {
		sctp_timer_free(fp->sf_timer_mp);
		fp->sf_timer_mp = NULL;
		fp->sf_timer_running = 0;
	}
	if (fp->sf_rc_timer_mp != NULL) {
		sctp_timer_free(fp->sf_rc_timer_mp);
		fp->sf_rc_timer_mp = NULL;
		fp->sf_rc_timer_running = 0;
	}
	if (fp->sf_ixa != NULL) {
		ixa_refrele(fp->sf_ixa);
		fp->sf_ixa = NULL;
	}

	if (fp == sctp->sctp_faddrs) {
		goto gotit;
	}

	for (fpp = sctp->sctp_faddrs; fpp->sf_next != fp; fpp = fpp->sf_next)
		;

gotit:
	ASSERT(sctp->sctp_conn_tfp != NULL);
	mutex_enter(&sctp->sctp_conn_tfp->tf_lock);
	if (fp == sctp->sctp_faddrs) {
		sctp->sctp_faddrs = fp->sf_next;
	} else {
		fpp->sf_next = fp->sf_next;
	}
	mutex_exit(&sctp->sctp_conn_tfp->tf_lock);
	kmem_cache_free(sctp_kmem_faddr_cache, fp);
	sctp->sctp_nfaddrs--;
}

void
sctp_zap_faddrs(sctp_t *sctp, int caller_holds_lock)
{
	sctp_faddr_t *fp, *fpn;

	if (sctp->sctp_faddrs == NULL) {
		ASSERT(sctp->sctp_lastfaddr == NULL);
		return;
	}

	ASSERT(sctp->sctp_lastfaddr != NULL);
	sctp->sctp_lastfaddr = NULL;
	sctp->sctp_current = NULL;
	sctp->sctp_primary = NULL;

	sctp_free_faddr_timers(sctp);

	if (sctp->sctp_conn_tfp != NULL && !caller_holds_lock) {
		/* in conn fanout; need to hold lock */
		mutex_enter(&sctp->sctp_conn_tfp->tf_lock);
	}

	for (fp = sctp->sctp_faddrs; fp; fp = fpn) {
		fpn = fp->sf_next;
		if (fp->sf_ixa != NULL) {
			ixa_refrele(fp->sf_ixa);
			fp->sf_ixa = NULL;
		}
		kmem_cache_free(sctp_kmem_faddr_cache, fp);
		sctp->sctp_nfaddrs--;
	}

	sctp->sctp_faddrs = NULL;
	ASSERT(sctp->sctp_nfaddrs == 0);
	if (sctp->sctp_conn_tfp != NULL && !caller_holds_lock) {
		mutex_exit(&sctp->sctp_conn_tfp->tf_lock);
	}
}

void
sctp_zap_addrs(sctp_t *sctp)
{
	sctp_zap_faddrs(sctp, 0);
	sctp_free_saddrs(sctp);
}

/*
 * Build two SCTP header templates; one for IPv4 and one for IPv6.
 * Store them in sctp_iphc and sctp_iphc6 respectively (and related fields).
 * There are no IP addresses in the templates, but the port numbers and
 * verifier are filled in from the conn_t and sctp_t.
 *
 * Returns failure if we can't allocate memory, or if there is a problem
 * with a routing header/option.
 *
 * We allocate space for the minimum sctp header (sctp_hdr_t).
 *
 * We massage any routing option/header. There is no checksum implication
 * for a routing header for sctp.
 *
 * Caller needs to update conn_wroff if desired.
 *
 * TSol notes: This assumes that a SCTP association has a single peer label
 * since we only track a single pair of ipp_label_v4/v6 and not a separate one
 * for each faddr.
 */
int
sctp_build_hdrs(sctp_t *sctp, int sleep)
{
	conn_t		*connp = sctp->sctp_connp;
	ip_pkt_t	*ipp = &connp->conn_xmit_ipp;
	uint_t		ip_hdr_length;
	uchar_t		*hdrs;
	uint_t		hdrs_len;
	uint_t		ulp_hdr_length = sizeof (sctp_hdr_t);
	ipha_t		*ipha;
	ip6_t		*ip6h;
	sctp_hdr_t	*sctph;
	in6_addr_t	v6src, v6dst;
	ipaddr_t	v4src, v4dst;

	v4src = connp->conn_saddr_v4;
	v4dst = connp->conn_faddr_v4;
	v6src = connp->conn_saddr_v6;
	v6dst = connp->conn_faddr_v6;

	/* First do IPv4 header */
	ip_hdr_length = ip_total_hdrs_len_v4(ipp);

	/* In case of TX label and IP options it can be too much */
	if (ip_hdr_length > IP_MAX_HDR_LENGTH) {
		/* Preserves existing TX errno for this */
		return (EHOSTUNREACH);
	}
	hdrs_len = ip_hdr_length + ulp_hdr_length;
	ASSERT(hdrs_len != 0);

	if (hdrs_len != sctp->sctp_iphc_len) {
		/* Allocate new before we free any old */
		hdrs = kmem_alloc(hdrs_len, sleep);
		if (hdrs == NULL)
			return (ENOMEM);

		if (sctp->sctp_iphc != NULL)
			kmem_free(sctp->sctp_iphc, sctp->sctp_iphc_len);
		sctp->sctp_iphc = hdrs;
		sctp->sctp_iphc_len = hdrs_len;
	} else {
		hdrs = sctp->sctp_iphc;
	}
	sctp->sctp_hdr_len = sctp->sctp_iphc_len;
	sctp->sctp_ip_hdr_len = ip_hdr_length;

	sctph = (sctp_hdr_t *)(hdrs + ip_hdr_length);
	sctp->sctp_sctph = sctph;
	sctph->sh_sport = connp->conn_lport;
	sctph->sh_dport = connp->conn_fport;
	sctph->sh_verf = sctp->sctp_fvtag;
	sctph->sh_chksum = 0;

	ipha = (ipha_t *)hdrs;
	sctp->sctp_ipha = ipha;

	ipha->ipha_src = v4src;
	ipha->ipha_dst = v4dst;
	ip_build_hdrs_v4(hdrs, ip_hdr_length, ipp, connp->conn_proto);
	ipha->ipha_length = htons(hdrs_len);
	ipha->ipha_fragment_offset_and_flags = 0;

	if (ipp->ipp_fields & IPPF_IPV4_OPTIONS)
		(void) ip_massage_options(ipha, connp->conn_netstack);

	/* Now IPv6 */
	ip_hdr_length = ip_total_hdrs_len_v6(ipp);
	hdrs_len = ip_hdr_length + ulp_hdr_length;
	ASSERT(hdrs_len != 0);

	if (hdrs_len != sctp->sctp_iphc6_len) {
		/* Allocate new before we free any old */
		hdrs = kmem_alloc(hdrs_len, sleep);
		if (hdrs == NULL)
			return (ENOMEM);

		if (sctp->sctp_iphc6 != NULL)
			kmem_free(sctp->sctp_iphc6, sctp->sctp_iphc6_len);
		sctp->sctp_iphc6 = hdrs;
		sctp->sctp_iphc6_len = hdrs_len;
	} else {
		hdrs = sctp->sctp_iphc6;
	}
	sctp->sctp_hdr6_len = sctp->sctp_iphc6_len;
	sctp->sctp_ip_hdr6_len = ip_hdr_length;

	sctph = (sctp_hdr_t *)(hdrs + ip_hdr_length);
	sctp->sctp_sctph6 = sctph;
	sctph->sh_sport = connp->conn_lport;
	sctph->sh_dport = connp->conn_fport;
	sctph->sh_verf = sctp->sctp_fvtag;
	sctph->sh_chksum = 0;

	ip6h = (ip6_t *)hdrs;
	sctp->sctp_ip6h = ip6h;

	ip6h->ip6_src = v6src;
	ip6h->ip6_dst = v6dst;
	ip_build_hdrs_v6(hdrs, ip_hdr_length, ipp, connp->conn_proto,
	    connp->conn_flowinfo);
	ip6h->ip6_plen = htons(hdrs_len - IPV6_HDR_LEN);

	if (ipp->ipp_fields & IPPF_RTHDR) {
		uint8_t		*end;
		ip6_rthdr_t	*rth;

		end = (uint8_t *)ip6h + ip_hdr_length;
		rth = ip_find_rthdr_v6(ip6h, end);
		if (rth != NULL) {
			(void) ip_massage_options_v6(ip6h, rth,
			    connp->conn_netstack);
		}

		/*
		 * Verify that the first hop isn't a mapped address.
		 * Routers along the path need to do this verification
		 * for subsequent hops.
		 */
		if (IN6_IS_ADDR_V4MAPPED(&ip6h->ip6_dst))
			return (EADDRNOTAVAIL);
	}
	return (0);
}

static int
sctp_v4_label(sctp_t *sctp, sctp_faddr_t *fp)
{
	conn_t *connp = sctp->sctp_connp;

	ASSERT(fp->sf_ixa->ixa_flags & IXAF_IS_IPV4);
	return (conn_update_label(connp, fp->sf_ixa, &fp->sf_faddr,
	    &connp->conn_xmit_ipp));
}

static int
sctp_v6_label(sctp_t *sctp, sctp_faddr_t *fp)
{
	conn_t *connp = sctp->sctp_connp;

	ASSERT(!(fp->sf_ixa->ixa_flags & IXAF_IS_IPV4));
	return (conn_update_label(connp, fp->sf_ixa, &fp->sf_faddr,
	    &connp->conn_xmit_ipp));
}

/*
 * XXX implement more sophisticated logic
 *
 * Tsol note: We have already verified the addresses using tsol_check_dest
 * in sctp_add_faddr, thus no need to redo that here.
 * We do set up ipp_label_v4 and ipp_label_v6 based on which addresses
 * we have.
 */
int
sctp_set_hdraddrs(sctp_t *sctp)
{
	sctp_faddr_t *fp;
	int gotv4 = 0;
	int gotv6 = 0;
	conn_t *connp = sctp->sctp_connp;

	ASSERT(sctp->sctp_faddrs != NULL);
	ASSERT(sctp->sctp_nsaddrs > 0);

	/* Set up using the primary first */
	connp->conn_faddr_v6 = sctp->sctp_primary->sf_faddr;
	/* saddr may be unspec; make_mp() will handle this */
	connp->conn_saddr_v6 = sctp->sctp_primary->sf_saddr;
	connp->conn_laddr_v6 = connp->conn_saddr_v6;
	if (IN6_IS_ADDR_V4MAPPED(&sctp->sctp_primary->sf_faddr)) {
		if (!is_system_labeled() ||
		    sctp_v4_label(sctp, sctp->sctp_primary) == 0) {
			gotv4 = 1;
			if (connp->conn_family == AF_INET) {
				goto done;
			}
		}
	} else {
		if (!is_system_labeled() ||
		    sctp_v6_label(sctp, sctp->sctp_primary) == 0) {
			gotv6 = 1;
		}
	}

	for (fp = sctp->sctp_faddrs; fp; fp = fp->sf_next) {
		if (!gotv4 && IN6_IS_ADDR_V4MAPPED(&fp->sf_faddr)) {
			if (!is_system_labeled() ||
			    sctp_v4_label(sctp, fp) == 0) {
				gotv4 = 1;
				if (connp->conn_family == AF_INET || gotv6) {
					break;
				}
			}
		} else if (!gotv6 && !IN6_IS_ADDR_V4MAPPED(&fp->sf_faddr)) {
			if (!is_system_labeled() ||
			    sctp_v6_label(sctp, fp) == 0) {
				gotv6 = 1;
				if (gotv4)
					break;
			}
		}
	}

done:
	if (!gotv4 && !gotv6)
		return (EACCES);

	return (0);
}

/*
 * got_errchunk is set B_TRUE only if called from validate_init_params(), when
 * an ERROR chunk is already prepended, the size of which needs updating for
 * additional unrecognized parameters.  Other callers either prepend the ERROR
 * chunk with the correct size after calling this function, or they are calling
 * to add an invalid parameter to an INIT_ACK chunk; in that case no ERROR
 * chunk exists and the CAUSE blocks go into the INIT_ACK directly.
 *
 * *errmp will be non-NULL both when adding an additional CAUSE block to an
 * existing prepended COOKIE ERROR chunk (processing params of an INIT_ACK),
 * and when adding unrecognized parameters after the first, to an INIT_ACK
 * (processing params of an INIT chunk).
 */
void
sctp_add_unrec_parm(sctp_parm_hdr_t *uph, mblk_t **errmp,
    boolean_t got_errchunk)
{
	mblk_t *mp;
	sctp_parm_hdr_t *ph;
	size_t len;
	int pad;
	sctp_chunk_hdr_t *ecp;

	len = sizeof (*ph) + ntohs(uph->sph_len);
	if ((pad = len % SCTP_ALIGN) != 0) {
		pad = SCTP_ALIGN - pad;
		len += pad;
	}
	mp = allocb(len, BPRI_MED);
	if (mp == NULL) {
		return;
	}

	ph = (sctp_parm_hdr_t *)(mp->b_rptr);
	ph->sph_type = htons(PARM_UNRECOGNIZED);
	ph->sph_len = htons(len - pad);

	/* copy in the unrecognized parameter */
	bcopy(uph, ph + 1, ntohs(uph->sph_len));

	if (pad != 0)
		bzero((mp->b_rptr + len - pad), pad);

	mp->b_wptr = mp->b_rptr + len;
	if (*errmp != NULL) {
		/*
		 * Update total length if an ERROR chunk, then link
		 * this CAUSE block to the possible chain of CAUSE
		 * blocks attached to the ERROR chunk or INIT_ACK
		 * being created.
		 */
		if (got_errchunk) {
			/* ERROR chunk already prepended */
			ecp = (sctp_chunk_hdr_t *)((*errmp)->b_rptr);
			ecp->sch_len = htons(ntohs(ecp->sch_len) + len);
		}
		linkb(*errmp, mp);
	} else {
		*errmp = mp;
	}
}

/*
 * o Bounds checking
 * o Updates remaining
 * o Checks alignment
 */
sctp_parm_hdr_t *
sctp_next_parm(sctp_parm_hdr_t *current, ssize_t *remaining)
{
	int pad;
	uint16_t len;

	len = ntohs(current->sph_len);
	*remaining -= len;
	if (*remaining < sizeof (*current) || len < sizeof (*current)) {
		return (NULL);
	}
	if ((pad = len & (SCTP_ALIGN - 1)) != 0) {
		pad = SCTP_ALIGN - pad;
		*remaining -= pad;
	}
	/*LINTED pointer cast may result in improper alignment*/
	current = (sctp_parm_hdr_t *)((char *)current + len + pad);
	return (current);
}

/*
 * Sets the address parameters given in the INIT chunk into sctp's
 * faddrs; if psctp is non-NULL, copies psctp's saddrs.  If there are
 * no address parameters in the INIT chunk, a single faddr is created
 * from the ip hdr at the beginning of pkt.
 * If there already are existing addresses hanging from sctp, merge
 * them in; if the old info contains addresses which are not present
 * in this new info, get rid of them and clean the pointers of any
 * messages which have them as their target address.
 *
 * We also re-adjust the source address list here since the list may
 * contain more than what is actually part of the association. If
 * we get here from sctp_send_cookie_echo(), we are on the active
 * side and psctp will be NULL and ich will be the INIT-ACK chunk.
 * If we get here from sctp_accept_comm(), ich will be the INIT chunk
 * and psctp will be the listening endpoint.
 *
 * INIT processing: When processing the INIT we inherit the src address
 * list from the listener. For a loopback or linklocal association, we
 * delete the list and just take the address from the IP header (since
 * that's how we created the INIT-ACK). Additionally, for loopback we
 * ignore the address params in the INIT. For determining which address
 * types were sent in the INIT-ACK we follow the same logic as in
 * creating the INIT-ACK. We delete addresses of the type that are not
 * supported by the peer.
 *
 * INIT-ACK processing: When processing the INIT-ACK, since we had not
 * included addr params for loopback or linklocal addresses when creating
 * the INIT, we just use the address from the IP header. Further, for
 * loopback we ignore the addr param list. We mark addresses of the
 * type not supported by the peer as unconfirmed.
 *
 * In case of INIT processing we look for supported address types in the
 * supported address param, if present. In both cases the address type in
 * the IP header is supported, as well as the types for addresses in the
 * param list, if any.
 *
 * Once we have the supported address types, sctp_check_saddr() runs through
 * the source address list and deletes or marks as unconfirmed addresses of
 * types not supported by the peer.
 *
 * Returns 0 on success, sys errno on failure
 */
int
sctp_get_addrparams(sctp_t *sctp, sctp_t *psctp, mblk_t *pkt,
    sctp_chunk_hdr_t *ich, uint_t *sctp_options)
{
	sctp_init_chunk_t	*init;
	ipha_t			*iph;
	ip6_t			*ip6h;
	in6_addr_t		hdrsaddr[1];
	in6_addr_t		hdrdaddr[1];
	sctp_parm_hdr_t		*ph;
	ssize_t			remaining;
	int			isv4;
	int			err;
	sctp_faddr_t		*fp;
	int			supp_af = 0;
	boolean_t		check_saddr = B_TRUE;
	in6_addr_t		curaddr;
	sctp_stack_t		*sctps = sctp->sctp_sctps;
	conn_t			*connp = sctp->sctp_connp;

	if (sctp_options != NULL)
		*sctp_options = 0;

	/* extract the address from the IP header */
	isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION);
	if (isv4) {
		iph = (ipha_t *)pkt->b_rptr;
		IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdrsaddr);
		IN6_IPADDR_TO_V4MAPPED(iph->ipha_dst, hdrdaddr);
		supp_af |= PARM_SUPP_V4;
	} else {
		ip6h = (ip6_t *)pkt->b_rptr;
		hdrsaddr[0] = ip6h->ip6_src;
		hdrdaddr[0] = ip6h->ip6_dst;
		supp_af |= PARM_SUPP_V6;
	}

	/*
	 * Unfortunately, we can't delay this because adding an faddr
	 * looks for the presence of the source address (from the ire
	 * for the faddr) in the source address list. We could have
	 * delayed this if, say, this was a loopback/linklocal connection.
	 * Now, we just end up nuking this list and taking the addr from
	 * the IP header for loopback/linklocal.
	 */
	if (psctp != NULL && psctp->sctp_nsaddrs > 0) {
		ASSERT(sctp->sctp_nsaddrs == 0);

		err = sctp_dup_saddrs(psctp, sctp, KM_NOSLEEP);
		if (err != 0)
			return (err);
	}
	/*
	 * We will add the faddr before parsing the address list as this
	 * might be a loopback connection and we would not have to
	 * go through the list.
	 *
	 * Make sure the header's addr is in the list
	 */
	fp = sctp_lookup_faddr(sctp, hdrsaddr);
	if (fp == NULL) {
		/* not included; add it now */
		err = sctp_add_faddr(sctp, hdrsaddr, KM_NOSLEEP, B_TRUE);
		if (err != 0)
			return (err);

		/* sctp_faddrs will be the hdr addr */
		fp = sctp->sctp_faddrs;
	}
	/* make the header addr the primary */

	if (cl_sctp_assoc_change != NULL && psctp == NULL)
		curaddr = sctp->sctp_current->sf_faddr;

	sctp->sctp_primary = fp;
	sctp->sctp_current = fp;
	sctp->sctp_mss = fp->sf_pmss;

	/* For loopback connections & linklocal get address from the header */
	if (sctp->sctp_loopback || sctp->sctp_linklocal) {
		if (sctp->sctp_nsaddrs != 0)
			sctp_free_saddrs(sctp);
		if ((err = sctp_saddr_add_addr(sctp, hdrdaddr, 0)) != 0)
			return (err);
		/* For loopback ignore address list */
		if (sctp->sctp_loopback)
			return (0);
		check_saddr = B_FALSE;
	}

	/* Walk the params in the INIT [ACK], pulling out addr params */
	remaining = ntohs(ich->sch_len) - sizeof (*ich) -
	    sizeof (sctp_init_chunk_t);
	if (remaining < sizeof (*ph)) {
		if (check_saddr) {
			sctp_check_saddr(sctp, supp_af, psctp == NULL ?
			    B_FALSE : B_TRUE, hdrdaddr);
		}
		ASSERT(sctp_saddr_lookup(sctp, hdrdaddr, 0) != NULL);
		return (0);
	}

	init = (sctp_init_chunk_t *)(ich + 1);
	ph = (sctp_parm_hdr_t *)(init + 1);

	/* params will have already been byteordered when validating */
	while (ph != NULL) {
		if (ph->sph_type == htons(PARM_SUPP_ADDRS)) {
			int		plen;
			uint16_t	*p;
			uint16_t	addrtype;

			ASSERT(psctp != NULL);
			plen = ntohs(ph->sph_len);
			p = (uint16_t *)(ph + 1);
			while (plen > 0) {
				addrtype = ntohs(*p);
				switch (addrtype) {
				case PARM_ADDR6:
					supp_af |= PARM_SUPP_V6;
					break;
				case PARM_ADDR4:
					supp_af |= PARM_SUPP_V4;
					break;
				default:
					break;
				}
				p++;
				plen -= sizeof (*p);
			}
		} else if (ph->sph_type == htons(PARM_ADDR4)) {
			if (remaining >= PARM_ADDR4_LEN) {
				in6_addr_t addr;
				ipaddr_t ta;

				supp_af |= PARM_SUPP_V4;
				/*
				 * Screen out broad/multicasts & loopback.
				 * If the endpoint only accepts v6 address,
				 * go to the next one.
				 *
				 * Subnet broadcast check is done in
				 * sctp_add_faddr(). If the address is
				 * a broadcast address, it won't be added.
				 */
				bcopy(ph + 1, &ta, sizeof (ta));
				if (ta == 0 ||
				    ta == INADDR_BROADCAST ||
				    ta == htonl(INADDR_LOOPBACK) ||
				    CLASSD(ta) || connp->conn_ipv6_v6only) {
					goto next;
				}
				IN6_INADDR_TO_V4MAPPED((struct in_addr *)
				    (ph + 1), &addr);

				/* Check for duplicate. */
				if (sctp_lookup_faddr(sctp, &addr) != NULL)
					goto next;

				/* OK, add it to the faddr set */
				err = sctp_add_faddr(sctp, &addr, KM_NOSLEEP,
				    B_FALSE);
				/* Something is wrong...  Try the next one. */
				if (err != 0)
					goto next;
			}
		} else if (ph->sph_type == htons(PARM_ADDR6) &&
		    connp->conn_family == AF_INET6) {
			/* A v4 socket should not take v6 addresses. */
			if (remaining >= PARM_ADDR6_LEN) {
				in6_addr_t *addr6;

				supp_af |= PARM_SUPP_V6;
				addr6 = (in6_addr_t *)(ph + 1);
				/*
				 * Screen out link locals, mcast, loopback
				 * and bogus v6 addresses.
				 */
				if (IN6_IS_ADDR_LINKLOCAL(addr6) ||
				    IN6_IS_ADDR_MULTICAST(addr6) ||
				    IN6_IS_ADDR_LOOPBACK(addr6) ||
				    IN6_IS_ADDR_V4MAPPED(addr6)) {
					goto next;
				}
				/* Check for duplicate. */
				if (sctp_lookup_faddr(sctp, addr6) != NULL)
					goto next;

				err = sctp_add_faddr(sctp,
				    (in6_addr_t *)(ph + 1), KM_NOSLEEP,
				    B_FALSE);
				/* Something is wrong...  Try the next one. */
				if (err != 0)
					goto next;
			}
		} else if (ph->sph_type == htons(PARM_FORWARD_TSN)) {
			if (sctp_options != NULL)
				*sctp_options |= SCTP_PRSCTP_OPTION;
		} /* else; skip */

next:
		ph = sctp_next_parm(ph, &remaining);
	}
	if (check_saddr) {
		sctp_check_saddr(sctp, supp_af, psctp == NULL ? B_FALSE :
		    B_TRUE, hdrdaddr);
	}
	ASSERT(sctp_saddr_lookup(sctp, hdrdaddr, 0) != NULL);
	/*
	 * We have the right address list now, update clustering's
	 * knowledge because when we sent the INIT we had just added
	 * the address the INIT was sent to.
	 */
	if (psctp == NULL && cl_sctp_assoc_change != NULL) {
		uchar_t	*alist;
		size_t	asize;
		uchar_t	*dlist;
		size_t	dsize;

		asize = sizeof (in6_addr_t) * sctp->sctp_nfaddrs;
		alist = kmem_alloc(asize, KM_NOSLEEP);
		if (alist == NULL) {
			SCTP_KSTAT(sctps, sctp_cl_assoc_change);
			return (ENOMEM);
		}
		/*
		 * Just include the address the INIT was sent to in the
		 * delete list and send the entire faddr list. We could
		 * do it differently (i.e include all the addresses in the
		 * add list even if it contains the original address OR
		 * remove the original address from the add list etc.), but
		 * this seems reasonable enough.
		 */
		dsize = sizeof (in6_addr_t);
		dlist = kmem_alloc(dsize, KM_NOSLEEP);
		if (dlist == NULL) {
			kmem_free(alist, asize);
			SCTP_KSTAT(sctps, sctp_cl_assoc_change);
			return (ENOMEM);
		}
		bcopy(&curaddr, dlist, sizeof (curaddr));
		sctp_get_faddr_list(sctp, alist, asize);
		(*cl_sctp_assoc_change)(connp->conn_family, alist, asize,
		    sctp->sctp_nfaddrs, dlist, dsize, 1, SCTP_CL_PADDR,
		    (cl_sctp_handle_t)sctp);
		/* alist and dlist will be freed by the clustering module */
	}
	return (0);
}

/*
 * Returns 0 if the check failed and the restart should be refused,
 * 1 if the check succeeded.
 */
int
sctp_secure_restart_check(mblk_t *pkt, sctp_chunk_hdr_t *ich, uint32_t ports,
    int sleep, sctp_stack_t *sctps, ip_recv_attr_t *ira)
{
	sctp_faddr_t *fp, *fphead = NULL;
	sctp_parm_hdr_t *ph;
	ssize_t remaining;
	int isv4;
	ipha_t *iph;
	ip6_t *ip6h;
	in6_addr_t hdraddr[1];
	int retval = 0;
	sctp_tf_t *tf;
	sctp_t *sctp;
	int compres;
	sctp_init_chunk_t *init;
	int nadded = 0;

	/* extract the address from the IP header */
	isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION);
	if (isv4) {
		iph = (ipha_t *)pkt->b_rptr;
		IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdraddr);
	} else {
		ip6h = (ip6_t *)pkt->b_rptr;
		hdraddr[0] = ip6h->ip6_src;
	}

	/* Walk the params in the INIT [ACK], pulling out addr params */
	remaining = ntohs(ich->sch_len) - sizeof (*ich) -
	    sizeof (sctp_init_chunk_t);
	if (remaining < sizeof (*ph)) {
		/* no parameters; restart OK */
		return (1);
	}
	init = (sctp_init_chunk_t *)(ich + 1);
	ph = (sctp_parm_hdr_t *)(init + 1);

	while (ph != NULL) {
		sctp_faddr_t *fpa = NULL;

		/* params will have already been byteordered when validating */
		if (ph->sph_type == htons(PARM_ADDR4)) {
			if (remaining >= PARM_ADDR4_LEN) {
				in6_addr_t addr;
				IN6_INADDR_TO_V4MAPPED((struct in_addr *)
				    (ph + 1), &addr);
				fpa = kmem_cache_alloc(sctp_kmem_faddr_cache,
				    sleep);
				if (fpa == NULL) {
					goto done;
				}
				bzero(fpa, sizeof (*fpa));
				fpa->sf_faddr = addr;
				fpa->sf_next = NULL;
			}
		} else if (ph->sph_type == htons(PARM_ADDR6)) {
			if (remaining >= PARM_ADDR6_LEN) {
				fpa = kmem_cache_alloc(sctp_kmem_faddr_cache,
				    sleep);
				if (fpa == NULL) {
					goto done;
				}
				bzero(fpa, sizeof (*fpa));
				bcopy(ph + 1, &fpa->sf_faddr,
				    sizeof (fpa->sf_faddr));
				fpa->sf_next = NULL;
			}
		}
		/* link in the new addr, if it was an addr param */
		if (fpa != NULL) {
			if (fphead == NULL) {
				fphead = fpa;
			} else {
				fpa->sf_next = fphead;
				fphead = fpa;
			}
		}

		ph = sctp_next_parm(ph, &remaining);
	}

	if (fphead == NULL) {
		/* no addr parameters; restart OK */
		return (1);
	}

	/*
	 * got at least one; make sure the header's addr is
	 * in the list
	 */
	fp = sctp_lookup_faddr_nosctp(fphead, hdraddr);
	if (fp == NULL) {
		/* not included; add it now */
		fp = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep);
		if (fp == NULL) {
			goto done;
		}
		bzero(fp, sizeof (*fp));
		fp->sf_faddr = *hdraddr;
		fp->sf_next = fphead;
		fphead = fp;
	}

	/*
	 * Now, we can finally do the check: For each sctp instance
	 * on the hash line for ports, compare its faddr set against
	 * the new one. If the new one is a strict subset of any
	 * existing sctp's faddrs, the restart is OK. However, if there
	 * is an overlap, this could be an attack, so return failure.
	 * If all sctp's faddrs are disjoint, this is a legitimate new
	 * association.
	 */
	tf = &(sctps->sctps_conn_fanout[SCTP_CONN_HASH(sctps, ports)]);
	mutex_enter(&tf->tf_lock);

	for (sctp = tf->tf_sctp; sctp; sctp = sctp->sctp_conn_hash_next) {
		if (ports != sctp->sctp_connp->conn_ports) {
			continue;
		}
		compres = sctp_compare_faddrsets(fphead, sctp->sctp_faddrs);
		if (compres <= SCTP_ADDR_SUBSET) {
			retval = 1;
			mutex_exit(&tf->tf_lock);
			goto done;
		}
		if (compres == SCTP_ADDR_OVERLAP) {
			dprint(1,
			    ("new assoc from %x:%x:%x:%x overlaps with %p\n",
			    SCTP_PRINTADDR(*hdraddr), (void *)sctp));
			/*
			 * While we still hold the lock, we need to
			 * figure out which addresses have been
			 * added so we can include them in the abort
			 * we will send back. Since these faddrs will
			 * never be used, we overload the rto field
			 * here, setting it to 0 if the address was
			 * not added, 1 if it was added.
			 */
			for (fp = fphead; fp; fp = fp->sf_next) {
				if (sctp_lookup_faddr(sctp, &fp->sf_faddr)) {
					fp->sf_rto = 0;
				} else {
					fp->sf_rto = 1;
					nadded++;
				}
			}
			mutex_exit(&tf->tf_lock);
			goto done;
		}
	}
	mutex_exit(&tf->tf_lock);

	/* All faddrs are disjoint; legit new association */
	retval = 1;

done:
	/* If there are attempted adds, send back an abort listing the addrs */
	if (nadded > 0) {
		void *dtail;
		size_t dlen;

		dtail = kmem_alloc(PARM_ADDR6_LEN * nadded, KM_NOSLEEP);
		if (dtail == NULL) {
			goto cleanup;
		}

		ph = dtail;
		dlen = 0;
		for (fp = fphead; fp; fp = fp->sf_next) {
			if (fp->sf_rto == 0) {
				continue;
			}
			if (IN6_IS_ADDR_V4MAPPED(&fp->sf_faddr)) {
				ipaddr_t addr4;

				ph->sph_type = htons(PARM_ADDR4);
				ph->sph_len = htons(PARM_ADDR4_LEN);
				IN6_V4MAPPED_TO_IPADDR(&fp->sf_faddr, addr4);
				ph++;
				bcopy(&addr4, ph, sizeof (addr4));
				ph = (sctp_parm_hdr_t *)
				    ((char *)ph + sizeof (addr4));
				dlen += PARM_ADDR4_LEN;
			} else {
				ph->sph_type = htons(PARM_ADDR6);
				ph->sph_len = htons(PARM_ADDR6_LEN);
				ph++;
				bcopy(&fp->sf_faddr, ph, sizeof (fp->sf_faddr));
				ph = (sctp_parm_hdr_t *)
				    ((char *)ph + sizeof (fp->sf_faddr));
				dlen += PARM_ADDR6_LEN;
			}
		}

		/* Send off the abort */
		sctp_send_abort(sctp, sctp_init2vtag(ich),
		    SCTP_ERR_RESTART_NEW_ADDRS, dtail, dlen, pkt, 0, B_TRUE,
		    ira);

		kmem_free(dtail, PARM_ADDR6_LEN * nadded);
	}

cleanup:
	/* Clean up */
	if (fphead) {
		sctp_faddr_t *fpn;
		for (fp = fphead; fp; fp = fpn) {
			fpn = fp->sf_next;
			if (fp->sf_ixa != NULL) {
				ixa_refrele(fp->sf_ixa);
				fp->sf_ixa = NULL;
			}
			kmem_cache_free(sctp_kmem_faddr_cache, fp);
		}
	}

	return (retval);
}

/*
 * Reset any state related to transmitted chunks.
 */
void
sctp_congest_reset(sctp_t *sctp)
{
	sctp_faddr_t	*fp;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	mblk_t		*mp;

	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->sf_next) {
		fp->sf_ssthresh = sctps->sctps_initial_mtu;
		SET_CWND(fp, fp->sf_pmss, sctps->sctps_slow_start_initial);
		fp->sf_suna = 0;
		fp->sf_pba = 0;
	}
	/*
	 * Clean up the transmit list as well since we have reset accounting
	 * on all the fps. Send event upstream, if required.
	 */
	while ((mp = sctp->sctp_xmit_head) != NULL) {
		sctp->sctp_xmit_head = mp->b_next;
		mp->b_next = NULL;
		if (sctp->sctp_xmit_head != NULL)
			sctp->sctp_xmit_head->b_prev = NULL;
		sctp_sendfail_event(sctp, mp, 0, B_TRUE);
	}
	sctp->sctp_xmit_head = NULL;
	sctp->sctp_xmit_tail = NULL;
	sctp->sctp_xmit_unacked = NULL;

	sctp->sctp_unacked = 0;
	/*
	 * Clean up the control message list as well.  It contains any
	 * pending ASCONF request that we have queued/sent.  If we do get
	 * an ACK we will just drop it.  However, given that we are
	 * restarting, chances are we aren't going to get any.
	 */
	if (sctp->sctp_cxmit_list != NULL)
		sctp_asconf_free_cxmit(sctp, NULL);
	sctp->sctp_cxmit_list = NULL;
	sctp->sctp_cchunk_pend = 0;

	sctp->sctp_rexmitting = B_FALSE;
	sctp->sctp_rxt_nxttsn = 0;
	sctp->sctp_rxt_maxtsn = 0;

	sctp->sctp_zero_win_probe = B_FALSE;
}

static void
sctp_init_faddr(sctp_t *sctp, sctp_faddr_t *fp, in6_addr_t *addr,
    mblk_t *timer_mp)
{
	sctp_stack_t	*sctps = sctp->sctp_sctps;

	ASSERT(fp->sf_ixa != NULL);

	bcopy(addr, &fp->sf_faddr, sizeof (*addr));
	if (IN6_IS_ADDR_V4MAPPED(addr)) {
		fp->sf_isv4 = 1;
		/* Make sure that sf_pmss is a multiple of SCTP_ALIGN. */
		fp->sf_pmss =
		    (sctps->sctps_initial_mtu - sctp->sctp_hdr_len) &
		    ~(SCTP_ALIGN - 1);
		fp->sf_ixa->ixa_flags |= IXAF_IS_IPV4;
	} else {
		fp->sf_isv4 = 0;
		fp->sf_pmss =
		    (sctps->sctps_initial_mtu - sctp->sctp_hdr6_len) &
		    ~(SCTP_ALIGN - 1);
		fp->sf_ixa->ixa_flags &= ~IXAF_IS_IPV4;
	}
	fp->sf_cwnd = sctps->sctps_slow_start_initial * fp->sf_pmss;
	fp->sf_rto = MIN(sctp->sctp_rto_initial, sctp->sctp_rto_max_init);
	SCTP_MAX_RTO(sctp, fp);
	fp->sf_srtt = -1;
	fp->sf_rtt_updates = 0;
	fp->sf_strikes = 0;
	fp->sf_max_retr = sctp->sctp_pp_max_rxt;
	/* Mark it as not confirmed. */
	fp->sf_state = SCTP_FADDRS_UNCONFIRMED;
	fp->sf_hb_interval = sctp->sctp_hb_interval;
	fp->sf_ssthresh = sctps->sctps_initial_ssthresh;
	fp->sf_suna = 0;
	fp->sf_pba = 0;
	fp->sf_acked = 0;
	fp->sf_lastactive = fp->sf_hb_expiry = ddi_get_lbolt64();
	fp->sf_timer_mp = timer_mp;
	fp->sf_hb_pending = B_FALSE;
	fp->sf_hb_enabled = B_TRUE;
	fp->sf_df = 1;
	fp->sf_pmtu_discovered = 0;
	fp->sf_next = NULL;
	fp->sf_T3expire = 0;
	(void) random_get_pseudo_bytes((uint8_t *)&fp->sf_hb_secret,
	    sizeof (fp->sf_hb_secret));
	fp->sf_rxt_unacked = 0;

	sctp_get_dest(sctp, fp);
}

/*ARGSUSED*/
static int
faddr_constructor(void *buf, void *arg, int flags)
{
	sctp_faddr_t *fp = buf;

	fp->sf_timer_mp = NULL;
	fp->sf_timer_running = 0;

	fp->sf_rc_timer_mp = NULL;
	fp->sf_rc_timer_running = 0;

	return (0);
}

/*ARGSUSED*/
static void
faddr_destructor(void *buf, void *arg)
{
	sctp_faddr_t *fp = buf;

	ASSERT(fp->sf_timer_mp == NULL);
	ASSERT(fp->sf_timer_running == 0);

	ASSERT(fp->sf_rc_timer_mp == NULL);
	ASSERT(fp->sf_rc_timer_running == 0);
}

void
sctp_faddr_init(void)
{
	sctp_kmem_faddr_cache = kmem_cache_create("sctp_faddr_cache",
	    sizeof (sctp_faddr_t), 0, faddr_constructor, faddr_destructor,
	    NULL, NULL, NULL, 0);
}

void
sctp_faddr_fini(void)
{
	kmem_cache_destroy(sctp_kmem_faddr_cache);
}