/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/kmem.h>
#include <sys/socket.h>
#include <sys/random.h>
#include <sys/tsol/tndb.h>
#include <sys/tsol/tnet.h>

#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/sctp.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <inet/ip_ire.h>
#include <inet/mib2.h>
#include <inet/nd.h>
#include <inet/optcom.h>
#include <inet/sctp_ip.h>
#include <inet/ipclassifier.h>

#include "sctp_impl.h"
#include "sctp_addr.h"
#include "sctp_asconf.h"

static struct kmem_cache *sctp_kmem_faddr_cache;
static void sctp_init_faddr(sctp_t *, sctp_faddr_t *, in6_addr_t *, mblk_t *);

/* Set the source address.  Refer to comments in sctp_get_ire(). */
void
sctp_set_saddr(sctp_t *sctp, sctp_faddr_t *fp)
{
	boolean_t v6 = !fp->isv4;
	boolean_t addr_set;

	fp->saddr = sctp_get_valid_addr(sctp, v6, &addr_set);
	/*
	 * If there is no source address available, mark this peer address
	 * as unreachable for now.  When the heartbeat timer fires, it will
	 * call sctp_get_ire() to re-check if there is any source address
	 * available.
	 */
	if (!addr_set)
		fp->state = SCTP_FADDRS_UNREACH;
}

/*
 * Call this function to update the cached IRE of a peer addr fp.
 */
void
sctp_get_ire(sctp_t *sctp, sctp_faddr_t *fp)
{
	ire_t		*ire;
	ipaddr_t	addr4;
	in6_addr_t	laddr;
	sctp_saddr_ipif_t *sp;
	int		hdrlen;
	ts_label_t	*tsl;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	ip_stack_t	*ipst = sctps->sctps_netstack->netstack_ip;

	/* Remove the previously cached IRE */
	if ((ire = fp->ire) != NULL) {
		IRE_REFRELE_NOTR(ire);
		fp->ire = NULL;
	}

	/*
	 * If this addr is not reachable, mark it as unconfirmed for now; the
	 * state will be changed back to unreachable later in this function
	 * if it is still the case.
	 */
	if (fp->state == SCTP_FADDRS_UNREACH) {
		fp->state = SCTP_FADDRS_UNCONFIRMED;
	}

	tsl = crgetlabel(CONN_CRED(sctp->sctp_connp));

	if (fp->isv4) {
		IN6_V4MAPPED_TO_IPADDR(&fp->faddr, addr4);
		ire = ire_cache_lookup(addr4, sctp->sctp_zoneid, tsl, ipst);
		if (ire != NULL)
			IN6_IPADDR_TO_V4MAPPED(ire->ire_src_addr, &laddr);
	} else {
		ire = ire_cache_lookup_v6(&fp->faddr, sctp->sctp_zoneid, tsl,
		    ipst);
		if (ire != NULL)
			laddr = ire->ire_src_addr_v6;
	}

	if (ire == NULL) {
		dprint(3, ("ire2faddr: no ire for %x:%x:%x:%x\n",
		    SCTP_PRINTADDR(fp->faddr)));
		/*
		 * It is tempting to just leave the src addr
		 * unspecified and let IP figure it out, but we
		 * *cannot* do this, since IP may choose a src addr
		 * that is not part of this association... unless
		 * this sctp has bound to all addrs.  So if the ire
		 * lookup fails, try to find one in our src addr
		 * list, unless the sctp has bound to all addrs, in
		 * which case we change the src addr to unspec.
		 *
		 * Note that if this is a v6 endpoint but it does
		 * not have any v4 address at this point (e.g. may
		 * have been deleted), sctp_get_valid_addr() will
		 * return mapped INADDR_ANY.  In this case, this
		 * address should be marked not reachable so that
		 * it won't be used to send data.
		 */
		sctp_set_saddr(sctp, fp);
		if (fp->state == SCTP_FADDRS_UNREACH)
			return;
		goto check_current;
	}

	/* Make sure the laddr is part of this association */
	if ((sp = sctp_saddr_lookup(sctp, &ire->ire_ipif->ipif_v6lcl_addr,
	    0)) != NULL && !sp->saddr_ipif_dontsrc) {
		if (sp->saddr_ipif_unconfirmed == 1)
			sp->saddr_ipif_unconfirmed = 0;
		fp->saddr = laddr;
	} else {
		dprint(2, ("ire2faddr: src addr is not part of assc\n"));

		/*
		 * Set the src to the first saddr and hope for the best.
		 * Note that we will still do the ire caching below.
		 * Otherwise, whenever we send a packet, we need to do
		 * the ire lookup again and still may not get the correct
		 * source address.  Note that this case should seldom
		 * happen.  One scenario where it can happen is when an app
		 * explicitly binds to an address, but that address is
		 * not the preferred source address to send to the peer.
		 */
		sctp_set_saddr(sctp, fp);
		if (fp->state == SCTP_FADDRS_UNREACH) {
			IRE_REFRELE(ire);
			return;
		}
	}

	/*
	 * Note that ire_cache_lookup_*() returns an ire with the tracing
	 * bits enabled.  This requires the thread holding the ire to also
	 * do the IRE_REFRELE().  Thus we need to do IRE_REFHOLD_NOTR()
	 * and then IRE_REFRELE() the ire here to make the tracing bits
	 * work.
	 */
	IRE_REFHOLD_NOTR(ire);
	IRE_REFRELE(ire);

	/* Cache the IRE */
	fp->ire = ire;
	if (fp->ire->ire_type == IRE_LOOPBACK && !sctp->sctp_loopback)
		sctp->sctp_loopback = 1;

	/*
	 * Pull out RTO information for this faddr and use it if we don't
	 * have any yet.
	 */
	if (fp->srtt == -1 && ire->ire_uinfo.iulp_rtt != 0) {
		/* The cached value is in ms. */
		fp->srtt = MSEC_TO_TICK(ire->ire_uinfo.iulp_rtt);
		fp->rttvar = MSEC_TO_TICK(ire->ire_uinfo.iulp_rtt_sd);
		fp->rto = 3 * fp->srtt;

		/* Bound the RTO by configured min and max values */
		if (fp->rto < sctp->sctp_rto_min) {
			fp->rto = sctp->sctp_rto_min;
		}
		if (fp->rto > sctp->sctp_rto_max) {
			fp->rto = sctp->sctp_rto_max;
		}
	}

	/*
	 * Record the MTU for this faddr.
If the MTU for this faddr has 212 * changed, check if the assc MTU will also change. 213 */ 214 if (fp->isv4) { 215 hdrlen = sctp->sctp_hdr_len; 216 } else { 217 hdrlen = sctp->sctp_hdr6_len; 218 } 219 if ((fp->sfa_pmss + hdrlen) != ire->ire_max_frag) { 220 /* Make sure that sfa_pmss is a multiple of SCTP_ALIGN. */ 221 fp->sfa_pmss = (ire->ire_max_frag - hdrlen) & ~(SCTP_ALIGN - 1); 222 if (fp->cwnd < (fp->sfa_pmss * 2)) { 223 SET_CWND(fp, fp->sfa_pmss, 224 sctps->sctps_slow_start_initial); 225 } 226 } 227 228 check_current: 229 if (fp == sctp->sctp_current) 230 sctp_set_faddr_current(sctp, fp); 231 } 232 233 void 234 sctp_update_ire(sctp_t *sctp) 235 { 236 ire_t *ire; 237 sctp_faddr_t *fp; 238 sctp_stack_t *sctps = sctp->sctp_sctps; 239 240 for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) { 241 if ((ire = fp->ire) == NULL) 242 continue; 243 mutex_enter(&ire->ire_lock); 244 245 /* 246 * If the cached IRE is going away, there is no point to 247 * update it. 248 */ 249 if (ire->ire_marks & IRE_MARK_CONDEMNED) { 250 mutex_exit(&ire->ire_lock); 251 IRE_REFRELE_NOTR(ire); 252 fp->ire = NULL; 253 continue; 254 } 255 256 /* 257 * Only record the PMTU for this faddr if we actually have 258 * done discovery. This prevents initialized default from 259 * clobbering any real info that IP may have. 260 */ 261 if (fp->pmtu_discovered) { 262 if (fp->isv4) { 263 ire->ire_max_frag = fp->sfa_pmss + 264 sctp->sctp_hdr_len; 265 } else { 266 ire->ire_max_frag = fp->sfa_pmss + 267 sctp->sctp_hdr6_len; 268 } 269 } 270 271 if (sctps->sctps_rtt_updates != 0 && 272 fp->rtt_updates >= sctps->sctps_rtt_updates) { 273 /* 274 * If there is no old cached values, initialize them 275 * conservatively. Set them to be (1.5 * new value). 276 * This code copied from ip_ire_advise(). The cached 277 * value is in ms. 278 */ 279 if (ire->ire_uinfo.iulp_rtt != 0) { 280 ire->ire_uinfo.iulp_rtt = 281 (ire->ire_uinfo.iulp_rtt + 282 TICK_TO_MSEC(fp->srtt)) >> 1; 283 } else { 284 ire->ire_uinfo.iulp_rtt = 285 TICK_TO_MSEC(fp->srtt + (fp->srtt >> 1)); 286 } 287 if (ire->ire_uinfo.iulp_rtt_sd != 0) { 288 ire->ire_uinfo.iulp_rtt_sd = 289 (ire->ire_uinfo.iulp_rtt_sd + 290 TICK_TO_MSEC(fp->rttvar)) >> 1; 291 } else { 292 ire->ire_uinfo.iulp_rtt_sd = 293 TICK_TO_MSEC(fp->rttvar + 294 (fp->rttvar >> 1)); 295 } 296 fp->rtt_updates = 0; 297 } 298 mutex_exit(&ire->ire_lock); 299 } 300 } 301 302 /* 303 * The sender must set the total length in the IP header. 304 * If sendto == NULL, the current will be used. 305 */ 306 mblk_t * 307 sctp_make_mp(sctp_t *sctp, sctp_faddr_t *sendto, int trailer) 308 { 309 mblk_t *mp; 310 size_t ipsctplen; 311 int isv4; 312 sctp_faddr_t *fp; 313 sctp_stack_t *sctps = sctp->sctp_sctps; 314 boolean_t src_changed = B_FALSE; 315 316 ASSERT(sctp->sctp_current != NULL || sendto != NULL); 317 if (sendto == NULL) { 318 fp = sctp->sctp_current; 319 } else { 320 fp = sendto; 321 } 322 isv4 = fp->isv4; 323 324 /* Try to look for another IRE again. */ 325 if (fp->ire == NULL) { 326 sctp_get_ire(sctp, fp); 327 /* 328 * Although we still may not get an IRE, the source address 329 * may be changed in sctp_get_ire(). Set src_changed to 330 * true so that the source address is copied again. 331 */ 332 src_changed = B_TRUE; 333 } 334 335 /* There is no suitable source address to use, return. 
*/ 336 if (fp->state == SCTP_FADDRS_UNREACH) 337 return (NULL); 338 ASSERT(!IN6_IS_ADDR_V4MAPPED_ANY(&fp->saddr)); 339 340 if (isv4) { 341 ipsctplen = sctp->sctp_hdr_len; 342 } else { 343 ipsctplen = sctp->sctp_hdr6_len; 344 } 345 346 mp = allocb_cred(ipsctplen + sctps->sctps_wroff_xtra + trailer, 347 CONN_CRED(sctp->sctp_connp)); 348 if (mp == NULL) { 349 ip1dbg(("sctp_make_mp: error making mp..\n")); 350 return (NULL); 351 } 352 mp->b_rptr += sctps->sctps_wroff_xtra; 353 mp->b_wptr = mp->b_rptr + ipsctplen; 354 355 ASSERT(OK_32PTR(mp->b_wptr)); 356 357 if (isv4) { 358 ipha_t *iph = (ipha_t *)mp->b_rptr; 359 360 bcopy(sctp->sctp_iphc, mp->b_rptr, ipsctplen); 361 if (fp != sctp->sctp_current || src_changed) { 362 /* Fix the source and destination addresses. */ 363 IN6_V4MAPPED_TO_IPADDR(&fp->faddr, iph->ipha_dst); 364 IN6_V4MAPPED_TO_IPADDR(&fp->saddr, iph->ipha_src); 365 } 366 /* set or clear the don't fragment bit */ 367 if (fp->df) { 368 iph->ipha_fragment_offset_and_flags = htons(IPH_DF); 369 } else { 370 iph->ipha_fragment_offset_and_flags = 0; 371 } 372 } else { 373 bcopy(sctp->sctp_iphc6, mp->b_rptr, ipsctplen); 374 if (fp != sctp->sctp_current || src_changed) { 375 /* Fix the source and destination addresses. */ 376 ((ip6_t *)(mp->b_rptr))->ip6_dst = fp->faddr; 377 ((ip6_t *)(mp->b_rptr))->ip6_src = fp->saddr; 378 } 379 } 380 ASSERT(sctp->sctp_connp != NULL); 381 382 /* 383 * IP will not free this IRE if it is condemned. SCTP needs to 384 * free it. 385 */ 386 if ((fp->ire != NULL) && (fp->ire->ire_marks & IRE_MARK_CONDEMNED)) { 387 IRE_REFRELE_NOTR(fp->ire); 388 fp->ire = NULL; 389 } 390 /* Stash the conn and ire ptr info. for IP */ 391 SCTP_STASH_IPINFO(mp, fp->ire); 392 393 return (mp); 394 } 395 396 /* 397 * Notify upper layers about preferred write offset, write size. 398 */ 399 void 400 sctp_set_ulp_prop(sctp_t *sctp) 401 { 402 int hdrlen; 403 sctp_stack_t *sctps = sctp->sctp_sctps; 404 405 if (sctp->sctp_current->isv4) { 406 hdrlen = sctp->sctp_hdr_len; 407 } else { 408 hdrlen = sctp->sctp_hdr6_len; 409 } 410 ASSERT(sctp->sctp_ulpd); 411 412 ASSERT(sctp->sctp_current->sfa_pmss == sctp->sctp_mss); 413 sctp->sctp_ulp_prop(sctp->sctp_ulpd, 414 sctps->sctps_wroff_xtra + hdrlen + sizeof (sctp_data_hdr_t), 415 sctp->sctp_mss - sizeof (sctp_data_hdr_t)); 416 } 417 418 void 419 sctp_set_iplen(sctp_t *sctp, mblk_t *mp) 420 { 421 uint16_t sum = 0; 422 ipha_t *iph; 423 ip6_t *ip6h; 424 mblk_t *pmp = mp; 425 boolean_t isv4; 426 427 isv4 = (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION); 428 for (; pmp; pmp = pmp->b_cont) 429 sum += pmp->b_wptr - pmp->b_rptr; 430 431 if (isv4) { 432 iph = (ipha_t *)mp->b_rptr; 433 iph->ipha_length = htons(sum); 434 } else { 435 ip6h = (ip6_t *)mp->b_rptr; 436 /* 437 * If an ip6i_t is present, the real IPv6 header 438 * immediately follows. 
439 */ 440 if (ip6h->ip6_nxt == IPPROTO_RAW) 441 ip6h = (ip6_t *)&ip6h[1]; 442 ip6h->ip6_plen = htons(sum - ((char *)&sctp->sctp_ip6h[1] - 443 sctp->sctp_iphc6)); 444 } 445 } 446 447 int 448 sctp_compare_faddrsets(sctp_faddr_t *a1, sctp_faddr_t *a2) 449 { 450 int na1 = 0; 451 int overlap = 0; 452 int equal = 1; 453 int onematch; 454 sctp_faddr_t *fp1, *fp2; 455 456 for (fp1 = a1; fp1; fp1 = fp1->next) { 457 onematch = 0; 458 for (fp2 = a2; fp2; fp2 = fp2->next) { 459 if (IN6_ARE_ADDR_EQUAL(&fp1->faddr, &fp2->faddr)) { 460 overlap++; 461 onematch = 1; 462 break; 463 } 464 if (!onematch) { 465 equal = 0; 466 } 467 } 468 na1++; 469 } 470 471 if (equal) { 472 return (SCTP_ADDR_EQUAL); 473 } 474 if (overlap == na1) { 475 return (SCTP_ADDR_SUBSET); 476 } 477 if (overlap) { 478 return (SCTP_ADDR_OVERLAP); 479 } 480 return (SCTP_ADDR_DISJOINT); 481 } 482 483 /* 484 * Returns 0 on success, -1 on memory allocation failure. If sleep 485 * is true, this function should never fail. The boolean parameter 486 * first decides whether the newly created faddr structure should be 487 * added at the beginning of the list or at the end. 488 * 489 * Note: caller must hold conn fanout lock. 490 */ 491 int 492 sctp_add_faddr(sctp_t *sctp, in6_addr_t *addr, int sleep, boolean_t first) 493 { 494 sctp_faddr_t *faddr; 495 mblk_t *timer_mp; 496 497 if (is_system_labeled()) { 498 ts_label_t *tsl; 499 tsol_tpc_t *rhtp; 500 int retv; 501 502 tsl = crgetlabel(CONN_CRED(sctp->sctp_connp)); 503 ASSERT(tsl != NULL); 504 505 /* find_tpc automatically does the right thing with IPv4 */ 506 rhtp = find_tpc(addr, IPV6_VERSION, B_FALSE); 507 if (rhtp == NULL) 508 return (EACCES); 509 510 retv = EACCES; 511 if (tsl->tsl_doi == rhtp->tpc_tp.tp_doi) { 512 switch (rhtp->tpc_tp.host_type) { 513 case UNLABELED: 514 /* 515 * Can talk to unlabeled hosts if any of the 516 * following are true: 517 * 1. zone's label matches the remote host's 518 * default label, 519 * 2. mac_exempt is on and the zone dominates 520 * the remote host's label, or 521 * 3. mac_exempt is on and the socket is from 522 * the global zone. 523 */ 524 if (blequal(&rhtp->tpc_tp.tp_def_label, 525 &tsl->tsl_label) || 526 (sctp->sctp_mac_exempt && 527 (sctp->sctp_zoneid == GLOBAL_ZONEID || 528 bldominates(&tsl->tsl_label, 529 &rhtp->tpc_tp.tp_def_label)))) 530 retv = 0; 531 break; 532 case SUN_CIPSO: 533 if (_blinrange(&tsl->tsl_label, 534 &rhtp->tpc_tp.tp_sl_range_cipso) || 535 blinlset(&tsl->tsl_label, 536 rhtp->tpc_tp.tp_sl_set_cipso)) 537 retv = 0; 538 break; 539 } 540 } 541 TPC_RELE(rhtp); 542 if (retv != 0) 543 return (retv); 544 } 545 546 if ((faddr = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep)) == NULL) 547 return (ENOMEM); 548 timer_mp = sctp_timer_alloc((sctp), sctp_rexmit_timer, sleep); 549 if (timer_mp == NULL) { 550 kmem_cache_free(sctp_kmem_faddr_cache, faddr); 551 return (ENOMEM); 552 } 553 ((sctpt_t *)(timer_mp->b_rptr))->sctpt_faddr = faddr; 554 555 sctp_init_faddr(sctp, faddr, addr, timer_mp); 556 557 /* Check for subnet broadcast. 
	 */
	if (faddr->ire != NULL && faddr->ire->ire_type & IRE_BROADCAST) {
		IRE_REFRELE_NOTR(faddr->ire);
		sctp_timer_free(timer_mp);
		faddr->timer_mp = NULL;
		kmem_cache_free(sctp_kmem_faddr_cache, faddr);
		return (EADDRNOTAVAIL);
	}
	ASSERT(faddr->next == NULL);

	if (sctp->sctp_faddrs == NULL) {
		ASSERT(sctp->sctp_lastfaddr == NULL);
		/* only element on list; first and last are same */
		sctp->sctp_faddrs = sctp->sctp_lastfaddr = faddr;
	} else if (first) {
		ASSERT(sctp->sctp_lastfaddr != NULL);
		faddr->next = sctp->sctp_faddrs;
		sctp->sctp_faddrs = faddr;
	} else {
		sctp->sctp_lastfaddr->next = faddr;
		sctp->sctp_lastfaddr = faddr;
	}
	sctp->sctp_nfaddrs++;

	return (0);
}

sctp_faddr_t *
sctp_lookup_faddr(sctp_t *sctp, in6_addr_t *addr)
{
	sctp_faddr_t *fp;

	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
		if (IN6_ARE_ADDR_EQUAL(&fp->faddr, addr))
			break;
	}

	return (fp);
}

sctp_faddr_t *
sctp_lookup_faddr_nosctp(sctp_faddr_t *fp, in6_addr_t *addr)
{
	for (; fp; fp = fp->next) {
		if (IN6_ARE_ADDR_EQUAL(&fp->faddr, addr)) {
			break;
		}
	}

	return (fp);
}

/*
 * To change the currently used peer address to the specified one.
 */
void
sctp_set_faddr_current(sctp_t *sctp, sctp_faddr_t *fp)
{
	/* Now setup the composite header. */
	if (fp->isv4) {
		IN6_V4MAPPED_TO_IPADDR(&fp->faddr,
		    sctp->sctp_ipha->ipha_dst);
		IN6_V4MAPPED_TO_IPADDR(&fp->saddr, sctp->sctp_ipha->ipha_src);
		/* update don't fragment bit */
		if (fp->df) {
			sctp->sctp_ipha->ipha_fragment_offset_and_flags =
			    htons(IPH_DF);
		} else {
			sctp->sctp_ipha->ipha_fragment_offset_and_flags = 0;
		}
	} else {
		sctp->sctp_ip6h->ip6_dst = fp->faddr;
		sctp->sctp_ip6h->ip6_src = fp->saddr;
	}

	sctp->sctp_current = fp;
	sctp->sctp_mss = fp->sfa_pmss;

	/* Update the upper layer for the change. */
	if (!SCTP_IS_DETACHED(sctp))
		sctp_set_ulp_prop(sctp);
}

void
sctp_redo_faddr_srcs(sctp_t *sctp)
{
	sctp_faddr_t *fp;

	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
		sctp_get_ire(sctp, fp);
	}
}

void
sctp_faddr_alive(sctp_t *sctp, sctp_faddr_t *fp)
{
	int64_t now = lbolt64;

	fp->strikes = 0;
	sctp->sctp_strikes = 0;
	fp->lastactive = now;
	fp->hb_expiry = now + SET_HB_INTVL(fp);
	fp->hb_pending = B_FALSE;
	if (fp->state != SCTP_FADDRS_ALIVE) {
		fp->state = SCTP_FADDRS_ALIVE;
		sctp_intf_event(sctp, fp->faddr, SCTP_ADDR_AVAILABLE, 0);
		/* Should have a full IRE now */
		sctp_get_ire(sctp, fp);

		/*
		 * If this is the primary, switch back to it now.  And
		 * we probably want to reset the source addr used to reach
		 * it.
		 */
		if (fp == sctp->sctp_primary) {
			ASSERT(fp->state != SCTP_FADDRS_UNREACH);
			sctp_set_faddr_current(sctp, fp);
			return;
		}
	}
}

int
sctp_is_a_faddr_clean(sctp_t *sctp)
{
	sctp_faddr_t *fp;

	for (fp = sctp->sctp_faddrs; fp; fp = fp->next) {
		if (fp->state == SCTP_FADDRS_ALIVE && fp->strikes == 0) {
			return (1);
		}
	}

	return (0);
}

/*
 * Returns 0 if there is at least one other active faddr, -1 if there
 * are none.  If there are none left, faddr_dead() will start killing the
 * association.
697 * If the downed faddr was the current faddr, a new current faddr 698 * will be chosen. 699 */ 700 int 701 sctp_faddr_dead(sctp_t *sctp, sctp_faddr_t *fp, int newstate) 702 { 703 sctp_faddr_t *ofp; 704 sctp_stack_t *sctps = sctp->sctp_sctps; 705 706 if (fp->state == SCTP_FADDRS_ALIVE) { 707 sctp_intf_event(sctp, fp->faddr, SCTP_ADDR_UNREACHABLE, 0); 708 } 709 fp->state = newstate; 710 711 dprint(1, ("sctp_faddr_dead: %x:%x:%x:%x down (state=%d)\n", 712 SCTP_PRINTADDR(fp->faddr), newstate)); 713 714 if (fp == sctp->sctp_current) { 715 /* Current faddr down; need to switch it */ 716 sctp->sctp_current = NULL; 717 } 718 719 /* Find next alive faddr */ 720 ofp = fp; 721 for (fp = fp->next; fp != NULL; fp = fp->next) { 722 if (fp->state == SCTP_FADDRS_ALIVE) { 723 break; 724 } 725 } 726 727 if (fp == NULL) { 728 /* Continue from beginning of list */ 729 for (fp = sctp->sctp_faddrs; fp != ofp; fp = fp->next) { 730 if (fp->state == SCTP_FADDRS_ALIVE) { 731 break; 732 } 733 } 734 } 735 736 /* 737 * Find a new fp, so if the current faddr is dead, use the new fp 738 * as the current one. 739 */ 740 if (fp != ofp) { 741 if (sctp->sctp_current == NULL) { 742 dprint(1, ("sctp_faddr_dead: failover->%x:%x:%x:%x\n", 743 SCTP_PRINTADDR(fp->faddr))); 744 /* 745 * Note that we don't need to reset the source addr 746 * of the new fp. 747 */ 748 sctp_set_faddr_current(sctp, fp); 749 } 750 return (0); 751 } 752 753 754 /* All faddrs are down; kill the association */ 755 dprint(1, ("sctp_faddr_dead: all faddrs down, killing assoc\n")); 756 BUMP_MIB(&sctps->sctps_mib, sctpAborted); 757 sctp_assoc_event(sctp, sctp->sctp_state < SCTPS_ESTABLISHED ? 758 SCTP_CANT_STR_ASSOC : SCTP_COMM_LOST, 0, NULL); 759 sctp_clean_death(sctp, sctp->sctp_client_errno ? 760 sctp->sctp_client_errno : ETIMEDOUT); 761 762 return (-1); 763 } 764 765 sctp_faddr_t * 766 sctp_rotate_faddr(sctp_t *sctp, sctp_faddr_t *ofp) 767 { 768 sctp_faddr_t *nfp = NULL; 769 770 if (ofp == NULL) { 771 ofp = sctp->sctp_current; 772 } 773 774 /* Find the next live one */ 775 for (nfp = ofp->next; nfp != NULL; nfp = nfp->next) { 776 if (nfp->state == SCTP_FADDRS_ALIVE) { 777 break; 778 } 779 } 780 781 if (nfp == NULL) { 782 /* Continue from beginning of list */ 783 for (nfp = sctp->sctp_faddrs; nfp != ofp; nfp = nfp->next) { 784 if (nfp->state == SCTP_FADDRS_ALIVE) { 785 break; 786 } 787 } 788 } 789 790 /* 791 * nfp could only be NULL if all faddrs are down, and when 792 * this happens, faddr_dead() should have killed the 793 * association. Hence this assertion... 794 */ 795 ASSERT(nfp != NULL); 796 return (nfp); 797 } 798 799 void 800 sctp_unlink_faddr(sctp_t *sctp, sctp_faddr_t *fp) 801 { 802 sctp_faddr_t *fpp; 803 804 if (!sctp->sctp_faddrs) { 805 return; 806 } 807 808 if (fp->timer_mp != NULL) { 809 sctp_timer_free(fp->timer_mp); 810 fp->timer_mp = NULL; 811 fp->timer_running = 0; 812 } 813 if (fp->rc_timer_mp != NULL) { 814 sctp_timer_free(fp->rc_timer_mp); 815 fp->rc_timer_mp = NULL; 816 fp->rc_timer_running = 0; 817 } 818 if (fp->ire != NULL) { 819 IRE_REFRELE_NOTR(fp->ire); 820 fp->ire = NULL; 821 } 822 823 if (fp == sctp->sctp_faddrs) { 824 goto gotit; 825 } 826 827 for (fpp = sctp->sctp_faddrs; fpp->next != fp; fpp = fpp->next) 828 ; 829 830 gotit: 831 ASSERT(sctp->sctp_conn_tfp != NULL); 832 mutex_enter(&sctp->sctp_conn_tfp->tf_lock); 833 if (fp == sctp->sctp_faddrs) { 834 sctp->sctp_faddrs = fp->next; 835 } else { 836 fpp->next = fp->next; 837 } 838 mutex_exit(&sctp->sctp_conn_tfp->tf_lock); 839 /* XXX faddr2ire? 
*/ 840 kmem_cache_free(sctp_kmem_faddr_cache, fp); 841 sctp->sctp_nfaddrs--; 842 } 843 844 void 845 sctp_zap_faddrs(sctp_t *sctp, int caller_holds_lock) 846 { 847 sctp_faddr_t *fp, *fpn; 848 849 if (sctp->sctp_faddrs == NULL) { 850 ASSERT(sctp->sctp_lastfaddr == NULL); 851 return; 852 } 853 854 ASSERT(sctp->sctp_lastfaddr != NULL); 855 sctp->sctp_lastfaddr = NULL; 856 sctp->sctp_current = NULL; 857 sctp->sctp_primary = NULL; 858 859 sctp_free_faddr_timers(sctp); 860 861 if (sctp->sctp_conn_tfp != NULL && !caller_holds_lock) { 862 /* in conn fanout; need to hold lock */ 863 mutex_enter(&sctp->sctp_conn_tfp->tf_lock); 864 } 865 866 for (fp = sctp->sctp_faddrs; fp; fp = fpn) { 867 fpn = fp->next; 868 if (fp->ire != NULL) 869 IRE_REFRELE_NOTR(fp->ire); 870 kmem_cache_free(sctp_kmem_faddr_cache, fp); 871 sctp->sctp_nfaddrs--; 872 } 873 874 sctp->sctp_faddrs = NULL; 875 ASSERT(sctp->sctp_nfaddrs == 0); 876 if (sctp->sctp_conn_tfp != NULL && !caller_holds_lock) { 877 mutex_exit(&sctp->sctp_conn_tfp->tf_lock); 878 } 879 880 } 881 882 void 883 sctp_zap_addrs(sctp_t *sctp) 884 { 885 sctp_zap_faddrs(sctp, 0); 886 sctp_free_saddrs(sctp); 887 } 888 889 /* 890 * Initialize the IPv4 header. Loses any record of any IP options. 891 */ 892 int 893 sctp_header_init_ipv4(sctp_t *sctp, int sleep) 894 { 895 sctp_hdr_t *sctph; 896 sctp_stack_t *sctps = sctp->sctp_sctps; 897 898 /* 899 * This is a simple initialization. If there's 900 * already a template, it should never be too small, 901 * so reuse it. Otherwise, allocate space for the new one. 902 */ 903 if (sctp->sctp_iphc != NULL) { 904 ASSERT(sctp->sctp_iphc_len >= SCTP_MAX_COMBINED_HEADER_LENGTH); 905 bzero(sctp->sctp_iphc, sctp->sctp_iphc_len); 906 } else { 907 sctp->sctp_iphc_len = SCTP_MAX_COMBINED_HEADER_LENGTH; 908 sctp->sctp_iphc = kmem_zalloc(sctp->sctp_iphc_len, sleep); 909 if (sctp->sctp_iphc == NULL) { 910 sctp->sctp_iphc_len = 0; 911 return (ENOMEM); 912 } 913 } 914 915 sctp->sctp_ipha = (ipha_t *)sctp->sctp_iphc; 916 917 sctp->sctp_hdr_len = sizeof (ipha_t) + sizeof (sctp_hdr_t); 918 sctp->sctp_ip_hdr_len = sizeof (ipha_t); 919 sctp->sctp_ipha->ipha_length = htons(sizeof (ipha_t) + 920 sizeof (sctp_hdr_t)); 921 sctp->sctp_ipha->ipha_version_and_hdr_length = 922 (IP_VERSION << 4) | IP_SIMPLE_HDR_LENGTH_IN_WORDS; 923 924 /* 925 * These two fields should be zero, and are already set above. 926 * 927 * sctp->sctp_ipha->ipha_ident, 928 * sctp->sctp_ipha->ipha_fragment_offset_and_flags. 929 */ 930 931 sctp->sctp_ipha->ipha_ttl = sctps->sctps_ipv4_ttl; 932 sctp->sctp_ipha->ipha_protocol = IPPROTO_SCTP; 933 934 sctph = (sctp_hdr_t *)(sctp->sctp_iphc + sizeof (ipha_t)); 935 sctp->sctp_sctph = sctph; 936 937 return (0); 938 } 939 940 /* 941 * Update sctp_sticky_hdrs based on sctp_sticky_ipp. 942 * The headers include ip6i_t (if needed), ip6_t, any sticky extension 943 * headers, and the maximum size sctp header (to avoid reallocation 944 * on the fly for additional sctp options). 945 * Returns failure if can't allocate memory. 
946 */ 947 int 948 sctp_build_hdrs(sctp_t *sctp) 949 { 950 char *hdrs; 951 uint_t hdrs_len; 952 ip6i_t *ip6i; 953 char buf[SCTP_MAX_HDR_LENGTH]; 954 ip6_pkt_t *ipp = &sctp->sctp_sticky_ipp; 955 in6_addr_t src; 956 in6_addr_t dst; 957 sctp_stack_t *sctps = sctp->sctp_sctps; 958 959 /* 960 * save the existing sctp header and source/dest IP addresses 961 */ 962 bcopy(sctp->sctp_sctph6, buf, sizeof (sctp_hdr_t)); 963 src = sctp->sctp_ip6h->ip6_src; 964 dst = sctp->sctp_ip6h->ip6_dst; 965 hdrs_len = ip_total_hdrs_len_v6(ipp) + SCTP_MAX_HDR_LENGTH; 966 ASSERT(hdrs_len != 0); 967 if (hdrs_len > sctp->sctp_iphc6_len) { 968 /* Need to reallocate */ 969 hdrs = kmem_zalloc(hdrs_len, KM_NOSLEEP); 970 if (hdrs == NULL) 971 return (ENOMEM); 972 973 if (sctp->sctp_iphc6_len != 0) 974 kmem_free(sctp->sctp_iphc6, sctp->sctp_iphc6_len); 975 sctp->sctp_iphc6 = hdrs; 976 sctp->sctp_iphc6_len = hdrs_len; 977 } 978 ip_build_hdrs_v6((uchar_t *)sctp->sctp_iphc6, 979 hdrs_len - SCTP_MAX_HDR_LENGTH, ipp, IPPROTO_SCTP); 980 981 /* Set header fields not in ipp */ 982 if (ipp->ipp_fields & IPPF_HAS_IP6I) { 983 ip6i = (ip6i_t *)sctp->sctp_iphc6; 984 sctp->sctp_ip6h = (ip6_t *)&ip6i[1]; 985 } else { 986 sctp->sctp_ip6h = (ip6_t *)sctp->sctp_iphc6; 987 } 988 /* 989 * sctp->sctp_ip_hdr_len will include ip6i_t if there is one. 990 */ 991 sctp->sctp_ip_hdr6_len = hdrs_len - SCTP_MAX_HDR_LENGTH; 992 sctp->sctp_sctph6 = (sctp_hdr_t *)(sctp->sctp_iphc6 + 993 sctp->sctp_ip_hdr6_len); 994 sctp->sctp_hdr6_len = sctp->sctp_ip_hdr6_len + sizeof (sctp_hdr_t); 995 996 bcopy(buf, sctp->sctp_sctph6, sizeof (sctp_hdr_t)); 997 998 sctp->sctp_ip6h->ip6_src = src; 999 sctp->sctp_ip6h->ip6_dst = dst; 1000 /* 1001 * If the hoplimit was not set by ip_build_hdrs_v6(), we need to 1002 * set it to the default value for SCTP. 1003 */ 1004 if (!(ipp->ipp_fields & IPPF_UNICAST_HOPS)) 1005 sctp->sctp_ip6h->ip6_hops = sctps->sctps_ipv6_hoplimit; 1006 /* 1007 * If we're setting extension headers after a connection 1008 * has been established, and if we have a routing header 1009 * among the extension headers, call ip_massage_options_v6 to 1010 * manipulate the routing header/ip6_dst set the checksum 1011 * difference in the sctp header template. 1012 * (This happens in sctp_connect_ipv6 if the routing header 1013 * is set prior to the connect.) 1014 */ 1015 1016 if ((sctp->sctp_state >= SCTPS_COOKIE_WAIT) && 1017 (sctp->sctp_sticky_ipp.ipp_fields & IPPF_RTHDR)) { 1018 ip6_rthdr_t *rth; 1019 1020 rth = ip_find_rthdr_v6(sctp->sctp_ip6h, 1021 (uint8_t *)sctp->sctp_sctph6); 1022 if (rth != NULL) { 1023 (void) ip_massage_options_v6(sctp->sctp_ip6h, rth, 1024 sctps->sctps_netstack); 1025 } 1026 } 1027 return (0); 1028 } 1029 1030 /* 1031 * Initialize the IPv6 header. Loses any record of any IPv6 extension headers. 1032 */ 1033 int 1034 sctp_header_init_ipv6(sctp_t *sctp, int sleep) 1035 { 1036 sctp_hdr_t *sctph; 1037 sctp_stack_t *sctps = sctp->sctp_sctps; 1038 1039 /* 1040 * This is a simple initialization. If there's 1041 * already a template, it should never be too small, 1042 * so reuse it. Otherwise, allocate space for the new one. 1043 * Ensure that there is enough space to "downgrade" the sctp_t 1044 * to an IPv4 sctp_t. 
This requires having space for a full load 1045 * of IPv4 options 1046 */ 1047 if (sctp->sctp_iphc6 != NULL) { 1048 ASSERT(sctp->sctp_iphc6_len >= 1049 SCTP_MAX_COMBINED_HEADER_LENGTH); 1050 bzero(sctp->sctp_iphc6, sctp->sctp_iphc6_len); 1051 } else { 1052 sctp->sctp_iphc6_len = SCTP_MAX_COMBINED_HEADER_LENGTH; 1053 sctp->sctp_iphc6 = kmem_zalloc(sctp->sctp_iphc_len, sleep); 1054 if (sctp->sctp_iphc6 == NULL) { 1055 sctp->sctp_iphc6_len = 0; 1056 return (ENOMEM); 1057 } 1058 } 1059 sctp->sctp_hdr6_len = IPV6_HDR_LEN + sizeof (sctp_hdr_t); 1060 sctp->sctp_ip_hdr6_len = IPV6_HDR_LEN; 1061 sctp->sctp_ip6h = (ip6_t *)sctp->sctp_iphc6; 1062 1063 /* Initialize the header template */ 1064 1065 sctp->sctp_ip6h->ip6_vcf = IPV6_DEFAULT_VERS_AND_FLOW; 1066 sctp->sctp_ip6h->ip6_plen = ntohs(sizeof (sctp_hdr_t)); 1067 sctp->sctp_ip6h->ip6_nxt = IPPROTO_SCTP; 1068 sctp->sctp_ip6h->ip6_hops = sctps->sctps_ipv6_hoplimit; 1069 1070 sctph = (sctp_hdr_t *)(sctp->sctp_iphc6 + IPV6_HDR_LEN); 1071 sctp->sctp_sctph6 = sctph; 1072 1073 return (0); 1074 } 1075 1076 static int 1077 sctp_v4_label(sctp_t *sctp) 1078 { 1079 uchar_t optbuf[IP_MAX_OPT_LENGTH]; 1080 const cred_t *cr = CONN_CRED(sctp->sctp_connp); 1081 int added; 1082 1083 if (tsol_compute_label(cr, sctp->sctp_ipha->ipha_dst, optbuf, 1084 sctp->sctp_mac_exempt, 1085 sctp->sctp_sctps->sctps_netstack->netstack_ip) != 0) 1086 return (EACCES); 1087 1088 added = tsol_remove_secopt(sctp->sctp_ipha, sctp->sctp_hdr_len); 1089 if (added == -1) 1090 return (EACCES); 1091 sctp->sctp_hdr_len += added; 1092 sctp->sctp_sctph = (sctp_hdr_t *)((uchar_t *)sctp->sctp_sctph + added); 1093 sctp->sctp_ip_hdr_len += added; 1094 if ((sctp->sctp_v4label_len = optbuf[IPOPT_OLEN]) != 0) { 1095 sctp->sctp_v4label_len = (sctp->sctp_v4label_len + 3) & ~3; 1096 added = tsol_prepend_option(optbuf, sctp->sctp_ipha, 1097 sctp->sctp_hdr_len); 1098 if (added == -1) 1099 return (EACCES); 1100 sctp->sctp_hdr_len += added; 1101 sctp->sctp_sctph = (sctp_hdr_t *)((uchar_t *)sctp->sctp_sctph + 1102 added); 1103 sctp->sctp_ip_hdr_len += added; 1104 } 1105 return (0); 1106 } 1107 1108 static int 1109 sctp_v6_label(sctp_t *sctp) 1110 { 1111 uchar_t optbuf[TSOL_MAX_IPV6_OPTION]; 1112 const cred_t *cr = CONN_CRED(sctp->sctp_connp); 1113 1114 if (tsol_compute_label_v6(cr, &sctp->sctp_ip6h->ip6_dst, optbuf, 1115 sctp->sctp_mac_exempt, 1116 sctp->sctp_sctps->sctps_netstack->netstack_ip) != 0) 1117 return (EACCES); 1118 if (tsol_update_sticky(&sctp->sctp_sticky_ipp, &sctp->sctp_v6label_len, 1119 optbuf) != 0) 1120 return (EACCES); 1121 if (sctp_build_hdrs(sctp) != 0) 1122 return (EACCES); 1123 return (0); 1124 } 1125 1126 /* 1127 * XXX implement more sophisticated logic 1128 */ 1129 int 1130 sctp_set_hdraddrs(sctp_t *sctp) 1131 { 1132 sctp_faddr_t *fp; 1133 int gotv4 = 0; 1134 int gotv6 = 0; 1135 1136 ASSERT(sctp->sctp_faddrs != NULL); 1137 ASSERT(sctp->sctp_nsaddrs > 0); 1138 1139 /* Set up using the primary first */ 1140 if (IN6_IS_ADDR_V4MAPPED(&sctp->sctp_primary->faddr)) { 1141 IN6_V4MAPPED_TO_IPADDR(&sctp->sctp_primary->faddr, 1142 sctp->sctp_ipha->ipha_dst); 1143 /* saddr may be unspec; make_mp() will handle this */ 1144 IN6_V4MAPPED_TO_IPADDR(&sctp->sctp_primary->saddr, 1145 sctp->sctp_ipha->ipha_src); 1146 if (!is_system_labeled() || sctp_v4_label(sctp) == 0) { 1147 gotv4 = 1; 1148 if (sctp->sctp_ipversion == IPV4_VERSION) { 1149 goto copyports; 1150 } 1151 } 1152 } else { 1153 sctp->sctp_ip6h->ip6_dst = sctp->sctp_primary->faddr; 1154 /* saddr may be unspec; make_mp() will handle this */ 1155 
sctp->sctp_ip6h->ip6_src = sctp->sctp_primary->saddr; 1156 if (!is_system_labeled() || sctp_v6_label(sctp) == 0) 1157 gotv6 = 1; 1158 } 1159 1160 for (fp = sctp->sctp_faddrs; fp; fp = fp->next) { 1161 if (!gotv4 && IN6_IS_ADDR_V4MAPPED(&fp->faddr)) { 1162 IN6_V4MAPPED_TO_IPADDR(&fp->faddr, 1163 sctp->sctp_ipha->ipha_dst); 1164 /* copy in the faddr_t's saddr */ 1165 IN6_V4MAPPED_TO_IPADDR(&fp->saddr, 1166 sctp->sctp_ipha->ipha_src); 1167 if (!is_system_labeled() || sctp_v4_label(sctp) == 0) { 1168 gotv4 = 1; 1169 if (sctp->sctp_ipversion == IPV4_VERSION || 1170 gotv6) { 1171 break; 1172 } 1173 } 1174 } else if (!gotv6 && !IN6_IS_ADDR_V4MAPPED(&fp->faddr)) { 1175 sctp->sctp_ip6h->ip6_dst = fp->faddr; 1176 /* copy in the faddr_t's saddr */ 1177 sctp->sctp_ip6h->ip6_src = fp->saddr; 1178 if (!is_system_labeled() || sctp_v6_label(sctp) == 0) { 1179 gotv6 = 1; 1180 if (gotv4) 1181 break; 1182 } 1183 } 1184 } 1185 1186 copyports: 1187 if (!gotv4 && !gotv6) 1188 return (EACCES); 1189 1190 /* copy in the ports for good measure */ 1191 sctp->sctp_sctph->sh_sport = sctp->sctp_lport; 1192 sctp->sctp_sctph->sh_dport = sctp->sctp_fport; 1193 1194 sctp->sctp_sctph6->sh_sport = sctp->sctp_lport; 1195 sctp->sctp_sctph6->sh_dport = sctp->sctp_fport; 1196 return (0); 1197 } 1198 1199 void 1200 sctp_add_unrec_parm(sctp_parm_hdr_t *uph, mblk_t **errmp) 1201 { 1202 mblk_t *mp; 1203 sctp_parm_hdr_t *ph; 1204 size_t len; 1205 int pad; 1206 1207 len = sizeof (*ph) + ntohs(uph->sph_len); 1208 if ((pad = len % 4) != 0) { 1209 pad = 4 - pad; 1210 len += pad; 1211 } 1212 mp = allocb(len, BPRI_MED); 1213 if (mp == NULL) { 1214 return; 1215 } 1216 1217 ph = (sctp_parm_hdr_t *)(mp->b_rptr); 1218 ph->sph_type = htons(PARM_UNRECOGNIZED); 1219 ph->sph_len = htons(len - pad); 1220 1221 /* copy in the unrecognized parameter */ 1222 bcopy(uph, ph + 1, ntohs(uph->sph_len)); 1223 1224 mp->b_wptr = mp->b_rptr + len; 1225 if (*errmp != NULL) { 1226 linkb(*errmp, mp); 1227 } else { 1228 *errmp = mp; 1229 } 1230 } 1231 1232 /* 1233 * o Bounds checking 1234 * o Updates remaining 1235 * o Checks alignment 1236 */ 1237 sctp_parm_hdr_t * 1238 sctp_next_parm(sctp_parm_hdr_t *current, ssize_t *remaining) 1239 { 1240 int pad; 1241 uint16_t len; 1242 1243 len = ntohs(current->sph_len); 1244 *remaining -= len; 1245 if (*remaining < sizeof (*current) || len < sizeof (*current)) { 1246 return (NULL); 1247 } 1248 if ((pad = len & (SCTP_ALIGN - 1)) != 0) { 1249 pad = SCTP_ALIGN - pad; 1250 *remaining -= pad; 1251 } 1252 /*LINTED pointer cast may result in improper alignment*/ 1253 current = (sctp_parm_hdr_t *)((char *)current + len + pad); 1254 return (current); 1255 } 1256 1257 /* 1258 * Sets the address parameters given in the INIT chunk into sctp's 1259 * faddrs; if psctp is non-NULL, copies psctp's saddrs. If there are 1260 * no address parameters in the INIT chunk, a single faddr is created 1261 * from the ip hdr at the beginning of pkt. 1262 * If there already are existing addresses hanging from sctp, merge 1263 * them in, if the old info contains addresses which are not present 1264 * in this new info, get rid of them, and clean the pointers if there's 1265 * messages which have this as their target address. 1266 * 1267 * We also re-adjust the source address list here since the list may 1268 * contain more than what is actually part of the association. If 1269 * we get here from sctp_send_cookie_echo(), we are on the active 1270 * side and psctp will be NULL and ich will be the INIT-ACK chunk. 
 * If we get here from sctp_accept_comm(), ich will be the INIT chunk
 * and psctp will be the listening endpoint.
 *
 * INIT processing: When processing the INIT we inherit the src address
 * list from the listener.  For a loopback or linklocal association, we
 * delete the list and just take the address from the IP header (since
 * that's how we created the INIT-ACK).  Additionally, for loopback we
 * ignore the address params in the INIT.  For determining which address
 * types were sent in the INIT-ACK we follow the same logic as in
 * creating the INIT-ACK.  We delete addresses of the type that are not
 * supported by the peer.
 *
 * INIT-ACK processing: When processing the INIT-ACK, since we had not
 * included addr params for loopback or linklocal addresses when creating
 * the INIT, we just use the address from the IP header.  Further, for
 * loopback we ignore the addr param list.  We mark addresses of the
 * type not supported by the peer as unconfirmed.
 *
 * In case of INIT processing we look for supported address types in the
 * supported address param, if present.  In both cases the address type in
 * the IP header is supported as well as types for addresses in the param
 * list, if any.
 *
 * Once we have the supported address types, sctp_check_saddr() runs through
 * the source address list and deletes or marks as unconfirmed addresses of
 * types not supported by the peer.
 *
 * Returns 0 on success, sys errno on failure
 */
int
sctp_get_addrparams(sctp_t *sctp, sctp_t *psctp, mblk_t *pkt,
    sctp_chunk_hdr_t *ich, uint_t *sctp_options)
{
	sctp_init_chunk_t	*init;
	ipha_t			*iph;
	ip6_t			*ip6h;
	in6_addr_t		hdrsaddr[1];
	in6_addr_t		hdrdaddr[1];
	sctp_parm_hdr_t		*ph;
	ssize_t			remaining;
	int			isv4;
	int			err;
	sctp_faddr_t		*fp;
	int			supp_af = 0;
	boolean_t		check_saddr = B_TRUE;
	in6_addr_t		curaddr;
	sctp_stack_t		*sctps = sctp->sctp_sctps;

	if (sctp_options != NULL)
		*sctp_options = 0;

	/* extract the address from the IP header */
	isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION);
	if (isv4) {
		iph = (ipha_t *)pkt->b_rptr;
		IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdrsaddr);
		IN6_IPADDR_TO_V4MAPPED(iph->ipha_dst, hdrdaddr);
		supp_af |= PARM_SUPP_V4;
	} else {
		ip6h = (ip6_t *)pkt->b_rptr;
		hdrsaddr[0] = ip6h->ip6_src;
		hdrdaddr[0] = ip6h->ip6_dst;
		supp_af |= PARM_SUPP_V6;
	}

	/*
	 * Unfortunately, we can't delay this because adding an faddr
	 * looks for the presence of the source address (from the ire
	 * for the faddr) in the source address list.  We could have
	 * delayed this if, say, this was a loopback/linklocal connection.
	 * Now, we just end up nuking this list and taking the addr from
	 * the IP header for loopback/linklocal.
	 */
	if (psctp != NULL && psctp->sctp_nsaddrs > 0) {
		ASSERT(sctp->sctp_nsaddrs == 0);

		err = sctp_dup_saddrs(psctp, sctp, KM_NOSLEEP);
		if (err != 0)
			return (err);
	}
	/*
	 * We will add the faddr before parsing the address list as this
	 * might be a loopback connection and we would not have to
	 * go through the list.
1355 * 1356 * Make sure the header's addr is in the list 1357 */ 1358 fp = sctp_lookup_faddr(sctp, hdrsaddr); 1359 if (fp == NULL) { 1360 /* not included; add it now */ 1361 err = sctp_add_faddr(sctp, hdrsaddr, KM_NOSLEEP, B_TRUE); 1362 if (err != 0) 1363 return (err); 1364 1365 /* sctp_faddrs will be the hdr addr */ 1366 fp = sctp->sctp_faddrs; 1367 } 1368 /* make the header addr the primary */ 1369 1370 if (cl_sctp_assoc_change != NULL && psctp == NULL) 1371 curaddr = sctp->sctp_current->faddr; 1372 1373 sctp->sctp_primary = fp; 1374 sctp->sctp_current = fp; 1375 sctp->sctp_mss = fp->sfa_pmss; 1376 1377 /* For loopback connections & linklocal get address from the header */ 1378 if (sctp->sctp_loopback || sctp->sctp_linklocal) { 1379 if (sctp->sctp_nsaddrs != 0) 1380 sctp_free_saddrs(sctp); 1381 if ((err = sctp_saddr_add_addr(sctp, hdrdaddr, 0)) != 0) 1382 return (err); 1383 /* For loopback ignore address list */ 1384 if (sctp->sctp_loopback) 1385 return (0); 1386 check_saddr = B_FALSE; 1387 } 1388 1389 /* Walk the params in the INIT [ACK], pulling out addr params */ 1390 remaining = ntohs(ich->sch_len) - sizeof (*ich) - 1391 sizeof (sctp_init_chunk_t); 1392 if (remaining < sizeof (*ph)) { 1393 if (check_saddr) { 1394 sctp_check_saddr(sctp, supp_af, psctp == NULL ? 1395 B_FALSE : B_TRUE, hdrdaddr); 1396 } 1397 ASSERT(sctp_saddr_lookup(sctp, hdrdaddr, 0) != NULL); 1398 return (0); 1399 } 1400 1401 init = (sctp_init_chunk_t *)(ich + 1); 1402 ph = (sctp_parm_hdr_t *)(init + 1); 1403 1404 /* params will have already been byteordered when validating */ 1405 while (ph != NULL) { 1406 if (ph->sph_type == htons(PARM_SUPP_ADDRS)) { 1407 int plen; 1408 uint16_t *p; 1409 uint16_t addrtype; 1410 1411 ASSERT(psctp != NULL); 1412 plen = ntohs(ph->sph_len); 1413 p = (uint16_t *)(ph + 1); 1414 while (plen > 0) { 1415 addrtype = ntohs(*p); 1416 switch (addrtype) { 1417 case PARM_ADDR6: 1418 supp_af |= PARM_SUPP_V6; 1419 break; 1420 case PARM_ADDR4: 1421 supp_af |= PARM_SUPP_V4; 1422 break; 1423 default: 1424 break; 1425 } 1426 p++; 1427 plen -= sizeof (*p); 1428 } 1429 } else if (ph->sph_type == htons(PARM_ADDR4)) { 1430 if (remaining >= PARM_ADDR4_LEN) { 1431 in6_addr_t addr; 1432 ipaddr_t ta; 1433 1434 supp_af |= PARM_SUPP_V4; 1435 /* 1436 * Screen out broad/multicasts & loopback. 1437 * If the endpoint only accepts v6 address, 1438 * go to the next one. 1439 * 1440 * Subnet broadcast check is done in 1441 * sctp_add_faddr(). If the address is 1442 * a broadcast address, it won't be added. 1443 */ 1444 bcopy(ph + 1, &ta, sizeof (ta)); 1445 if (ta == 0 || 1446 ta == INADDR_BROADCAST || 1447 ta == htonl(INADDR_LOOPBACK) || 1448 CLASSD(ta) || 1449 sctp->sctp_connp->conn_ipv6_v6only) { 1450 goto next; 1451 } 1452 IN6_INADDR_TO_V4MAPPED((struct in_addr *) 1453 (ph + 1), &addr); 1454 1455 /* Check for duplicate. */ 1456 if (sctp_lookup_faddr(sctp, &addr) != NULL) 1457 goto next; 1458 1459 /* OK, add it to the faddr set */ 1460 err = sctp_add_faddr(sctp, &addr, KM_NOSLEEP, 1461 B_FALSE); 1462 /* Something is wrong... Try the next one. */ 1463 if (err != 0) 1464 goto next; 1465 } 1466 } else if (ph->sph_type == htons(PARM_ADDR6) && 1467 sctp->sctp_family == AF_INET6) { 1468 /* An v4 socket should not take v6 addresses. */ 1469 if (remaining >= PARM_ADDR6_LEN) { 1470 in6_addr_t *addr6; 1471 1472 supp_af |= PARM_SUPP_V6; 1473 addr6 = (in6_addr_t *)(ph + 1); 1474 /* 1475 * Screen out link locals, mcast, loopback 1476 * and bogus v6 address. 
1477 */ 1478 if (IN6_IS_ADDR_LINKLOCAL(addr6) || 1479 IN6_IS_ADDR_MULTICAST(addr6) || 1480 IN6_IS_ADDR_LOOPBACK(addr6) || 1481 IN6_IS_ADDR_V4MAPPED(addr6)) { 1482 goto next; 1483 } 1484 /* Check for duplicate. */ 1485 if (sctp_lookup_faddr(sctp, addr6) != NULL) 1486 goto next; 1487 1488 err = sctp_add_faddr(sctp, 1489 (in6_addr_t *)(ph + 1), KM_NOSLEEP, 1490 B_FALSE); 1491 /* Something is wrong... Try the next one. */ 1492 if (err != 0) 1493 goto next; 1494 } 1495 } else if (ph->sph_type == htons(PARM_FORWARD_TSN)) { 1496 if (sctp_options != NULL) 1497 *sctp_options |= SCTP_PRSCTP_OPTION; 1498 } /* else; skip */ 1499 1500 next: 1501 ph = sctp_next_parm(ph, &remaining); 1502 } 1503 if (check_saddr) { 1504 sctp_check_saddr(sctp, supp_af, psctp == NULL ? B_FALSE : 1505 B_TRUE, hdrdaddr); 1506 } 1507 ASSERT(sctp_saddr_lookup(sctp, hdrdaddr, 0) != NULL); 1508 /* 1509 * We have the right address list now, update clustering's 1510 * knowledge because when we sent the INIT we had just added 1511 * the address the INIT was sent to. 1512 */ 1513 if (psctp == NULL && cl_sctp_assoc_change != NULL) { 1514 uchar_t *alist; 1515 size_t asize; 1516 uchar_t *dlist; 1517 size_t dsize; 1518 1519 asize = sizeof (in6_addr_t) * sctp->sctp_nfaddrs; 1520 alist = kmem_alloc(asize, KM_NOSLEEP); 1521 if (alist == NULL) { 1522 SCTP_KSTAT(sctps, sctp_cl_assoc_change); 1523 return (ENOMEM); 1524 } 1525 /* 1526 * Just include the address the INIT was sent to in the 1527 * delete list and send the entire faddr list. We could 1528 * do it differently (i.e include all the addresses in the 1529 * add list even if it contains the original address OR 1530 * remove the original address from the add list etc.), but 1531 * this seems reasonable enough. 1532 */ 1533 dsize = sizeof (in6_addr_t); 1534 dlist = kmem_alloc(dsize, KM_NOSLEEP); 1535 if (dlist == NULL) { 1536 kmem_free(alist, asize); 1537 SCTP_KSTAT(sctps, sctp_cl_assoc_change); 1538 return (ENOMEM); 1539 } 1540 bcopy(&curaddr, dlist, sizeof (curaddr)); 1541 sctp_get_faddr_list(sctp, alist, asize); 1542 (*cl_sctp_assoc_change)(sctp->sctp_family, alist, asize, 1543 sctp->sctp_nfaddrs, dlist, dsize, 1, SCTP_CL_PADDR, 1544 (cl_sctp_handle_t)sctp); 1545 /* alist and dlist will be freed by the clustering module */ 1546 } 1547 return (0); 1548 } 1549 1550 /* 1551 * Returns 0 if the check failed and the restart should be refused, 1552 * 1 if the check succeeded. 
1553 */ 1554 int 1555 sctp_secure_restart_check(mblk_t *pkt, sctp_chunk_hdr_t *ich, uint32_t ports, 1556 int sleep, sctp_stack_t *sctps) 1557 { 1558 sctp_faddr_t *fp, *fphead = NULL; 1559 sctp_parm_hdr_t *ph; 1560 ssize_t remaining; 1561 int isv4; 1562 ipha_t *iph; 1563 ip6_t *ip6h; 1564 in6_addr_t hdraddr[1]; 1565 int retval = 0; 1566 sctp_tf_t *tf; 1567 sctp_t *sctp; 1568 int compres; 1569 sctp_init_chunk_t *init; 1570 int nadded = 0; 1571 1572 /* extract the address from the IP header */ 1573 isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION); 1574 if (isv4) { 1575 iph = (ipha_t *)pkt->b_rptr; 1576 IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdraddr); 1577 } else { 1578 ip6h = (ip6_t *)pkt->b_rptr; 1579 hdraddr[0] = ip6h->ip6_src; 1580 } 1581 1582 /* Walk the params in the INIT [ACK], pulling out addr params */ 1583 remaining = ntohs(ich->sch_len) - sizeof (*ich) - 1584 sizeof (sctp_init_chunk_t); 1585 if (remaining < sizeof (*ph)) { 1586 /* no parameters; restart OK */ 1587 return (1); 1588 } 1589 init = (sctp_init_chunk_t *)(ich + 1); 1590 ph = (sctp_parm_hdr_t *)(init + 1); 1591 1592 while (ph != NULL) { 1593 sctp_faddr_t *fpa = NULL; 1594 1595 /* params will have already been byteordered when validating */ 1596 if (ph->sph_type == htons(PARM_ADDR4)) { 1597 if (remaining >= PARM_ADDR4_LEN) { 1598 in6_addr_t addr; 1599 IN6_INADDR_TO_V4MAPPED((struct in_addr *) 1600 (ph + 1), &addr); 1601 fpa = kmem_cache_alloc(sctp_kmem_faddr_cache, 1602 sleep); 1603 if (fpa == NULL) { 1604 goto done; 1605 } 1606 bzero(fpa, sizeof (*fpa)); 1607 fpa->faddr = addr; 1608 fpa->next = NULL; 1609 } 1610 } else if (ph->sph_type == htons(PARM_ADDR6)) { 1611 if (remaining >= PARM_ADDR6_LEN) { 1612 fpa = kmem_cache_alloc(sctp_kmem_faddr_cache, 1613 sleep); 1614 if (fpa == NULL) { 1615 goto done; 1616 } 1617 bzero(fpa, sizeof (*fpa)); 1618 bcopy(ph + 1, &fpa->faddr, 1619 sizeof (fpa->faddr)); 1620 fpa->next = NULL; 1621 } 1622 } 1623 /* link in the new addr, if it was an addr param */ 1624 if (fpa != NULL) { 1625 if (fphead == NULL) { 1626 fphead = fpa; 1627 } else { 1628 fpa->next = fphead; 1629 fphead = fpa; 1630 } 1631 } 1632 1633 ph = sctp_next_parm(ph, &remaining); 1634 } 1635 1636 if (fphead == NULL) { 1637 /* no addr parameters; restart OK */ 1638 return (1); 1639 } 1640 1641 /* 1642 * got at least one; make sure the header's addr is 1643 * in the list 1644 */ 1645 fp = sctp_lookup_faddr_nosctp(fphead, hdraddr); 1646 if (fp == NULL) { 1647 /* not included; add it now */ 1648 fp = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep); 1649 if (fp == NULL) { 1650 goto done; 1651 } 1652 bzero(fp, sizeof (*fp)); 1653 fp->faddr = *hdraddr; 1654 fp->next = fphead; 1655 fphead = fp; 1656 } 1657 1658 /* 1659 * Now, we can finally do the check: For each sctp instance 1660 * on the hash line for ports, compare its faddr set against 1661 * the new one. If the new one is a strict subset of any 1662 * existing sctp's faddrs, the restart is OK. However, if there 1663 * is an overlap, this could be an attack, so return failure. 1664 * If all sctp's faddrs are disjoint, this is a legitimate new 1665 * association. 
	 */
	tf = &(sctps->sctps_conn_fanout[SCTP_CONN_HASH(sctps, ports)]);
	mutex_enter(&tf->tf_lock);

	for (sctp = tf->tf_sctp; sctp; sctp = sctp->sctp_conn_hash_next) {
		if (ports != sctp->sctp_ports) {
			continue;
		}
		compres = sctp_compare_faddrsets(fphead, sctp->sctp_faddrs);
		if (compres <= SCTP_ADDR_SUBSET) {
			retval = 1;
			mutex_exit(&tf->tf_lock);
			goto done;
		}
		if (compres == SCTP_ADDR_OVERLAP) {
			dprint(1,
			    ("new assoc from %x:%x:%x:%x overlaps with %p\n",
			    SCTP_PRINTADDR(*hdraddr), (void *)sctp));
			/*
			 * While we still hold the lock, we need to
			 * figure out which addresses have been
			 * added so we can include them in the abort
			 * we will send back.  Since these faddrs will
			 * never be used, we overload the rto field
			 * here, setting it to 0 if the address was
			 * not added, 1 if it was added.
			 */
			for (fp = fphead; fp; fp = fp->next) {
				if (sctp_lookup_faddr(sctp, &fp->faddr)) {
					fp->rto = 0;
				} else {
					fp->rto = 1;
					nadded++;
				}
			}
			mutex_exit(&tf->tf_lock);
			goto done;
		}
	}
	mutex_exit(&tf->tf_lock);

	/* All faddrs are disjoint; legit new association */
	retval = 1;

done:
	/* If there are attempted adds, send back an abort listing the addrs */
	if (nadded > 0) {
		void *dtail;
		size_t dlen;

		dtail = kmem_alloc(PARM_ADDR6_LEN * nadded, KM_NOSLEEP);
		if (dtail == NULL) {
			goto cleanup;
		}

		ph = dtail;
		dlen = 0;
		for (fp = fphead; fp; fp = fp->next) {
			if (fp->rto == 0) {
				continue;
			}
			if (IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
				ipaddr_t addr4;

				ph->sph_type = htons(PARM_ADDR4);
				ph->sph_len = htons(PARM_ADDR4_LEN);
				IN6_V4MAPPED_TO_IPADDR(&fp->faddr, addr4);
				ph++;
				bcopy(&addr4, ph, sizeof (addr4));
				ph = (sctp_parm_hdr_t *)
				    ((char *)ph + sizeof (addr4));
				dlen += PARM_ADDR4_LEN;
			} else {
				ph->sph_type = htons(PARM_ADDR6);
				ph->sph_len = htons(PARM_ADDR6_LEN);
				ph++;
				bcopy(&fp->faddr, ph, sizeof (fp->faddr));
				ph = (sctp_parm_hdr_t *)
				    ((char *)ph + sizeof (fp->faddr));
				dlen += PARM_ADDR6_LEN;
			}
		}

		/* Send off the abort */
		sctp_send_abort(sctp, sctp_init2vtag(ich),
		    SCTP_ERR_RESTART_NEW_ADDRS, dtail, dlen, pkt, 0, B_TRUE);

		kmem_free(dtail, PARM_ADDR6_LEN * nadded);
	}

cleanup:
	/* Clean up */
	if (fphead) {
		sctp_faddr_t *fpn;
		for (fp = fphead; fp; fp = fpn) {
			fpn = fp->next;
			kmem_cache_free(sctp_kmem_faddr_cache, fp);
		}
	}

	return (retval);
}

/*
 * Reset any state related to transmitted chunks.
 */
void
sctp_congest_reset(sctp_t *sctp)
{
	sctp_faddr_t	*fp;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	mblk_t		*mp;

	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
		fp->ssthresh = sctps->sctps_initial_mtu;
		SET_CWND(fp, fp->sfa_pmss, sctps->sctps_slow_start_initial);
		fp->suna = 0;
		fp->pba = 0;
	}
	/*
	 * Clean up the transmit list as well since we have reset accounting
	 * on all the fps.  Send event upstream, if required.
1788 */ 1789 while ((mp = sctp->sctp_xmit_head) != NULL) { 1790 sctp->sctp_xmit_head = mp->b_next; 1791 mp->b_next = NULL; 1792 if (sctp->sctp_xmit_head != NULL) 1793 sctp->sctp_xmit_head->b_prev = NULL; 1794 sctp_sendfail_event(sctp, mp, 0, B_TRUE); 1795 } 1796 sctp->sctp_xmit_head = NULL; 1797 sctp->sctp_xmit_tail = NULL; 1798 sctp->sctp_xmit_unacked = NULL; 1799 1800 sctp->sctp_unacked = 0; 1801 /* 1802 * Any control message as well. We will clean-up this list as well. 1803 * This contains any pending ASCONF request that we have queued/sent. 1804 * If we do get an ACK we will just drop it. However, given that 1805 * we are restarting chances are we aren't going to get any. 1806 */ 1807 if (sctp->sctp_cxmit_list != NULL) 1808 sctp_asconf_free_cxmit(sctp, NULL); 1809 sctp->sctp_cxmit_list = NULL; 1810 sctp->sctp_cchunk_pend = 0; 1811 1812 sctp->sctp_rexmitting = B_FALSE; 1813 sctp->sctp_rxt_nxttsn = 0; 1814 sctp->sctp_rxt_maxtsn = 0; 1815 1816 sctp->sctp_zero_win_probe = B_FALSE; 1817 } 1818 1819 static void 1820 sctp_init_faddr(sctp_t *sctp, sctp_faddr_t *fp, in6_addr_t *addr, 1821 mblk_t *timer_mp) 1822 { 1823 sctp_stack_t *sctps = sctp->sctp_sctps; 1824 1825 bcopy(addr, &fp->faddr, sizeof (*addr)); 1826 if (IN6_IS_ADDR_V4MAPPED(addr)) { 1827 fp->isv4 = 1; 1828 /* Make sure that sfa_pmss is a multiple of SCTP_ALIGN. */ 1829 fp->sfa_pmss = 1830 (sctps->sctps_initial_mtu - sctp->sctp_hdr_len) & 1831 ~(SCTP_ALIGN - 1); 1832 } else { 1833 fp->isv4 = 0; 1834 fp->sfa_pmss = 1835 (sctps->sctps_initial_mtu - sctp->sctp_hdr6_len) & 1836 ~(SCTP_ALIGN - 1); 1837 } 1838 fp->cwnd = sctps->sctps_slow_start_initial * fp->sfa_pmss; 1839 fp->rto = MIN(sctp->sctp_rto_initial, sctp->sctp_init_rto_max); 1840 fp->srtt = -1; 1841 fp->rtt_updates = 0; 1842 fp->strikes = 0; 1843 fp->max_retr = sctp->sctp_pp_max_rxt; 1844 /* Mark it as not confirmed. */ 1845 fp->state = SCTP_FADDRS_UNCONFIRMED; 1846 fp->hb_interval = sctp->sctp_hb_interval; 1847 fp->ssthresh = sctps->sctps_initial_ssthresh; 1848 fp->suna = 0; 1849 fp->pba = 0; 1850 fp->acked = 0; 1851 fp->lastactive = lbolt64; 1852 fp->timer_mp = timer_mp; 1853 fp->hb_pending = B_FALSE; 1854 fp->hb_enabled = B_TRUE; 1855 fp->timer_running = 0; 1856 fp->df = 1; 1857 fp->pmtu_discovered = 0; 1858 fp->rc_timer_mp = NULL; 1859 fp->rc_timer_running = 0; 1860 fp->next = NULL; 1861 fp->ire = NULL; 1862 fp->T3expire = 0; 1863 (void) random_get_pseudo_bytes((uint8_t *)&fp->hb_secret, 1864 sizeof (fp->hb_secret)); 1865 fp->hb_expiry = lbolt64; 1866 fp->rxt_unacked = 0; 1867 1868 sctp_get_ire(sctp, fp); 1869 } 1870 1871 /*ARGSUSED*/ 1872 static void 1873 faddr_destructor(void *buf, void *cdrarg) 1874 { 1875 sctp_faddr_t *fp = buf; 1876 1877 ASSERT(fp->timer_mp == NULL); 1878 ASSERT(fp->timer_running == 0); 1879 1880 ASSERT(fp->rc_timer_mp == NULL); 1881 ASSERT(fp->rc_timer_running == 0); 1882 } 1883 1884 void 1885 sctp_faddr_init(void) 1886 { 1887 sctp_kmem_faddr_cache = kmem_cache_create("sctp_faddr_cache", 1888 sizeof (sctp_faddr_t), 0, NULL, faddr_destructor, 1889 NULL, NULL, NULL, 0); 1890 } 1891 1892 void 1893 sctp_faddr_fini(void) 1894 { 1895 kmem_cache_destroy(sctp_kmem_faddr_cache); 1896 } 1897
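/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the per-peer PMSS and RTO seeding done in sctp_get_ire() and
 * sctp_init_faddr() above reduces to the arithmetic shown here.  The names
 * EXAMPLE_ALIGN, example_pmss() and example_seed_rto() are hypothetical
 * stand-ins used only for illustration; the kernel code uses SCTP_ALIGN,
 * MSEC_TO_TICK() and the sctp_faddr_t fields directly.
 *
 * example_pmss() rounds the usable payload down to a multiple of the chunk
 * alignment, as is done for fp->sfa_pmss; example_seed_rto() seeds the RTO
 * as 3 * SRTT and clamps it to the configured bounds.
 *
 *	#include <stdint.h>
 *
 *	#define	EXAMPLE_ALIGN	4
 *
 *	static uint32_t
 *	example_pmss(uint32_t path_mtu, uint32_t hdrlen)
 *	{
 *		return ((path_mtu - hdrlen) & ~(uint32_t)(EXAMPLE_ALIGN - 1));
 *	}
 *
 *	static uint32_t
 *	example_seed_rto(uint32_t srtt, uint32_t rto_min, uint32_t rto_max)
 *	{
 *		uint32_t rto = 3 * srtt;
 *
 *		if (rto < rto_min)
 *			rto = rto_min;
 *		if (rto > rto_max)
 *			rto = rto_max;
 *		return (rto);
 *	}
 *
 * For example, with a 1500-byte path MTU and a 32-byte IPv4 + SCTP header,
 * example_pmss(1500, 32) returns 1468, which is already a multiple of 4.
 */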