/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/sunddi.h>
#include <sys/ddi.h>
#include <sys/strlog.h>

#include <inet/common.h>
#include <inet/mib2.h>
#include <inet/ip.h>
#include <inet/ip6.h>

#include <net/pfkeyv2.h>
#include <inet/ipsec_info.h>
#include <inet/sadb.h>
#include <inet/ipsec_impl.h>
#include <inet/ipdrop.h>
#include <inet/ipsecesp.h>
#include <inet/ipsecah.h>
#include <sys/kstat.h>

/*
 * Returns B_TRUE if the identities in the SA match the identities
 * in the "latch" structure.
 */

static boolean_t
ipsec_match_outbound_ids(ipsec_latch_t *ipl, ipsa_t *sa)
{
	ASSERT(ipl->ipl_ids_latched == B_TRUE);
	return (ipsid_equal(ipl->ipl_local_cid, sa->ipsa_src_cid) &&
	    ipsid_equal(ipl->ipl_remote_cid, sa->ipsa_dst_cid));
}

/*
 * Look up a security association based on the unique ID generated by IP and
 * transport or tunnel information, such as ports and upper-layer protocol,
 * and the inner and outer address(es).  Used for uniqueness testing and
 * outbound packets.  The outer source address may be ignored.
 *
 * I expect an SA hash bucket, and that its per-bucket mutex is held.
 * The SA ptr I return will have its reference count incremented by one.
 */
ipsa_t *
ipsec_getassocbyconn(isaf_t *bucket, ipsec_out_t *io, uint32_t *src,
    uint32_t *dst, sa_family_t af, uint8_t protocol)
{
	ipsa_t *retval, *candidate;
	ipsec_action_t *candact;
	boolean_t need_unique;
	boolean_t tunnel_mode = io->ipsec_out_tunnel;
	uint64_t unique_id;
	uint32_t old_flags, excludeflags;
	ipsec_policy_t *pp = io->ipsec_out_policy;
	ipsec_action_t *actlist = io->ipsec_out_act;
	ipsec_action_t *act;
	ipsec_latch_t *ipl = io->ipsec_out_latch;
	ipsa_ref_t *ipr = NULL;
	sa_family_t inaf = io->ipsec_out_inaf;
	uint32_t *insrc = io->ipsec_out_insrc;
	uint32_t *indst = io->ipsec_out_indst;
	uint8_t insrcpfx = io->ipsec_out_insrcpfx;
	uint8_t indstpfx = io->ipsec_out_indstpfx;

	ASSERT(MUTEX_HELD(&bucket->isaf_lock));

	/*
	 * Caller must set ipsec_out_t structure such that we know
	 * whether this is tunnel mode or transport mode based on
	 * io->ipsec_out_tunnel.  If this flag is set, we assume that
	 * there are valid inner src and destination addresses to compare.
	 */

	/*
	 * Fast path: do we have a latch structure, is it for this bucket,
	 * and does the generation number match?  If so, refhold and return.
	 */

	if (ipl != NULL) {
		ASSERT((protocol == IPPROTO_AH) || (protocol == IPPROTO_ESP));
		ipr = &ipl->ipl_ref[protocol - IPPROTO_ESP];

		retval = ipr->ipsr_sa;

		/*
		 * NOTE: The isaf_gen check (incremented upon
		 * sadb_unlinkassoc()) protects against retval being a freed
		 * SA.  (We're exploiting short-circuit evaluation.)
		 */
		if ((bucket == ipr->ipsr_bucket) &&
		    (bucket->isaf_gen == ipr->ipsr_gen) &&
		    (retval->ipsa_state != IPSA_STATE_DEAD) &&
		    !(retval->ipsa_flags & IPSA_F_CINVALID)) {
			IPSA_REFHOLD(retval);
			return (retval);
		}
	}

	ASSERT((pp != NULL) || (actlist != NULL));
	if (actlist == NULL)
		actlist = pp->ipsp_act;
	ASSERT(actlist != NULL);

	need_unique = actlist->ipa_want_unique;
	unique_id = SA_FORM_UNIQUE_ID(io);

	/*
	 * Precompute mask for SA flags comparison: If we need a
	 * unique SA and an SA has already been used, or if the SA has
	 * a unique value which doesn't match, we aren't interested in
	 * the SA..
	 */

	excludeflags = IPSA_F_UNIQUE;
	if (need_unique)
		excludeflags |= IPSA_F_USED;

	/*
	 * Walk the hash bucket, matching on:
	 *
	 * - unique_id
	 * - destination
	 * - source
	 * - algorithms
	 * - inner dst
	 * - inner src
	 * - <MORE TBD>
	 *
	 * Make sure that wildcard sources are inserted at the end of the hash
	 * bucket.
	 *
	 * DEFINITIONS:	A _shared_ SA is one with unique_id == 0 and USED.
	 *		An _unused_ SA is one with unique_id == 0 and not USED.
	 *		A _unique_ SA is one with unique_id != 0 and USED.
	 *		An SA with unique_id != 0 and not USED never happens.
	 */

	candidate = NULL;

	for (retval = bucket->isaf_ipsa; retval != NULL;
	    retval = retval->ipsa_next) {
		ASSERT((candidate == NULL) ||
		    MUTEX_HELD(&candidate->ipsa_lock));

		/*
		 * Q: Should I lock this SA?
		 * A: For now, yes.  I change and use too many fields in here
		 *    (e.g. unique_id) that I may be racing with other threads.
		 *    Also, the refcnt needs to be bumped up.
		 */

		mutex_enter(&retval->ipsa_lock);

		/* My apologies for the use of goto instead of continue. */

		/* Outer destination address */
		if (!IPSA_ARE_ADDR_EQUAL(dst, retval->ipsa_dstaddr, af))
			goto next_ipsa;	/* Destination mismatch. */

		/* Outer source address */
		if (!IPSA_ARE_ADDR_EQUAL(src, retval->ipsa_srcaddr, af) &&
		    !IPSA_IS_ADDR_UNSPEC(retval->ipsa_srcaddr, af))
			goto next_ipsa;	/* Specific source and not matched. */

		if (tunnel_mode) {
			/* Check tunnel mode */
			if (!(retval->ipsa_flags & IPSA_F_TUNNEL))
				goto next_ipsa;	/* Not tunnel mode SA */

			/* Inner destination address */
			if (!IPSA_IS_ADDR_UNSPEC(retval->ipsa_innerdst, inaf)) {
				if (!ip_addr_match((uint8_t *)indst,
				    min(indstpfx, retval->ipsa_innerdstpfx),
				    (in6_addr_t *)retval->ipsa_innerdst))
					goto next_ipsa; /* not matched. */
			}

			/* Inner source address */
			if (!IPSA_IS_ADDR_UNSPEC(retval->ipsa_innersrc, inaf)) {
				if (!ip_addr_match((uint8_t *)insrc,
				    min(insrcpfx, retval->ipsa_innersrcpfx),
				    (in6_addr_t *)retval->ipsa_innersrc))
					goto next_ipsa; /* not matched. */
			}
		} else {
			/* Check transport mode */
			if (retval->ipsa_flags & IPSA_F_TUNNEL)
				goto next_ipsa;	/* Not transport mode SA */

			/*
			 * TODO - If we ever do RFC 3884's dream of transport-
			 * mode SAs with inner IP address selectors, we need
			 * to put some code here.
			 */
		}

		/*
		 * XXX should be able to use cached/latched action
		 * to dodge this loop
		 */
		for (act = actlist; act != NULL; act = act->ipa_next) {
			ipsec_act_t *ap = &act->ipa_act;
			if (ap->ipa_type != IPSEC_POLICY_APPLY)
				continue;

			/*
			 * XXX ugly.  should be better way to do this test
			 */
			if (protocol == IPPROTO_AH) {
				if (!(ap->ipa_apply.ipp_use_ah))
					continue;
				if (ap->ipa_apply.ipp_auth_alg !=
				    retval->ipsa_auth_alg)
					continue;
				if (ap->ipa_apply.ipp_ah_minbits >
				    retval->ipsa_authkeybits)
					continue;
			} else {
				if (!(ap->ipa_apply.ipp_use_esp))
					continue;

				if ((ap->ipa_apply.ipp_encr_alg !=
				    retval->ipsa_encr_alg))
					continue;

				if (ap->ipa_apply.ipp_espe_minbits >
				    retval->ipsa_encrkeybits)
					continue;

				if (ap->ipa_apply.ipp_esp_auth_alg != 0) {
					if (ap->ipa_apply.ipp_esp_auth_alg !=
					    retval->ipsa_auth_alg)
						continue;
					if (ap->ipa_apply.ipp_espa_minbits >
					    retval->ipsa_authkeybits)
						continue;
				}
			}

			/*
			 * Check key mgmt proto, cookie
			 */
			if ((ap->ipa_apply.ipp_km_proto != 0) &&
			    (retval->ipsa_kmp != 0) &&
			    (ap->ipa_apply.ipp_km_proto != retval->ipsa_kmp))
				continue;

			if ((ap->ipa_apply.ipp_km_cookie != 0) &&
			    (retval->ipsa_kmc != 0) &&
			    (ap->ipa_apply.ipp_km_cookie != retval->ipsa_kmc))
				continue;

			break;
		}
		if (act == NULL)
			goto next_ipsa;	/* nothing matched */

		/*
		 * Do identities match?
		 */
		if (ipl && ipl->ipl_ids_latched &&
		    !ipsec_match_outbound_ids(ipl, retval))
			goto next_ipsa;

		/*
		 * At this point, we know that we have at least a match on:
		 *
		 * - dest
		 * - source (if source is specified, i.e. non-zeroes)
		 * - inner dest (if specified)
		 * - inner source (if specified)
		 * - auth alg (if auth alg is specified, i.e. non-zero)
		 * - encrypt. alg (if encrypt. alg is specified, i.e. non-zero)
		 * and we know that the SA keylengths are appropriate.
		 *
		 * (Keep in mind known-src SAs are hit before zero-src SAs,
		 * thanks to sadb_insertassoc().)
		 * If we need a unique association, optimally we have
		 * ipsa_unique_id == unique_id, otherwise NOT USED
		 * is held in reserve (stored in candidate).
		 *
		 * For those stored in candidate, take best-match (i.e. given
		 * a choice, candidate should have non-zero ipsa_src).
		 */

		/*
		 * If SA has a unique value which matches, we're all set...
		 * "key management knows best"
		 */
		if ((retval->ipsa_flags & IPSA_F_UNIQUE) &&
		    ((unique_id & retval->ipsa_unique_mask) ==
		    retval->ipsa_unique_id))
			break;

		/*
		 * If we need a unique SA and this SA has already been used,
		 * or if the SA has a unique value which doesn't match,
		 * this isn't for us.
		 */

		if (retval->ipsa_flags & excludeflags)
			goto next_ipsa;

		/*
		 * I found a candidate..
		 */
		if (candidate == NULL) {
			/*
			 * and didn't already have one..
			 */
			candidate = retval;
			candact = act;
			continue;
		} else {
			/*
			 * If candidate's source address is zero and
			 * the current match (i.e. retval) address is
			 * not zero, we have a better candidate..
			 */
			if (IPSA_IS_ADDR_UNSPEC(candidate->ipsa_srcaddr, af) &&
			    !IPSA_IS_ADDR_UNSPEC(retval->ipsa_srcaddr, af)) {
				mutex_exit(&candidate->ipsa_lock);
				candidate = retval;
				candact = act;
				continue;
			}
		}
next_ipsa:
		mutex_exit(&retval->ipsa_lock);
	}
	ASSERT((retval == NULL) || MUTEX_HELD(&retval->ipsa_lock));
	ASSERT((candidate == NULL) || MUTEX_HELD(&candidate->ipsa_lock));
	ASSERT((retval == NULL) || (act != NULL));
	ASSERT((candidate == NULL) || (candact != NULL));

	/* Let caller react to a lookup failure when it gets NULL. */
	if (retval == NULL && candidate == NULL)
		return (NULL);

	if (retval == NULL) {
		ASSERT(MUTEX_HELD(&candidate->ipsa_lock));
		retval = candidate;
		act = candact;
	} else if (candidate != NULL) {
		mutex_exit(&candidate->ipsa_lock);
	}
	ASSERT(MUTEX_HELD(&retval->ipsa_lock));
	ASSERT(act != NULL);

	/*
	 * Even though I hold the mutex, since the reference counter is an
	 * atomic operation, I really have to use the IPSA_REFHOLD macro.
	 */
	IPSA_REFHOLD(retval);

	/*
	 * This association is no longer unused.
	 */
	old_flags = retval->ipsa_flags;
	retval->ipsa_flags |= IPSA_F_USED;

	/*
	 * Cache a reference to this SA for the fast path.
	 */
	if (ipr != NULL) {
		ipr->ipsr_bucket = bucket;
		ipr->ipsr_gen = bucket->isaf_gen;
		ipr->ipsr_sa = retval;
		/* I'm now caching, so the cache-invalid flag goes away! */
		retval->ipsa_flags &= ~IPSA_F_CINVALID;
	}
	/*
	 * Latch various things while we're here..
	 */
	if (ipl != NULL) {
		if (!ipl->ipl_ids_latched) {
			ipsec_latch_ids(ipl,
			    retval->ipsa_src_cid, retval->ipsa_dst_cid);
		}
		if (!ipl->ipl_out_action_latched) {
			IPACT_REFHOLD(act);
			ipl->ipl_out_action = act;
			ipl->ipl_out_action_latched = B_TRUE;
		}
	}

	/*
	 * Set the uniqueness only the first time.
	 */
	if (need_unique && !(old_flags & IPSA_F_USED)) {
		if (retval->ipsa_unique_id == 0) {
			ASSERT((retval->ipsa_flags & IPSA_F_UNIQUE) == 0);
			/*
			 * From now on, only this src, dst[ports, addr],
			 * proto, should use it.
			 */
			retval->ipsa_flags |= IPSA_F_UNIQUE;
			retval->ipsa_unique_id = unique_id;
			retval->ipsa_unique_mask = SA_UNIQUE_MASK(
			    io->ipsec_out_src_port, io->ipsec_out_dst_port,
			    protocol, 0);
		}

		/*
		 * Set the source address and adjust the hash
		 * buckets only if src_addr is zero.
		 */
		if (IPSA_IS_ADDR_UNSPEC(retval->ipsa_srcaddr, af)) {
			/*
			 * sadb_unlinkassoc() will decrement the refcnt.  Bump
			 * up when we have the lock so that we don't have to
			 * acquire locks when we come back from
			 * sadb_insertassoc().
			 *
			 * We don't need to bump the bucket's gen since
			 * we aren't moving to a new bucket.
			 */
			IPSA_REFHOLD(retval);
			IPSA_COPY_ADDR(retval->ipsa_srcaddr, src, af);
			mutex_exit(&retval->ipsa_lock);
			sadb_unlinkassoc(retval);
			/*
			 * Since the bucket lock is held, we know
			 * sadb_insertassoc() will succeed.
			 */
#ifdef DEBUG
			if (sadb_insertassoc(retval, bucket) != 0) {
				cmn_err(CE_PANIC,
				    "sadb_insertassoc() failed in "
				    "ipsec_getassocbyconn().\n");
			}
#else /* non-DEBUG */
			(void) sadb_insertassoc(retval, bucket);
#endif /* DEBUG */
			return (retval);
		}
	}
	mutex_exit(&retval->ipsa_lock);

	return (retval);
}

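/*
 * Illustrative usage sketch for ipsec_getassocbyconn() (non-authoritative;
 * see ipsec_outbound_sa() below for the real caller in this file).  The
 * caller holds the bucket lock across the lookup and eventually releases
 * the hold it is handed:
 *
 *	mutex_enter(&bucket->isaf_lock);
 *	assoc = ipsec_getassocbyconn(bucket, io, src_ptr, dst_ptr, af, proto);
 *	mutex_exit(&bucket->isaf_lock);
 *	if (assoc != NULL) {
 *		... use assoc ...
 *		IPSA_REFRELE(assoc);
 *	}
 */
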
/*
 * Look up a security association based on the security parameters index (SPI)
 * and address(es).  This is used for inbound packets and general SA lookups
 * (even in outbound SA tables).  The source address may be ignored.  Return
 * NULL if no association is available.  If an SA is found, return it, with
 * its refcnt incremented.  The caller must REFRELE after using the SA.
 * The hash bucket must be locked down before calling.
 */
ipsa_t *
ipsec_getassocbyspi(isaf_t *bucket, uint32_t spi, uint32_t *src, uint32_t *dst,
    sa_family_t af)
{
	ipsa_t *retval;

	ASSERT(MUTEX_HELD(&bucket->isaf_lock));

	/*
	 * Walk the hash bucket, matching exactly on SPI, then destination,
	 * then source.
	 *
	 * Per-SA locking doesn't need to happen, because I'm only matching
	 * on addresses.  Addresses are only changed during insertion/deletion
	 * from the hash bucket.  Since the hash bucket lock is held, we don't
	 * need to worry about addresses changing.
	 */

	for (retval = bucket->isaf_ipsa; retval != NULL;
	    retval = retval->ipsa_next) {
		if (retval->ipsa_spi != spi)
			continue;
		if (!IPSA_ARE_ADDR_EQUAL(dst, retval->ipsa_dstaddr, af))
			continue;

		/*
		 * Assume that wildcard source addresses are inserted at the
		 * end of the hash bucket.  (See sadb_insertassoc().)
		 * The following check for source addresses is a weak form
		 * of access control/source identity verification.  If an
		 * SA has a source address, I only match an all-zeroes
		 * source address, or that particular one.  If the SA has
		 * an all-zeroes source, then I match regardless.
		 *
		 * There is a weakness here in that a packet with all-zeroes
		 * for an address will match regardless of the source address
		 * stored in the packet.
		 *
		 * Note that port-level packet selectors, if present,
		 * are checked in ipsec_check_ipsecin_unique().
		 */
		if (IPSA_ARE_ADDR_EQUAL(src, retval->ipsa_srcaddr, af) ||
		    IPSA_IS_ADDR_UNSPEC(retval->ipsa_srcaddr, af) ||
		    IPSA_IS_ADDR_UNSPEC(src, af))
			break;
	}

	if (retval != NULL) {
		/*
		 * Just refhold the return value.  The caller will then
		 * make the appropriate calls to set the USED flag.
		 */
		IPSA_REFHOLD(retval);
	}

	return (retval);
}

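/*
 * Find an outbound SA of the given protocol (IPPROTO_AH or IPPROTO_ESP) for
 * the datagram described by the IPSEC_OUT message in mp.  On success, the SA
 * is reference-held, stashed in io->ipsec_out_ah_sa or io->ipsec_out_esp_sa,
 * and B_TRUE is returned.  B_FALSE means no usable SA was found (or the SA
 * found was dead), and the caller must decide how to react.
 */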
boolean_t
ipsec_outbound_sa(mblk_t *mp, uint_t proto)
{
	mblk_t *data_mp;
	ipsec_out_t *io;
	ipaddr_t dst;
	uint32_t *dst_ptr, *src_ptr;
	isaf_t *bucket;
	ipsa_t *assoc;
	ip6_pkt_t ipp;
	in6_addr_t dst6;
	ipsa_t **sa;
	sadbp_t *sadbp;
	sadb_t *sp;
	sa_family_t af;
	netstack_t *ns;

	data_mp = mp->b_cont;
	io = (ipsec_out_t *)mp->b_rptr;
	ns = io->ipsec_out_ns;

	if (proto == IPPROTO_ESP) {
		ipsecesp_stack_t *espstack;

		espstack = ns->netstack_ipsecesp;
		sa = &io->ipsec_out_esp_sa;
		sadbp = &espstack->esp_sadb;
	} else {
		ipsecah_stack_t *ahstack;

		ASSERT(proto == IPPROTO_AH);
		ahstack = ns->netstack_ipsecah;
		sa = &io->ipsec_out_ah_sa;
		sadbp = &ahstack->ah_sadb;
	}

	ASSERT(*sa == NULL);

	if (io->ipsec_out_v4) {
		ipha_t *ipha = (ipha_t *)data_mp->b_rptr;

		ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION);
		dst = ip_get_dst(ipha);
		sp = &sadbp->s_v4;
		af = AF_INET;

		/*
		 * NOTE: Getting the outbound association is considerably
		 * painful.  ipsec_getassocbyconn() will require more
		 * parameters as policy implementations mature.
		 */
		bucket = OUTBOUND_BUCKET_V4(sp, dst);
		src_ptr = (uint32_t *)&ipha->ipha_src;
		dst_ptr = (uint32_t *)&dst;
	} else {
		ip6_t *ip6h = (ip6_t *)data_mp->b_rptr;

		ASSERT(IPH_HDR_VERSION(ip6h) == IPV6_VERSION);
		dst6 = ip_get_dst_v6(ip6h, NULL);
		af = AF_INET6;

		bzero(&ipp, sizeof (ipp));
		sp = &sadbp->s_v6;

		/* Same NOTE: applies here! */
		bucket = OUTBOUND_BUCKET_V6(sp, dst6);
		src_ptr = (uint32_t *)&ip6h->ip6_src;
		dst_ptr = (uint32_t *)&dst6;
	}

	mutex_enter(&bucket->isaf_lock);
	assoc = ipsec_getassocbyconn(bucket, io, src_ptr, dst_ptr, af, proto);
	mutex_exit(&bucket->isaf_lock);

	if (assoc == NULL)
		return (B_FALSE);

	if (assoc->ipsa_state == IPSA_STATE_DEAD) {
		IPSA_REFRELE(assoc);
		return (B_FALSE);
	}

	ASSERT(assoc->ipsa_state != IPSA_STATE_LARVAL);

	*sa = assoc;
	return (B_TRUE);
}

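/*
 * Hypothetical usage sketch for ipsec_outbound_sa() (illustration only; the
 * variable name ipsec_mp and the caller's reaction are assumptions, not taken
 * from this file).  An output-path caller would invoke it once per required
 * protocol and fall back to key management when no SA exists yet:
 *
 *	if (!ipsec_outbound_sa(ipsec_mp, IPPROTO_ESP)) {
 *		... no ESP SA yet; ask key management for one ...
 *	}
 */
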
/*
 * Inbound IPsec SA selection.
 */

ah_t *
ipsec_inbound_ah_sa(mblk_t *mp, netstack_t *ns)
{
	mblk_t *ipsec_in;
	ipha_t *ipha;
	ipsa_t *assoc;
	ah_t *ah;
	isaf_t *hptr;
	ipsec_in_t *ii;
	boolean_t isv6;
	ip6_t *ip6h;
	int ah_offset;
	uint32_t *src_ptr, *dst_ptr;
	int pullup_len;
	sadb_t *sp;
	sa_family_t af;
	ipsec_stack_t *ipss = ns->netstack_ipsec;
	ipsecah_stack_t *ahstack = ns->netstack_ipsecah;

	IP_AH_BUMP_STAT(ipss, in_requests);

	ASSERT(mp->b_datap->db_type == M_CTL);

	ipsec_in = mp;
	ii = (ipsec_in_t *)ipsec_in->b_rptr;
	mp = mp->b_cont;

	ASSERT(mp->b_datap->db_type == M_DATA);

	isv6 = !ii->ipsec_in_v4;
	if (isv6) {
		ip6h = (ip6_t *)mp->b_rptr;
		ah_offset = ipsec_ah_get_hdr_size_v6(mp, B_TRUE);
	} else {
		ipha = (ipha_t *)mp->b_rptr;
		ASSERT(ipha->ipha_protocol == IPPROTO_AH);
		ah_offset = ipha->ipha_version_and_hdr_length -
		    (uint8_t)((IP_VERSION << 4));
		ah_offset <<= 2;
	}

	/*
	 * We assume that the IP header is pulled up until
	 * the options.  We need to see whether we have the
	 * AH header in the same mblk or not.
	 */
	pullup_len = ah_offset + sizeof (ah_t);
	if (mp->b_rptr + pullup_len > mp->b_wptr) {
		if (!pullupmsg(mp, pullup_len)) {
			ipsec_rl_strlog(ns, ip_mod_info.mi_idnum, 0, 0,
			    SL_WARN | SL_ERROR,
			    "ipsec_inbound_ah_sa: Small AH header\n");
			IP_AH_BUMP_STAT(ipss, in_discards);
			ip_drop_packet(ipsec_in, B_TRUE, NULL, NULL,
			    DROPPER(ipss, ipds_ah_bad_length),
			    &ipss->ipsec_dropper);
			return (NULL);
		}
		if (isv6)
			ip6h = (ip6_t *)mp->b_rptr;
		else
			ipha = (ipha_t *)mp->b_rptr;
	}

	ah = (ah_t *)(mp->b_rptr + ah_offset);

	if (isv6) {
		src_ptr = (uint32_t *)&ip6h->ip6_src;
		dst_ptr = (uint32_t *)&ip6h->ip6_dst;
		sp = &ahstack->ah_sadb.s_v6;
		af = AF_INET6;
	} else {
		src_ptr = (uint32_t *)&ipha->ipha_src;
		dst_ptr = (uint32_t *)&ipha->ipha_dst;
		sp = &ahstack->ah_sadb.s_v4;
		af = AF_INET;
	}

	hptr = INBOUND_BUCKET(sp, ah->ah_spi);
	mutex_enter(&hptr->isaf_lock);
	assoc = ipsec_getassocbyspi(hptr, ah->ah_spi, src_ptr, dst_ptr, af);
	mutex_exit(&hptr->isaf_lock);

	if (assoc == NULL || assoc->ipsa_state == IPSA_STATE_DEAD) {
		IP_AH_BUMP_STAT(ipss, lookup_failure);
		IP_AH_BUMP_STAT(ipss, in_discards);
		ipsecah_in_assocfailure(ipsec_in, 0,
		    SL_ERROR | SL_CONSOLE | SL_WARN,
		    "ipsec_inbound_ah_sa: No association found for "
		    "spi 0x%x, dst addr %s\n",
		    ah->ah_spi, dst_ptr, af, ahstack);
		if (assoc != NULL) {
			IPSA_REFRELE(assoc);
		}
		return (NULL);
	}

	if (assoc->ipsa_state == IPSA_STATE_LARVAL) {
		/* Not fully baked; swap the packet under a rock until then */
		sadb_set_lpkt(assoc, ipsec_in, ns);
		IPSA_REFRELE(assoc);
		return (NULL);
	}

	/*
	 * Save a reference to the association so that it can
	 * be retrieved after execution.  We free any AH SA reference
	 * already there (innermost SA "wins").  The reference to
	 * the SA will also be used later when doing the policy checks.
	 */
	if (ii->ipsec_in_ah_sa != NULL) {
		IPSA_REFRELE(ii->ipsec_in_ah_sa);
	}
	ii->ipsec_in_ah_sa = assoc;

	return (ah);
}

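/*
 * Find the inbound ESP SA for the packet carried by the IPSEC_IN message
 * ipsec_in_mp, pulling the payload into a single mblk if necessary.  On
 * success, a reference to the SA is saved in ii->ipsec_in_esp_sa and a
 * pointer to the ESP header is returned.  On failure, the packet has been
 * dropped (or stashed on a larval SA) and NULL is returned.
 */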
esph_t *
ipsec_inbound_esp_sa(mblk_t *ipsec_in_mp, netstack_t *ns)
{
	mblk_t *data_mp, *placeholder;
	uint32_t *src_ptr, *dst_ptr;
	ipsec_in_t *ii;
	ipha_t *ipha;
	ip6_t *ip6h;
	esph_t *esph;
	ipsa_t *ipsa;
	isaf_t *bucket;
	uint_t preamble;
	sa_family_t af;
	boolean_t isv6;
	sadb_t *sp;
	ipsec_stack_t *ipss = ns->netstack_ipsec;
	ipsecesp_stack_t *espstack = ns->netstack_ipsecesp;

	IP_ESP_BUMP_STAT(ipss, in_requests);
	ASSERT(ipsec_in_mp->b_datap->db_type == M_CTL);

	/* We have IPSEC_IN already! */
	ii = (ipsec_in_t *)ipsec_in_mp->b_rptr;
	data_mp = ipsec_in_mp->b_cont;

	ASSERT(ii->ipsec_in_type == IPSEC_IN);

	isv6 = !ii->ipsec_in_v4;
	if (isv6) {
		ip6h = (ip6_t *)data_mp->b_rptr;
	} else {
		ipha = (ipha_t *)data_mp->b_rptr;
	}

	/*
	 * Put all data into one mblk if it's not there already.
	 * XXX This is probably bad long-term.  Figure out better ways of
	 * doing this.  Much of the inbound path depends on all of the data
	 * being in one mblk.
	 *
	 * XXX Jumbogram issues will have to be dealt with here.
	 * If the plen is 0, we'll have to scan for a HBH header with the
	 * actual packet length.
	 */
	if (data_mp->b_datap->db_ref > 1 ||
	    (data_mp->b_wptr - data_mp->b_rptr) <
	    (isv6 ? (ntohs(ip6h->ip6_plen) + sizeof (ip6_t))
	    : ntohs(ipha->ipha_length))) {
		placeholder = msgpullup(data_mp, -1);
		if (placeholder == NULL) {
			IP_ESP_BUMP_STAT(ipss, in_discards);
			/*
			 * TODO: Extract inbound interface from the IPSEC_IN
			 * message's ii->ipsec_in_rill_index.
			 */
			ip_drop_packet(ipsec_in_mp, B_TRUE, NULL, NULL,
			    DROPPER(ipss, ipds_esp_nomem),
			    &ipss->ipsec_dropper);
			return (NULL);
		} else {
			/* Reset packet with new pulled up mblk. */
			freemsg(data_mp);
			data_mp = placeholder;
			ipsec_in_mp->b_cont = data_mp;
		}
	}

	/*
	 * Find the ESP header, point the address pointers at the appropriate
	 * IPv4/IPv6 places.
	 */
	if (isv6) {
		ip6h = (ip6_t *)data_mp->b_rptr;
		src_ptr = (uint32_t *)&ip6h->ip6_src;
		dst_ptr = (uint32_t *)&ip6h->ip6_dst;
		if (ip6h->ip6_nxt != IPPROTO_ESP) {
			/* There are options that need to be processed. */
			preamble = ip_hdr_length_v6(data_mp, ip6h);
		} else {
			preamble = sizeof (ip6_t);
		}

		sp = &espstack->esp_sadb.s_v6;
		af = AF_INET6;
	} else {
		ipha = (ipha_t *)data_mp->b_rptr;
		src_ptr = (uint32_t *)&ipha->ipha_src;
		dst_ptr = (uint32_t *)&ipha->ipha_dst;
		preamble = IPH_HDR_LENGTH(ipha);

		sp = &espstack->esp_sadb.s_v4;
		af = AF_INET;
	}

	esph = (esph_t *)(data_mp->b_rptr + preamble);

	/* Since hash is common on inbound (SPI value), hash here. */
	bucket = INBOUND_BUCKET(sp, esph->esph_spi);
	mutex_enter(&bucket->isaf_lock);
	ipsa = ipsec_getassocbyspi(bucket, esph->esph_spi, src_ptr, dst_ptr,
	    af);
	mutex_exit(&bucket->isaf_lock);

	if (ipsa == NULL || ipsa->ipsa_state == IPSA_STATE_DEAD) {
		/* This is a loggable error!  AUDIT ME! */
		IP_ESP_BUMP_STAT(ipss, lookup_failure);
		IP_ESP_BUMP_STAT(ipss, in_discards);
		ipsecesp_in_assocfailure(ipsec_in_mp, 0,
		    SL_ERROR | SL_CONSOLE | SL_WARN,
		    "ipsec_inbound_esp_sa: No association found for "
		    "spi 0x%x, dst addr %s\n",
		    esph->esph_spi, dst_ptr, af, espstack);
		if (ipsa != NULL) {
			IPSA_REFRELE(ipsa);
		}
		return (NULL);
	}

	if (ipsa->ipsa_state == IPSA_STATE_LARVAL) {
		/* Not fully baked; swap the packet under a rock until then */
		sadb_set_lpkt(ipsa, ipsec_in_mp, ns);
		IPSA_REFRELE(ipsa);
		return (NULL);
	}

	/*
	 * Save a reference to the association so that it can
	 * be retrieved after execution.  We free any ESP SA reference
	 * already there (innermost SA "wins").  The reference to
	 * the SA will also be used later when doing the policy checks.
	 */
	if (ii->ipsec_in_esp_sa != NULL) {
		IPSA_REFRELE(ii->ipsec_in_esp_sa);
	}
	ii->ipsec_in_esp_sa = ipsa;

	return (esph);
}