/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/sunddi.h>
#include <sys/ddi.h>
#include <sys/strlog.h>

#include <inet/common.h>
#include <inet/mib2.h>
#include <inet/ip.h>
#include <inet/ip6.h>

#include <net/pfkeyv2.h>
#include <inet/ipsec_info.h>
#include <inet/sadb.h>
#include <inet/ipsec_impl.h>
#include <inet/ipdrop.h>
#include <inet/ipsecesp.h>
#include <inet/ipsecah.h>
#include <sys/kstat.h>

/*
 * Returns B_TRUE if the identities in the SA match the identities
 * in the "latch" structure.
 */

static boolean_t
ipsec_match_outbound_ids(ipsec_latch_t *ipl, ipsa_t *sa)
{
        ASSERT(ipl->ipl_ids_latched == B_TRUE);
        return ipsid_equal(ipl->ipl_local_cid, sa->ipsa_src_cid) &&
            ipsid_equal(ipl->ipl_remote_cid, sa->ipsa_dst_cid);
}

/*
 * Look up a security association based on the unique ID generated by IP and
 * transport or tunnel information, such as ports and upper-layer protocol,
 * and the inner and outer address(es).  Used for uniqueness testing and
 * outbound packets.  The outer source address may be ignored.
 *
 * I expect an SA hash bucket, and that its per-bucket mutex is held.
 * The SA ptr I return will have its reference count incremented by one.
 */
ipsa_t *
ipsec_getassocbyconn(isaf_t *bucket, ipsec_out_t *io, uint32_t *src,
    uint32_t *dst, sa_family_t af, uint8_t protocol)
{
        ipsa_t *retval, *candidate;
        ipsec_action_t *candact;
        boolean_t need_unique;
        boolean_t tunnel_mode = io->ipsec_out_tunnel;
        uint64_t unique_id;
        uint32_t old_flags, excludeflags;
        ipsec_policy_t *pp = io->ipsec_out_policy;
        ipsec_action_t *actlist = io->ipsec_out_act;
        ipsec_action_t *act;
        ipsec_latch_t *ipl = io->ipsec_out_latch;
        ipsa_ref_t *ipr = NULL;
        sa_family_t inaf = io->ipsec_out_inaf;
        uint32_t *insrc = io->ipsec_out_insrc;
        uint32_t *indst = io->ipsec_out_indst;
        uint8_t insrcpfx = io->ipsec_out_insrcpfx;
        uint8_t indstpfx = io->ipsec_out_indstpfx;

        ASSERT(MUTEX_HELD(&bucket->isaf_lock));

        /*
         * Caller must set ipsec_out_t structure such that we know
         * whether this is tunnel mode or transport mode based on
         * io->ipsec_out_tunnel.  If this flag is set, we assume that
         * there are valid inner src and destination addresses to compare.
         */

        /*
         * Fast path: do we have a latch structure, is it for this bucket,
         * and does the generation number match?  If so, refhold and return.
         */

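        /*
         * The ipl_ref[] cache below is indexed by (protocol - IPPROTO_ESP),
         * which relies on IPPROTO_ESP (50) and IPPROTO_AH (51) being
         * adjacent protocol numbers; the ASSERT restricts protocol to
         * exactly those two values.
         */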
        if (ipl != NULL) {
                ASSERT((protocol == IPPROTO_AH) || (protocol == IPPROTO_ESP));
                ipr = &ipl->ipl_ref[protocol - IPPROTO_ESP];

                retval = ipr->ipsr_sa;

                /*
                 * NOTE: The isaf_gen check (incremented upon
                 * sadb_unlinkassoc()) protects against retval being a freed
                 * SA.  (We're exploiting short-circuit evaluation.)
                 */
                if ((bucket == ipr->ipsr_bucket) &&
                    (bucket->isaf_gen == ipr->ipsr_gen) &&
                    (retval->ipsa_state != IPSA_STATE_DEAD) &&
                    !(retval->ipsa_flags & IPSA_F_CINVALID)) {
                        IPSA_REFHOLD(retval);
                        return (retval);
                }
        }

        ASSERT((pp != NULL) || (actlist != NULL));
        if (actlist == NULL)
                actlist = pp->ipsp_act;
        ASSERT(actlist != NULL);

        need_unique = actlist->ipa_want_unique;
        unique_id = SA_FORM_UNIQUE_ID(io);

        /*
         * Precompute mask for SA flags comparison: If we need a
         * unique SA and an SA has already been used, or if the SA has
         * a unique value which doesn't match, we aren't interested in
         * the SA..
         */

        excludeflags = IPSA_F_UNIQUE;
        if (need_unique)
                excludeflags |= IPSA_F_USED;

        /*
         * Walk the hash bucket, matching on:
         *
         * - unique_id
         * - destination
         * - source
         * - algorithms
         * - inner dst
         * - inner src
         * - <MORE TBD>
         *
         * Make sure that wildcard sources are inserted at the end of the hash
         * bucket.
         *
         * DEFINITIONS: A _shared_ SA is one with unique_id == 0 and USED.
         *              An _unused_ SA is one with unique_id == 0 and not USED.
         *              A _unique_ SA is one with unique_id != 0 and USED.
         *              An SA with unique_id != 0 and not USED never happens.
         */

        candidate = NULL;

        for (retval = bucket->isaf_ipsa; retval != NULL;
            retval = retval->ipsa_next) {
                ASSERT((candidate == NULL) ||
                    MUTEX_HELD(&candidate->ipsa_lock));

                /*
                 * Q: Should I lock this SA?
                 * A: For now, yes.  I change and use too many fields in here
                 *    (e.g. unique_id) that I may be racing with other threads.
                 *    Also, the refcnt needs to be bumped up.
                 */

                mutex_enter(&retval->ipsa_lock);

                /* My apologies for the use of goto instead of continue. */

                /* Outer destination address */
                if (!IPSA_ARE_ADDR_EQUAL(dst, retval->ipsa_dstaddr, af))
                        goto next_ipsa;  /* Destination mismatch. */

                /* Outer source address */
                if (!IPSA_ARE_ADDR_EQUAL(src, retval->ipsa_srcaddr, af) &&
                    !IPSA_IS_ADDR_UNSPEC(retval->ipsa_srcaddr, af))
                        goto next_ipsa;  /* Specific source and not matched. */

                if (tunnel_mode) {
                        /* Check tunnel mode */
                        if (!(retval->ipsa_flags & IPSA_F_TUNNEL))
                                goto next_ipsa;  /* Not tunnel mode SA */

                        /* Inner destination address */
                        if (!IPSA_IS_ADDR_UNSPEC(retval->ipsa_innerdst, inaf)) {
                                if (!ip_addr_match((uint8_t *)indst,
                                    min(indstpfx, retval->ipsa_innerdstpfx),
                                    (in6_addr_t *)retval->ipsa_innerdst))
                                        goto next_ipsa;  /* not matched. */
                        }

                        /* Inner source address */
                        if (!IPSA_IS_ADDR_UNSPEC(retval->ipsa_innersrc, inaf)) {
                                if (!ip_addr_match((uint8_t *)insrc,
                                    min(insrcpfx, retval->ipsa_innersrcpfx),
                                    (in6_addr_t *)retval->ipsa_innersrc))
                                        goto next_ipsa;  /* not matched. */
                        }
                } else {
                        /* Check transport mode */
                        if (retval->ipsa_flags & IPSA_F_TUNNEL)
                                goto next_ipsa;  /* Not transport mode SA */

                        /*
                         * TODO - If we ever do RFC 3884's dream of transport-
                         * mode SAs with inner IP address selectors, we need
                         * to put some code here.
                         */
                }

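                /*
                 * Check this SA against each IPSEC_POLICY_APPLY action in
                 * the action list: the algorithms must match and the SA's
                 * key lengths must meet the policy minimums.  The first
                 * action this SA satisfies is remembered in "act" so that
                 * it can be latched below.
                 */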
                /*
                 * XXX should be able to use cached/latched action
                 * to dodge this loop
                 */
                for (act = actlist; act != NULL; act = act->ipa_next) {
                        ipsec_act_t *ap = &act->ipa_act;
                        if (ap->ipa_type != IPSEC_POLICY_APPLY)
                                continue;

                        /*
                         * XXX ugly.  should be better way to do this test
                         */
                        if (protocol == IPPROTO_AH) {
                                if (!(ap->ipa_apply.ipp_use_ah))
                                        continue;
                                if (ap->ipa_apply.ipp_auth_alg !=
                                    retval->ipsa_auth_alg)
                                        continue;
                                if (ap->ipa_apply.ipp_ah_minbits >
                                    retval->ipsa_authkeybits)
                                        continue;
                        } else {
                                if (!(ap->ipa_apply.ipp_use_esp))
                                        continue;

                                if ((ap->ipa_apply.ipp_encr_alg !=
                                    retval->ipsa_encr_alg))
                                        continue;

                                if (ap->ipa_apply.ipp_espe_minbits >
                                    retval->ipsa_encrkeybits)
                                        continue;

                                if (ap->ipa_apply.ipp_esp_auth_alg != 0) {
                                        if (ap->ipa_apply.ipp_esp_auth_alg !=
                                            retval->ipsa_auth_alg)
                                                continue;
                                        if (ap->ipa_apply.ipp_espa_minbits >
                                            retval->ipsa_authkeybits)
                                                continue;
                                }
                        }

                        /*
                         * Check key mgmt proto, cookie
                         */
                        if ((ap->ipa_apply.ipp_km_proto != 0) &&
                            (retval->ipsa_kmp != 0) &&
                            (ap->ipa_apply.ipp_km_proto != retval->ipsa_kmp))
                                continue;

                        if ((ap->ipa_apply.ipp_km_cookie != 0) &&
                            (retval->ipsa_kmc != 0) &&
                            (ap->ipa_apply.ipp_km_cookie != retval->ipsa_kmc))
                                continue;

                        break;
                }
                if (act == NULL)
                        goto next_ipsa;  /* nothing matched */

                /*
                 * Do identities match?
                 */
                if (ipl && ipl->ipl_ids_latched &&
                    !ipsec_match_outbound_ids(ipl, retval))
                        goto next_ipsa;

                /*
                 * At this point, we know that we have at least a match on:
                 *
                 * - dest
                 * - source (if source is specified, i.e. non-zeroes)
                 * - inner dest (if specified)
                 * - inner source (if specified)
                 * - auth alg (if auth alg is specified, i.e. non-zero)
                 * - encrypt. alg (if encrypt. alg is specified, i.e. non-zero)
                 * and we know that the SA keylengths are appropriate.
                 *
                 * (Keep in mind known-src SAs are hit before zero-src SAs,
                 * thanks to sadb_insertassoc().)
                 * If we need a unique association, optimally we have
                 * ipsa_unique_id == unique_id, otherwise NOT USED
                 * is held in reserve (stored in candidate).
                 *
                 * For those stored in candidate, take best-match (i.e. given
                 * a choice, candidate should have non-zero ipsa_src).
                 */

                /*
                 * If SA has a unique value which matches, we're all set...
                 * "key management knows best"
                 */
                if ((retval->ipsa_flags & IPSA_F_UNIQUE) &&
                    ((unique_id & retval->ipsa_unique_mask) ==
                    retval->ipsa_unique_id))
                        break;

                /*
                 * If we need a unique SA and this SA has already been used,
                 * or if the SA has a unique value which doesn't match,
                 * this isn't for us.
                 */

                if (retval->ipsa_flags & excludeflags)
                        goto next_ipsa;


                /*
                 * I found a candidate..
                 */
                if (candidate == NULL) {
                        /*
                         * and didn't already have one..
                         */
                        candidate = retval;
                        candact = act;
                        continue;
                } else {
                        /*
                         * If candidate's source address is zero and
                         * the current match (i.e. retval) address is
                         * not zero, we have a better candidate..
                         */
                        if (IPSA_IS_ADDR_UNSPEC(candidate->ipsa_srcaddr, af) &&
                            !IPSA_IS_ADDR_UNSPEC(retval->ipsa_srcaddr, af)) {
                                mutex_exit(&candidate->ipsa_lock);
                                candidate = retval;
                                candact = act;
                                continue;
                        }
                }
next_ipsa:
                mutex_exit(&retval->ipsa_lock);
        }
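        /*
         * At loop exit, a non-NULL retval means we broke out on an exact
         * unique-ID match ("key management knows best") and retval's lock
         * is still held.  Otherwise candidate, if non-NULL, is the best
         * shared/unused SA seen during the walk, also still locked.
         */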
        ASSERT((retval == NULL) || MUTEX_HELD(&retval->ipsa_lock));
        ASSERT((candidate == NULL) || MUTEX_HELD(&candidate->ipsa_lock));
        ASSERT((retval == NULL) || (act != NULL));
        ASSERT((candidate == NULL) || (candact != NULL));

        /* Let caller react to a lookup failure when it gets NULL. */
        if (retval == NULL && candidate == NULL)
                return (NULL);

        if (retval == NULL) {
                ASSERT(MUTEX_HELD(&candidate->ipsa_lock));
                retval = candidate;
                act = candact;
        } else if (candidate != NULL) {
                mutex_exit(&candidate->ipsa_lock);
        }
        ASSERT(MUTEX_HELD(&retval->ipsa_lock));
        ASSERT(act != NULL);

        /*
         * Even though I hold the mutex, since the reference counter is an
         * atomic operation, I really have to use the IPSA_REFHOLD macro.
         */
        IPSA_REFHOLD(retval);

        /*
         * This association is no longer unused.
         */
        old_flags = retval->ipsa_flags;
        retval->ipsa_flags |= IPSA_F_USED;

        /*
         * Cache a reference to this SA for the fast path.
         */
        if (ipr != NULL) {
                ipr->ipsr_bucket = bucket;
                ipr->ipsr_gen = bucket->isaf_gen;
                ipr->ipsr_sa = retval;
                /* I'm now caching, so the cache-invalid flag goes away! */
                retval->ipsa_flags &= ~IPSA_F_CINVALID;
        }
        /*
         * Latch various things while we're here..
         */
        if (ipl != NULL) {
                if (!ipl->ipl_ids_latched) {
                        ipsec_latch_ids(ipl,
                            retval->ipsa_src_cid, retval->ipsa_dst_cid);
                }
                if (!ipl->ipl_out_action_latched) {
                        IPACT_REFHOLD(act);
                        ipl->ipl_out_action = act;
                        ipl->ipl_out_action_latched = B_TRUE;
                }
        }

        /*
         * Set the uniqueness only first time.
         */
        if (need_unique && !(old_flags & IPSA_F_USED)) {
                if (retval->ipsa_unique_id == 0) {
                        ASSERT((retval->ipsa_flags & IPSA_F_UNIQUE) == 0);
                        /*
                         * From now on, only this src, dst[ports, addr],
                         * proto, should use it.
                         */
                        retval->ipsa_flags |= IPSA_F_UNIQUE;
                        retval->ipsa_unique_id = unique_id;
                        retval->ipsa_unique_mask = SA_UNIQUE_MASK(
                            io->ipsec_out_src_port, io->ipsec_out_dst_port,
                            protocol, 0);
                }

                /*
                 * Set the source address and adjust the hash
                 * buckets only if src_addr is zero.
                 */
                if (IPSA_IS_ADDR_UNSPEC(retval->ipsa_srcaddr, af)) {
                        /*
                         * sadb_unlinkassoc() will decrement the refcnt.  Bump
                         * up when we have the lock so that we don't have to
                         * acquire locks when we come back from
                         * sadb_insertassoc().
                         *
                         * We don't need to bump the bucket's gen since
                         * we aren't moving to a new bucket.
                         */
                        IPSA_REFHOLD(retval);
                        IPSA_COPY_ADDR(retval->ipsa_srcaddr, src, af);
                        mutex_exit(&retval->ipsa_lock);
                        sadb_unlinkassoc(retval);
                        /*
                         * Since the bucket lock is held, we know
                         * sadb_insertassoc() will succeed.
                         */
#ifdef DEBUG
                        if (sadb_insertassoc(retval, bucket) != 0) {
                                cmn_err(CE_PANIC,
                                    "sadb_insertassoc() failed in "
                                    "ipsec_getassocbyconn().\n");
                        }
#else   /* non-DEBUG */
                        (void) sadb_insertassoc(retval, bucket);
#endif  /* DEBUG */
                        return (retval);
                }
        }
        mutex_exit(&retval->ipsa_lock);

        return (retval);
}

/*
 * Look up a security association based on the security parameters index (SPI)
 * and address(es).  This is used for inbound packets and general SA lookups
 * (even in outbound SA tables).  The source address may be ignored.  Return
 * NULL if no association is available.  If an SA is found, return it, with
 * its refcnt incremented.  The caller must REFRELE after using the SA.
 * The hash bucket must be locked down before calling.
 */
ipsa_t *
ipsec_getassocbyspi(isaf_t *bucket, uint32_t spi, uint32_t *src, uint32_t *dst,
    sa_family_t af)
{
        ipsa_t *retval;

        ASSERT(MUTEX_HELD(&bucket->isaf_lock));

        /*
         * Walk the hash bucket, matching exactly on SPI, then destination,
         * then source.
         *
         * Per-SA locking doesn't need to happen, because I'm only matching
         * on addresses.  Addresses are only changed during insertion/deletion
         * from the hash bucket.  Since the hash bucket lock is held, we don't
         * need to worry about addresses changing.
         */

        for (retval = bucket->isaf_ipsa; retval != NULL;
            retval = retval->ipsa_next) {
                if (retval->ipsa_spi != spi)
                        continue;
                if (!IPSA_ARE_ADDR_EQUAL(dst, retval->ipsa_dstaddr, af))
                        continue;

                /*
                 * Assume that wildcard source addresses are inserted at the
                 * end of the hash bucket.  (See sadb_insertassoc().)
                 * The following check for source addresses is a weak form
                 * of access control/source identity verification.  If an
                 * SA has a source address, I only match an all-zeroes
                 * source address, or that particular one.  If the SA has
                 * an all-zeroes source, then I match regardless.
                 *
                 * There is a weakness here in that a packet with all-zeroes
                 * for an address will match regardless of the source address
                 * stored in the packet.
                 *
                 * Note that port-level packet selectors, if present,
                 * are checked in ipsec_check_ipsecin_unique().
                 */
                if (IPSA_ARE_ADDR_EQUAL(src, retval->ipsa_srcaddr, af) ||
                    IPSA_IS_ADDR_UNSPEC(retval->ipsa_srcaddr, af) ||
                    IPSA_IS_ADDR_UNSPEC(src, af))
                        break;
        }

        if (retval != NULL) {
                /*
                 * Just refhold the return value.  The caller will then
                 * make the appropriate calls to set the USED flag.
                 */
                IPSA_REFHOLD(retval);
        }

        return (retval);
}

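/*
 * Find a suitable outbound SA of type "proto" (IPPROTO_AH or IPPROTO_ESP)
 * for the packet in mp->b_cont, using the policy, latch, and selector
 * information in the IPSEC_OUT message at mp->b_rptr.  On success the SA
 * (with a held reference) is stored in io->ipsec_out_ah_sa or
 * io->ipsec_out_esp_sa and B_TRUE is returned.  B_FALSE means no usable
 * SA was found; the caller presumably initiates an ACQUIRE in that case.
 */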
boolean_t
ipsec_outbound_sa(mblk_t *mp, uint_t proto)
{
        mblk_t *data_mp;
        ipsec_out_t *io;
        ipaddr_t dst;
        uint32_t *dst_ptr, *src_ptr;
        isaf_t *bucket;
        ipsa_t *assoc;
        ip6_pkt_t ipp;
        in6_addr_t dst6;
        ipsa_t **sa;
        sadbp_t *sadbp;
        sadb_t *sp;
        sa_family_t af;
        netstack_t *ns;

        data_mp = mp->b_cont;
        io = (ipsec_out_t *)mp->b_rptr;
        ns = io->ipsec_out_ns;

        if (proto == IPPROTO_ESP) {
                ipsecesp_stack_t *espstack;

                espstack = ns->netstack_ipsecesp;
                sa = &io->ipsec_out_esp_sa;
                sadbp = &espstack->esp_sadb;
        } else {
                ipsecah_stack_t *ahstack;

                ASSERT(proto == IPPROTO_AH);
                ahstack = ns->netstack_ipsecah;
                sa = &io->ipsec_out_ah_sa;
                sadbp = &ahstack->ah_sadb;
        }

        ASSERT(*sa == NULL);

        if (io->ipsec_out_v4) {
                ipha_t *ipha = (ipha_t *)data_mp->b_rptr;

                ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION);
                dst = ip_get_dst(ipha);
                sp = &sadbp->s_v4;
                af = AF_INET;

                /*
                 * NOTE: Getting the outbound association is considerably
                 * painful.  ipsec_getassocbyconn() will require more
                 * parameters as policy implementations mature.
                 */
                bucket = OUTBOUND_BUCKET_V4(sp, dst);
                src_ptr = (uint32_t *)&ipha->ipha_src;
                dst_ptr = (uint32_t *)&dst;
        } else {
                ip6_t *ip6h = (ip6_t *)data_mp->b_rptr;

                ASSERT(IPH_HDR_VERSION(ip6h) == IPV6_VERSION);
                dst6 = ip_get_dst_v6(ip6h, NULL);
                af = AF_INET6;

                bzero(&ipp, sizeof (ipp));
                sp = &sadbp->s_v6;

                /* Same NOTE: applies here! */
                bucket = OUTBOUND_BUCKET_V6(sp, dst6);
                src_ptr = (uint32_t *)&ip6h->ip6_src;
                dst_ptr = (uint32_t *)&dst6;
        }

        mutex_enter(&bucket->isaf_lock);
        assoc = ipsec_getassocbyconn(bucket, io, src_ptr, dst_ptr, af, proto);
        mutex_exit(&bucket->isaf_lock);

        if (assoc == NULL)
                return (B_FALSE);

        if (assoc->ipsa_state == IPSA_STATE_DEAD) {
                IPSA_REFRELE(assoc);
                return (B_FALSE);
        }

        ASSERT(assoc->ipsa_state != IPSA_STATE_LARVAL);

        *sa = assoc;
        return (B_TRUE);
}

/*
 * Inbound IPsec SA selection.
 */

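/*
 * Locate the SA for an inbound AH packet.  "mp" is the M_CTL IPSEC_IN
 * message with the data mblk chained off b_cont.  On success, a held
 * reference to the SA is stashed in ii->ipsec_in_ah_sa and a pointer to
 * the AH header is returned.  NULL means the packet could not be handled
 * here: it was either dropped or, for a larval SA, parked on the SA until
 * the SA is fully added.
 */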
ah_t *
ipsec_inbound_ah_sa(mblk_t *mp, netstack_t *ns)
{
        mblk_t *ipsec_in;
        ipha_t *ipha;
        ipsa_t *assoc;
        ah_t *ah;
        isaf_t *hptr;
        ipsec_in_t *ii;
        boolean_t isv6;
        ip6_t *ip6h;
        int ah_offset;
        uint32_t *src_ptr, *dst_ptr;
        int pullup_len;
        sadb_t *sp;
        sa_family_t af;
        ipsec_stack_t *ipss = ns->netstack_ipsec;
        ipsecah_stack_t *ahstack = ns->netstack_ipsecah;

        IP_AH_BUMP_STAT(ipss, in_requests);

        ASSERT(mp->b_datap->db_type == M_CTL);

        ipsec_in = mp;
        ii = (ipsec_in_t *)ipsec_in->b_rptr;
        mp = mp->b_cont;

        ASSERT(mp->b_datap->db_type == M_DATA);

        isv6 = !ii->ipsec_in_v4;
        if (isv6) {
                ip6h = (ip6_t *)mp->b_rptr;
                ah_offset = ipsec_ah_get_hdr_size_v6(mp, B_TRUE);
        } else {
                ipha = (ipha_t *)mp->b_rptr;
                ASSERT(ipha->ipha_protocol == IPPROTO_AH);
                ah_offset = ipha->ipha_version_and_hdr_length -
                    (uint8_t)((IP_VERSION << 4));
                ah_offset <<= 2;
        }

        /*
         * We assume that the IP header is pulled up until
         * the options.  We need to see whether we have the
         * AH header in the same mblk or not.
         */
        pullup_len = ah_offset + sizeof (ah_t);
        if (mp->b_rptr + pullup_len > mp->b_wptr) {
                if (!pullupmsg(mp, pullup_len)) {
                        ipsec_rl_strlog(ns, ip_mod_info.mi_idnum, 0, 0,
                            SL_WARN | SL_ERROR,
                            "ipsec_inbound_ah_sa: Small AH header\n");
                        IP_AH_BUMP_STAT(ipss, in_discards);
                        ip_drop_packet(ipsec_in, B_TRUE, NULL, NULL,
                            DROPPER(ipss, ipds_ah_bad_length),
                            &ipss->ipsec_dropper);
                        return (NULL);
                }
                if (isv6)
                        ip6h = (ip6_t *)mp->b_rptr;
                else
                        ipha = (ipha_t *)mp->b_rptr;
        }

        ah = (ah_t *)(mp->b_rptr + ah_offset);

        if (isv6) {
                src_ptr = (uint32_t *)&ip6h->ip6_src;
                dst_ptr = (uint32_t *)&ip6h->ip6_dst;
                sp = &ahstack->ah_sadb.s_v6;
                af = AF_INET6;
        } else {
                src_ptr = (uint32_t *)&ipha->ipha_src;
                dst_ptr = (uint32_t *)&ipha->ipha_dst;
                sp = &ahstack->ah_sadb.s_v4;
                af = AF_INET;
        }

        hptr = INBOUND_BUCKET(sp, ah->ah_spi);
        mutex_enter(&hptr->isaf_lock);
        assoc = ipsec_getassocbyspi(hptr, ah->ah_spi, src_ptr, dst_ptr, af);
        mutex_exit(&hptr->isaf_lock);

        if (assoc == NULL || assoc->ipsa_state == IPSA_STATE_DEAD ||
            assoc->ipsa_state == IPSA_STATE_ACTIVE_ELSEWHERE) {
                IP_AH_BUMP_STAT(ipss, lookup_failure);
                IP_AH_BUMP_STAT(ipss, in_discards);
                ipsecah_in_assocfailure(ipsec_in, 0,
                    SL_ERROR | SL_CONSOLE | SL_WARN,
                    "ipsec_inbound_ah_sa: No association found for "
                    "spi 0x%x, dst addr %s\n",
                    ah->ah_spi, dst_ptr, af, ahstack);
                if (assoc != NULL) {
                        IPSA_REFRELE(assoc);
                }
                return (NULL);
        }

        if (assoc->ipsa_state == IPSA_STATE_LARVAL &&
            sadb_set_lpkt(assoc, ipsec_in, ns)) {
                /* Not fully baked; swap the packet under a rock until then */
                IPSA_REFRELE(assoc);
                return (NULL);
        }

        /*
         * Save a reference to the association so that it can
         * be retrieved after execution.  We free any AH SA reference
         * already there (innermost SA "wins").  The reference to
         * the SA will also be used later when doing the policy checks.
         */

        if (ii->ipsec_in_ah_sa != NULL) {
                IPSA_REFRELE(ii->ipsec_in_ah_sa);
        }
        ii->ipsec_in_ah_sa = assoc;

        return (ah);
}

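/*
 * Locate the SA for an inbound ESP packet.  Analogous to
 * ipsec_inbound_ah_sa(): on success a held reference to the SA is stashed
 * in ii->ipsec_in_esp_sa and a pointer to the ESP header is returned;
 * NULL means the packet was dropped or parked on a larval SA.
 */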
esph_t *
ipsec_inbound_esp_sa(mblk_t *ipsec_in_mp, netstack_t *ns)
{
        mblk_t *data_mp, *placeholder;
        uint32_t *src_ptr, *dst_ptr;
        ipsec_in_t *ii;
        ipha_t *ipha;
        ip6_t *ip6h;
        esph_t *esph;
        ipsa_t *ipsa;
        isaf_t *bucket;
        uint_t preamble;
        sa_family_t af;
        boolean_t isv6;
        sadb_t *sp;
        ipsec_stack_t *ipss = ns->netstack_ipsec;
        ipsecesp_stack_t *espstack = ns->netstack_ipsecesp;

        IP_ESP_BUMP_STAT(ipss, in_requests);
        ASSERT(ipsec_in_mp->b_datap->db_type == M_CTL);

        /* We have IPSEC_IN already! */
        ii = (ipsec_in_t *)ipsec_in_mp->b_rptr;
        data_mp = ipsec_in_mp->b_cont;

        ASSERT(ii->ipsec_in_type == IPSEC_IN);

        isv6 = !ii->ipsec_in_v4;
        if (isv6) {
                ip6h = (ip6_t *)data_mp->b_rptr;
        } else {
                ipha = (ipha_t *)data_mp->b_rptr;
        }

        /*
         * Put all data into one mblk if it's not there already.
         * XXX This is probably bad long-term.  Figure out better ways of
         * doing this.  Much of the inbound path depends on all of the data
         * being in one mblk.
         *
         * XXX Jumbogram issues will have to be dealt with here.
         * If the plen is 0, we'll have to scan for a HBH header with the
         * actual packet length.
         */
        if (data_mp->b_datap->db_ref > 1 ||
            (data_mp->b_wptr - data_mp->b_rptr) <
            (isv6 ? (ntohs(ip6h->ip6_plen) + sizeof (ip6_t))
            : ntohs(ipha->ipha_length))) {
                placeholder = msgpullup(data_mp, -1);
                if (placeholder == NULL) {
                        IP_ESP_BUMP_STAT(ipss, in_discards);
                        /*
                         * TODO: Extract inbound interface from the IPSEC_IN
                         * message's ii->ipsec_in_rill_index.
                         */
                        ip_drop_packet(ipsec_in_mp, B_TRUE, NULL, NULL,
                            DROPPER(ipss, ipds_esp_nomem),
                            &ipss->ipsec_dropper);
                        return (NULL);
                } else {
                        /* Reset packet with new pulled up mblk. */
                        freemsg(data_mp);
                        data_mp = placeholder;
                        ipsec_in_mp->b_cont = data_mp;
                }
        }

        /*
         * Find the ESP header, point the address pointers at the appropriate
         * IPv4/IPv6 places.
         */
        if (isv6) {
                ip6h = (ip6_t *)data_mp->b_rptr;
                src_ptr = (uint32_t *)&ip6h->ip6_src;
                dst_ptr = (uint32_t *)&ip6h->ip6_dst;
                if (ip6h->ip6_nxt != IPPROTO_ESP) {
                        /* There are options that need to be processed. */
                        preamble = ip_hdr_length_v6(data_mp, ip6h);
                } else {
                        preamble = sizeof (ip6_t);
                }

                sp = &espstack->esp_sadb.s_v6;
                af = AF_INET6;
        } else {
                ipha = (ipha_t *)data_mp->b_rptr;
                src_ptr = (uint32_t *)&ipha->ipha_src;
                dst_ptr = (uint32_t *)&ipha->ipha_dst;
                preamble = IPH_HDR_LENGTH(ipha);

                sp = &espstack->esp_sadb.s_v4;
                af = AF_INET;
        }

        esph = (esph_t *)(data_mp->b_rptr + preamble);

        /* Since hash is common on inbound (SPI value), hash here. */
        bucket = INBOUND_BUCKET(sp, esph->esph_spi);
        mutex_enter(&bucket->isaf_lock);
        ipsa = ipsec_getassocbyspi(bucket, esph->esph_spi, src_ptr, dst_ptr,
            af);
        mutex_exit(&bucket->isaf_lock);

        if (ipsa == NULL || ipsa->ipsa_state == IPSA_STATE_DEAD ||
            ipsa->ipsa_state == IPSA_STATE_ACTIVE_ELSEWHERE) {
                /* This is a loggable error!  AUDIT ME! */
                IP_ESP_BUMP_STAT(ipss, lookup_failure);
                IP_ESP_BUMP_STAT(ipss, in_discards);
                ipsecesp_in_assocfailure(ipsec_in_mp, 0,
                    SL_ERROR | SL_CONSOLE | SL_WARN,
                    "ipsec_inbound_esp_sa: No association found for "
                    "spi 0x%x, dst addr %s\n",
                    esph->esph_spi, dst_ptr, af, espstack);
                if (ipsa != NULL) {
                        IPSA_REFRELE(ipsa);
                }
                return (NULL);
        }

        if (ipsa->ipsa_state == IPSA_STATE_LARVAL &&
            sadb_set_lpkt(ipsa, ipsec_in_mp, ns)) {
                /* Not fully baked; swap the packet under a rock until then */
                IPSA_REFRELE(ipsa);
                return (NULL);
        }

        /*
         * Save a reference to the association so that it can
         * be retrieved after execution.  We free any ESP SA reference
         * already there (innermost SA "wins").  The reference to
         * the SA will also be used later when doing the policy checks.
         */
        if (ii->ipsec_in_esp_sa != NULL) {
                IPSA_REFRELE(ii->ipsec_in_esp_sa);
        }
        ii->ipsec_in_esp_sa = ipsa;

        return (esph);
}