1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * IPsec Security Policy Database. 31 * 32 * This module maintains the SPD and provides routines used by ip and ip6 33 * to apply IPsec policy to inbound and outbound datagrams. 
34 * 35 * XXX TODO LIST 36 * Inbound policy: 37 * Put policy failure logging back in here (as policy action flag bit) 38 */ 39 40 #include <sys/types.h> 41 #include <sys/stream.h> 42 #include <sys/stropts.h> 43 #include <sys/sysmacros.h> 44 #include <sys/strsubr.h> 45 #include <sys/strlog.h> 46 #include <sys/cmn_err.h> 47 #include <sys/zone.h> 48 49 #include <sys/systm.h> 50 #include <sys/param.h> 51 #include <sys/kmem.h> 52 53 #include <sys/crypto/api.h> 54 55 #include <inet/common.h> 56 #include <inet/mi.h> 57 58 #include <netinet/ip6.h> 59 #include <netinet/icmp6.h> 60 #include <netinet/udp.h> 61 62 #include <inet/ip.h> 63 #include <inet/ip6.h> 64 65 #include <net/pfkeyv2.h> 66 #include <net/pfpolicy.h> 67 #include <inet/ipsec_info.h> 68 #include <inet/sadb.h> 69 #include <inet/ipsec_impl.h> 70 #include <inet/ipsecah.h> 71 #include <inet/ipsecesp.h> 72 #include <inet/ipdrop.h> 73 #include <inet/ipclassifier.h> 74 75 static void ipsec_update_present_flags(); 76 static ipsec_act_t *ipsec_act_wildcard_expand(ipsec_act_t *, uint_t *); 77 static void ipsec_out_free(void *); 78 static void ipsec_in_free(void *); 79 static boolean_t ipsec_init_inbound_sel(ipsec_selector_t *, mblk_t *, 80 ipha_t *, ip6_t *); 81 static mblk_t *ipsec_attach_global_policy(mblk_t *, conn_t *, 82 ipsec_selector_t *); 83 static mblk_t *ipsec_apply_global_policy(mblk_t *, conn_t *, 84 ipsec_selector_t *); 85 static mblk_t *ipsec_check_ipsecin_policy(queue_t *, mblk_t *, 86 ipsec_policy_t *, ipha_t *, ip6_t *); 87 static void ipsec_in_release_refs(ipsec_in_t *); 88 static void ipsec_out_release_refs(ipsec_out_t *); 89 static void ipsec_action_reclaim(void *); 90 static void ipsid_init(void); 91 static void ipsid_fini(void); 92 static boolean_t ipsec_check_ipsecin_action(struct ipsec_in_s *, mblk_t *, 93 struct ipsec_action_s *, ipha_t *ipha, ip6_t *ip6h, const char **, 94 kstat_named_t **); 95 static int32_t ipsec_act_ovhd(const ipsec_act_t *act); 96 static void 
ipsec_unregister_prov_update(void); 97 static boolean_t ipsec_compare_action(ipsec_policy_t *p1, 98 ipsec_policy_t *p2); 99 100 /* 101 * Policy rule index generator. We assume this won't wrap in the 102 * lifetime of a system. If we make 2^20 policy changes per second, 103 * this will last 2^44 seconds, or roughly 500,000 years, so we don't 104 * have to worry about reusing policy index values. 105 * 106 * Protected by ipsec_conf_lock. 107 */ 108 uint64_t ipsec_next_policy_index = 1; 109 110 /* 111 * Active & Inactive system policy roots 112 */ 113 static ipsec_policy_head_t system_policy; 114 static ipsec_policy_head_t inactive_policy; 115 116 /* Packet dropper for generic SPD drops. */ 117 static ipdropper_t spd_dropper; 118 119 /* 120 * For now, use a trivially sized hash table. 121 * In the future we can add the structure canonicalization necessary 122 * to get the hash function to behave correctly.. 123 */ 124 #define IPSEC_ACTION_HASH_SIZE 1 125 #define IPSEC_SEL_HASH_SIZE 1 126 127 static HASH_HEAD(ipsec_action_s) ipsec_action_hash[IPSEC_ACTION_HASH_SIZE]; 128 static HASH_HEAD(ipsec_sel) ipsec_sel_hash[IPSEC_SEL_HASH_SIZE]; 129 static kmem_cache_t *ipsec_action_cache; 130 static kmem_cache_t *ipsec_sel_cache; 131 static kmem_cache_t *ipsec_pol_cache; 132 static kmem_cache_t *ipsec_info_cache; 133 134 boolean_t ipsec_inbound_v4_policy_present = B_FALSE; 135 boolean_t ipsec_outbound_v4_policy_present = B_FALSE; 136 boolean_t ipsec_inbound_v6_policy_present = B_FALSE; 137 boolean_t ipsec_outbound_v6_policy_present = B_FALSE; 138 139 /* 140 * Because policy needs to know what algorithms are supported, keep the 141 * lists of algorithms here. 
142 */ 143 144 kmutex_t alg_lock; 145 uint8_t ipsec_nalgs[IPSEC_NALGTYPES]; 146 ipsec_alginfo_t *ipsec_alglists[IPSEC_NALGTYPES][IPSEC_MAX_ALGS]; 147 uint8_t ipsec_sortlist[IPSEC_NALGTYPES][IPSEC_MAX_ALGS]; 148 ipsec_algs_exec_mode_t ipsec_algs_exec_mode[IPSEC_NALGTYPES]; 149 static crypto_notify_handle_t prov_update_handle = NULL; 150 151 #define ALGBITS_ROUND_DOWN(x, align) (((x)/(align))*(align)) 152 #define ALGBITS_ROUND_UP(x, align) ALGBITS_ROUND_DOWN((x)+(align)-1, align) 153 154 /* 155 * Inbound traffic should have matching identities for both SA's. 156 */ 157 158 #define SA_IDS_MATCH(sa1, sa2) \ 159 (((sa1) == NULL) || ((sa2) == NULL) || \ 160 (((sa1)->ipsa_src_cid == (sa2)->ipsa_src_cid) && \ 161 (((sa1)->ipsa_dst_cid == (sa2)->ipsa_dst_cid)))) 162 /* 163 * Policy failure messages. 164 */ 165 static char *ipsec_policy_failure_msgs[] = { 166 167 /* IPSEC_POLICY_NOT_NEEDED */ 168 "%s: Dropping the datagram because the incoming packet " 169 "is %s, but the recipient expects clear; Source %s, " 170 "Destination %s.\n", 171 172 /* IPSEC_POLICY_MISMATCH */ 173 "%s: Policy Failure for the incoming packet (%s); Source %s, " 174 "Destination %s.\n", 175 176 /* IPSEC_POLICY_AUTH_NOT_NEEDED */ 177 "%s: Authentication present while not expected in the " 178 "incoming %s packet; Source %s, Destination %s.\n", 179 180 /* IPSEC_POLICY_ENCR_NOT_NEEDED */ 181 "%s: Encryption present while not expected in the " 182 "incoming %s packet; Source %s, Destination %s.\n", 183 184 /* IPSEC_POLICY_SE_NOT_NEEDED */ 185 "%s: Self-Encapsulation present while not expected in the " 186 "incoming %s packet; Source %s, Destination %s.\n", 187 }; 188 /* 189 * Have a counter for every possible policy message in the previous array. 190 */ 191 static uint32_t ipsec_policy_failure_count[IPSEC_POLICY_MAX]; 192 /* Time since last ipsec policy failure that printed a message. 
*/
/* hrtime of the last policy-failure message actually logged (throttling). */
hrtime_t ipsec_policy_failure_last = 0;

/*
 * General overviews:
 *
 * Locking:
 *
 *	All of the system policy structures are protected by a single
 *	rwlock, ipsec_conf_lock.  These structures are threaded in a
 *	fairly complex fashion and are not expected to change on a
 *	regular basis, so this should not cause scaling/contention
 *	problems.  As a result, policy checks should (hopefully) be MT-hot.
 *
 * Allocation policy:
 *
 *	We use custom kmem cache types for the various
 *	bits & pieces of the policy data structures.  All allocations
 *	use KM_NOSLEEP instead of KM_SLEEP for policy allocation.  The
 *	policy table is of potentially unbounded size, so we don't
 *	want to provide a way to hog all system memory with policy
 *	entries..
 */

/*
 * Module unload hook.  Undoes everything ipsec_policy_init() set up:
 * the packet dropper, the policy-head rwlocks, the action/selector
 * hash-bucket mutexes, the crypto provider-update callback, the
 * algorithm-table lock, the kmem caches, and the ipsid tables.
 */
void
ipsec_policy_destroy(void)
{
	int i;

	ip_drop_unregister(&spd_dropper);
	ip_drop_destroy();

	rw_destroy(&system_policy.iph_lock);
	rw_destroy(&inactive_policy.iph_lock);

	for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++)
		mutex_destroy(&(ipsec_action_hash[i].hash_lock));

	for (i = 0; i < IPSEC_SEL_HASH_SIZE; i++)
		mutex_destroy(&(ipsec_sel_hash[i].hash_lock));

	ipsec_unregister_prov_update();

	mutex_destroy(&alg_lock);

	kmem_cache_destroy(ipsec_action_cache);
	kmem_cache_destroy(ipsec_sel_cache);
	kmem_cache_destroy(ipsec_pol_cache);
	kmem_cache_destroy(ipsec_info_cache);
	/*
	 * NOTE(review): ipsid_gc() appears to reap unreferenced identity
	 * entries before ipsid_fini() tears the tables down — confirm
	 * against the ipsid implementation.
	 */
	ipsid_gc();
	ipsid_fini();
}

/*
 * Module load hook.
250 */ 251 void 252 ipsec_policy_init() 253 { 254 int i; 255 ipsid_init(); 256 257 rw_init(&system_policy.iph_lock, NULL, RW_DEFAULT, NULL); 258 rw_init(&inactive_policy.iph_lock, NULL, RW_DEFAULT, NULL); 259 260 for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++) 261 mutex_init(&(ipsec_action_hash[i].hash_lock), 262 NULL, MUTEX_DEFAULT, NULL); 263 264 for (i = 0; i < IPSEC_SEL_HASH_SIZE; i++) 265 mutex_init(&(ipsec_sel_hash[i].hash_lock), 266 NULL, MUTEX_DEFAULT, NULL); 267 268 mutex_init(&alg_lock, NULL, MUTEX_DEFAULT, NULL); 269 270 for (i = 0; i < IPSEC_NALGTYPES; i++) 271 ipsec_nalgs[i] = 0; 272 273 ipsec_action_cache = kmem_cache_create("ipsec_actions", 274 sizeof (ipsec_action_t), _POINTER_ALIGNMENT, NULL, NULL, 275 ipsec_action_reclaim, NULL, NULL, 0); 276 ipsec_sel_cache = kmem_cache_create("ipsec_selectors", 277 sizeof (ipsec_sel_t), _POINTER_ALIGNMENT, NULL, NULL, 278 NULL, NULL, NULL, 0); 279 ipsec_pol_cache = kmem_cache_create("ipsec_policy", 280 sizeof (ipsec_policy_t), _POINTER_ALIGNMENT, NULL, NULL, 281 NULL, NULL, NULL, 0); 282 ipsec_info_cache = kmem_cache_create("ipsec_info", 283 sizeof (ipsec_info_t), _POINTER_ALIGNMENT, NULL, NULL, 284 NULL, NULL, NULL, 0); 285 286 ip_drop_init(); 287 ip_drop_register(&spd_dropper, "IPsec SPD"); 288 } 289 290 /* 291 * Sort algorithm lists. 292 * EXPORT DELETE START 293 * I may need to split this based on 294 * authentication/encryption, and I may wish to have an administrator 295 * configure this list. Hold on to some NDD variables... 296 * EXPORT DELETE END 297 * 298 * XXX For now, sort on minimum key size (GAG!). While minimum key size is 299 * not the ideal metric, it's the only quantifiable measure available in the 300 * AUTH/ENCR PI. We need a better metric for sorting algorithms by preference. 
*/
/*
 * Insert algorithm 'algid' into the keysize-sorted list for type 'at'.
 * Caller (ipsec_alg_reg) has already bumped ipsec_nalgs[at], so 'count'
 * includes the new entry.  One insertion-sort pass: 'holder' carries the
 * value still looking for its slot; once the new algid has been placed,
 * holder != algid on every later iteration, so the remaining entries all
 * shift down by one.  The leftover value lands in the last slot.
 */
static void
alg_insert_sortlist(enum ipsec_algtype at, uint8_t algid)
{
	ipsec_alginfo_t *ai = ipsec_alglists[at][algid];
	uint8_t holder, swap;
	uint_t i;
	uint_t count = ipsec_nalgs[at];
	ASSERT(ai != NULL);
	ASSERT(algid == ai->alg_id);

	ASSERT(MUTEX_HELD(&alg_lock));

	holder = algid;

	for (i = 0; i < count - 1; i++) {
		ipsec_alginfo_t *alt;

		alt = ipsec_alglists[at][ipsec_sortlist[at][i]];
		/*
		 * If you want to give precedence to newly added algs,
		 * add the = in the > comparison.
		 */
		if ((holder != algid) || (ai->alg_minbits > alt->alg_minbits)) {
			/* Swap sortlist[i] and holder. */
			swap = ipsec_sortlist[at][i];
			ipsec_sortlist[at][i] = holder;
			holder = swap;
			ai = alt;
		}	/* Else just continue. */
	}

	/* Store holder in last slot. */
	ipsec_sortlist[at][i] = holder;
}

/*
 * Remove an algorithm from a sorted algorithm list.
 * This should be considerably easier, even with complex sorting.
 *
 * Caller (ipsec_alg_unreg) has already decremented ipsec_nalgs[at], so
 * 'newcount' is the post-removal length and index 'newcount' is the last
 * slot that held a valid entry before removal; once 'algid' is found,
 * every later entry is shifted one slot left over it.
 */
static void
alg_remove_sortlist(enum ipsec_algtype at, uint8_t algid)
{
	boolean_t copyback = B_FALSE;
	int i;
	int newcount = ipsec_nalgs[at];

	ASSERT(MUTEX_HELD(&alg_lock));

	for (i = 0; i <= newcount; i++) {
		if (copyback)
			ipsec_sortlist[at][i-1] = ipsec_sortlist[at][i];
		else if (ipsec_sortlist[at][i] == algid)
			copyback = B_TRUE;
	}
}

/*
 * Add the specified algorithm to the algorithm tables.
 * Must be called while holding the algorithm table writer lock.
361 */ 362 void 363 ipsec_alg_reg(ipsec_algtype_t algtype, ipsec_alginfo_t *alg) 364 { 365 ASSERT(MUTEX_HELD(&alg_lock)); 366 367 ASSERT(ipsec_alglists[algtype][alg->alg_id] == NULL); 368 ipsec_alg_fix_min_max(alg, algtype); 369 ipsec_alglists[algtype][alg->alg_id] = alg; 370 371 ipsec_nalgs[algtype]++; 372 alg_insert_sortlist(algtype, alg->alg_id); 373 } 374 375 /* 376 * Remove the specified algorithm from the algorithm tables. 377 * Must be called while holding the algorithm table writer lock. 378 */ 379 void 380 ipsec_alg_unreg(ipsec_algtype_t algtype, uint8_t algid) 381 { 382 ASSERT(MUTEX_HELD(&alg_lock)); 383 384 ASSERT(ipsec_alglists[algtype][algid] != NULL); 385 ipsec_alg_free(ipsec_alglists[algtype][algid]); 386 ipsec_alglists[algtype][algid] = NULL; 387 388 ipsec_nalgs[algtype]--; 389 alg_remove_sortlist(algtype, algid); 390 } 391 392 /* 393 * Hooks for spdsock to get a grip on system policy. 394 */ 395 396 ipsec_policy_head_t * 397 ipsec_system_policy(void) 398 { 399 ipsec_policy_head_t *h = &system_policy; 400 IPPH_REFHOLD(h); 401 return (h); 402 } 403 404 ipsec_policy_head_t * 405 ipsec_inactive_policy(void) 406 { 407 ipsec_policy_head_t *h = &inactive_policy; 408 IPPH_REFHOLD(h); 409 return (h); 410 } 411 412 /* 413 * Lock inactive policy, then active policy, then exchange policy root 414 * pointers. 
*/
/*
 * Atomically exchange the active and inactive policy trees.
 * Lock order: inactive head first, then active head, both as writers.
 * Both generation counters are bumped so cached policy lookups notice
 * the change, and the v4/v6 in/outbound "policy present" flags are
 * recomputed under the active head's write lock.
 */
void
ipsec_swap_policy(void)
{
	int af, dir;
	ipsec_policy_t *t1, *t2;

	rw_enter(&inactive_policy.iph_lock, RW_WRITER);
	rw_enter(&system_policy.iph_lock, RW_WRITER);
	for (dir = 0; dir < IPSEC_NTYPES; dir++) {
		for (af = 0; af < IPSEC_NAF; af++) {
			t1 = system_policy.iph_root[dir].ipr[af];
			t2 = inactive_policy.iph_root[dir].ipr[af];
			system_policy.iph_root[dir].ipr[af] = t2;
			inactive_policy.iph_root[dir].ipr[af] = t1;
		}
	}
	system_policy.iph_gen++;
	inactive_policy.iph_gen++;
	ipsec_update_present_flags();
	rw_exit(&system_policy.iph_lock);
	rw_exit(&inactive_policy.iph_lock);
}

/*
 * Clone one policy rule..
 *
 * The clone shares the selector and action with the original (their
 * reference counts are bumped); only the list linkage and the rule's
 * own refcount are fresh.  Returns NULL on allocation failure.
 */
static ipsec_policy_t *
ipsec_copy_policy(const ipsec_policy_t *src)
{
	ipsec_policy_t *dst = kmem_cache_alloc(ipsec_pol_cache, KM_NOSLEEP);

	if (dst == NULL)
		return (NULL);

	/*
	 * Adjust refcounts of cloned state.
	 */
	IPACT_REFHOLD(src->ipsp_act);
	src->ipsp_sel->ipsl_refs++;

	dst->ipsp_links.itl_next = NULL;
	dst->ipsp_refs = 1;
	dst->ipsp_sel = src->ipsp_sel;
	dst->ipsp_act = src->ipsp_act;
	dst->ipsp_prio = src->ipsp_prio;
	dst->ipsp_index = src->ipsp_index;

	return (dst);
}

/*
 * Make one policy head look exactly like another.
 *
 * As with ipsec_swap_policy, we lock the destination policy head first, then
 * the source policy head.  Note that we only need to read-lock the source
 * policy head as we are not changing it.
*/
/*
 * Flush 'dph' and repopulate it with clones of every rule on 'sph',
 * preserving each chain's order.  Returns 0 on success, ENOMEM if any
 * clone allocation fails.
 *
 * NOTE(review): on ENOMEM the rules cloned so far are left chained on
 * dph (only released by a later flush) — presumably acceptable since
 * dph was just flushed and callers retry; confirm.
 */
static int
ipsec_copy_polhead(ipsec_policy_head_t *sph, ipsec_policy_head_t *dph)
{
	int af, dir;
	ipsec_policy_t *src, *dst, **dstp;

	rw_enter(&dph->iph_lock, RW_WRITER);

	ipsec_polhead_flush(dph);

	rw_enter(&sph->iph_lock, RW_READER);

	for (dir = 0; dir < IPSEC_NTYPES; dir++) {
		for (af = 0; af < IPSEC_NAF; af++) {
			/* dstp walks the tail pointer so order is kept. */
			dstp = &dph->iph_root[dir].ipr[af];
			for (src = sph->iph_root[dir].ipr[af];
			    src != NULL; src = src->ipsp_links.itl_next) {
				dst = ipsec_copy_policy(src);
				if (dst == NULL) {
					rw_exit(&sph->iph_lock);
					rw_exit(&dph->iph_lock);
					return (ENOMEM);
				}
				*dstp = dst;
				dstp = &dst->ipsp_links.itl_next;
			}
		}
	}

	dph->iph_gen++;

	rw_exit(&sph->iph_lock);
	rw_exit(&dph->iph_lock);
	return (0);
}

/*
 * Clone currently active policy to the inactive policy list.
 */
int
ipsec_clone_system_policy(void)
{
	return (ipsec_copy_polhead(&system_policy, &inactive_policy));
}


/*
 * Extract the string from ipsec_policy_failure_msgs[type] and
 * log it.
 *
 * This function needs to be kept in synch with ipsec_rl_strlog() in
 * sadb.c.
 * XXX this function should be combined with the ipsec_rl_strlog() function.
*/
/*
 * Log a policy-failure message of the given 'type' (index into
 * ipsec_policy_failure_msgs[]) for the packet's src/dst addresses.
 * The failure counter is always bumped; the actual strlog is throttled
 * to at most one message per ipsec_policy_log_interval milliseconds.
 * Exactly one of ipha/ip6h must be non-NULL.
 */
void
ipsec_log_policy_failure(queue_t *q, int type, char *func_name, ipha_t *ipha,
    ip6_t *ip6h, boolean_t secure)
{
	char sbuf[INET6_ADDRSTRLEN];
	char dbuf[INET6_ADDRSTRLEN];
	char *s;
	char *d;
	hrtime_t current = gethrtime();

	ASSERT((ipha == NULL && ip6h != NULL) ||
	    (ip6h == NULL && ipha != NULL));

	if (ipha != NULL) {
		s = inet_ntop(AF_INET, &ipha->ipha_src, sbuf, sizeof (sbuf));
		d = inet_ntop(AF_INET, &ipha->ipha_dst, dbuf, sizeof (dbuf));
	} else {
		s = inet_ntop(AF_INET6, &ip6h->ip6_src, sbuf, sizeof (sbuf));
		d = inet_ntop(AF_INET6, &ip6h->ip6_dst, dbuf, sizeof (dbuf));

	}

	/* Always bump the policy failure counter. */
	ipsec_policy_failure_count[type]++;

	/* Convert interval (in msec) to hrtime (in nsec), which means * 10^6 */
	if (ipsec_policy_failure_last +
	    ((hrtime_t)ipsec_policy_log_interval * (hrtime_t)1000000) <=
	    current) {
		/*
		 * Throttle the logging such that I only log one message
		 * every 'ipsec_policy_log_interval' amount of time.
		 */
		(void) mi_strlog(q, 0, SL_ERROR|SL_WARN|SL_CONSOLE,
		    ipsec_policy_failure_msgs[type],
		    func_name,
		    (secure ? "secure" : "not secure"), s, d);
		ipsec_policy_failure_last = current;
	}
}

/*
 * Discard all active system policy, reset the rule index generator,
 * then prod the action cache to release unreferenced actions.
 */
void
ipsec_config_flush()
{
	rw_enter(&system_policy.iph_lock, RW_WRITER);
	ipsec_polhead_flush(&system_policy);
	ipsec_next_policy_index = 1;
	rw_exit(&system_policy.iph_lock);
	ipsec_action_reclaim(0);
}

/*
 * Clip a policy's min/max keybits vs. the capabilities of the
 * algorithm.  If the algorithm is not loaded, both bounds are zeroed.
 */
static void
act_alg_adjust(uint_t algtype, uint_t algid,
    uint16_t *minbits, uint16_t *maxbits)
{
	ipsec_alginfo_t *algp = ipsec_alglists[algtype][algid];
	if (algp != NULL) {
		/*
		 * If passed-in minbits is zero, we assume the caller trusts
		 * us with setting the minimum key size.  We pick the
		 * algorithms DEFAULT key size for the minimum in this case.
		 */
		if (*minbits == 0) {
			*minbits = algp->alg_default_bits;
			ASSERT(*minbits >= algp->alg_minbits);
		} else {
			*minbits = MAX(*minbits, algp->alg_minbits);
		}
		if (*maxbits == 0)
			*maxbits = algp->alg_maxbits;
		else
			*maxbits = MIN(*maxbits, algp->alg_maxbits);
		ASSERT(*minbits <= *maxbits);
	} else {
		*minbits = 0;
		*maxbits = 0;
	}
}

/*
 * Check an action's requested algorithms against the algorithms currently
 * loaded in the system.  On failure, *diag is set to the appropriate
 * SPD_DIAGNOSTIC_* code and B_FALSE is returned.  Key-size bounds are
 * clipped in place via act_alg_adjust() as a side effect.
 */
boolean_t
ipsec_check_action(ipsec_act_t *act, int *diag)
{
	ipsec_prot_t *ipp;

	ipp = &act->ipa_apply;

	if (ipp->ipp_use_ah &&
	    ipsec_alglists[IPSEC_ALG_AUTH][ipp->ipp_auth_alg] == NULL) {
		*diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
		return (B_FALSE);
	}
	if (ipp->ipp_use_espa &&
	    ipsec_alglists[IPSEC_ALG_AUTH][ipp->ipp_esp_auth_alg] == NULL) {
		*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
		return (B_FALSE);
	}
	if (ipp->ipp_use_esp &&
	    ipsec_alglists[IPSEC_ALG_ENCR][ipp->ipp_encr_alg] == NULL) {
		*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
		return (B_FALSE);
	}

	act_alg_adjust(IPSEC_ALG_AUTH, ipp->ipp_auth_alg,
	    &ipp->ipp_ah_minbits, &ipp->ipp_ah_maxbits);
	act_alg_adjust(IPSEC_ALG_AUTH, ipp->ipp_esp_auth_alg,
	    &ipp->ipp_espa_minbits, &ipp->ipp_espa_maxbits);
	act_alg_adjust(IPSEC_ALG_ENCR, ipp->ipp_encr_alg,
	    &ipp->ipp_espe_minbits, &ipp->ipp_espe_maxbits);

	if (ipp->ipp_ah_minbits > ipp->ipp_ah_maxbits) {
		*diag = SPD_DIAGNOSTIC_UNSUPP_AH_KEYSIZE;
		return (B_FALSE);
	}
	if (ipp->ipp_espa_minbits > ipp->ipp_espa_maxbits) {
		*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_KEYSIZE;
		return (B_FALSE);
	}
	if (ipp->ipp_espe_minbits > ipp->ipp_espe_maxbits) {
		*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_KEYSIZE;
		return (B_FALSE);
	}
	/* TODO: sanity check lifetimes */

	return (B_TRUE);
}

/*
 * Set up a single action during wildcard expansion..
 * Copies the template action, fills in the three concrete algorithm
 * ids, and clips the key-size bounds to each algorithm's capabilities.
 */
static void
ipsec_setup_act(ipsec_act_t *outact, ipsec_act_t *act,
    uint_t auth_alg, uint_t encr_alg, uint_t eauth_alg)
{
	ipsec_prot_t *ipp;

	*outact = *act;
	ipp = &outact->ipa_apply;
	ipp->ipp_auth_alg = (uint8_t)auth_alg;
	ipp->ipp_encr_alg = (uint8_t)encr_alg;
	ipp->ipp_esp_auth_alg = (uint8_t)eauth_alg;

	act_alg_adjust(IPSEC_ALG_AUTH, auth_alg,
	    &ipp->ipp_ah_minbits, &ipp->ipp_ah_maxbits);
	act_alg_adjust(IPSEC_ALG_AUTH, eauth_alg,
	    &ipp->ipp_espa_minbits, &ipp->ipp_espa_maxbits);
	act_alg_adjust(IPSEC_ALG_ENCR, encr_alg,
	    &ipp->ipp_espe_minbits, &ipp->ipp_espe_maxbits);
}

/*
 * combinatoric expansion time: expand a wildcarded action into an
 * array of wildcarded actions; we return the exploded action list,
 * and return a count in *nact (output only).
 *
 * Returns NULL on allocation failure (note that in that case *nact
 * may still have been set for the non-APPLY path).
 */
static ipsec_act_t *
ipsec_act_wildcard_expand(ipsec_act_t *act, uint_t *nact)
{
	boolean_t use_ah, use_esp, use_espa;
	boolean_t wild_auth, wild_encr, wild_eauth;
	uint_t	auth_alg, auth_idx, auth_min, auth_max;
	uint_t	eauth_alg, eauth_idx, eauth_min, eauth_max;
	uint_t  encr_alg, encr_idx, encr_min, encr_max;
	uint_t	action_count, ai;
	ipsec_act_t *outact;

	if (act->ipa_type != IPSEC_ACT_APPLY) {
		/* Non-APPLY actions have nothing to expand; copy as-is. */
		outact = kmem_alloc(sizeof (*act), KM_NOSLEEP);
		*nact = 1;
		if (outact != NULL)
			bcopy(act, outact, sizeof (*act));
		return (outact);
	}
	/*
	 * compute the combinatoric explosion..
	 *
	 * we assume a request for encr if esp_req is PREF_REQUIRED
	 * we assume a request for ah auth if ah_req is PREF_REQUIRED
	 * we assume a request for esp auth if !ah and esp_req is PREF_REQUIRED
	 */

	use_ah = act->ipa_apply.ipp_use_ah;
	use_esp = act->ipa_apply.ipp_use_esp;
	use_espa = act->ipa_apply.ipp_use_espa;
	auth_alg = act->ipa_apply.ipp_auth_alg;
	eauth_alg = act->ipa_apply.ipp_esp_auth_alg;
	encr_alg = act->ipa_apply.ipp_encr_alg;

	/* An algorithm id of 0 with the protocol in use means "any". */
	wild_auth = use_ah && (auth_alg == 0);
	wild_eauth = use_espa && (eauth_alg == 0);
	wild_encr = use_esp && (encr_alg == 0);

	action_count = 1;
	auth_min = auth_max = auth_alg;
	eauth_min = eauth_max = eauth_alg;
	encr_min = encr_max = encr_alg;

	/*
	 * set up for explosion.. for each dimension, expand output
	 * size by the explosion factor.
	 *
	 * Don't include the "any" algorithms, if defined, as no
	 * kernel policies should be set for these algorithms.
	 */

#define	SET_EXP_MINMAX(type, wild, alg, min, max) if (wild) {	\
		int nalgs = ipsec_nalgs[type];			\
		if (ipsec_alglists[type][alg] != NULL)		\
			nalgs--;				\
		action_count *= nalgs;				\
		min = 0;					\
		max = ipsec_nalgs[type] - 1;			\
	}

	SET_EXP_MINMAX(IPSEC_ALG_AUTH, wild_auth, SADB_AALG_NONE,
	    auth_min, auth_max);
	SET_EXP_MINMAX(IPSEC_ALG_AUTH, wild_eauth, SADB_AALG_NONE,
	    eauth_min, eauth_max);
	SET_EXP_MINMAX(IPSEC_ALG_ENCR, wild_encr, SADB_EALG_NONE,
	    encr_min, encr_max);

#undef	SET_EXP_MINMAX

	/*
	 * ok, allocate the whole mess..
	 */

	outact = kmem_alloc(sizeof (*outact) * action_count, KM_NOSLEEP);
	if (outact == NULL)
		return (NULL);

	/*
	 * Now compute all combinations.  Note that non-wildcarded
	 * dimensions just get a single value from auth_min, while
	 * wildcarded dimensions indirect through the sortlist.
	 *
	 * We do encryption outermost since, at this time, there's
	 * greater difference in security and performance between
	 * encryption algorithms vs. authentication algorithms.
	 */

	ai = 0;

#define	WHICH_ALG(type, wild, idx) ((wild)?(ipsec_sortlist[type][idx]):(idx))

	for (encr_idx = encr_min; encr_idx <= encr_max; encr_idx++) {
		encr_alg = WHICH_ALG(IPSEC_ALG_ENCR, wild_encr, encr_idx);
		if (wild_encr && encr_alg == SADB_EALG_NONE)
			continue;
		for (auth_idx = auth_min; auth_idx <= auth_max; auth_idx++) {
			auth_alg = WHICH_ALG(IPSEC_ALG_AUTH, wild_auth,
			    auth_idx);
			if (wild_auth && auth_alg == SADB_AALG_NONE)
				continue;
			for (eauth_idx = eauth_min; eauth_idx <= eauth_max;
			    eauth_idx++) {
				eauth_alg = WHICH_ALG(IPSEC_ALG_AUTH,
				    wild_eauth, eauth_idx);
				if (wild_eauth && eauth_alg == SADB_AALG_NONE)
					continue;

				ipsec_setup_act(&outact[ai], act,
				    auth_alg, encr_alg, eauth_alg);
				ai++;
			}
		}
	}

#undef WHICH_ALG

	ASSERT(ai == action_count);
	*nact = action_count;
	return (outact);
}

/*
 * Extract the parts of an ipsec_prot_t from an old-style ipsec_req_t.
 */
static void
ipsec_prot_from_req(ipsec_req_t *req, ipsec_prot_t *ipp)
{
	bzero(ipp, sizeof (*ipp));
	/*
	 * ipp_use_* are bitfields.  Look at "!!" in the following as a
	 * "boolean canonicalization" operator.
	 */
	ipp->ipp_use_ah = !!(req->ipsr_ah_req & IPSEC_PREF_REQUIRED);
	ipp->ipp_use_esp = !!(req->ipsr_esp_req & IPSEC_PREF_REQUIRED);
	/* ESP auth: explicitly requested alg, or implied when AH is off. */
	ipp->ipp_use_espa = !!(req->ipsr_esp_auth_alg) || !ipp->ipp_use_ah;
	ipp->ipp_use_se = !!(req->ipsr_self_encap_req & IPSEC_PREF_REQUIRED);
	ipp->ipp_use_unique = !!((req->ipsr_ah_req|req->ipsr_esp_req) &
	    IPSEC_PREF_UNIQUE);
	ipp->ipp_encr_alg = req->ipsr_esp_alg;
	ipp->ipp_auth_alg = req->ipsr_auth_alg;
	ipp->ipp_esp_auth_alg = req->ipsr_esp_auth_alg;
}

/*
 * Extract a new-style action from a request.
832 */ 833 void 834 ipsec_actvec_from_req(ipsec_req_t *req, ipsec_act_t **actp, uint_t *nactp) 835 { 836 struct ipsec_act act; 837 bzero(&act, sizeof (act)); 838 if ((req->ipsr_ah_req & IPSEC_PREF_NEVER) && 839 (req->ipsr_esp_req & IPSEC_PREF_NEVER)) { 840 act.ipa_type = IPSEC_ACT_BYPASS; 841 } else { 842 act.ipa_type = IPSEC_ACT_APPLY; 843 ipsec_prot_from_req(req, &act.ipa_apply); 844 } 845 *actp = ipsec_act_wildcard_expand(&act, nactp); 846 } 847 848 /* 849 * Convert a new-style "prot" back to an ipsec_req_t (more backwards compat). 850 * We assume caller has already zero'ed *req for us. 851 */ 852 static int 853 ipsec_req_from_prot(ipsec_prot_t *ipp, ipsec_req_t *req) 854 { 855 req->ipsr_esp_alg = ipp->ipp_encr_alg; 856 req->ipsr_auth_alg = ipp->ipp_auth_alg; 857 req->ipsr_esp_auth_alg = ipp->ipp_esp_auth_alg; 858 859 if (ipp->ipp_use_unique) { 860 req->ipsr_ah_req |= IPSEC_PREF_UNIQUE; 861 req->ipsr_esp_req |= IPSEC_PREF_UNIQUE; 862 } 863 if (ipp->ipp_use_se) 864 req->ipsr_self_encap_req |= IPSEC_PREF_REQUIRED; 865 if (ipp->ipp_use_ah) 866 req->ipsr_ah_req |= IPSEC_PREF_REQUIRED; 867 if (ipp->ipp_use_esp) 868 req->ipsr_esp_req |= IPSEC_PREF_REQUIRED; 869 return (sizeof (*req)); 870 } 871 872 /* 873 * Convert a new-style action back to an ipsec_req_t (more backwards compat). 874 * We assume caller has already zero'ed *req for us. 875 */ 876 static int 877 ipsec_req_from_act(ipsec_action_t *ap, ipsec_req_t *req) 878 { 879 switch (ap->ipa_act.ipa_type) { 880 case IPSEC_ACT_BYPASS: 881 req->ipsr_ah_req = IPSEC_PREF_NEVER; 882 req->ipsr_esp_req = IPSEC_PREF_NEVER; 883 return (sizeof (*req)); 884 case IPSEC_ACT_APPLY: 885 return (ipsec_req_from_prot(&ap->ipa_act.ipa_apply, req)); 886 } 887 return (sizeof (*req)); 888 } 889 890 /* 891 * Convert a new-style action back to an ipsec_req_t (more backwards compat). 892 * We assume caller has already zero'ed *req for us. 
*/
/*
 * Derive an ipsec_req_t from the first non-wildcard inbound rule on a
 * policy head for the given address family; if none matches, *req is
 * left as the caller zeroed it.
 */
static int
ipsec_req_from_head(ipsec_policy_head_t *ph, ipsec_req_t *req, int af)
{
	ipsec_policy_t *p;

	for (p = ph->iph_root[IPSEC_INBOUND].ipr[af];
	    p != NULL;
	    p = p->ipsp_links.itl_next) {
		if ((p->ipsp_sel->ipsl_key.ipsl_valid&IPSL_WILDCARD) == 0)
			return (ipsec_req_from_act(p->ipsp_act, req));
	}
	return (sizeof (*req));
}

/*
 * Based on per-socket or latched policy, convert to an appropriate
 * IP_SEC_OPT ipsec_req_t for the socket option; return size so we can
 * be tail-called from ip.
 */
int
ipsec_req_from_conn(conn_t *connp, ipsec_req_t *req, int af)
{
	ipsec_latch_t *ipl;
	int rv = sizeof (ipsec_req_t);

	bzero(req, sizeof (*req));

	/* conn_lock protects conn_latch and conn_policy for the lookup. */
	mutex_enter(&connp->conn_lock);
	ipl = connp->conn_latch;

	/*
	 * Find appropriate policy.  First choice is latched action;
	 * failing that, see latched policy; failing that,
	 * look at configured policy.
	 */
	if (ipl != NULL) {
		if (ipl->ipl_in_action != NULL) {
			rv = ipsec_req_from_act(ipl->ipl_in_action, req);
			goto done;
		}
		if (ipl->ipl_in_policy != NULL) {
			rv = ipsec_req_from_act(ipl->ipl_in_policy->ipsp_act,
			    req);
			goto done;
		}
	}
	if (connp->conn_policy != NULL)
		rv = ipsec_req_from_head(connp->conn_policy, req, af);
done:
	mutex_exit(&connp->conn_lock);
	return (rv);
}

/* Free an action vector previously built by ipsec_actvec_from_req(). */
void
ipsec_actvec_free(ipsec_act_t *act, uint_t nact)
{
	kmem_free(act, nact * sizeof (*act));
}

/*
 * When outbound policy is not cached, look it up the hard way and attach
 * an ipsec_out_t to the packet..
*/
static mblk_t *
ipsec_attach_global_policy(mblk_t *mp, conn_t *connp, ipsec_selector_t *sel)
{
	ipsec_policy_t *p;

	p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, NULL, sel);

	/* No matching global policy: caller gets NULL, mp is untouched. */
	if (p == NULL)
		return (NULL);
	return (ipsec_attach_ipsec_out(mp, connp, p, sel->ips_protocol));
}

/*
 * We have an ipsec_out already, but don't have cached policy; fill it in
 * with the right actions.
 */
static mblk_t *
ipsec_apply_global_policy(mblk_t *ipsec_mp, conn_t *connp,
    ipsec_selector_t *sel)
{
	ipsec_out_t *io;
	ipsec_policy_t *p;

	ASSERT(ipsec_mp->b_datap->db_type == M_CTL);
	ASSERT(ipsec_mp->b_cont->b_datap->db_type == M_DATA);

	io = (ipsec_out_t *)ipsec_mp->b_rptr;

	if (io->ipsec_out_policy == NULL) {
		p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, io, sel);
		io->ipsec_out_policy = p;
	}
	return (ipsec_mp);
}


/* ARGSUSED */
/*
 * Consumes a reference to ipsp.
 * Loopback traffic is trusted: the ipsec_in must be marked loopback,
 * and the packet is passed through unchecked.
 */
static mblk_t *
ipsec_check_loopback_policy(queue_t *q, mblk_t *first_mp,
    boolean_t mctl_present, ipsec_policy_t *ipsp)
{
	mblk_t *ipsec_mp;
	ipsec_in_t *ii;

	if (!mctl_present)
		return (first_mp);

	ipsec_mp = first_mp;

	ii = (ipsec_in_t *)ipsec_mp->b_rptr;
	ASSERT(ii->ipsec_in_loopback);
	IPPOL_REFRELE(ipsp);

	/*
	 * We should do an actual policy check here.  Revisit this
	 * when we revisit the IPsec API.
	 */

	return (first_mp);
}

/*
 * Check the protection an inbound packet actually arrived with (the
 * AH/ESP SAs recorded in the ipsec_in_t) against one candidate action
 * from policy.  On failure, *reason and *counter are set for the
 * packet dropper.  On an accepted APPLY action, the action is refheld
 * and latched into ii->ipsec_in_action.
 */
static boolean_t
ipsec_check_ipsecin_action(ipsec_in_t *ii, mblk_t *mp, ipsec_action_t *ap,
    ipha_t *ipha, ip6_t *ip6h, const char **reason, kstat_named_t **counter)
{
	boolean_t ret = B_TRUE;
	ipsec_prot_t *ipp;
	ipsa_t *ah_assoc;
	ipsa_t *esp_assoc;
	boolean_t decaps;

	ASSERT((ipha == NULL && ip6h != NULL) ||
	    (ip6h == NULL && ipha != NULL));

	if (ii->ipsec_in_loopback) {
		/*
		 * Besides accepting pointer-equivalent actions, we also
		 * accept any ICMP errors we generated for ourselves,
		 * regardless of policy.  If we do not wish to make this
		 * assumption in the future, check here, and where
		 * icmp_loopback is initialized in ip.c and ip6.c.  (Look for
		 * ipsec_out_icmp_loopback.)
		 */
		if (ap == ii->ipsec_in_action || ii->ipsec_in_icmp_loopback)
			return (B_TRUE);

		/* Deep compare necessary here?? */
		*counter = &ipdrops_spd_loopback_mismatch;
		*reason = "loopback policy mismatch";
		return (B_FALSE);
	}
	ASSERT(!ii->ipsec_in_icmp_loopback);

	ah_assoc = ii->ipsec_in_ah_sa;
	esp_assoc = ii->ipsec_in_esp_sa;

	decaps = ii->ipsec_in_decaps;

	switch (ap->ipa_act.ipa_type) {
	case IPSEC_ACT_DISCARD:
	case IPSEC_ACT_REJECT:
		/* Should "fail hard" */
		*counter = &ipdrops_spd_explicit;
		*reason = "blocked by policy";
		return (B_FALSE);

	case IPSEC_ACT_BYPASS:
	case IPSEC_ACT_CLEAR:
		/* Action expects cleartext but the packet was protected. */
		*counter = &ipdrops_spd_got_secure;
		*reason = "expected clear, got protected";
		return (B_FALSE);

	case IPSEC_ACT_APPLY:
		ipp = &ap->ipa_act.ipa_apply;
		/*
		 * As of now we do the simple checks of whether
		 * the datagram has gone through the required IPSEC
		 * protocol constraints or not.  We might have more
		 * in the future like sensitive levels, key bits, etc.
		 * If it fails the constraints, check whether we would
		 * have accepted this if it had come in clear.
		 */
		if (ipp->ipp_use_ah) {
			if (ah_assoc == NULL) {
				ret = ipsec_inbound_accept_clear(mp, ipha,
				    ip6h);
				*counter = &ipdrops_spd_got_clear;
				*reason = "unprotected not accepted";
				break;
			}
			ASSERT(ah_assoc != NULL);
			ASSERT(ipp->ipp_auth_alg != 0);

			if (ah_assoc->ipsa_auth_alg !=
			    ipp->ipp_auth_alg) {
				*counter = &ipdrops_spd_bad_ahalg;
				*reason = "unacceptable ah alg";
				ret = B_FALSE;
				break;
			}
		} else if (ah_assoc != NULL) {
			/*
			 * Don't allow this. Check IPSEC NOTE above
			 * ip_fanout_proto().
			 */
			*counter = &ipdrops_spd_got_ah;
			*reason = "unexpected AH";
			ret = B_FALSE;
			break;
		}
		if (ipp->ipp_use_esp) {
			if (esp_assoc == NULL) {
				ret = ipsec_inbound_accept_clear(mp, ipha,
				    ip6h);
				*counter = &ipdrops_spd_got_clear;
				*reason = "unprotected not accepted";
				break;
			}
			ASSERT(esp_assoc != NULL);
			ASSERT(ipp->ipp_encr_alg != 0);

			if (esp_assoc->ipsa_encr_alg !=
			    ipp->ipp_encr_alg) {
				*counter = &ipdrops_spd_bad_espealg;
				*reason = "unacceptable esp alg";
				ret = B_FALSE;
				break;
			}
			/*
			 * If the client does not need authentication,
			 * we don't verify the algorithm.
			 */
			if (ipp->ipp_use_espa) {
				if (esp_assoc->ipsa_auth_alg !=
				    ipp->ipp_esp_auth_alg) {
					*counter = &ipdrops_spd_bad_espaalg;
					*reason = "unacceptable esp auth alg";
					ret = B_FALSE;
					break;
				}
			}
		} else if (esp_assoc != NULL) {
			/*
			 * Don't allow this. Check IPSEC NOTE above
			 * ip_fanout_proto().
			 */
			*counter = &ipdrops_spd_got_esp;
			*reason = "unexpected ESP";
			ret = B_FALSE;
			break;
		}
		if (ipp->ipp_use_se) {
			if (!decaps) {
				ret = ipsec_inbound_accept_clear(mp, ipha,
				    ip6h);
				if (!ret) {
					/* XXX mutant? */
					*counter = &ipdrops_spd_bad_selfencap;
					*reason = "self encap not found";
					break;
				}
			}
		} else if (decaps) {
			/*
			 * XXX If the packet comes in tunneled and the
			 * recipient does not expect it to be tunneled, it
			 * is okay. But we drop to be consistent with the
			 * other cases.
			 */
			*counter = &ipdrops_spd_got_selfencap;
			*reason = "unexpected self encap";
			ret = B_FALSE;
			break;
		}
		if (ii->ipsec_in_action != NULL) {
			/*
			 * This can happen if we do a double policy-check on
			 * a packet
			 * XXX XXX should fix this case!
			 */
			IPACT_REFRELE(ii->ipsec_in_action);
		}
		ASSERT(ii->ipsec_in_action == NULL);
		IPACT_REFHOLD(ap);
		ii->ipsec_in_action = ap;
		break;	/* from switch */
	}
	return (ret);
}

/*
 * B_TRUE when the SA's certificate identities match the identities
 * latched on the connection (remote vs. SA source, local vs. SA dest).
 */
static boolean_t
spd_match_inbound_ids(ipsec_latch_t *ipl, ipsa_t *sa)
{
	ASSERT(ipl->ipl_ids_latched == B_TRUE);
	return ipsid_equal(ipl->ipl_remote_cid, sa->ipsa_src_cid) &&
	    ipsid_equal(ipl->ipl_local_cid, sa->ipsa_dst_cid);
}

/*
 * Called to check policy on a latched connection, both from this file
 * and from tcp.c
 */
boolean_t
ipsec_check_ipsecin_latch(ipsec_in_t *ii, mblk_t *mp, ipsec_latch_t *ipl,
    ipha_t *ipha, ip6_t *ip6h, const char **reason, kstat_named_t **counter)
{
	ASSERT(ipl->ipl_ids_latched == B_TRUE);

	if ((ii->ipsec_in_ah_sa != NULL) &&
	    (!spd_match_inbound_ids(ipl, ii->ipsec_in_ah_sa))) {
		*counter = &ipdrops_spd_ah_badid;
		*reason = "AH identity mismatch";
		return (B_FALSE);
	}

	if ((ii->ipsec_in_esp_sa != NULL) &&
	    (!spd_match_inbound_ids(ipl,
ii->ipsec_in_esp_sa))) { 1217 *counter = &ipdrops_spd_esp_badid; 1218 *reason = "ESP identity mismatch"; 1219 return (B_FALSE); 1220 } 1221 1222 return (ipsec_check_ipsecin_action(ii, mp, ipl->ipl_in_action, 1223 ipha, ip6h, reason, counter)); 1224 } 1225 1226 /* 1227 * Check to see whether this secured datagram meets the policy 1228 * constraints specified in ipsp. 1229 * 1230 * Called from ipsec_check_global_policy, and ipsec_check_inbound_policy. 1231 * 1232 * Consumes a reference to ipsp. 1233 */ 1234 static mblk_t * 1235 ipsec_check_ipsecin_policy(queue_t *q, mblk_t *first_mp, ipsec_policy_t *ipsp, 1236 ipha_t *ipha, ip6_t *ip6h) 1237 { 1238 ipsec_in_t *ii; 1239 ipsec_action_t *ap; 1240 const char *reason = "no policy actions found"; 1241 mblk_t *data_mp, *ipsec_mp; 1242 kstat_named_t *counter = &ipdrops_spd_got_secure; 1243 1244 data_mp = first_mp->b_cont; 1245 ipsec_mp = first_mp; 1246 1247 ASSERT(ipsp != NULL); 1248 1249 ASSERT((ipha == NULL && ip6h != NULL) || 1250 (ip6h == NULL && ipha != NULL)); 1251 1252 ii = (ipsec_in_t *)ipsec_mp->b_rptr; 1253 1254 if (ii->ipsec_in_loopback) 1255 return (ipsec_check_loopback_policy(q, first_mp, B_TRUE, ipsp)); 1256 1257 ASSERT(ii->ipsec_in_type == IPSEC_IN); 1258 if (ii->ipsec_in_action != NULL) { 1259 /* 1260 * this can happen if we do a double policy-check on a packet 1261 * Would be nice to be able to delete this test.. 1262 */ 1263 IPACT_REFRELE(ii->ipsec_in_action); 1264 } 1265 ASSERT(ii->ipsec_in_action == NULL); 1266 1267 if (!SA_IDS_MATCH(ii->ipsec_in_ah_sa, ii->ipsec_in_esp_sa)) { 1268 reason = "inbound AH and ESP identities differ"; 1269 counter = &ipdrops_spd_ahesp_diffid; 1270 goto drop; 1271 } 1272 1273 /* 1274 * Ok, now loop through the possible actions and see if any 1275 * of them work for us. 
1276 */ 1277 1278 for (ap = ipsp->ipsp_act; ap != NULL; ap = ap->ipa_next) { 1279 if (ipsec_check_ipsecin_action(ii, data_mp, ap, 1280 ipha, ip6h, &reason, &counter)) { 1281 BUMP_MIB(&ip_mib, ipsecInSucceeded); 1282 IPPOL_REFRELE(ipsp); 1283 return (first_mp); 1284 } 1285 } 1286 drop: 1287 (void) mi_strlog(q, 0, SL_ERROR|SL_WARN|SL_CONSOLE, 1288 "ipsec inbound policy mismatch: %s, packet dropped\n", 1289 reason); 1290 IPPOL_REFRELE(ipsp); 1291 ASSERT(ii->ipsec_in_action == NULL); 1292 BUMP_MIB(&ip_mib, ipsecInFailed); 1293 ip_drop_packet(first_mp, B_TRUE, NULL, NULL, counter, &spd_dropper); 1294 return (NULL); 1295 } 1296 1297 /* 1298 * sleazy prefix-length-based compare. 1299 * another inlining candidate.. 1300 */ 1301 static boolean_t 1302 ip_addr_match(uint8_t *addr1, int pfxlen, in6_addr_t *addr2p) 1303 { 1304 int offset = pfxlen>>3; 1305 int bitsleft = pfxlen & 7; 1306 uint8_t *addr2 = (uint8_t *)addr2p; 1307 1308 /* 1309 * and there was much evil.. 1310 * XXX should inline-expand the bcmp here and do this 32 bits 1311 * or 64 bits at a time.. 1312 */ 1313 return ((bcmp(addr1, addr2, offset) == 0) && 1314 ((bitsleft == 0) || 1315 (((addr1[offset] ^ addr2[offset]) & 1316 (0xff<<(8-bitsleft))) == 0))); 1317 } 1318 1319 /* 1320 * Try to find and return the best policy entry under a given policy 1321 * root for a given set of selectors; the first parameter "best" is 1322 * the current best policy so far. If "best" is non-null, we have a 1323 * reference to it. We return a reference to a policy; if that policy 1324 * is not the original "best", we need to release that reference 1325 * before returning. 1326 */ 1327 static ipsec_policy_t * 1328 ipsec_find_policy_head(ipsec_policy_t *best, 1329 ipsec_policy_head_t *head, int direction, ipsec_selector_t *sel) 1330 { 1331 ipsec_policy_t *p, *curbest; 1332 ipsec_selkey_t *isel; 1333 uint32_t valid; 1334 ipsec_policy_root_t *root; 1335 uint8_t is_icmp_inv_acq = sel->ips_is_icmp_inv_acq; 1336 int bpri = best ? 
best->ipsp_prio : 0; 1337 int af = sel->ips_isv4 ? IPSEC_AF_V4 : IPSEC_AF_V6; 1338 1339 curbest = best; 1340 root = &head->iph_root[direction]; 1341 1342 #ifdef DEBUG 1343 if (is_icmp_inv_acq) { 1344 if (sel->ips_isv4) { 1345 if (sel->ips_protocol != IPPROTO_ICMP) { 1346 cmn_err(CE_WARN, "ipsec_find_policy_head:" 1347 " expecting icmp, got %d", sel->ips_protocol); 1348 } 1349 } else { 1350 if (sel->ips_protocol != IPPROTO_ICMP) { 1351 cmn_err(CE_WARN, "ipsec_find_policy_head:" 1352 " expecting icmpv6, got %d", sel->ips_protocol); 1353 } 1354 } 1355 } 1356 #endif 1357 1358 rw_enter(&head->iph_lock, RW_READER); 1359 1360 for (p = root->ipr[af]; p != NULL; p = p->ipsp_links.itl_next) { 1361 if (p->ipsp_prio <= bpri) 1362 continue; 1363 isel = &p->ipsp_sel->ipsl_key; 1364 valid = isel->ipsl_valid; 1365 1366 if ((valid & IPSL_PROTOCOL) && 1367 (isel->ipsl_proto != sel->ips_protocol)) 1368 continue; 1369 1370 if ((valid & IPSL_REMOTE_ADDR) && 1371 !ip_addr_match((uint8_t *)&isel->ipsl_remote, 1372 isel->ipsl_remote_pfxlen, 1373 &sel->ips_remote_addr_v6)) 1374 continue; 1375 1376 if ((valid & IPSL_LOCAL_ADDR) && 1377 !ip_addr_match((uint8_t *)&isel->ipsl_local, 1378 isel->ipsl_local_pfxlen, 1379 &sel->ips_local_addr_v6)) 1380 continue; 1381 1382 if ((valid & IPSL_REMOTE_PORT) && 1383 isel->ipsl_rport != sel->ips_remote_port) 1384 continue; 1385 1386 if ((valid & IPSL_LOCAL_PORT) && 1387 isel->ipsl_lport != sel->ips_local_port) 1388 continue; 1389 1390 if (!is_icmp_inv_acq) { 1391 if ((valid & IPSL_ICMP_TYPE) && 1392 (isel->ipsl_icmp_type > sel->ips_icmp_type || 1393 isel->ipsl_icmp_type_end < sel->ips_icmp_type)) { 1394 continue; 1395 } 1396 1397 if ((valid & IPSL_ICMP_CODE) && 1398 (isel->ipsl_icmp_code > sel->ips_icmp_code || 1399 isel->ipsl_icmp_code_end < 1400 sel->ips_icmp_code)) { 1401 continue; 1402 } 1403 } else { 1404 /* 1405 * special case for icmp inverse acquire 1406 * we only want policies that aren't drop/pass 1407 */ 1408 if (p->ipsp_act->ipa_act.ipa_type 
!= IPSEC_ACT_APPLY) 1409 continue; 1410 } 1411 1412 /* we matched all the packet-port-field selectors! */ 1413 curbest = p; 1414 bpri = p->ipsp_prio; 1415 } 1416 1417 /* 1418 * Adjust reference counts if we found anything. 1419 */ 1420 if (curbest != best) { 1421 if (curbest != NULL) { 1422 IPPOL_REFHOLD(curbest); 1423 } 1424 if (best != NULL) { 1425 IPPOL_REFRELE(best); 1426 } 1427 } 1428 1429 rw_exit(&head->iph_lock); 1430 1431 return (curbest); 1432 } 1433 1434 /* 1435 * Find the best system policy (either global or per-interface) which 1436 * applies to the given selector; look in all the relevant policy roots 1437 * to figure out which policy wins. 1438 * 1439 * Returns a reference to a policy; caller must release this 1440 * reference when done. 1441 */ 1442 ipsec_policy_t * 1443 ipsec_find_policy(int direction, conn_t *connp, ipsec_out_t *io, 1444 ipsec_selector_t *sel) 1445 { 1446 ipsec_policy_t *p; 1447 1448 p = ipsec_find_policy_head(NULL, &system_policy, direction, sel); 1449 if ((connp != NULL) && (connp->conn_policy != NULL)) { 1450 p = ipsec_find_policy_head(p, connp->conn_policy, 1451 direction, sel); 1452 } else if ((io != NULL) && (io->ipsec_out_polhead != NULL)) { 1453 p = ipsec_find_policy_head(p, io->ipsec_out_polhead, 1454 direction, sel); 1455 } 1456 1457 return (p); 1458 } 1459 1460 /* 1461 * Check with global policy and see whether this inbound 1462 * packet meets the policy constraints. 1463 * 1464 * Locate appropriate policy from global policy, supplemented by the 1465 * conn's configured and/or cached policy if the conn is supplied. 1466 * 1467 * Dispatch to ipsec_check_ipsecin_policy if we have policy and an 1468 * encrypted packet to see if they match. 1469 * 1470 * Otherwise, see if the policy allows cleartext; if not, drop it on the 1471 * floor. 
 */
mblk_t *
ipsec_check_global_policy(mblk_t *first_mp, conn_t *connp,
    ipha_t *ipha, ip6_t *ip6h, boolean_t mctl_present)
{
	ipsec_policy_t *p;
	ipsec_selector_t sel;
	queue_t *q = NULL;
	mblk_t *data_mp, *ipsec_mp;
	boolean_t policy_present;
	kstat_named_t *counter;

	/* With an M_CTL present, the data is in b_cont. */
	data_mp = mctl_present ? first_mp->b_cont : first_mp;
	ipsec_mp = mctl_present ? first_mp : NULL;

	/* Real packet classification, not an ICMP inverse-acquire lookup. */
	sel.ips_is_icmp_inv_acq = 0;

	ASSERT((ipha == NULL && ip6h != NULL) ||
	    (ip6h == NULL && ipha != NULL));

	if (ipha != NULL)
		policy_present = ipsec_inbound_v4_policy_present;
	else
		policy_present = ipsec_inbound_v6_policy_present;

	if (!policy_present && connp == NULL) {
		/*
		 * No global policy and no per-socket policy;
		 * just pass it back (but we shouldn't get here in that case)
		 */
		return (first_mp);
	}

	if (connp != NULL)
		q = CONNP_TO_WQ(connp);

	if (ipsec_mp != NULL) {
		ASSERT(ipsec_mp->b_datap->db_type == M_CTL);
		ASSERT(((ipsec_in_t *)ipsec_mp->b_rptr)->ipsec_in_type ==
		    IPSEC_IN);
	}

	/*
	 * If we have cached policy, use it.
	 * Otherwise consult system policy.
	 */
	if ((connp != NULL) && (connp->conn_latch != NULL)) {
		p = connp->conn_latch->ipl_in_policy;
		if (p != NULL) {
			IPPOL_REFHOLD(p);
		}
	} else {
		/* Initialize the ports in the selector */
		if (!ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h)) {
			/*
			 * Technically not a policy mismatch, but it is
			 * an internal failure.
			 */
			ipsec_log_policy_failure(q, IPSEC_POLICY_MISMATCH,
			    "ipsec_init_inbound_sel", ipha, ip6h, B_FALSE);
			counter = &ipdrops_spd_nomem;
			goto fail;
		}

		/*
		 * Find the policy which best applies.
		 *
		 * If we find global policy, we should look at both
		 * local policy and global policy and see which is
		 * stronger and match accordingly.
		 *
		 * If we don't find a global policy, check with
		 * local policy alone.
		 */

		p = ipsec_find_policy(IPSEC_TYPE_INBOUND, connp, NULL, &sel);
	}

	if (p == NULL) {
		if (ipsec_mp == NULL) {
			/*
			 * We have no policy; default to succeeding.
			 * XXX paranoid system design doesn't do this.
			 */
			BUMP_MIB(&ip_mib, ipsecInSucceeded);
			return (first_mp);
		} else {
			/* Protected packet but no policy asked for it. */
			counter = &ipdrops_spd_got_secure;
			ipsec_log_policy_failure(q, IPSEC_POLICY_NOT_NEEDED,
			    "ipsec_check_global_policy", ipha, ip6h, B_TRUE);
			goto fail;
		}
	}
	/* Protected packet: full check (consumes our reference on p). */
	if (ipsec_mp != NULL)
		return (ipsec_check_ipsecin_policy(q, ipsec_mp, p, ipha, ip6h));
	if (p->ipsp_act->ipa_allow_clear) {
		BUMP_MIB(&ip_mib, ipsecInSucceeded);
		IPPOL_REFRELE(p);
		return (first_mp);
	}
	IPPOL_REFRELE(p);
	/*
	 * If we reach here, we will drop the packet because it failed the
	 * global policy check because the packet was cleartext, and it
	 * should not have been.
	 */
	ipsec_log_policy_failure(q, IPSEC_POLICY_MISMATCH,
	    "ipsec_check_global_policy", ipha, ip6h, B_FALSE);
	counter = &ipdrops_spd_got_clear;

fail:
	ip_drop_packet(first_mp, B_TRUE, NULL, NULL, counter, &spd_dropper);
	BUMP_MIB(&ip_mib, ipsecInFailed);
	return (NULL);
}

/*
 * We check whether an inbound datagram is a valid one
 * to accept in clear. If it is secure, it is the job
 * of IPSEC to log information appropriately if it
 * suspects that it may not be the real one.
 *
 * It is called only while fanning out to the ULP
 * where ULP accepts only secure data and the incoming
 * is clear. Usually we never accept clear datagrams in
 * such cases. ICMP is the only exception.
 *
 * NOTE : We don't call this function if the client (ULP)
 * is willing to accept things in clear.
 */
boolean_t
ipsec_inbound_accept_clear(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h)
{
	ushort_t iph_hdr_length;
	icmph_t *icmph;
	icmp6_t *icmp6;
	uint8_t *nexthdrp;

	ASSERT((ipha != NULL && ip6h == NULL) ||
	    (ipha == NULL && ip6h != NULL));

	if (ip6h != NULL) {
		iph_hdr_length = ip_hdr_length_v6(mp, ip6h);
		if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length,
		    &nexthdrp)) {
			return (B_FALSE);
		}
		/* Only ICMPv6 may be accepted in the clear. */
		if (*nexthdrp != IPPROTO_ICMPV6)
			return (B_FALSE);
		icmp6 = (icmp6_t *)(&mp->b_rptr[iph_hdr_length]);
		/* Match IPv6 ICMP policy as closely as IPv4 as possible. */
		switch (icmp6->icmp6_type) {
		case ICMP6_PARAM_PROB:
			/* Corresponds to port/proto unreach in IPv4. */
		case ICMP6_ECHO_REQUEST:
			/* Just like IPv4. */
			return (B_FALSE);

		case MLD_LISTENER_QUERY:
		case MLD_LISTENER_REPORT:
		case MLD_LISTENER_REDUCTION:
			/*
			 * XXX Seperate NDD in IPv4 what about here?
			 * Plus, mcast is important to ND.
			 */
		case ICMP6_DST_UNREACH:
			/* Corresponds to HOST/NET unreachable in IPv4. */
		case ICMP6_PACKET_TOO_BIG:
		case ICMP6_ECHO_REPLY:
			/* These are trusted in IPv4. */
		case ND_ROUTER_SOLICIT:
		case ND_ROUTER_ADVERT:
		case ND_NEIGHBOR_SOLICIT:
		case ND_NEIGHBOR_ADVERT:
		case ND_REDIRECT:
			/* Trust ND messages for now. */
		case ICMP6_TIME_EXCEEDED:
		default:
			return (B_TRUE);
		}
	} else {
		/*
		 * If it is not ICMP, fail this request.
		 */
		if (ipha->ipha_protocol != IPPROTO_ICMP)
			return (B_FALSE);
		iph_hdr_length = IPH_HDR_LENGTH(ipha);
		icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];
		/*
		 * It is an insecure icmp message. Check to see whether we are
		 * willing to accept this one.
		 */

		switch (icmph->icmph_type) {
		case ICMP_ECHO_REPLY:
		case ICMP_TIME_STAMP_REPLY:
		case ICMP_INFO_REPLY:
		case ICMP_ROUTER_ADVERTISEMENT:
			/*
			 * We should not encourage clear replies if this
			 * client expects secure. If somebody is replying
			 * in clear some mailicious user watching both the
			 * request and reply, can do chosen-plain-text attacks.
			 * With global policy we might be just expecting secure
			 * but sending out clear. We don't know what the right
			 * thing is. We can't do much here as we can't control
			 * the sender here. Till we are sure of what to do,
			 * accept them.
			 */
			return (B_TRUE);
		case ICMP_ECHO_REQUEST:
		case ICMP_TIME_STAMP_REQUEST:
		case ICMP_INFO_REQUEST:
		case ICMP_ADDRESS_MASK_REQUEST:
		case ICMP_ROUTER_SOLICITATION:
		case ICMP_ADDRESS_MASK_REPLY:
			/*
			 * Don't accept this as somebody could be sending
			 * us plain text to get encrypted data. If we reply,
			 * it will lead to chosen plain text attack.
			 */
			return (B_FALSE);
		case ICMP_DEST_UNREACHABLE:
			switch (icmph->icmph_code) {
			case ICMP_FRAGMENTATION_NEEDED:
				/*
				 * Be in sync with icmp_inbound, where we have
				 * already set ire_max_frag.
				 */
				return (B_TRUE);
			case ICMP_HOST_UNREACHABLE:
			case ICMP_NET_UNREACHABLE:
				/*
				 * By accepting, we could reset a connection.
				 * How do we solve the problem of some
				 * intermediate router sending in-secure ICMP
				 * messages ?
				 */
				return (B_TRUE);
			case ICMP_PORT_UNREACHABLE:
			case ICMP_PROTOCOL_UNREACHABLE:
			default :
				return (B_FALSE);
			}
		case ICMP_SOURCE_QUENCH:
			/*
			 * If this is an attack, TCP will slow start
			 * because of this. Is it very harmful ?
			 */
			return (B_TRUE);
		case ICMP_PARAM_PROBLEM:
			return (B_FALSE);
		case ICMP_TIME_EXCEEDED:
			return (B_TRUE);
		case ICMP_REDIRECT:
			return (B_FALSE);
		default :
			return (B_FALSE);
		}
	}
}

/*
 * Latch the given identity pair into ipl exactly once; the first caller
 * wins and racing callers return without changing anything.  Takes a
 * reference (IPSID_REFHOLD) on each non-NULL identity that is stored.
 */
void
ipsec_latch_ids(ipsec_latch_t *ipl, ipsid_t *local, ipsid_t *remote)
{
	mutex_enter(&ipl->ipl_lock);

	if (ipl->ipl_ids_latched) {
		/* I lost, someone else got here before me */
		mutex_exit(&ipl->ipl_lock);
		return;
	}

	if (local != NULL)
		IPSID_REFHOLD(local);
	if (remote != NULL)
		IPSID_REFHOLD(remote);

	ipl->ipl_local_cid = local;
	ipl->ipl_remote_cid = remote;
	ipl->ipl_ids_latched = B_TRUE;
	mutex_exit(&ipl->ipl_lock);
}

/*
 * Latch identities from the packet's inbound SA (ESP preferred over AH;
 * loopback packets latch NULL identities) and cache the successful
 * inbound action on the latch, taking a reference to it.
 */
void
ipsec_latch_inbound(ipsec_latch_t *ipl, ipsec_in_t *ii)
{
	ipsa_t *sa;

	if (!ipl->ipl_ids_latched) {
		ipsid_t *local = NULL;
		ipsid_t *remote = NULL;

		if (!ii->ipsec_in_loopback) {
			if (ii->ipsec_in_esp_sa != NULL)
				sa = ii->ipsec_in_esp_sa;
			else
				sa = ii->ipsec_in_ah_sa;
			ASSERT(sa != NULL);
			local = sa->ipsa_dst_cid;
			remote = sa->ipsa_src_cid;
		}
		ipsec_latch_ids(ipl, local, remote);
	}
	ipl->ipl_in_action = ii->ipsec_in_action;
	IPACT_REFHOLD(ipl->ipl_in_action);
}

/*
 * Check whether the policy constraints are met either for an
 * inbound datagram; called from IP in numerous places.
 *
 * Note that this is not a chokepoint for inbound policy checks;
 * see also ipsec_check_ipsecin_latch()
 *
 * Returns first_mp on acceptance, NULL when the packet has been dropped.
 */
mblk_t *
ipsec_check_inbound_policy(mblk_t *first_mp, conn_t *connp,
    ipha_t *ipha, ip6_t *ip6h, boolean_t mctl_present)
{
	ipsec_in_t *ii;
	boolean_t ret;
	mblk_t *mp = mctl_present ? first_mp->b_cont : first_mp;
	mblk_t *ipsec_mp = mctl_present ? first_mp : NULL;
	ipsec_latch_t *ipl;

	ASSERT(connp != NULL);
	ipl = connp->conn_latch;

	if (ipsec_mp == NULL) {
		/*
		 * This is the case where the incoming datagram is
		 * cleartext and we need to see whether this client
		 * would like to receive such untrustworthy things from
		 * the wire.
		 */
		ASSERT(mp != NULL);

		if (ipl != NULL) {
			/*
			 * Policy is cached in the conn.
			 */
			if ((ipl->ipl_in_policy != NULL) &&
			    (!ipl->ipl_in_policy->ipsp_act->ipa_allow_clear)) {
				ret = ipsec_inbound_accept_clear(mp,
				    ipha, ip6h);
				if (ret) {
					BUMP_MIB(&ip_mib, ipsecInSucceeded);
					return (first_mp);
				} else {
					ip_drop_packet(first_mp, B_TRUE, NULL,
					    NULL, &ipdrops_spd_got_clear,
					    &spd_dropper);
					ipsec_log_policy_failure(
					    CONNP_TO_WQ(connp),
					    IPSEC_POLICY_MISMATCH,
					    "ipsec_check_inbound_policy", ipha,
					    ip6h, B_FALSE);
					BUMP_MIB(&ip_mib, ipsecInFailed);
					return (NULL);
				}
			} else {
				BUMP_MIB(&ip_mib, ipsecInSucceeded);
				return (first_mp);
			}
		} else {
			/*
			 * As this is a non-hardbound connection we need
			 * to look at both per-socket policy and global
			 * policy. As this is cleartext, mark the mp as
			 * M_DATA in case if it is an ICMP error being
			 * reported before calling ipsec_check_global_policy
			 * so that it does not mistake it for IPSEC_IN.
			 */
			uchar_t db_type = mp->b_datap->db_type;
			mp->b_datap->db_type = M_DATA;
			first_mp = ipsec_check_global_policy(first_mp, connp,
			    ipha, ip6h, mctl_present);
			/* Restore the original db_type if the packet survived. */
			if (first_mp != NULL)
				mp->b_datap->db_type = db_type;
			return (first_mp);
		}
	}
	/*
	 * If it is inbound check whether the attached message
	 * is secure or not. We have a special case for ICMP,
	 * where we have a IPSEC_IN message and the attached
	 * message is not secure. See icmp_inbound_error_fanout
	 * for details.
	 */
	ASSERT(ipsec_mp != NULL);
	ASSERT(ipsec_mp->b_datap->db_type == M_CTL);
	ii = (ipsec_in_t *)ipsec_mp->b_rptr;

	/*
	 * mp->b_cont could be either a M_CTL message
	 * for icmp errors being sent up or a M_DATA message.
	 */
	ASSERT(mp->b_datap->db_type == M_CTL ||
	    mp->b_datap->db_type == M_DATA);

	ASSERT(ii->ipsec_in_type == IPSEC_IN);

	if (ipl == NULL) {
		/*
		 * We don't have policies cached in the conn's
		 * for this stream. So, look at the global
		 * policy. It will check against conn or global
		 * depending on whichever is stronger.
		 */
		return (ipsec_check_global_policy(first_mp, connp,
		    ipha, ip6h, mctl_present));
	} else if (ipl->ipl_in_action != NULL) {
		/* Policy is cached & latched; fast(er) path */
		const char *reason;
		kstat_named_t *counter;
		if (ipsec_check_ipsecin_latch(ii, mp, ipl,
		    ipha, ip6h, &reason, &counter)) {
			BUMP_MIB(&ip_mib, ipsecInSucceeded);
			return (first_mp);
		}
		(void) mi_strlog(CONNP_TO_WQ(connp), 0,
		    SL_ERROR|SL_WARN|SL_CONSOLE,
		    "ipsec inbound policy mismatch: %s, packet dropped\n",
		    reason);
		ip_drop_packet(first_mp, B_TRUE, NULL, NULL, counter,
		    &spd_dropper);
		BUMP_MIB(&ip_mib, ipsecInFailed);
		return (NULL);
	} else if (ipl->ipl_in_policy == NULL)
		return (first_mp);

	/* Hand our hold on ipl_in_policy to the check (it consumes one). */
	IPPOL_REFHOLD(ipl->ipl_in_policy);
	first_mp = ipsec_check_ipsecin_policy(CONNP_TO_WQ(connp), first_mp,
	    ipl->ipl_in_policy, ipha, ip6h);
	/*
	 * NOTE: ipsecIn{Failed,Succeeeded} bumped by
	 * ipsec_check_ipsecin_policy().
	 */
	if (first_mp != NULL)
		ipsec_latch_inbound(ipl, ii);
	return (first_mp);
}

/*
 * Fill in an inbound policy selector (addresses, protocol, ports or ICMP
 * type/code) from the packet headers.  Returns B_FALSE on allocation
 * failure or a malformed IPv6 extension-header chain; the packet mp
 * itself is NOT freed on failure (contrast ipsec_init_outbound_ports()).
 */
boolean_t
ipsec_init_inbound_sel(ipsec_selector_t *sel, mblk_t *mp,
    ipha_t *ipha, ip6_t *ip6h)
{
	uint16_t *ports;
	ushort_t hdr_len;
	mblk_t *spare_mp = NULL;
	uint8_t *nexthdrp;
	uint8_t nexthdr;
	uint8_t *typecode;
	uint8_t check_proto;

	ASSERT((ipha == NULL && ip6h != NULL) ||
	    (ipha != NULL && ip6h == NULL));

	if (ip6h != NULL) {
		check_proto = IPPROTO_ICMPV6;
		sel->ips_isv4 = B_FALSE;
		sel->ips_local_addr_v6 = ip6h->ip6_dst;
		sel->ips_remote_addr_v6 = ip6h->ip6_src;

		nexthdr = ip6h->ip6_nxt;
		switch (nexthdr) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			/*
			 * Use ip_hdr_length_nexthdr_v6(). And have a spare
			 * mblk that's contiguous to feed it
			 */
			if ((spare_mp = msgpullup(mp, -1)) == NULL)
				return (B_FALSE);
			if (!ip_hdr_length_nexthdr_v6(spare_mp,
			    (ip6_t *)spare_mp->b_rptr, &hdr_len, &nexthdrp)) {
				/* Malformed packet - XXX ip_drop_packet()? */
				freemsg(spare_mp);
				return (B_FALSE);
			}
			nexthdr = *nexthdrp;
			/* We can just extract based on hdr_len now. */
			break;
		default:
			hdr_len = IPV6_HDR_LEN;
			break;
		}
	} else {
		check_proto = IPPROTO_ICMP;
		sel->ips_isv4 = B_TRUE;
		sel->ips_local_addr_v4 = ipha->ipha_dst;
		sel->ips_remote_addr_v4 = ipha->ipha_src;
		nexthdr = ipha->ipha_protocol;
		hdr_len = IPH_HDR_LENGTH(ipha);
	}
	sel->ips_protocol = nexthdr;

	/* No port/type information for other protocols; done. */
	if (nexthdr != IPPROTO_TCP && nexthdr != IPPROTO_UDP &&
	    nexthdr != IPPROTO_SCTP && nexthdr != check_proto) {
		sel->ips_remote_port = sel->ips_local_port = 0;
		freemsg(spare_mp);	/* Always works, even if NULL. */
		return (B_TRUE);
	}

	/* Need 4 contiguous bytes (ports, or ICMP type/code) past hdr_len. */
	if (&mp->b_rptr[hdr_len] + 4 > mp->b_wptr) {
		/* If we didn't pullup a copy already, do so now. */
		/*
		 * XXX performance, will upper-layers frequently split TCP/UDP
		 * apart from IP or options? If so, perhaps we should revisit
		 * the spare_mp strategy.
		 */
		if (spare_mp == NULL &&
		    (spare_mp = msgpullup(mp, -1)) == NULL) {
			return (B_FALSE);
		}
		ports = (uint16_t *)&spare_mp->b_rptr[hdr_len];
	} else {
		ports = (uint16_t *)&mp->b_rptr[hdr_len];
	}

	if (nexthdr == check_proto) {
		/* ICMP/ICMPv6: record type and code instead of ports. */
		typecode = (uint8_t *)ports;
		sel->ips_icmp_type = *typecode++;
		sel->ips_icmp_code = *typecode;
		sel->ips_remote_port = sel->ips_local_port = 0;
		freemsg(spare_mp);	/* Always works, even if NULL */
		return (B_TRUE);
	}

	sel->ips_remote_port = *ports++;
	sel->ips_local_port = *ports;
	freemsg(spare_mp);	/* Always works, even if NULL */
	return (B_TRUE);
}

/*
 * Outbound analogue of ipsec_init_inbound_sel(): fill in the protocol and
 * port/ICMP type-code fields of *sel from the packet.  NOTE: unlike the
 * inbound version, this frees mp on failure.
 */
static boolean_t
ipsec_init_outbound_ports(ipsec_selector_t *sel, mblk_t *mp, ipha_t *ipha,
    ip6_t *ip6h)
{
	/*
	 * XXX cut&paste shared with ipsec_init_inbound_sel
	 */
	uint16_t *ports;
	ushort_t hdr_len;
	mblk_t *spare_mp = NULL;
	uint8_t *nexthdrp;
	uint8_t nexthdr;
	uint8_t *typecode;
	uint8_t check_proto;

	ASSERT((ipha == NULL && ip6h != NULL) ||
	    (ipha != NULL && ip6h == NULL));

	if (ip6h != NULL) {
		check_proto = IPPROTO_ICMPV6;
		nexthdr = ip6h->ip6_nxt;
		switch (nexthdr) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			/*
			 * Use ip_hdr_length_nexthdr_v6(). And have a spare
			 * mblk that's contiguous to feed it
			 */
			spare_mp = msgpullup(mp, -1);
			if (spare_mp == NULL ||
			    !ip_hdr_length_nexthdr_v6(spare_mp,
			    (ip6_t *)spare_mp->b_rptr, &hdr_len,
			    &nexthdrp)) {
				/* Always works, even if NULL. */
				freemsg(spare_mp);
				freemsg(mp);
				return (B_FALSE);
			} else {
				nexthdr = *nexthdrp;
				/* We can just extract based on hdr_len now. */
			}
			break;
		default:
			hdr_len = IPV6_HDR_LEN;
			break;
		}
	} else {
		check_proto = IPPROTO_ICMP;
		hdr_len = IPH_HDR_LENGTH(ipha);
		nexthdr = ipha->ipha_protocol;
	}

	sel->ips_protocol = nexthdr;
	if (nexthdr != IPPROTO_TCP && nexthdr != IPPROTO_UDP &&
	    nexthdr != IPPROTO_SCTP && nexthdr != check_proto) {
		sel->ips_local_port = sel->ips_remote_port = 0;
		freemsg(spare_mp);	/* Always works, even if NULL. */
		return (B_TRUE);
	}

	if (&mp->b_rptr[hdr_len] + 4 > mp->b_wptr) {
		/* If we didn't pullup a copy already, do so now. */
		/*
		 * XXX performance, will upper-layers frequently split TCP/UDP
		 * apart from IP or options? If so, perhaps we should revisit
		 * the spare_mp strategy.
		 *
		 * XXX should this be msgpullup(mp, hdr_len+4) ???
		 */
		if (spare_mp == NULL &&
		    (spare_mp = msgpullup(mp, -1)) == NULL) {
			freemsg(mp);
			return (B_FALSE);
		}
		ports = (uint16_t *)&spare_mp->b_rptr[hdr_len];
	} else {
		ports = (uint16_t *)&mp->b_rptr[hdr_len];
	}

	if (nexthdr == check_proto) {
		typecode = (uint8_t *)ports;
		sel->ips_icmp_type = *typecode++;
		sel->ips_icmp_code = *typecode;
		sel->ips_remote_port = sel->ips_local_port = 0;
		freemsg(spare_mp);	/* Always works, even if NULL */
		return (B_TRUE);
	}

	/* Note the outbound swap: first port is local, second remote. */
	sel->ips_local_port = *ports++;
	sel->ips_remote_port = *ports;
	freemsg(spare_mp);	/* Always works, even if NULL */
	return (B_TRUE);
}

/*
 * Create an ipsec_action_t based on the way an inbound packet was protected.
 * Used to reflect traffic back to a sender.
 *
 * We don't bother interning the action into the hash table.
 */
ipsec_action_t *
ipsec_in_to_out_action(ipsec_in_t *ii)
{
	ipsa_t *ah_assoc, *esp_assoc;
	uint_t auth_alg = 0, encr_alg = 0, espa_alg = 0;
	ipsec_action_t *ap;
	boolean_t unique;

	/* NOSLEEP allocation; on failure, return NULL to the caller. */
	ap = kmem_cache_alloc(ipsec_action_cache, KM_NOSLEEP);

	if (ap == NULL)
		return (NULL);

	bzero(ap, sizeof (*ap));
	ap->ipa_hash.hash_next = NULL;
	ap->ipa_hash.hash_pp = NULL;
	ap->ipa_next = NULL;
	ap->ipa_refs = 1;

	/*
	 * Get the algorithms that were used for this packet.
	 */
	ap->ipa_act.ipa_type = IPSEC_ACT_APPLY;
	ap->ipa_act.ipa_log = 0;
	ah_assoc = ii->ipsec_in_ah_sa;
	ap->ipa_act.ipa_apply.ipp_use_ah = (ah_assoc != NULL);

	esp_assoc = ii->ipsec_in_esp_sa;
	ap->ipa_act.ipa_apply.ipp_use_esp = (esp_assoc != NULL);

	if (esp_assoc != NULL) {
		encr_alg = esp_assoc->ipsa_encr_alg;
		espa_alg = esp_assoc->ipsa_auth_alg;
		ap->ipa_act.ipa_apply.ipp_use_espa = (espa_alg != 0);
	}
	if (ah_assoc != NULL)
		auth_alg = ah_assoc->ipsa_auth_alg;

	ap->ipa_act.ipa_apply.ipp_encr_alg = (uint8_t)encr_alg;
	ap->ipa_act.ipa_apply.ipp_auth_alg = (uint8_t)auth_alg;
	ap->ipa_act.ipa_apply.ipp_esp_auth_alg = (uint8_t)espa_alg;
	ap->ipa_act.ipa_apply.ipp_use_se = ii->ipsec_in_decaps;
	unique = B_FALSE;

	/* Pin key sizes to exactly what the inbound SAs used (min == max). */
	if (esp_assoc != NULL) {
		ap->ipa_act.ipa_apply.ipp_espa_minbits =
		    esp_assoc->ipsa_authkeybits;
		ap->ipa_act.ipa_apply.ipp_espa_maxbits =
		    esp_assoc->ipsa_authkeybits;
		ap->ipa_act.ipa_apply.ipp_espe_minbits =
		    esp_assoc->ipsa_encrkeybits;
		ap->ipa_act.ipa_apply.ipp_espe_maxbits =
		    esp_assoc->ipsa_encrkeybits;
		ap->ipa_act.ipa_apply.ipp_km_proto = esp_assoc->ipsa_kmp;
		ap->ipa_act.ipa_apply.ipp_km_cookie = esp_assoc->ipsa_kmc;
		if (esp_assoc->ipsa_flags & IPSA_F_UNIQUE)
			unique = B_TRUE;
	}
	/*
	 * NOTE: when both SAs are present, the AH SA's key-management
	 * protocol/cookie values overwrite the ones copied from the ESP SA.
	 */
	if (ah_assoc != NULL) {
		ap->ipa_act.ipa_apply.ipp_ah_minbits =
		    ah_assoc->ipsa_authkeybits;
		ap->ipa_act.ipa_apply.ipp_ah_maxbits =
		    ah_assoc->ipsa_authkeybits;
		ap->ipa_act.ipa_apply.ipp_km_proto = ah_assoc->ipsa_kmp;
		ap->ipa_act.ipa_apply.ipp_km_cookie = ah_assoc->ipsa_kmc;
		if (ah_assoc->ipsa_flags & IPSA_F_UNIQUE)
			unique = B_TRUE;
	}
	ap->ipa_act.ipa_apply.ipp_use_unique = unique;
	ap->ipa_want_unique = unique;
	ap->ipa_allow_clear = B_FALSE;
	ap->ipa_want_se = ii->ipsec_in_decaps;
	ap->ipa_want_ah = (ah_assoc != NULL);
	ap->ipa_want_esp = (esp_assoc != NULL);

	ap->ipa_ovhd = ipsec_act_ovhd(&ap->ipa_act);

	ap->ipa_act.ipa_apply.ipp_replay_depth = 0;	/* don't care */

	return (ap);
}


/*
 * Compute the worst-case amount of extra space required by an action.
 * Note that, because of the ESP considerations listed below, this is
 * actually not the same as the best-case reduction in the MTU; in the
 * future, we should pass additional information to this function to
 * allow the actual MTU impact to be computed.
 *
 * AH: Revisit this if we implement algorithms with
 * a verifier size of more than 12 bytes.
 *
 * ESP: A more exact but more messy computation would take into
 * account the interaction between the cipher block size and the
 * effective MTU, yielding the inner payload size which reflects a
 * packet with *minimum* ESP padding..
 */
static int32_t
ipsec_act_ovhd(const ipsec_act_t *act)
{
	int32_t overhead = 0;

	/* Only APPLY actions add per-packet header/trailer bytes. */
	if (act->ipa_type == IPSEC_ACT_APPLY) {
		const ipsec_prot_t *ipp = &act->ipa_apply;

		if (ipp->ipp_use_ah)
			overhead += IPSEC_MAX_AH_HDR_SIZE;
		if (ipp->ipp_use_esp) {
			overhead += IPSEC_MAX_ESP_HDR_SIZE;
			/* room for a UDP encapsulation header as well */
			overhead += sizeof (struct udphdr);
		}
		if (ipp->ipp_use_se)
			overhead += IP_SIMPLE_HDR_LENGTH;
	}
	return (overhead);
}

/*
 * This hash function is used only when creating policies and thus is not
 * performance-critical for packet flows.
 *
 * Future work: canonicalize the structures hashed with this (i.e.,
 * zeroize padding) so the hash works correctly.
 */
/* ARGSUSED */
static uint32_t
policy_hash(int size, const void *start, const void *end)
{
	/* Degenerate hash: everything lands in bucket 0 for now. */
	return (0);
}

/*
 * Intern actions into the action hash table.
 *
 * Walks the array a[0..n-1] from the tail, building (or finding) the
 * shared suffix chain one node at a time; "prev" is the already-interned
 * tail.  A node matches only if both its action value and its ipa_next
 * (chain tail) match, so identical chains are fully shared.
 *
 * Returns the chain head with one extra reference for the caller, or
 * NULL on allocation failure (in which case any tail reference taken so
 * far is dropped via ipsec_action_free()).
 */
ipsec_action_t *
ipsec_act_find(const ipsec_act_t *a, int n)
{
	int i;
	uint32_t hval;
	ipsec_action_t *ap;
	ipsec_action_t *prev = NULL;
	int32_t overhead, maxovhd = 0;
	boolean_t allow_clear = B_FALSE;
	boolean_t want_ah = B_FALSE;
	boolean_t want_esp = B_FALSE;
	boolean_t want_se = B_FALSE;
	boolean_t want_unique = B_FALSE;

	/*
	 * TODO: should canonicalize a[] (i.e., zeroize any padding)
	 * so we can use a non-trivial policy_hash function.
	 */
	for (i = n-1; i >= 0; i--) {
		hval = policy_hash(IPSEC_ACTION_HASH_SIZE, &a[i], &a[n]);

		HASH_LOCK(ipsec_action_hash, hval);

		for (HASH_ITERATE(ap, ipa_hash, ipsec_action_hash, hval)) {
			if (bcmp(&ap->ipa_act, &a[i], sizeof (*a)) != 0)
				continue;
			/* value matches; tail of chain must match too */
			if (ap->ipa_next != prev)
				continue;
			break;
		}
		if (ap != NULL) {
			/* found an existing shared node; reuse it */
			HASH_UNLOCK(ipsec_action_hash, hval);
			prev = ap;
			continue;
		}
		/*
		 * need to allocate a new one..
		 */
		ap = kmem_cache_alloc(ipsec_action_cache, KM_NOSLEEP);
		if (ap == NULL) {
			HASH_UNLOCK(ipsec_action_hash, hval);
			if (prev != NULL)
				ipsec_action_free(prev);
			return (NULL);
		}
		HASH_INSERT(ap, ipa_hash, ipsec_action_hash, hval);

		ap->ipa_next = prev;
		ap->ipa_act = a[i];

		overhead = ipsec_act_ovhd(&a[i]);
		if (maxovhd < overhead)
			maxovhd = overhead;

		/*
		 * Accumulate summary flags over the whole chain built so
		 * far; each new head node carries the running summary.
		 */
		if ((a[i].ipa_type == IPSEC_ACT_BYPASS) ||
		    (a[i].ipa_type == IPSEC_ACT_CLEAR))
			allow_clear = B_TRUE;
		if (a[i].ipa_type == IPSEC_ACT_APPLY) {
			const ipsec_prot_t *ipp = &a[i].ipa_apply;

			ASSERT(ipp->ipp_use_ah || ipp->ipp_use_esp);
			want_ah |= ipp->ipp_use_ah;
			want_esp |= ipp->ipp_use_esp;
			want_se |= ipp->ipp_use_se;
			want_unique |= ipp->ipp_use_unique;
		}
		ap->ipa_allow_clear = allow_clear;
		ap->ipa_want_ah = want_ah;
		ap->ipa_want_esp = want_esp;
		ap->ipa_want_se = want_se;
		ap->ipa_want_unique = want_unique;
		ap->ipa_refs = 1; /* from the hash table */
		ap->ipa_ovhd = maxovhd;
		if (prev)
			prev->ipa_refs++;	/* new node references tail */
		prev = ap;
		HASH_UNLOCK(ipsec_action_hash, hval);
	}

	ap->ipa_refs++;		/* caller's reference */

	return (ap);
}

/*
 * Called when refcount goes to 0, indicating that all references to this
 * node are gone.
 *
 * This does not unchain the action from the hash table.
 */
void
ipsec_action_free(ipsec_action_t *ap)
{
	/*
	 * Free this node and then walk down the chain, releasing one
	 * reference per successor; stop at the first node that is still
	 * referenced.  The loop is the inlined equivalent of calling
	 * IPACT_REFRELE on ipa_next — done iteratively to avoid
	 * unbounded recursion on long chains.
	 */
	for (;;) {
		ipsec_action_t *np = ap->ipa_next;
		ASSERT(ap->ipa_refs == 0);
		ASSERT(ap->ipa_hash.hash_pp == NULL);
		kmem_cache_free(ipsec_action_cache, ap);
		ap = np;
		/* Inlined IPACT_REFRELE -- avoid recursion */
		if (ap == NULL)
			break;
		membar_exit();
		if (atomic_add_32_nv(&(ap)->ipa_refs, -1) != 0)
			break;
		/* End inlined IPACT_REFRELE */
	}
}

/*
 * Periodically sweep action hash table for actions with refcount==1, and
 * nuke them.  We cannot do this "on demand" (i.e., from IPACT_REFRELE)
 * because we can't close the race between another thread finding the action
 * in the hash table without holding the bucket lock during IPACT_REFRELE.
 * Instead, we run this function sporadically to clean up after ourselves;
 * we also set it as the "reclaim" function for the action kmem_cache.
 *
 * Note that it may take several passes of ipsec_action_gc() to free all
 * "stale" actions.
 */
/* ARGSUSED */
static void
ipsec_action_reclaim(void *dummy)
{
	int i;

	for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++) {
		ipsec_action_t *ap, *np;

		/* skip the lock if nobody home */
		if (ipsec_action_hash[i].hash_head == NULL)
			continue;

		HASH_LOCK(ipsec_action_hash, i);
		for (ap = ipsec_action_hash[i].hash_head;
		    ap != NULL; ap = np) {
			ASSERT(ap->ipa_refs > 0);
			np = ap->ipa_hash.hash_next;
			if (ap->ipa_refs > 1)
				continue;
			/* only the table holds it; drop its reference */
			HASH_UNCHAIN(ap, ipa_hash, ipsec_action_hash, i);
			IPACT_REFRELE(ap);
		}
		HASH_UNLOCK(ipsec_action_hash, i);
	}
}

/*
 * Intern a selector set into the selector set hash table.
 * This is simpler than the actions case..
 */
ipsec_sel_t *
ipsec_find_sel(const ipsec_selkey_t *selkey)
{
	ipsec_sel_t *sp;
	uint32_t hval;

	/*
	 * Exactly one AF bit should be set in selkey.
	 */
	ASSERT(!(selkey->ipsl_valid & IPSL_IPV4) ^
	    !(selkey->ipsl_valid & IPSL_IPV6));

	/*
	 * TODO: should canonicalize selkey (i.e., zeroize any padding)
	 * so we can use a non-trivial policy_hash function.
	 */
	hval = policy_hash(IPSEC_SEL_HASH_SIZE, selkey, selkey+1);

	ASSERT(!HASH_LOCKED(ipsec_sel_hash, hval));
	HASH_LOCK(ipsec_sel_hash, hval);

	for (HASH_ITERATE(sp, ipsl_hash, ipsec_sel_hash, hval)) {
		if (bcmp(&sp->ipsl_key, selkey, sizeof (*selkey)) == 0)
			break;
	}
	if (sp != NULL) {
		/* already interned; just take a reference */
		sp->ipsl_refs++;

		HASH_UNLOCK(ipsec_sel_hash, hval);
		return (sp);
	}
	sp = kmem_cache_alloc(ipsec_sel_cache, KM_NOSLEEP);
	if (sp == NULL) {
		HASH_UNLOCK(ipsec_sel_hash, hval);
		return (NULL);
	}

	HASH_INSERT(sp, ipsl_hash, ipsec_sel_hash, hval);
	sp->ipsl_refs = 2;	/* one for hash table, one for caller */
	sp->ipsl_key = *selkey;
	/* remember the bucket so ipsec_sel_rel() can relock it */
	sp->ipsl_key.ipsl_hval = (uint8_t)hval;

	HASH_UNLOCK(ipsec_sel_hash, hval);

	return (sp);
}

/*
 * Release one reference to an interned selector (*spp), NULLing the
 * caller's pointer.  When only the hash table's reference would remain,
 * unchain and free the selector entirely.
 */
static void
ipsec_sel_rel(ipsec_sel_t **spp)
{
	ipsec_sel_t *sp = *spp;
	int hval = sp->ipsl_key.ipsl_hval;
	*spp = NULL;

	ASSERT(!HASH_LOCKED(ipsec_sel_hash, hval));
	HASH_LOCK(ipsec_sel_hash, hval);
	if (--sp->ipsl_refs == 1) {
		/* last external ref gone; drop the table's ref too */
		HASH_UNCHAIN(sp, ipsl_hash, ipsec_sel_hash, hval);
		sp->ipsl_refs--;
		HASH_UNLOCK(ipsec_sel_hash, hval);
		ASSERT(sp->ipsl_refs == 0);
		kmem_cache_free(ipsec_sel_cache, sp);
		/* Caller unlocks */
		return;
	}

	HASH_UNLOCK(ipsec_sel_hash, hval);
}

/*
 * Free a policy rule which we know is no longer being referenced.
2466 */ 2467 void 2468 ipsec_policy_free(ipsec_policy_t *ipp) 2469 { 2470 ASSERT(ipp->ipsp_refs == 0); 2471 ASSERT(ipp->ipsp_sel != NULL); 2472 ASSERT(ipp->ipsp_act != NULL); 2473 ipsec_sel_rel(&ipp->ipsp_sel); 2474 IPACT_REFRELE(ipp->ipsp_act); 2475 kmem_cache_free(ipsec_pol_cache, ipp); 2476 } 2477 2478 /* 2479 * Construction of new policy rules; construct a policy, and add it to 2480 * the appropriate tables. 2481 */ 2482 ipsec_policy_t * 2483 ipsec_policy_create(const ipsec_selkey_t *keys, 2484 const ipsec_act_t *a, int nacts, int prio) 2485 { 2486 ipsec_action_t *ap; 2487 ipsec_sel_t *sp; 2488 ipsec_policy_t *ipp; 2489 2490 ipp = kmem_cache_alloc(ipsec_pol_cache, KM_NOSLEEP); 2491 ap = ipsec_act_find(a, nacts); 2492 sp = ipsec_find_sel(keys); 2493 2494 if ((ap == NULL) || (sp == NULL) || (ipp == NULL)) { 2495 if (ap != NULL) { 2496 IPACT_REFRELE(ap); 2497 } 2498 if (sp != NULL) 2499 ipsec_sel_rel(&sp); 2500 if (ipp != NULL) 2501 kmem_cache_free(ipsec_pol_cache, ipp); 2502 return (NULL); 2503 } 2504 2505 ipp->ipsp_links.itl_next = NULL; 2506 2507 ipp->ipsp_refs = 1; /* caller's reference */ 2508 ipp->ipsp_sel = sp; 2509 ipp->ipsp_act = ap; 2510 ipp->ipsp_prio = prio; /* rule priority */ 2511 ipp->ipsp_index = ipsec_next_policy_index++; 2512 2513 return (ipp); 2514 } 2515 2516 static void 2517 ipsec_update_present_flags() 2518 { 2519 ipsec_outbound_v4_policy_present = (NULL != 2520 system_policy.iph_root[IPSEC_TYPE_OUTBOUND].ipr[IPSEC_AF_V4]); 2521 ipsec_outbound_v6_policy_present = (NULL != 2522 system_policy.iph_root[IPSEC_TYPE_OUTBOUND].ipr[IPSEC_AF_V6]); 2523 ipsec_inbound_v4_policy_present = (NULL != 2524 system_policy.iph_root[IPSEC_TYPE_INBOUND].ipr[IPSEC_AF_V4]); 2525 ipsec_inbound_v6_policy_present = (NULL != 2526 system_policy.iph_root[IPSEC_TYPE_INBOUND].ipr[IPSEC_AF_V6]); 2527 } 2528 2529 boolean_t 2530 ipsec_policy_delete(ipsec_policy_head_t *php, 2531 const ipsec_selkey_t *keys, int dir) 2532 { 2533 ipsec_sel_t *sp; 2534 ipsec_policy_t **pptr; 2535 
ipsec_policy_t *ip; 2536 int af; 2537 2538 sp = ipsec_find_sel(keys); 2539 2540 if (sp == NULL) 2541 return (B_FALSE); 2542 2543 af = (sp->ipsl_key.ipsl_valid & IPSL_IPV4) ? IPSEC_AF_V4 : IPSEC_AF_V6; 2544 2545 rw_enter(&php->iph_lock, RW_WRITER); 2546 2547 for (pptr = &php->iph_root[dir].ipr[af], ip = *pptr; 2548 ip != NULL; ip = *pptr) { 2549 if (ip->ipsp_sel != sp) { 2550 pptr = &ip->ipsp_links.itl_next; 2551 continue; 2552 } 2553 *pptr = ip->ipsp_links.itl_next; 2554 rw_exit(&php->iph_lock); 2555 IPPOL_REFRELE(ip); 2556 ipsec_sel_rel(&sp); 2557 php->iph_gen++; 2558 return (B_TRUE); 2559 } 2560 ipsec_update_present_flags(); 2561 rw_exit(&php->iph_lock); 2562 ipsec_sel_rel(&sp); 2563 return (B_FALSE); 2564 } 2565 2566 int 2567 ipsec_policy_delete_index(ipsec_policy_head_t *php, uint64_t policy_index) 2568 { 2569 int dir; 2570 boolean_t found; 2571 2572 found = B_FALSE; 2573 2574 rw_enter(&php->iph_lock, RW_WRITER); 2575 for (dir = 0; dir < IPSEC_NTYPES; dir++) { 2576 ipsec_policy_t **pptr; 2577 ipsec_policy_t *ip; 2578 int af; 2579 2580 for (af = 0; af < IPSEC_NAF; af++) { 2581 for (pptr = &php->iph_root[dir].ipr[af], ip = *pptr; 2582 ip != NULL; ip = *pptr) { 2583 if (ip->ipsp_index != policy_index) { 2584 pptr = &ip->ipsp_links.itl_next; 2585 continue; 2586 } 2587 *pptr = ip->ipsp_links.itl_next; 2588 php->iph_gen++; 2589 IPPOL_REFRELE(ip); 2590 found = B_TRUE; 2591 } 2592 } 2593 } 2594 ipsec_update_present_flags(); 2595 rw_exit(&php->iph_lock); 2596 return (found ? 0 : ENOENT); 2597 } 2598 2599 /* 2600 * Given a constructed ipsec_policy_t policy rule, see if it can be entered 2601 * into the correct policy ruleset. 2602 * 2603 * Returns B_TRUE if it can be entered, B_FALSE if it can't be (because a 2604 * duplicate policy exists with exactly the same selectors), or an icmp 2605 * rule exists with a different encryption/authentication action. 
 */
boolean_t
ipsec_check_policy(ipsec_policy_head_t *php, ipsec_policy_t *ipp, int direction)
{
	ipsec_policy_root_t *pr = &php->iph_root[direction];
	int af = -1;
	ipsec_policy_t *p2;
	uint8_t check_proto;

	ASSERT(RW_WRITE_HELD(&php->iph_lock));

	/* Determine AF and the matching ICMP protocol number. */
	if (ipp->ipsp_sel->ipsl_key.ipsl_valid & IPSL_IPV6) {
		ASSERT(!(ipp->ipsp_sel->ipsl_key.ipsl_valid & IPSL_IPV4));
		af = IPSEC_AF_V6;
		check_proto = IPPROTO_ICMPV6;
	} else {
		ASSERT(ipp->ipsp_sel->ipsl_key.ipsl_valid & IPSL_IPV4);
		af = IPSEC_AF_V4;
		check_proto = IPPROTO_ICMP;
	}

	/*
	 * Double-check that we don't have any duplicate selectors here.
	 * Because selectors are interned below, we need only compare pointers
	 * for equality.
	 */

	for (p2 = pr->ipr[af]; p2 != NULL; p2 = p2->ipsp_links.itl_next) {
		if (p2->ipsp_sel == ipp->ipsp_sel)
			return (B_FALSE);
	}

	/*
	 * If it's ICMP and not a drop or pass rule, run through the ICMP
	 * rules and make sure the action is either new or the same as any
	 * other actions.  We don't have to check the full chain because
	 * discard and bypass will override all other actions
	 */

	if (ipp->ipsp_sel->ipsl_key.ipsl_valid & IPSL_PROTOCOL &&
	    ipp->ipsp_sel->ipsl_key.ipsl_proto == check_proto &&
	    (ipp->ipsp_act->ipa_act.ipa_type == IPSEC_ACT_APPLY)) {
		for (p2 = pr->ipr[af]; p2 != NULL;
		    p2 = p2->ipsp_links.itl_next) {
			if (p2->ipsp_sel->ipsl_key.ipsl_valid & IPSL_PROTOCOL &&
			    p2->ipsp_sel->ipsl_key.ipsl_proto == check_proto &&
			    (p2->ipsp_act->ipa_act.ipa_type ==
			    IPSEC_ACT_APPLY)) {
				/* first APPLY ICMP rule decides */
				return (ipsec_compare_action(p2, ipp));
			}
		}
	}

	return (B_TRUE);
}

/*
 * compare the action chains of two policies for equality
 * B_TRUE -> effective equality
 */

static boolean_t
ipsec_compare_action(ipsec_policy_t *p1, ipsec_policy_t *p2)
{

	ipsec_action_t *act1, *act2;

	/* We have a valid rule. Let's compare the actions */
	if (p1->ipsp_act == p2->ipsp_act) {
		/* same action. We are good */
		return (B_TRUE);
	}

	/* we have to walk the chain */

	act1 = p1->ipsp_act;
	act2 = p2->ipsp_act;

	/*
	 * Walk both chains in lockstep; any field mismatch means the
	 * actions are not effectively equal.
	 */
	while (act1 != NULL && act2 != NULL) {

		/* otherwise, Are we close enough? */
		if (act1->ipa_allow_clear != act2->ipa_allow_clear ||
		    act1->ipa_want_ah != act2->ipa_want_ah ||
		    act1->ipa_want_esp != act2->ipa_want_esp ||
		    act1->ipa_want_se != act2->ipa_want_se) {
			/* Nope, we aren't */
			return (B_FALSE);
		}

		if (act1->ipa_want_ah) {
			/* AH: algorithm and key-size range must agree */
			if (act1->ipa_act.ipa_apply.ipp_auth_alg !=
			    act2->ipa_act.ipa_apply.ipp_auth_alg) {
				return (B_FALSE);
			}

			if (act1->ipa_act.ipa_apply.ipp_ah_minbits !=
			    act2->ipa_act.ipa_apply.ipp_ah_minbits ||
			    act1->ipa_act.ipa_apply.ipp_ah_maxbits !=
			    act2->ipa_act.ipa_apply.ipp_ah_maxbits) {
				return (B_FALSE);
			}
		}

		if (act1->ipa_want_esp) {
			if (act1->ipa_act.ipa_apply.ipp_use_esp !=
			    act2->ipa_act.ipa_apply.ipp_use_esp ||
			    act1->ipa_act.ipa_apply.ipp_use_espa !=
			    act2->ipa_act.ipa_apply.ipp_use_espa) {
				return (B_FALSE);
			}

			if (act1->ipa_act.ipa_apply.ipp_use_esp) {
				/* ESP encryption alg + key-size range */
				if (act1->ipa_act.ipa_apply.ipp_encr_alg !=
				    act2->ipa_act.ipa_apply.ipp_encr_alg) {
					return (B_FALSE);
				}

				if (act1->ipa_act.ipa_apply.ipp_espe_minbits !=
				    act2->ipa_act.ipa_apply.ipp_espe_minbits ||
				    act1->ipa_act.ipa_apply.ipp_espe_maxbits !=
				    act2->ipa_act.ipa_apply.ipp_espe_maxbits) {
					return (B_FALSE);
				}
			}

			if (act1->ipa_act.ipa_apply.ipp_use_espa) {
				/* ESP authentication alg + key-size range */
				if (act1->ipa_act.ipa_apply.ipp_esp_auth_alg !=
				    act2->ipa_act.ipa_apply.ipp_esp_auth_alg) {
					return (B_FALSE);
				}

				if (act1->ipa_act.ipa_apply.ipp_espa_minbits !=
				    act2->ipa_act.ipa_apply.ipp_espa_minbits ||
				    act1->ipa_act.ipa_apply.ipp_espa_maxbits !=
				    act2->ipa_act.ipa_apply.ipp_espa_maxbits) {
					return (B_FALSE);
				}
			}

		}

		act1 = act1->ipa_next;
		act2 = act2->ipa_next;
	}

	/* unequal chain lengths are not equal */
	if (act1 != NULL || act2 != NULL) {
		return (B_FALSE);
	}

	return (B_TRUE);
}


/*
 * Given a constructed ipsec_policy_t
policy rule, enter it into
 * the correct policy ruleset.
 *
 * ipsec_check_policy() is assumed to have succeeded first (to check for
 * duplicates).
 */
void
ipsec_enter_policy(ipsec_policy_head_t *php, ipsec_policy_t *ipp, int direction)
{
	ipsec_policy_root_t *pr = &php->iph_root[direction];
	int af = -1;

	ASSERT(RW_WRITE_HELD(&php->iph_lock));

	if (ipp->ipsp_sel->ipsl_key.ipsl_valid & IPSL_IPV6) {
		ASSERT(!(ipp->ipsp_sel->ipsl_key.ipsl_valid & IPSL_IPV4));
		af = IPSEC_AF_V6;
	} else {
		ASSERT(ipp->ipsp_sel->ipsl_key.ipsl_valid & IPSL_IPV4);
		af = IPSEC_AF_V4;
	}

	php->iph_gen++;

	/* push onto the head of the per-AF rule list */
	ipp->ipsp_links.itl_next = pr->ipr[af];
	pr->ipr[af] = ipp;
	ipsec_update_present_flags();
}

/*
 * Release every rule in php (all directions, both AFs) and empty the
 * roots.  Caller must hold the head's write lock.
 */
void
ipsec_polhead_flush(ipsec_policy_head_t *php)
{
	ipsec_policy_t *ip, *nip;
	int dir, af;

	ASSERT(RW_WRITE_HELD(&php->iph_lock));

	for (dir = 0; dir < IPSEC_NTYPES; dir++) {
		for (af = 0; af < IPSEC_NAF; af++) {
			for (ip = php->iph_root[dir].ipr[af];
			    ip != NULL; ip = nip) {
				/* grab next before the rule may be freed */
				nip = ip->ipsp_links.itl_next;
				IPPOL_REFRELE(ip);
			}
			php->iph_root[dir].ipr[af] = NULL;
		}
	}
	ipsec_update_present_flags();
}

/*
 * Destroy a policy head whose refcount has dropped to zero.
 */
void
ipsec_polhead_free(ipsec_policy_head_t *php)
{
	ASSERT(php->iph_refs == 0);
	rw_enter(&php->iph_lock, RW_WRITER);
	ipsec_polhead_flush(php);
	rw_exit(&php->iph_lock);
	rw_destroy(&php->iph_lock);
	kmem_free(php, sizeof (*php));
}

/*
 * Allocate and initialize an empty policy head with one reference.
 * Returns NULL on allocation failure.
 */
extern ipsec_policy_head_t *
ipsec_polhead_create(void)
{
	ipsec_policy_head_t *php;
	int af;

	php = kmem_alloc(sizeof (*php), KM_NOSLEEP);
	if (php != NULL) {
		rw_init(&php->iph_lock, NULL, RW_DEFAULT, NULL);
		for (af = 0; af < IPSEC_NAF; af++) {
			php->iph_root[IPSEC_TYPE_INBOUND].ipr[af] = NULL;
			php->iph_root[IPSEC_TYPE_OUTBOUND].ipr[af] = NULL;
		}
		php->iph_refs = 1;
	}
	return (php);
}

/*
 * Clone the policy head into a new polhead; release one reference to the
 * old one and return the only reference to the new one.
 * If the old one had a refcount of 1, just return it.
 */

extern ipsec_policy_head_t *
ipsec_polhead_split(ipsec_policy_head_t *php)
{
	ipsec_policy_head_t *nphp;

	if (php == NULL)
		return (ipsec_polhead_create());
	else if (php->iph_refs == 1)
		return (php);	/* sole owner; no copy needed */

	nphp = ipsec_polhead_create();
	if (nphp == NULL)
		return (NULL);

	if (ipsec_copy_polhead(php, nphp) != 0) {
		ipsec_polhead_free(nphp);
		return (NULL);
	}
	IPPH_REFRELE(php);
	return (nphp);
}

/*
 * When sending a response to a ICMP request or generating a RST
 * in the TCP case, the outbound packets need to go at the same level
 * of protection as the incoming ones i.e we associate our outbound
 * policy with how the packet came in. We call this after we have
 * accepted the incoming packet which may or may not have been in
 * clear and hence we are sending the reply back with the policy
 * matching the incoming datagram's policy.
 *
 * NOTE : This technology serves two purposes :
 *
 * 1) If we have multiple outbound policies, we send out a reply
 *    matching with how it came in rather than matching the outbound
 *    policy.
 *
 * 2) For assymetric policies, we want to make sure that incoming
 *    and outgoing has the same level of protection. Assymetric
 *    policies exist only with global policy where we may not have
 *    both outbound and inbound at the same time.
 *
 * NOTE2: This function is called by cleartext cases, so it needs to be
 * in IP proper.
2889 */ 2890 boolean_t 2891 ipsec_in_to_out(mblk_t *ipsec_mp, ipha_t *ipha, ip6_t *ip6h) 2892 { 2893 ipsec_in_t *ii; 2894 ipsec_out_t *io; 2895 boolean_t v4; 2896 mblk_t *mp; 2897 boolean_t secure, attach_if; 2898 uint_t ifindex; 2899 ipsec_selector_t sel; 2900 ipsec_action_t *reflect_action = NULL; 2901 zoneid_t zoneid; 2902 2903 ASSERT(ipsec_mp->b_datap->db_type == M_CTL); 2904 2905 bzero((void*)&sel, sizeof (sel)); 2906 2907 ii = (ipsec_in_t *)ipsec_mp->b_rptr; 2908 2909 mp = ipsec_mp->b_cont; 2910 ASSERT(mp != NULL); 2911 2912 if (ii->ipsec_in_action != NULL) { 2913 /* transfer reference.. */ 2914 reflect_action = ii->ipsec_in_action; 2915 ii->ipsec_in_action = NULL; 2916 } else if (!ii->ipsec_in_loopback) 2917 reflect_action = ipsec_in_to_out_action(ii); 2918 secure = ii->ipsec_in_secure; 2919 attach_if = ii->ipsec_in_attach_if; 2920 ifindex = ii->ipsec_in_ill_index; 2921 zoneid = ii->ipsec_in_zoneid; 2922 v4 = ii->ipsec_in_v4; 2923 2924 ipsec_in_release_refs(ii); 2925 2926 /* 2927 * The caller is going to send the datagram out which might 2928 * go on the wire or delivered locally through ip_wput_local. 2929 * 2930 * 1) If it goes out on the wire, new associations will be 2931 * obtained. 2932 * 2) If it is delivered locally, ip_wput_local will convert 2933 * this IPSEC_OUT to a IPSEC_IN looking at the requests. 
2934 */ 2935 2936 io = (ipsec_out_t *)ipsec_mp->b_rptr; 2937 bzero(io, sizeof (ipsec_out_t)); 2938 io->ipsec_out_type = IPSEC_OUT; 2939 io->ipsec_out_len = sizeof (ipsec_out_t); 2940 io->ipsec_out_frtn.free_func = ipsec_out_free; 2941 io->ipsec_out_frtn.free_arg = (char *)io; 2942 io->ipsec_out_act = reflect_action; 2943 2944 if (!ipsec_init_outbound_ports(&sel, mp, ipha, ip6h)) 2945 return (B_FALSE); 2946 2947 io->ipsec_out_src_port = sel.ips_local_port; 2948 io->ipsec_out_dst_port = sel.ips_remote_port; 2949 io->ipsec_out_proto = sel.ips_protocol; 2950 io->ipsec_out_icmp_type = sel.ips_icmp_type; 2951 io->ipsec_out_icmp_code = sel.ips_icmp_code; 2952 2953 /* 2954 * Don't use global policy for this, as we want 2955 * to use the same protection that was applied to the inbound packet. 2956 */ 2957 io->ipsec_out_use_global_policy = B_FALSE; 2958 io->ipsec_out_proc_begin = B_FALSE; 2959 io->ipsec_out_secure = secure; 2960 io->ipsec_out_v4 = v4; 2961 io->ipsec_out_attach_if = attach_if; 2962 io->ipsec_out_ill_index = ifindex; 2963 io->ipsec_out_zoneid = zoneid; 2964 return (B_TRUE); 2965 } 2966 2967 mblk_t * 2968 ipsec_in_tag(mblk_t *mp, mblk_t *cont) 2969 { 2970 ipsec_in_t *ii = (ipsec_in_t *)mp->b_rptr; 2971 ipsec_in_t *nii; 2972 mblk_t *nmp; 2973 frtn_t nfrtn; 2974 2975 ASSERT(ii->ipsec_in_type == IPSEC_IN); 2976 ASSERT(ii->ipsec_in_len == sizeof (ipsec_in_t)); 2977 2978 nmp = ipsec_in_alloc(ii->ipsec_in_v4); 2979 2980 ASSERT(nmp->b_datap->db_type == M_CTL); 2981 ASSERT(nmp->b_wptr == (nmp->b_rptr + sizeof (ipsec_info_t))); 2982 2983 /* 2984 * Bump refcounts. 2985 */ 2986 if (ii->ipsec_in_ah_sa != NULL) 2987 IPSA_REFHOLD(ii->ipsec_in_ah_sa); 2988 if (ii->ipsec_in_esp_sa != NULL) 2989 IPSA_REFHOLD(ii->ipsec_in_esp_sa); 2990 if (ii->ipsec_in_policy != NULL) 2991 IPPH_REFHOLD(ii->ipsec_in_policy); 2992 2993 /* 2994 * Copy everything, but preserve the free routine provided by 2995 * ipsec_in_alloc(). 
2996 */ 2997 nii = (ipsec_in_t *)nmp->b_rptr; 2998 nfrtn = nii->ipsec_in_frtn; 2999 bcopy(ii, nii, sizeof (*ii)); 3000 nii->ipsec_in_frtn = nfrtn; 3001 3002 nmp->b_cont = cont; 3003 3004 return (nmp); 3005 } 3006 3007 mblk_t * 3008 ipsec_out_tag(mblk_t *mp, mblk_t *cont) 3009 { 3010 ipsec_out_t *io = (ipsec_out_t *)mp->b_rptr; 3011 ipsec_out_t *nio; 3012 mblk_t *nmp; 3013 frtn_t nfrtn; 3014 3015 ASSERT(io->ipsec_out_type == IPSEC_OUT); 3016 ASSERT(io->ipsec_out_len == sizeof (ipsec_out_t)); 3017 3018 nmp = ipsec_alloc_ipsec_out(); 3019 if (nmp == NULL) { 3020 freemsg(cont); /* XXX ip_drop_packet() ? */ 3021 return (NULL); 3022 } 3023 ASSERT(nmp->b_datap->db_type == M_CTL); 3024 ASSERT(nmp->b_wptr == (nmp->b_rptr + sizeof (ipsec_info_t))); 3025 3026 /* 3027 * Bump refcounts. 3028 */ 3029 if (io->ipsec_out_ah_sa != NULL) 3030 IPSA_REFHOLD(io->ipsec_out_ah_sa); 3031 if (io->ipsec_out_esp_sa != NULL) 3032 IPSA_REFHOLD(io->ipsec_out_esp_sa); 3033 if (io->ipsec_out_polhead != NULL) 3034 IPPH_REFHOLD(io->ipsec_out_polhead); 3035 if (io->ipsec_out_policy != NULL) 3036 IPPOL_REFHOLD(io->ipsec_out_policy); 3037 if (io->ipsec_out_act != NULL) 3038 IPACT_REFHOLD(io->ipsec_out_act); 3039 if (io->ipsec_out_latch != NULL) 3040 IPLATCH_REFHOLD(io->ipsec_out_latch); 3041 if (io->ipsec_out_cred != NULL) 3042 crhold(io->ipsec_out_cred); 3043 3044 /* 3045 * Copy everything, but preserve the free routine provided by 3046 * ipsec_alloc_ipsec_out(). 
3047 */ 3048 nio = (ipsec_out_t *)nmp->b_rptr; 3049 nfrtn = nio->ipsec_out_frtn; 3050 bcopy(io, nio, sizeof (*io)); 3051 nio->ipsec_out_frtn = nfrtn; 3052 3053 nmp->b_cont = cont; 3054 3055 return (nmp); 3056 } 3057 3058 static void 3059 ipsec_out_release_refs(ipsec_out_t *io) 3060 { 3061 ASSERT(io->ipsec_out_type == IPSEC_OUT); 3062 ASSERT(io->ipsec_out_len == sizeof (ipsec_out_t)); 3063 3064 /* Note: IPSA_REFRELE is multi-line macro */ 3065 if (io->ipsec_out_ah_sa != NULL) 3066 IPSA_REFRELE(io->ipsec_out_ah_sa); 3067 if (io->ipsec_out_esp_sa != NULL) 3068 IPSA_REFRELE(io->ipsec_out_esp_sa); 3069 if (io->ipsec_out_polhead != NULL) 3070 IPPH_REFRELE(io->ipsec_out_polhead); 3071 if (io->ipsec_out_policy != NULL) 3072 IPPOL_REFRELE(io->ipsec_out_policy); 3073 if (io->ipsec_out_act != NULL) 3074 IPACT_REFRELE(io->ipsec_out_act); 3075 if (io->ipsec_out_cred != NULL) { 3076 crfree(io->ipsec_out_cred); 3077 io->ipsec_out_cred = NULL; 3078 } 3079 if (io->ipsec_out_latch) { 3080 IPLATCH_REFRELE(io->ipsec_out_latch); 3081 io->ipsec_out_latch = NULL; 3082 } 3083 } 3084 3085 static void 3086 ipsec_out_free(void *arg) 3087 { 3088 ipsec_out_t *io = (ipsec_out_t *)arg; 3089 ipsec_out_release_refs(io); 3090 kmem_cache_free(ipsec_info_cache, arg); 3091 } 3092 3093 static void 3094 ipsec_in_release_refs(ipsec_in_t *ii) 3095 { 3096 /* Note: IPSA_REFRELE is multi-line macro */ 3097 if (ii->ipsec_in_ah_sa != NULL) 3098 IPSA_REFRELE(ii->ipsec_in_ah_sa); 3099 if (ii->ipsec_in_esp_sa != NULL) 3100 IPSA_REFRELE(ii->ipsec_in_esp_sa); 3101 if (ii->ipsec_in_policy != NULL) 3102 IPPH_REFRELE(ii->ipsec_in_policy); 3103 if (ii->ipsec_in_da != NULL) { 3104 freeb(ii->ipsec_in_da); 3105 ii->ipsec_in_da = NULL; 3106 } 3107 } 3108 3109 static void 3110 ipsec_in_free(void *arg) 3111 { 3112 ipsec_in_t *ii = (ipsec_in_t *)arg; 3113 ipsec_in_release_refs(ii); 3114 kmem_cache_free(ipsec_info_cache, arg); 3115 } 3116 3117 /* 3118 * This is called only for outbound datagrams if the datagram needs to 3119 * 
go out secure. A NULL mp can be passed to get an ipsec_out. This
 * facility is used by ip_unbind.
 *
 * NOTE : o As the data part could be modified by ipsec_out_process etc.
 *	we can't make it fast by calling a dup.
 */
mblk_t *
ipsec_alloc_ipsec_out()
{
	mblk_t *ipsec_mp;

	/* KM_NOSLEEP: may fail under memory pressure */
	ipsec_out_t *io = kmem_cache_alloc(ipsec_info_cache, KM_NOSLEEP);

	if (io == NULL)
		return (NULL);

	bzero(io, sizeof (ipsec_out_t));

	io->ipsec_out_type = IPSEC_OUT;
	io->ipsec_out_len = sizeof (ipsec_out_t);
	io->ipsec_out_frtn.free_func = ipsec_out_free;
	io->ipsec_out_frtn.free_arg = (char *)io;

	/*
	 * Set the zoneid to ALL_ZONES which is used as an invalid value. Code
	 * using ipsec_out_zoneid should assert that the zoneid has been set to
	 * a sane value.
	 */
	io->ipsec_out_zoneid = ALL_ZONES;

	/* Wrap io in an mblk; ipsec_out_free() runs when it is freed. */
	ipsec_mp = desballoc((uint8_t *)io, sizeof (ipsec_info_t), BPRI_HI,
	    &io->ipsec_out_frtn);
	if (ipsec_mp == NULL) {
		ipsec_out_free(io);

		return (NULL);
	}
	ipsec_mp->b_datap->db_type = M_CTL;
	ipsec_mp->b_wptr = ipsec_mp->b_rptr + sizeof (ipsec_info_t);

	return (ipsec_mp);
}

/*
 * Attach an IPSEC_OUT; use pol for policy if it is non-null.
 * Otherwise initialize using conn.
 *
 * If pol is non-null, we consume a reference to it.
 */
mblk_t *
ipsec_attach_ipsec_out(mblk_t *mp, conn_t *connp, ipsec_policy_t *pol,
    uint8_t proto)
{
	mblk_t *ipsec_mp;

	ASSERT((pol != NULL) || (connp != NULL));

	ipsec_mp = ipsec_alloc_ipsec_out();
	if (ipsec_mp == NULL) {
		/* allocation failure: log, count, and drop the packet */
		(void) mi_strlog(CONNP_TO_WQ(connp), 0, SL_ERROR|SL_NOTE,
		    "ipsec_attach_ipsec_out: Allocation failure\n");
		BUMP_MIB(&ip_mib, ipOutDiscards);
		ip_drop_packet(mp, B_FALSE, NULL, NULL, &ipdrops_spd_nomem,
		    &spd_dropper);
		return (NULL);
	}
	ipsec_mp->b_cont = mp;
	return (ipsec_init_ipsec_out(ipsec_mp, connp, pol, proto));
}

/*
 * Initialize the IPSEC_OUT (ipsec_mp) using pol if it is non-null.
 * Otherwise initialize using conn.
 *
 * If pol is non-null, we consume a reference to it.
 */
mblk_t *
ipsec_init_ipsec_out(mblk_t *ipsec_mp, conn_t *connp, ipsec_policy_t *pol,
    uint8_t proto)
{
	mblk_t *mp;
	ipsec_out_t *io;
	ipsec_policy_t *p;
	ipha_t *ipha;
	ip6_t *ip6h;

	ASSERT((pol != NULL) || (connp != NULL));

	/*
	 * If mp is NULL, we won't/should not be using it.
	 */
	mp = ipsec_mp->b_cont;

	ASSERT(ipsec_mp->b_datap->db_type == M_CTL);
	ASSERT(ipsec_mp->b_wptr == (ipsec_mp->b_rptr + sizeof (ipsec_info_t)));
	io = (ipsec_out_t *)ipsec_mp->b_rptr;
	ASSERT(io->ipsec_out_type == IPSEC_OUT);
	ASSERT(io->ipsec_out_len == sizeof (ipsec_out_t));
	io->ipsec_out_latch = NULL;
	/*
	 * Set the zoneid when we have the connp.
	 * Otherwise, we're called from ip_wput_attach_policy() who will take
	 * care of setting the zoneid.
	 */
	if (connp != NULL)
		io->ipsec_out_zoneid = connp->conn_zoneid;

	if (mp != NULL) {
		/* Determine IP version from the packet itself. */
		ipha = (ipha_t *)mp->b_rptr;
		if (IPH_HDR_VERSION(ipha) == IP_VERSION) {
			io->ipsec_out_v4 = B_TRUE;
			ip6h = NULL;
		} else {
			io->ipsec_out_v4 = B_FALSE;
			ip6h = (ip6_t *)ipha;
			ipha = NULL;
		}
	} else {
		/* No packet: fall back to the conn's cached version. */
		ASSERT(connp != NULL && connp->conn_policy_cached);
		ip6h = NULL;
		ipha = NULL;
		io->ipsec_out_v4 = !connp->conn_pkt_isv6;
	}

	p = NULL;

	/*
	 * Take latched policies over global policy. Check here again for
	 * this, in case we had conn_latch set while the packet was flying
	 * around in IP.
	 */
	if (connp != NULL && connp->conn_latch != NULL) {
		p = connp->conn_latch->ipl_out_policy;
		io->ipsec_out_latch = connp->conn_latch;
		IPLATCH_REFHOLD(connp->conn_latch);
		if (p != NULL) {
			IPPOL_REFHOLD(p);
		}
		io->ipsec_out_src_port = connp->conn_lport;
		io->ipsec_out_dst_port = connp->conn_fport;
		io->ipsec_out_icmp_type = io->ipsec_out_icmp_code = 0;
		/* latched policy wins; drop the passed-in reference */
		if (pol != NULL)
			IPPOL_REFRELE(pol);
	} else if (pol != NULL) {
		ipsec_selector_t sel;

		bzero((void*)&sel, sizeof (sel));

		p = pol;
		/*
		 * conn does not have the port information. Get
		 * it from the packet.
		 */

		if (!ipsec_init_outbound_ports(&sel, mp, ipha, ip6h)) {
			/* XXX any cleanup required here?? */
			return (NULL);
		}
		io->ipsec_out_src_port = sel.ips_local_port;
		io->ipsec_out_dst_port = sel.ips_remote_port;
		io->ipsec_out_icmp_type = sel.ips_icmp_type;
		io->ipsec_out_icmp_code = sel.ips_icmp_code;
	}

	io->ipsec_out_proto = proto;
	io->ipsec_out_use_global_policy = B_TRUE;
	io->ipsec_out_secure = (p != NULL);
	io->ipsec_out_policy = p;

	/*
	 * No latched or explicit policy: defer to the conn's policy head
	 * (global policy will be consulted later).
	 * NOTE(review): connp is dereferenced here without a NULL check;
	 * reachable only when p == NULL, which the latch/pol paths above
	 * appear to preclude for connp == NULL — confirm.
	 */
	if (p == NULL) {
		if (connp->conn_policy != NULL) {
			io->ipsec_out_secure = B_TRUE;
			ASSERT(io->ipsec_out_latch == NULL);
			ASSERT(io->ipsec_out_use_global_policy == B_TRUE);
			io->ipsec_out_need_policy = B_TRUE;
			ASSERT(io->ipsec_out_polhead == NULL);
			IPPH_REFHOLD(connp->conn_policy);
			io->ipsec_out_polhead = connp->conn_policy;
		}
	}
	return (ipsec_mp);
}

/*
 * Allocate an IPSEC_IN mblk. This will be prepended to an inbound datagram
 * and keep track of what-if-any IPsec processing will be applied to the
 * datagram.
 */
mblk_t *
ipsec_in_alloc(boolean_t isv4)
{
	mblk_t *ipsec_in;
	/* KM_NOSLEEP: may fail under memory pressure */
	ipsec_in_t *ii = kmem_cache_alloc(ipsec_info_cache, KM_NOSLEEP);

	if (ii == NULL)
		return (NULL);

	bzero(ii, sizeof (ipsec_info_t));
	ii->ipsec_in_type = IPSEC_IN;
	ii->ipsec_in_len = sizeof (ipsec_in_t);

	ii->ipsec_in_v4 = isv4;
	ii->ipsec_in_secure = B_TRUE;

	ii->ipsec_in_frtn.free_func = ipsec_in_free;
	ii->ipsec_in_frtn.free_arg = (char *)ii;

	/* Wrap ii in an mblk; ipsec_in_free() runs when it is freed. */
	ipsec_in = desballoc((uint8_t *)ii, sizeof (ipsec_info_t), BPRI_HI,
	    &ii->ipsec_in_frtn);
	if (ipsec_in == NULL) {
		ip1dbg(("ipsec_in_alloc: IPSEC_IN allocation failure.\n"));
		ipsec_in_free(ii);
		return (NULL);
	}

	ipsec_in->b_datap->db_type = M_CTL;
	ipsec_in->b_wptr += sizeof (ipsec_info_t);

	return (ipsec_in);
}

/*
 * This is called from ip_wput_local when a packet which needs
 * security is looped back, to
 * convert the IPSEC_OUT to a IPSEC_IN
 * before fanout, where the policy check happens.  In most of the
 * cases, IPSEC processing has *never* been done.  There is one case
 * (ip_wput_ire_fragmentit -> ip_wput_frag -> icmp_frag_needed) where
 * the packet is destined for localhost, IPSEC processing has already
 * been done.
 *
 * Future: This could happen after SA selection has occurred for
 * outbound.. which will tell us who the src and dst identities are..
 * Then it's just a matter of splicing the ah/esp SA pointers from the
 * ipsec_out_t to the ipsec_in_t.
 */
void
ipsec_out_to_in(mblk_t *ipsec_mp)
{
	ipsec_in_t *ii;
	ipsec_out_t *io;
	ipsec_policy_t *pol;
	ipsec_action_t *act;
	boolean_t v4, icmp_loopback;

	ASSERT(ipsec_mp->b_datap->db_type == M_CTL);

	io = (ipsec_out_t *)ipsec_mp->b_rptr;

	/* Save the fields we must carry over before the block is reused. */
	v4 = io->ipsec_out_v4;
	icmp_loopback = io->ipsec_out_icmp_loopback;

	/*
	 * Carry an action across: either the explicit one, or the one
	 * implied by the outbound policy.
	 */
	act = io->ipsec_out_act;
	if (act == NULL) {
		pol = io->ipsec_out_policy;
		if (pol != NULL) {
			act = pol->ipsp_act;
			IPACT_REFHOLD(act);
		}
	}
	/* Clear it so ipsec_out_release_refs() doesn't drop our hold. */
	io->ipsec_out_act = NULL;

	ipsec_out_release_refs(io);

	/* Reuse the same dblk as an IPSEC_IN. */
	ii = (ipsec_in_t *)ipsec_mp->b_rptr;
	bzero(ii, sizeof (ipsec_in_t));
	ii->ipsec_in_type = IPSEC_IN;
	ii->ipsec_in_len = sizeof (ipsec_in_t);
	ii->ipsec_in_loopback = B_TRUE;
	ii->ipsec_in_frtn.free_func = ipsec_in_free;
	ii->ipsec_in_frtn.free_arg = (char *)ii;
	ii->ipsec_in_action = act;

	/*
	 * In most of the cases, we can't look at the ipsec_out_XXX_sa
	 * because this never went through IPSEC processing. So, look at
	 * the requests and infer whether it would have gone through
	 * IPSEC processing or not. Initialize the "done" fields with
	 * the requests. The possible values for "done" fields are :
	 *
	 * 1) zero, indicates that a particular preference was never
	 *    requested.
	 * 2) non-zero, indicates that it could be IPSEC_PREF_REQUIRED/
	 *    IPSEC_PREF_NEVER. If IPSEC_REQ_DONE is set, it means that
	 *    IPSEC processing has been completed.
	 */
	ii->ipsec_in_secure = B_TRUE;
	ii->ipsec_in_v4 = v4;
	ii->ipsec_in_icmp_loopback = icmp_loopback;
	ii->ipsec_in_attach_if = B_FALSE;
}

/*
 * Consults global policy to see whether this datagram should
 * go out secure. If so it attaches a ipsec_mp in front and
 * returns.
 *
 * Returns NULL when the packet has been consumed (dropped or freed);
 * otherwise returns the (possibly re-headed) message to send.
 */
mblk_t *
ip_wput_attach_policy(mblk_t *ipsec_mp, ipha_t *ipha, ip6_t *ip6h, ire_t *ire,
    conn_t *connp, boolean_t unspec_src)
{
	mblk_t *mp;
	ipsec_out_t *io = NULL;
	ipsec_selector_t sel;
	uint_t ill_index;
	boolean_t conn_dontroutex;
	boolean_t conn_multicast_loopx;
	boolean_t policy_present;

	/* Exactly one of ipha/ip6h is supplied. */
	ASSERT((ipha != NULL && ip6h == NULL) ||
	    (ip6h != NULL && ipha == NULL));

	bzero((void*)&sel, sizeof (sel));

	if (ipha != NULL)
		policy_present = ipsec_outbound_v4_policy_present;
	else
		policy_present = ipsec_outbound_v6_policy_present;
	/*
	 * Fast Path to see if there is any policy.
	 */
	if (!policy_present) {
		if (ipsec_mp->b_datap->db_type == M_CTL) {
			io = (ipsec_out_t *)ipsec_mp->b_rptr;
			if (!io->ipsec_out_secure) {
				/*
				 * If there is no global policy and ip_wput
				 * or ip_wput_multicast has attached this mp
				 * for multicast case, free the ipsec_mp and
				 * return the original mp.
				 */
				mp = ipsec_mp->b_cont;
				freeb(ipsec_mp);
				ipsec_mp = mp;
				io = NULL;
			}
		}
		/* Nothing to enforce anywhere: send as-is. */
		if (((io == NULL) || (io->ipsec_out_polhead == NULL)) &&
		    ((connp == NULL) || (connp->conn_policy == NULL)))
			return (ipsec_mp);
	}

	ill_index = 0;
	conn_multicast_loopx = conn_dontroutex = B_FALSE;
	mp = ipsec_mp;
	if (ipsec_mp->b_datap->db_type == M_CTL) {
		mp = ipsec_mp->b_cont;
		/*
		 * This is a connection where we have some per-socket
		 * policy or ip_wput has attached an ipsec_mp for
		 * the multicast datagram.
		 */
		io = (ipsec_out_t *)ipsec_mp->b_rptr;
		if (!io->ipsec_out_secure) {
			/*
			 * This ipsec_mp was allocated in ip_wput or
			 * ip_wput_multicast so that we will know the
			 * value of ill_index, conn_dontroute,
			 * conn_multicast_loop in the multicast case if
			 * we inherit global policy here.
			 */
			ill_index = io->ipsec_out_ill_index;
			conn_dontroutex = io->ipsec_out_dontroute;
			conn_multicast_loopx = io->ipsec_out_multicast_loop;
			freeb(ipsec_mp);
			ipsec_mp = mp;
			io = NULL;
		}
	}

	/* Build the selector (addresses + upper-layer protocol). */
	if (ipha != NULL) {
		sel.ips_local_addr_v4 = (ipha->ipha_src != 0 ?
		    ipha->ipha_src : ire->ire_src_addr);
		sel.ips_remote_addr_v4 = ip_get_dst(ipha);
		sel.ips_protocol = (uint8_t)ipha->ipha_protocol;
		sel.ips_isv4 = B_TRUE;
	} else {
		ushort_t hdr_len;
		uint8_t *nexthdrp;
		boolean_t is_fragment;

		sel.ips_isv4 = B_FALSE;
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6h->ip6_src)) {
			if (!unspec_src)
				sel.ips_local_addr_v6 = ire->ire_src_addr_v6;
		} else {
			sel.ips_local_addr_v6 = ip6h->ip6_src;
		}

		sel.ips_remote_addr_v6 = ip_get_dst_v6(ip6h, &is_fragment);
		if (is_fragment) {
			/*
			 * It's a packet fragment for a packet that
			 * we have already processed (since IPsec processing
			 * is done before fragmentation), so we don't
			 * have to do policy checks again. Fragments can
			 * come back to us for processing if they have
			 * been queued up due to flow control.
			 */
			if (ipsec_mp->b_datap->db_type == M_CTL) {
				mp = ipsec_mp->b_cont;
				freeb(ipsec_mp);
				ipsec_mp = mp;
			}
			return (ipsec_mp);
		}

		/* IPv6 common-case. */
		sel.ips_protocol = ip6h->ip6_nxt;
		switch (ip6h->ip6_nxt) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMPV6:
			break;
		default:
			/* Walk extension headers to the real upper layer. */
			if (!ip_hdr_length_nexthdr_v6(mp, ip6h,
			    &hdr_len, &nexthdrp)) {
				BUMP_MIB(&ip6_mib, ipv6OutDiscards);
				freemsg(ipsec_mp); /* Not IPsec-related drop. */
				return (NULL);
			}
			sel.ips_protocol = *nexthdrp;
			break;
		}
	}

	if (!ipsec_init_outbound_ports(&sel, mp, ipha, ip6h)) {
		if (ipha != NULL) {
			BUMP_MIB(&ip_mib, ipOutDiscards);
		} else {
			BUMP_MIB(&ip6_mib, ipv6OutDiscards);
		}

		ip_drop_packet(ipsec_mp, B_FALSE, NULL, NULL,
		    &ipdrops_spd_nomem, &spd_dropper);
		return (NULL);
	}

	if (io != NULL) {
		/*
		 * We seem to have some local policy (we already have
		 * an ipsec_out).  Look at global policy and see
		 * whether we have to inherit or not.
		 */
		io->ipsec_out_need_policy = B_FALSE;
		ipsec_mp = ipsec_apply_global_policy(ipsec_mp, connp, &sel);
		ASSERT((io->ipsec_out_policy != NULL) ||
		    (io->ipsec_out_act != NULL));
		ASSERT(io->ipsec_out_need_policy == B_FALSE);
		return (ipsec_mp);
	}
	ipsec_mp = ipsec_attach_global_policy(mp, connp, &sel);
	if (ipsec_mp == NULL)
		return (mp);

	/*
	 * Copy the right port information.
	 */
	ASSERT(ipsec_mp->b_datap->db_type == M_CTL);
	io = (ipsec_out_t *)ipsec_mp->b_rptr;

	ASSERT(io->ipsec_out_need_policy == B_FALSE);
	ASSERT((io->ipsec_out_policy != NULL) ||
	    (io->ipsec_out_act != NULL));
	io->ipsec_out_src_port = sel.ips_local_port;
	io->ipsec_out_dst_port = sel.ips_remote_port;
	io->ipsec_out_icmp_type = sel.ips_icmp_type;
	io->ipsec_out_icmp_code = sel.ips_icmp_code;
	/*
	 * Set ill_index, conn_dontroute and conn_multicast_loop
	 * for multicast datagrams.
	 */
	io->ipsec_out_ill_index = ill_index;
	io->ipsec_out_dontroute = conn_dontroutex;
	io->ipsec_out_multicast_loop = conn_multicast_loopx;
	/*
	 * When conn is non-NULL, the zoneid is set by ipsec_init_ipsec_out().
	 * Otherwise set the zoneid based on the ire.
	 */
	if (connp == NULL)
		io->ipsec_out_zoneid = ire->ire_zoneid;
	return (ipsec_mp);
}

/*
 * When appropriate, this function caches inbound and outbound policy
 * for this connection.
 *
 * XXX need to work out more details about per-interface policy and
 * caching here!
 *
 * XXX may want to split inbound and outbound caching for ill..
 */
int
ipsec_conn_cache_policy(conn_t *connp, boolean_t isv4)
{
	boolean_t global_policy_present;

	/*
	 * There is no policy latching for ICMP sockets because we can't
	 * decide on which policy to use until we see the packet and get
	 * type/code selectors.
	 */
	if (connp->conn_ulp == IPPROTO_ICMP ||
	    connp->conn_ulp == IPPROTO_ICMPV6) {
		connp->conn_in_enforce_policy =
		    connp->conn_out_enforce_policy = B_TRUE;
		if (connp->conn_latch != NULL) {
			IPLATCH_REFRELE(connp->conn_latch);
			connp->conn_latch = NULL;
		}
		connp->conn_flags |= IPCL_CHECK_POLICY;
		return (0);
	}

	global_policy_present = isv4 ?
	    (ipsec_outbound_v4_policy_present ||
	    ipsec_inbound_v4_policy_present) :
	    (ipsec_outbound_v6_policy_present ||
	    ipsec_inbound_v6_policy_present);

	if ((connp->conn_policy != NULL) || global_policy_present) {
		ipsec_selector_t sel;
		ipsec_policy_t *p;

		if (connp->conn_latch == NULL &&
		    (connp->conn_latch = iplatch_create()) == NULL) {
			return (ENOMEM);
		}

		/*
		 * NOTE(review): sel is only partially initialized here
		 * (e.g. ICMP type/code fields are left unset); presumably
		 * ipsec_find_policy() ignores those for non-ICMP conns —
		 * confirm.
		 */
		sel.ips_protocol = connp->conn_ulp;
		sel.ips_local_port = connp->conn_lport;
		sel.ips_remote_port = connp->conn_fport;
		sel.ips_is_icmp_inv_acq = 0;
		sel.ips_isv4 = isv4;
		if (isv4) {
			sel.ips_local_addr_v4 = connp->conn_src;
			sel.ips_remote_addr_v4 = connp->conn_rem;
		} else {
			sel.ips_local_addr_v6 = connp->conn_srcv6;
			sel.ips_remote_addr_v6 = connp->conn_remv6;
		}

		p = ipsec_find_policy(IPSEC_TYPE_INBOUND, connp, NULL, &sel);
		if (connp->conn_latch->ipl_in_policy != NULL)
			IPPOL_REFRELE(connp->conn_latch->ipl_in_policy);
		connp->conn_latch->ipl_in_policy = p;
		connp->conn_in_enforce_policy = (p != NULL);

		p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, NULL, &sel);
		if (connp->conn_latch->ipl_out_policy != NULL)
			IPPOL_REFRELE(connp->conn_latch->ipl_out_policy);
		connp->conn_latch->ipl_out_policy = p;
		connp->conn_out_enforce_policy = (p != NULL);

		/* Clear the latched actions too, in case we're recaching. */
		if (connp->conn_latch->ipl_out_action != NULL)
			IPACT_REFRELE(connp->conn_latch->ipl_out_action);
		if (connp->conn_latch->ipl_in_action != NULL)
			IPACT_REFRELE(connp->conn_latch->ipl_in_action);
	}

	/*
	 * We may or may not have policy for this endpoint.  We still set
	 * conn_policy_cached so that inbound datagrams don't have to look
	 * at global policy as policy is considered latched for these
	 * endpoints.  We should not set conn_policy_cached until the conn
	 * reflects the actual policy. If we *set* this before inheriting
	 * the policy there is a window where the check
	 * CONN_INBOUND_POLICY_PRESENT, will neither check with the policy
	 * on the conn (because we have not yet copied the policy on to
	 * conn and hence not set conn_in_enforce_policy) nor with the
	 * global policy (because conn_policy_cached is already set).
	 */
	connp->conn_policy_cached = B_TRUE;
	if (connp->conn_in_enforce_policy)
		connp->conn_flags |= IPCL_CHECK_POLICY;
	return (0);
}

/*
 * Release every reference held by a latch and free it.
 */
void
iplatch_free(ipsec_latch_t *ipl)
{
	if (ipl->ipl_out_policy != NULL)
		IPPOL_REFRELE(ipl->ipl_out_policy);
	if (ipl->ipl_in_policy != NULL)
		IPPOL_REFRELE(ipl->ipl_in_policy);
	if (ipl->ipl_in_action != NULL)
		IPACT_REFRELE(ipl->ipl_in_action);
	if (ipl->ipl_out_action != NULL)
		IPACT_REFRELE(ipl->ipl_out_action);
	if (ipl->ipl_local_cid != NULL)
		IPSID_REFRELE(ipl->ipl_local_cid);
	if (ipl->ipl_remote_cid != NULL)
		IPSID_REFRELE(ipl->ipl_remote_cid);
	if (ipl->ipl_local_id != NULL)
		crfree(ipl->ipl_local_id);
	mutex_destroy(&ipl->ipl_lock);
	kmem_free(ipl, sizeof (*ipl));
}

/*
 * Allocate a zeroed latch with a refcount of 1, or NULL on allocation
 * failure (KM_NOSLEEP).
 */
ipsec_latch_t *
iplatch_create()
{
	ipsec_latch_t *ipl = kmem_alloc(sizeof (*ipl), KM_NOSLEEP);
	if (ipl == NULL)
		return (ipl);
	bzero(ipl, sizeof (*ipl));
	mutex_init(&ipl->ipl_lock, NULL, MUTEX_DEFAULT, NULL);
	ipl->ipl_refcnt = 1;
	return (ipl);
}

/*
 * Identity hash table.
 *
 * Identities are refcounted and "interned" into the hash table.
 * Only references coming from other objects (SA's, latching state)
 * are counted in ipsid_refcnt.
 *
 * Locking: IPSID_REFHOLD is safe only when (a) the object's hash bucket
 * is locked, (b) we know that the refcount must be > 0.
3741 * 3742 * The ipsid_next and ipsid_ptpn fields are only to be referenced or 3743 * modified when the bucket lock is held; in particular, we only 3744 * delete objects while holding the bucket lock, and we only increase 3745 * the refcount from 0 to 1 while the bucket lock is held. 3746 */ 3747 3748 #define IPSID_HASHSIZE 64 3749 3750 typedef struct ipsif_s 3751 { 3752 ipsid_t *ipsif_head; 3753 kmutex_t ipsif_lock; 3754 } ipsif_t; 3755 3756 ipsif_t ipsid_buckets[IPSID_HASHSIZE]; 3757 3758 /* 3759 * Hash function for ID hash table. 3760 */ 3761 static uint32_t 3762 ipsid_hash(int idtype, char *idstring) 3763 { 3764 uint32_t hval = idtype; 3765 unsigned char c; 3766 3767 while ((c = *idstring++) != 0) { 3768 hval = (hval << 4) | (hval >> 28); 3769 hval ^= c; 3770 } 3771 hval = hval ^ (hval >> 16); 3772 return (hval & (IPSID_HASHSIZE-1)); 3773 } 3774 3775 /* 3776 * Look up identity string in hash table. Return identity object 3777 * corresponding to the name -- either preexisting, or newly allocated. 3778 * 3779 * Return NULL if we need to allocate a new one and can't get memory. 
 */
ipsid_t *
ipsid_lookup(int idtype, char *idstring)
{
	ipsid_t *retval;
	char *nstr;
	/* idlen includes the trailing NUL so bcmp matches exact strings. */
	int idlen = strlen(idstring) + 1;

	ipsif_t *bucket = &ipsid_buckets[ipsid_hash(idtype, idstring)];

	/* Hold the bucket lock across search, allocation and insertion. */
	mutex_enter(&bucket->ipsif_lock);

	for (retval = bucket->ipsif_head; retval != NULL;
	    retval = retval->ipsid_next) {
		if (idtype != retval->ipsid_type)
			continue;
		if (bcmp(idstring, retval->ipsid_cid, idlen) != 0)
			continue;

		/* Found: take a hold while the bucket is still locked. */
		IPSID_REFHOLD(retval);
		mutex_exit(&bucket->ipsif_lock);
		return (retval);
	}

	/* Not found; intern a new entry (KM_NOSLEEP: may fail). */
	retval = kmem_alloc(sizeof (*retval), KM_NOSLEEP);
	if (!retval) {
		mutex_exit(&bucket->ipsif_lock);
		return (NULL);
	}

	nstr = kmem_alloc(idlen, KM_NOSLEEP);
	if (!nstr) {
		mutex_exit(&bucket->ipsif_lock);
		kmem_free(retval, sizeof (*retval));
		return (NULL);
	}

	/* Insert at the head of the bucket, fixing up back-pointers. */
	retval->ipsid_refcnt = 1;
	retval->ipsid_next = bucket->ipsif_head;
	if (retval->ipsid_next != NULL)
		retval->ipsid_next->ipsid_ptpn = &retval->ipsid_next;
	retval->ipsid_ptpn = &bucket->ipsif_head;
	retval->ipsid_type = idtype;
	retval->ipsid_cid = nstr;
	bucket->ipsif_head = retval;
	bcopy(idstring, nstr, idlen);
	mutex_exit(&bucket->ipsif_lock);

	return (retval);
}

/*
 * Garbage collect the identity hash table.
 */
void
ipsid_gc()
{
	int i, len;
	ipsid_t *id, *nid;
	ipsif_t *bucket;

	/* Sweep every bucket, unlinking and freeing zero-ref entries. */
	for (i = 0; i < IPSID_HASHSIZE; i++) {
		bucket = &ipsid_buckets[i];
		mutex_enter(&bucket->ipsif_lock);
		for (id = bucket->ipsif_head; id != NULL; id = nid) {
			/* Save the successor before id may be freed. */
			nid = id->ipsid_next;
			if (id->ipsid_refcnt == 0) {
				/* Splice id out via its back-pointer. */
				*id->ipsid_ptpn = nid;
				if (nid != NULL)
					nid->ipsid_ptpn = id->ipsid_ptpn;
				len = strlen(id->ipsid_cid) + 1;
				kmem_free(id->ipsid_cid, len);
				kmem_free(id, sizeof (*id));
			}
		}
		mutex_exit(&bucket->ipsif_lock);
	}
}

/*
 * Return true if two identities are the same.
 * Because identities are interned, pointer equality suffices.
 */
boolean_t
ipsid_equal(ipsid_t *id1, ipsid_t *id2)
{
	if (id1 == id2)
		return (B_TRUE);
#ifdef DEBUG
	if ((id1 == NULL) || (id2 == NULL))
		return (B_FALSE);
	/*
	 * test that we're interning id's correctly..
	 */
	ASSERT((strcmp(id1->ipsid_cid, id2->ipsid_cid) != 0) ||
	    (id1->ipsid_type != id2->ipsid_type));
#endif
	return (B_FALSE);
}

/*
 * Initialize identity table; called during module initialization.
 */
static void
ipsid_init()
{
	ipsif_t *bucket;
	int i;

	for (i = 0; i < IPSID_HASHSIZE; i++) {
		bucket = &ipsid_buckets[i];
		mutex_init(&bucket->ipsif_lock, NULL, MUTEX_DEFAULT, NULL);
	}
}

/*
 * Free identity table (preparatory to module unload)
 */
static void
ipsid_fini()
{
	ipsif_t *bucket;
	int i;

	for (i = 0; i < IPSID_HASHSIZE; i++) {
		bucket = &ipsid_buckets[i];
		mutex_destroy(&bucket->ipsif_lock);
	}
}

/*
 * Update the minimum and maximum supported key sizes for the
 * specified algorithm. Must be called while holding the algorithms lock.
 */
void
ipsec_alg_fix_min_max(ipsec_alginfo_t *alg, ipsec_algtype_t alg_type)
{
	size_t crypto_min = (size_t)-1, crypto_max = 0;
	size_t cur_crypto_min, cur_crypto_max;
	boolean_t is_valid;
	crypto_mechanism_info_t *mech_infos;
	uint_t nmech_infos;
	int crypto_rc, i;
	crypto_mech_usage_t mask;

	ASSERT(MUTEX_HELD(&alg_lock));

	/*
	 * Compute the min, max, and default key sizes (in number of
	 * increments to the default key size in bits) as defined
	 * by the algorithm mappings. This range of key sizes is used
	 * for policy related operations. The effective key sizes
	 * supported by the framework could be more limited than
	 * those defined for an algorithm.
	 */
	alg->alg_default_bits = alg->alg_key_sizes[0];
	if (alg->alg_increment != 0) {
		/* key sizes are defined by range & increment */
		alg->alg_minbits = alg->alg_key_sizes[1];
		alg->alg_maxbits = alg->alg_key_sizes[2];

		alg->alg_default = SADB_ALG_DEFAULT_INCR(alg->alg_minbits,
		    alg->alg_increment, alg->alg_default_bits);
	} else if (alg->alg_nkey_sizes == 0) {
		/* no specified key size for algorithm */
		alg->alg_minbits = alg->alg_maxbits = 0;
	} else {
		/* key sizes are defined by enumeration */
		alg->alg_minbits = (uint16_t)-1;
		alg->alg_maxbits = 0;

		for (i = 0; i < alg->alg_nkey_sizes; i++) {
			if (alg->alg_key_sizes[i] < alg->alg_minbits)
				alg->alg_minbits = alg->alg_key_sizes[i];
			if (alg->alg_key_sizes[i] > alg->alg_maxbits)
				alg->alg_maxbits = alg->alg_key_sizes[i];
		}
		alg->alg_default = 0;
	}

	/* Only the effective (framework) ranges below need a valid alg. */
	if (!(alg->alg_flags & ALG_FLAG_VALID))
		return;

	/*
	 * Mechanisms do not apply to the NULL encryption
	 * algorithm, so simply return for this case.
	 */
	if (alg->alg_id == SADB_EALG_NULL)
		return;

	/*
	 * Find the min and max key sizes supported by the cryptographic
	 * framework providers.
	 */

	/* get the key sizes supported by the framework */
	crypto_rc = crypto_get_all_mech_info(alg->alg_mech_type,
	    &mech_infos, &nmech_infos, KM_SLEEP);
	if (crypto_rc != CRYPTO_SUCCESS || nmech_infos == 0) {
		alg->alg_flags &= ~ALG_FLAG_VALID;
		return;
	}

	/* min and max key sizes supported by framework */
	for (i = 0, is_valid = B_FALSE; i < nmech_infos; i++) {
		int unit_bits;

		/*
		 * Ignore entries that do not support the operations
		 * needed for the algorithm type.
		 */
		if (alg_type == IPSEC_ALG_AUTH)
			mask = CRYPTO_MECH_USAGE_MAC;
		else
			mask = CRYPTO_MECH_USAGE_ENCRYPT |
			    CRYPTO_MECH_USAGE_DECRYPT;
		if ((mech_infos[i].mi_usage & mask) != mask)
			continue;

		/* Normalize framework key sizes to bits. */
		unit_bits = (mech_infos[i].mi_keysize_unit ==
		    CRYPTO_KEYSIZE_UNIT_IN_BYTES) ? 8 : 1;
		/* adjust min/max supported by framework */
		cur_crypto_min = mech_infos[i].mi_min_key_size * unit_bits;
		cur_crypto_max = mech_infos[i].mi_max_key_size * unit_bits;

		if (cur_crypto_min < crypto_min)
			crypto_min = cur_crypto_min;

		/*
		 * CRYPTO_EFFECTIVELY_INFINITE is a special value of
		 * the crypto framework which means "no upper limit".
		 */
		if (mech_infos[i].mi_max_key_size ==
		    CRYPTO_EFFECTIVELY_INFINITE)
			crypto_max = (size_t)-1;
		else if (cur_crypto_max > crypto_max)
			crypto_max = cur_crypto_max;

		is_valid = B_TRUE;
	}

	kmem_free(mech_infos, sizeof (crypto_mechanism_info_t) *
	    nmech_infos);

	if (!is_valid) {
		/* no key sizes supported by framework */
		alg->alg_flags &= ~ALG_FLAG_VALID;
		return;
	}

	/*
	 * Determine min and max key sizes from alg_key_sizes[].
	 * defined for the algorithm entry. Adjust key sizes based on
	 * those supported by the framework.
	 */
	alg->alg_ef_default_bits = alg->alg_key_sizes[0];
	if (alg->alg_increment != 0) {
		/* supported key sizes are defined by range & increment */
		crypto_min = ALGBITS_ROUND_UP(crypto_min, alg->alg_increment);
		crypto_max = ALGBITS_ROUND_DOWN(crypto_max, alg->alg_increment);

		alg->alg_ef_minbits = MAX(alg->alg_minbits,
		    (uint16_t)crypto_min);
		alg->alg_ef_maxbits = MIN(alg->alg_maxbits,
		    (uint16_t)crypto_max);

		/*
		 * If the sizes supported by the framework are outside
		 * the range of sizes defined by the algorithm mappings,
		 * the algorithm cannot be used. Check for this
		 * condition here.
		 */
		if (alg->alg_ef_minbits > alg->alg_ef_maxbits) {
			alg->alg_flags &= ~ALG_FLAG_VALID;
			return;
		}

		/* Clamp the default into the effective range. */
		if (alg->alg_ef_default_bits < alg->alg_ef_minbits)
			alg->alg_ef_default_bits = alg->alg_ef_minbits;
		if (alg->alg_ef_default_bits > alg->alg_ef_maxbits)
			alg->alg_ef_default_bits = alg->alg_ef_maxbits;

		alg->alg_ef_default = SADB_ALG_DEFAULT_INCR(alg->alg_ef_minbits,
		    alg->alg_increment, alg->alg_ef_default_bits);
	} else if (alg->alg_nkey_sizes == 0) {
		/* no specified key size for algorithm */
		alg->alg_ef_minbits = alg->alg_ef_maxbits = 0;
	} else {
		/* supported key sizes are defined by enumeration */
		alg->alg_ef_minbits = (uint16_t)-1;
		alg->alg_ef_maxbits = 0;

		for (i = 0, is_valid = B_FALSE; i < alg->alg_nkey_sizes; i++) {
			/*
			 * Ignore the current key size if it is not in the
			 * range of sizes supported by the framework.
			 */
			if (alg->alg_key_sizes[i] < crypto_min ||
			    alg->alg_key_sizes[i] > crypto_max)
				continue;
			if (alg->alg_key_sizes[i] < alg->alg_ef_minbits)
				alg->alg_ef_minbits = alg->alg_key_sizes[i];
			if (alg->alg_key_sizes[i] > alg->alg_ef_maxbits)
				alg->alg_ef_maxbits = alg->alg_key_sizes[i];
			is_valid = B_TRUE;
		}

		if (!is_valid) {
			alg->alg_flags &= ~ALG_FLAG_VALID;
			return;
		}
		alg->alg_ef_default = 0;
	}
}

/*
 * Free the memory used by the specified algorithm.
 */
void
ipsec_alg_free(ipsec_alginfo_t *alg)
{
	if (alg == NULL)
		return;

	/* The size arrays are allocated with one extra trailing slot. */
	if (alg->alg_key_sizes != NULL)
		kmem_free(alg->alg_key_sizes,
		    (alg->alg_nkey_sizes + 1) * sizeof (uint16_t));

	if (alg->alg_block_sizes != NULL)
		kmem_free(alg->alg_block_sizes,
		    (alg->alg_nblock_sizes + 1) * sizeof (uint16_t));

	kmem_free(alg, sizeof (*alg));
}

/*
 * Check the validity of the specified key size for an algorithm.
 * Returns B_TRUE if key size is valid, B_FALSE otherwise.
 */
boolean_t
ipsec_valid_key_size(uint16_t key_size, ipsec_alginfo_t *alg)
{
	/* First check the effective range. */
	if (key_size < alg->alg_ef_minbits || key_size > alg->alg_ef_maxbits)
		return (B_FALSE);

	if (alg->alg_increment == 0 && alg->alg_nkey_sizes != 0) {
		/*
		 * If the key sizes are defined by enumeration, the new
		 * key size must be equal to one of the supported values.
		 */
		int i;

		for (i = 0; i < alg->alg_nkey_sizes; i++)
			if (key_size == alg->alg_key_sizes[i])
				break;
		if (i == alg->alg_nkey_sizes)
			return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Callback function invoked by the crypto framework when a provider
 * registers or unregisters.
This callback updates the algorithms 4144 * tables when a crypto algorithm is no longer available or becomes 4145 * available, and triggers the freeing/creation of context templates 4146 * associated with existing SAs, if needed. 4147 */ 4148 void 4149 ipsec_prov_update_callback(uint32_t event, void *event_arg) 4150 { 4151 crypto_notify_event_change_t *prov_change = 4152 (crypto_notify_event_change_t *)event_arg; 4153 uint_t algidx, algid, algtype, mech_count, mech_idx; 4154 ipsec_alginfo_t *alg; 4155 ipsec_alginfo_t oalg; 4156 crypto_mech_name_t *mechs; 4157 boolean_t alg_changed = B_FALSE; 4158 4159 /* ignore events for which we didn't register */ 4160 if (event != CRYPTO_EVENT_PROVIDERS_CHANGE) { 4161 ip1dbg(("ipsec_prov_update_callback: unexpected event 0x%x " 4162 " received from crypto framework\n", event)); 4163 return; 4164 } 4165 4166 mechs = crypto_get_mech_list(&mech_count, KM_SLEEP); 4167 if (mechs == NULL) 4168 return; 4169 4170 /* 4171 * Walk the list of currently defined IPsec algorithm. Update 4172 * the algorithm valid flag and trigger an update of the 4173 * SAs that depend on that algorithm. 4174 */ 4175 mutex_enter(&alg_lock); 4176 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 4177 for (algidx = 0; algidx < ipsec_nalgs[algtype]; algidx++) { 4178 4179 algid = ipsec_sortlist[algtype][algidx]; 4180 alg = ipsec_alglists[algtype][algid]; 4181 ASSERT(alg != NULL); 4182 4183 /* 4184 * Skip the algorithms which do not map to the 4185 * crypto framework provider being added or removed. 4186 */ 4187 if (strncmp(alg->alg_mech_name, 4188 prov_change->ec_mech_name, 4189 CRYPTO_MAX_MECH_NAME) != 0) 4190 continue; 4191 4192 /* 4193 * Determine if the mechanism is valid. If it 4194 * is not, mark the algorithm as being invalid. If 4195 * it is, mark the algorithm as being valid. 
4196 */ 4197 for (mech_idx = 0; mech_idx < mech_count; mech_idx++) 4198 if (strncmp(alg->alg_mech_name, 4199 mechs[mech_idx], CRYPTO_MAX_MECH_NAME) == 0) 4200 break; 4201 if (mech_idx == mech_count && 4202 alg->alg_flags & ALG_FLAG_VALID) { 4203 alg->alg_flags &= ~ALG_FLAG_VALID; 4204 alg_changed = B_TRUE; 4205 } else if (mech_idx < mech_count && 4206 !(alg->alg_flags & ALG_FLAG_VALID)) { 4207 alg->alg_flags |= ALG_FLAG_VALID; 4208 alg_changed = B_TRUE; 4209 } 4210 4211 /* 4212 * Update the supported key sizes, regardless 4213 * of whether a crypto provider was added or 4214 * removed. 4215 */ 4216 oalg = *alg; 4217 ipsec_alg_fix_min_max(alg, algtype); 4218 if (!alg_changed && 4219 alg->alg_ef_minbits != oalg.alg_ef_minbits || 4220 alg->alg_ef_maxbits != oalg.alg_ef_maxbits || 4221 alg->alg_ef_default != oalg.alg_ef_default || 4222 alg->alg_ef_default_bits != 4223 oalg.alg_ef_default_bits) 4224 alg_changed = B_TRUE; 4225 4226 /* 4227 * Update the affected SAs if a software provider is 4228 * being added or removed. 4229 */ 4230 if (prov_change->ec_provider_type == 4231 CRYPTO_SW_PROVIDER) 4232 sadb_alg_update(algtype, alg->alg_id, 4233 prov_change->ec_change == 4234 CRYPTO_EVENT_CHANGE_ADDED); 4235 } 4236 } 4237 mutex_exit(&alg_lock); 4238 crypto_free_mech_list(mechs, mech_count); 4239 4240 if (alg_changed) { 4241 /* 4242 * An algorithm has changed, i.e. it became valid or 4243 * invalid, or its support key sizes have changed. 4244 * Notify ipsecah and ipsecesp of this change so 4245 * that they can send a SADB_REGISTER to their consumers. 4246 */ 4247 ipsecah_algs_changed(); 4248 ipsecesp_algs_changed(); 4249 } 4250 } 4251 4252 /* 4253 * Registers with the crypto framework to be notified of crypto 4254 * providers changes. Used to update the algorithm tables and 4255 * to free or create context templates if needed. Invoked after IPsec 4256 * is loaded successfully. 
4257 */ 4258 void 4259 ipsec_register_prov_update(void) 4260 { 4261 prov_update_handle = crypto_notify_events( 4262 ipsec_prov_update_callback, CRYPTO_EVENT_PROVIDERS_CHANGE); 4263 } 4264 4265 /* 4266 * Unregisters from the framework to be notified of crypto providers 4267 * changes. Called from ipsec_policy_destroy(). 4268 */ 4269 static void 4270 ipsec_unregister_prov_update(void) 4271 { 4272 if (prov_update_handle != NULL) 4273 crypto_unnotify_events(prov_update_handle); 4274 } 4275