/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2018 Joyent, Inc.
 */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/stropts.h>
#include <sys/strsubr.h>
#include <sys/errno.h>
#include <sys/ddi.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/stream.h>
#include <sys/strlog.h>
#include <sys/kmem.h>
#include <sys/sunddi.h>
#include <sys/tihdr.h>
#include <sys/atomic.h>
#include <sys/socket.h>
#include <sys/sysmacros.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/zone.h>
#include <netinet/in.h>
#include <net/if.h>
#include <net/pfkeyv2.h>
#include <net/pfpolicy.h>
#include <inet/common.h>
#include <netinet/ip6.h>
#include <inet/ip.h>
#include <inet/ip_ire.h>
#include <inet/ip6.h>
#include <inet/ipsec_info.h>
#include <inet/tcp.h>
#include <inet/sadb.h>
#include <inet/ipsec_impl.h>
#include <inet/ipsecah.h>
#include <inet/ipsecesp.h>
#include <sys/random.h>
#include <sys/dlpi.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <inet/ip_if.h>
#include <inet/ipdrop.h>
#include <inet/ipclassifier.h>
#include <inet/sctp_ip.h>
#include <sys/tsol/tnet.h>

/*
 * This source file contains Security Association Database (SADB) common
 * routines.  They are linked in with the AH module.  Since AH has no chance
 * of falling under export control, it was safe to link it in there.
 */

static uint8_t *sadb_action_to_ecomb(uint8_t *, uint8_t *, ipsec_action_t *,
    netstack_t *);
static ipsa_t *sadb_torch_assoc(isaf_t *, ipsa_t *);
static void sadb_destroy_acqlist(iacqf_t **, uint_t, boolean_t,
    netstack_t *);
static void sadb_destroy(sadb_t *, netstack_t *);
static mblk_t *sadb_sa2msg(ipsa_t *, sadb_msg_t *);
static ts_label_t *sadb_label_from_sens(sadb_sens_t *, uint64_t *);

static time_t sadb_add_time(time_t, uint64_t);
static void lifetime_fuzz(ipsa_t *);
static void age_pair_peer_list(templist_t *, sadb_t *, boolean_t);
static int get_ipsa_pair(ipsa_query_t *, ipsap_t *, int *);
static void init_ipsa_pair(ipsap_t *);
static void destroy_ipsa_pair(ipsap_t *);
static int update_pairing(ipsap_t *, ipsa_query_t *, keysock_in_t *, int *);
static void ipsa_set_replay(ipsa_t *ipsa, uint32_t offset);

/*
 * ipsacq_maxpackets is defined here to make it tunable
 * from /etc/system.
 */
extern uint64_t ipsacq_maxpackets;

#define	SET_EXPIRE(sa, delta, exp) {					\
	if (((sa)->ipsa_ ## delta) != 0) {				\
		(sa)->ipsa_ ## exp = sadb_add_time((sa)->ipsa_addtime,	\
		    (sa)->ipsa_ ## delta);				\
	}								\
}

#define	UPDATE_EXPIRE(sa, delta, exp) {					\
	if (((sa)->ipsa_ ## delta) != 0) {				\
		time_t tmp = sadb_add_time((sa)->ipsa_usetime,		\
		    (sa)->ipsa_ ## delta);				\
		if (((sa)->ipsa_ ## exp) == 0)				\
			(sa)->ipsa_ ## exp = tmp;			\
		else							\
			(sa)->ipsa_ ## exp =				\
			    MIN((sa)->ipsa_ ## exp, tmp);		\
	}								\
}


/* wrap the macro so we can pass it as a function pointer */
void
sadb_sa_refrele(void *target)
{
	IPSA_REFRELE(((ipsa_t *)target));
}

/*
 * We presume that sizeof (long) == sizeof (time_t) and that time_t is
 * a signed type.
 */
#define	TIME_MAX LONG_MAX

/*
 * PF_KEY gives us lifetimes in uint64_t seconds.  We presume that
 * time_t is defined to be a signed type with the same range as
 * "long".  On ILP32 systems, we thus run the risk of wrapping around
 * at end of time, as well as "overwrapping" the clock back around
 * into a seemingly valid but incorrect future date earlier than the
 * desired expiration.
 *
 * In order to avoid odd behavior (either negative lifetimes or loss
 * of high order bits) when someone asks for bizarrely long SA
 * lifetimes, we do a saturating add for expire times.
 *
 * We presume that ILP32 systems will be past end of support life when
 * the 32-bit time_t overflows (a dangerous assumption, mind you..).
 *
 * On LP64, 2^64 seconds are about 5.8e11 years, at which point we
 * will hopefully have figured out clever ways to avoid the use of
 * fixed-sized integers in computation.
 */
static time_t
sadb_add_time(time_t base, uint64_t delta)
{
	time_t sum;

	/*
	 * Clip delta to the maximum possible time_t value to
	 * prevent "overwrapping" back into a shorter-than-desired
	 * future time.
	 */
	if (delta > TIME_MAX)
		delta = TIME_MAX;
	/*
	 * This sum may still overflow.
	 */
	sum = base + delta;

	/*
	 * .. so if the result is less than the base, we overflowed.
	 */
	if (sum < base)
		sum = TIME_MAX;

	return (sum);
}

/*
 * Callers of this function have already created a working security
 * association, and have found the appropriate table & hash chain.  All this
 * function does is check duplicates, and insert the SA.  The caller needs to
 * hold the hash bucket lock and increment the refcnt before insertion.
 *
 * Return 0 if success, EEXIST if collision.
 */
#define	SA_UNIQUE_MATCH(sa1, sa2) \
	(((sa1)->ipsa_unique_id & (sa1)->ipsa_unique_mask) == \
	((sa2)->ipsa_unique_id & (sa2)->ipsa_unique_mask))

int
sadb_insertassoc(ipsa_t *ipsa, isaf_t *bucket)
{
	ipsa_t **ptpn = NULL;
	ipsa_t *walker;
	boolean_t unspecsrc;

	ASSERT(MUTEX_HELD(&bucket->isaf_lock));

	unspecsrc = IPSA_IS_ADDR_UNSPEC(ipsa->ipsa_srcaddr, ipsa->ipsa_addrfam);

	walker = bucket->isaf_ipsa;
	ASSERT(walker == NULL || ipsa->ipsa_addrfam == walker->ipsa_addrfam);

	/*
	 * Find insertion point (pointed to with **ptpn).  Insert at the head
	 * of the list unless there's an unspecified source address, then
	 * insert it after the last SA with a specified source address.
	 *
	 * BTW, you'll have to walk the whole chain, matching on {DST, SPI}
	 * checking for collisions.
	 */

	while (walker != NULL) {
		if (IPSA_ARE_ADDR_EQUAL(walker->ipsa_dstaddr,
		    ipsa->ipsa_dstaddr, ipsa->ipsa_addrfam)) {
			if (walker->ipsa_spi == ipsa->ipsa_spi)
				return (EEXIST);

			mutex_enter(&walker->ipsa_lock);
			if (ipsa->ipsa_state == IPSA_STATE_MATURE &&
			    (walker->ipsa_flags & IPSA_F_USED) &&
			    SA_UNIQUE_MATCH(walker, ipsa)) {
				walker->ipsa_flags |= IPSA_F_CINVALID;
			}
			mutex_exit(&walker->ipsa_lock);
		}

		if (ptpn == NULL && unspecsrc) {
			if (IPSA_IS_ADDR_UNSPEC(walker->ipsa_srcaddr,
			    walker->ipsa_addrfam))
				ptpn = walker->ipsa_ptpn;
			else if (walker->ipsa_next == NULL)
				ptpn = &walker->ipsa_next;
		}

		walker = walker->ipsa_next;
	}

	if (ptpn == NULL)
		ptpn = &bucket->isaf_ipsa;
	ipsa->ipsa_next = *ptpn;
	ipsa->ipsa_ptpn = ptpn;
	if (ipsa->ipsa_next != NULL)
		ipsa->ipsa_next->ipsa_ptpn = &ipsa->ipsa_next;
	*ptpn = ipsa;
	ipsa->ipsa_linklock = &bucket->isaf_lock;

	return (0);
}
#undef SA_UNIQUE_MATCH

/*
 * Free a security association.  Its reference count is 0, which means
 * I must free it.  The SA must be unlocked and must not be linked into
 * any fanout list.
 */
static void
sadb_freeassoc(ipsa_t *ipsa)
{
	ipsec_stack_t *ipss = ipsa->ipsa_netstack->netstack_ipsec;
	mblk_t *asyncmp, *mp;

	ASSERT(ipss != NULL);
	ASSERT(MUTEX_NOT_HELD(&ipsa->ipsa_lock));
	ASSERT(ipsa->ipsa_refcnt == 0);
	ASSERT(ipsa->ipsa_next == NULL);
	ASSERT(ipsa->ipsa_ptpn == NULL);


	asyncmp = sadb_clear_lpkt(ipsa);
	if (asyncmp != NULL) {
		mp = ip_recv_attr_free_mblk(asyncmp);
		ip_drop_packet(mp, B_TRUE, NULL,
		    DROPPER(ipss, ipds_sadb_inlarval_timeout),
		    &ipss->ipsec_sadb_dropper);
	}
	mutex_enter(&ipsa->ipsa_lock);

	if (ipsa->ipsa_tsl != NULL) {
		label_rele(ipsa->ipsa_tsl);
		ipsa->ipsa_tsl = NULL;
	}

	if (ipsa->ipsa_otsl != NULL) {
		label_rele(ipsa->ipsa_otsl);
		ipsa->ipsa_otsl = NULL;
	}

	ipsec_destroy_ctx_tmpl(ipsa, IPSEC_ALG_AUTH);
	ipsec_destroy_ctx_tmpl(ipsa, IPSEC_ALG_ENCR);
	mutex_exit(&ipsa->ipsa_lock);

	/* bzero() these fields for paranoia's sake. */
	if (ipsa->ipsa_authkey != NULL) {
		bzero(ipsa->ipsa_authkey, ipsa->ipsa_authkeylen);
		kmem_free(ipsa->ipsa_authkey, ipsa->ipsa_authkeylen);
	}
	if (ipsa->ipsa_encrkey != NULL) {
		bzero(ipsa->ipsa_encrkey, ipsa->ipsa_encrkeylen);
		kmem_free(ipsa->ipsa_encrkey, ipsa->ipsa_encrkeylen);
	}
	if (ipsa->ipsa_nonce_buf != NULL) {
		bzero(ipsa->ipsa_nonce_buf, sizeof (ipsec_nonce_t));
		kmem_free(ipsa->ipsa_nonce_buf, sizeof (ipsec_nonce_t));
	}
	if (ipsa->ipsa_src_cid != NULL) {
		IPSID_REFRELE(ipsa->ipsa_src_cid);
	}
	if (ipsa->ipsa_dst_cid != NULL) {
		IPSID_REFRELE(ipsa->ipsa_dst_cid);
	}
	if (ipsa->ipsa_emech.cm_param != NULL)
		kmem_free(ipsa->ipsa_emech.cm_param,
		    ipsa->ipsa_emech.cm_param_len);

	mutex_destroy(&ipsa->ipsa_lock);
	kmem_free(ipsa, sizeof (*ipsa));
}

/*
 * Unlink a security association from a hash bucket.  Assume the hash bucket
 * lock is held, but the association's lock is not.
 *
 * Note that we do not bump the bucket's generation number here because
 * we might not be making a visible change to the set of visible SA's.
 * All callers MUST bump the bucket's generation number before they unlock
 * the bucket if they use sadb_unlinkassoc to permanently remove an SA which
 * was present in the bucket at the time it was locked.
 */
void
sadb_unlinkassoc(ipsa_t *ipsa)
{
	ASSERT(ipsa->ipsa_linklock != NULL);
	ASSERT(MUTEX_HELD(ipsa->ipsa_linklock));

	/* These fields are protected by the link lock. */
	*(ipsa->ipsa_ptpn) = ipsa->ipsa_next;
	if (ipsa->ipsa_next != NULL) {
		ipsa->ipsa_next->ipsa_ptpn = ipsa->ipsa_ptpn;
		ipsa->ipsa_next = NULL;
	}

	ipsa->ipsa_ptpn = NULL;

	/* This may destroy the SA. */
	IPSA_REFRELE(ipsa);
}

void
sadb_delete_cluster(ipsa_t *assoc)
{
	uint8_t protocol;

	if (cl_inet_deletespi &&
	    ((assoc->ipsa_state == IPSA_STATE_LARVAL) ||
	    (assoc->ipsa_state == IPSA_STATE_MATURE))) {
		protocol = (assoc->ipsa_type == SADB_SATYPE_AH) ?
		    IPPROTO_AH : IPPROTO_ESP;
		cl_inet_deletespi(assoc->ipsa_netstack->netstack_stackid,
		    protocol, assoc->ipsa_spi, NULL);
	}
}

/*
 * Create a larval security association with the specified SPI.  All other
 * fields are zeroed.
 */
static ipsa_t *
sadb_makelarvalassoc(uint32_t spi, uint32_t *src, uint32_t *dst, int addrfam,
    netstack_t *ns)
{
	ipsa_t *newbie;

	/*
	 * Allocate...
	 */

	newbie = (ipsa_t *)kmem_zalloc(sizeof (ipsa_t), KM_NOSLEEP);
	if (newbie == NULL) {
		/* Can't make new larval SA. */
		return (NULL);
	}

	/* Assign the requested SPI; assume caller does SPI allocation magic. */
	newbie->ipsa_spi = spi;
	newbie->ipsa_netstack = ns;	/* No netstack_hold */

	/*
	 * Copy addresses...
	 */

	IPSA_COPY_ADDR(newbie->ipsa_srcaddr, src, addrfam);
	IPSA_COPY_ADDR(newbie->ipsa_dstaddr, dst, addrfam);

	newbie->ipsa_addrfam = addrfam;

	/*
	 * Set common initialization values, including refcnt.
	 */
	mutex_init(&newbie->ipsa_lock, NULL, MUTEX_DEFAULT, NULL);
	newbie->ipsa_state = IPSA_STATE_LARVAL;
	newbie->ipsa_refcnt = 1;
	newbie->ipsa_freefunc = sadb_freeassoc;

	/*
	 * There aren't a lot of other common initialization values, as
	 * they are copied in from the PF_KEY message.
	 */

	return (newbie);
}

/*
 * Call me to initialize a security association fanout.
 */
static int
sadb_init_fanout(isaf_t **tablep, uint_t size, int kmflag)
{
	isaf_t *table;
	int i;

	table = (isaf_t *)kmem_alloc(size * sizeof (*table), kmflag);
	*tablep = table;

	if (table == NULL)
		return (ENOMEM);

	for (i = 0; i < size; i++) {
		mutex_init(&(table[i].isaf_lock), NULL, MUTEX_DEFAULT, NULL);
		table[i].isaf_ipsa = NULL;
		table[i].isaf_gen = 0;
	}

	return (0);
}

/*
 * Call me to initialize an acquire fanout
 */
static int
sadb_init_acfanout(iacqf_t **tablep, uint_t size, int kmflag)
{
	iacqf_t *table;
	int i;

	table = (iacqf_t *)kmem_alloc(size * sizeof (*table), kmflag);
	*tablep = table;

	if (table == NULL)
		return (ENOMEM);

	for (i = 0; i < size; i++) {
		mutex_init(&(table[i].iacqf_lock), NULL, MUTEX_DEFAULT, NULL);
		table[i].iacqf_ipsacq = NULL;
	}

	return (0);
}

/*
 * Attempt to initialize an SADB instance.  On failure, return ENOMEM;
 * caller must clean up partial allocations.
 */
static int
sadb_init_trial(sadb_t *sp, uint_t size, int kmflag)
{
	ASSERT(sp->sdb_of == NULL);
	ASSERT(sp->sdb_if == NULL);
	ASSERT(sp->sdb_acq == NULL);

	sp->sdb_hashsize = size;
	if (sadb_init_fanout(&sp->sdb_of, size, kmflag) != 0)
		return (ENOMEM);
	if (sadb_init_fanout(&sp->sdb_if, size, kmflag) != 0)
		return (ENOMEM);
	if (sadb_init_acfanout(&sp->sdb_acq, size, kmflag) != 0)
		return (ENOMEM);

	return (0);
}

/*
 * Call me to initialize an SADB instance; fall back to default size on
 * failure.
 */
static void
sadb_init(const char *name, sadb_t *sp, uint_t size, uint_t ver,
    netstack_t *ns)
{
	ASSERT(sp->sdb_of == NULL);
	ASSERT(sp->sdb_if == NULL);
	ASSERT(sp->sdb_acq == NULL);

	if (size < IPSEC_DEFAULT_HASH_SIZE)
		size = IPSEC_DEFAULT_HASH_SIZE;

	if (sadb_init_trial(sp, size, KM_NOSLEEP) != 0) {

		cmn_err(CE_WARN,
		    "Unable to allocate %u entry IPv%u %s SADB hash table",
		    size, ver, name);

		sadb_destroy(sp, ns);
		size = IPSEC_DEFAULT_HASH_SIZE;
		cmn_err(CE_WARN, "Falling back to %d entries", size);
		(void) sadb_init_trial(sp, size, KM_SLEEP);
	}
}


/*
 * Initialize an SADB-pair.
 */
void
sadbp_init(const char *name, sadbp_t *sp, int type, int size, netstack_t *ns)
{
	sadb_init(name, &sp->s_v4, size, 4, ns);
	sadb_init(name, &sp->s_v6, size, 6, ns);

	sp->s_satype = type;

	ASSERT((type == SADB_SATYPE_AH) || (type == SADB_SATYPE_ESP));
	if (type == SADB_SATYPE_AH) {
		ipsec_stack_t *ipss = ns->netstack_ipsec;

		ip_drop_register(&ipss->ipsec_sadb_dropper, "IPsec SADB");
		sp->s_addflags = AH_ADD_SETTABLE_FLAGS;
		sp->s_updateflags = AH_UPDATE_SETTABLE_FLAGS;
	} else {
		sp->s_addflags = ESP_ADD_SETTABLE_FLAGS;
		sp->s_updateflags = ESP_UPDATE_SETTABLE_FLAGS;
	}
}

/*
 * Deliver a single SADB_DUMP message representing a single SA.  This is
 * called many times by sadb_dump().
 *
 * If the return value of this is ENOBUFS (not the same as ENOMEM), then
 * the caller should take that as a hint that dupb() on the "original answer"
 * failed, and that perhaps the caller should try again with a copyb()ed
 * "original answer".
 */
static int
sadb_dump_deliver(queue_t *pfkey_q, mblk_t *original_answer, ipsa_t *ipsa,
    sadb_msg_t *samsg)
{
	mblk_t *answer;

	answer = dupb(original_answer);
	if (answer == NULL)
		return (ENOBUFS);
	answer->b_cont = sadb_sa2msg(ipsa, samsg);
	if (answer->b_cont == NULL) {
		freeb(answer);
		return (ENOMEM);
	}

	/* Just do a putnext, and let keysock deal with flow control. */
	putnext(pfkey_q, answer);
	return (0);
}

/*
 * Common function to allocate and prepare a keysock_out_t M_CTL message.
 */
mblk_t *
sadb_keysock_out(minor_t serial)
{
	mblk_t *mp;
	keysock_out_t *kso;

	mp = allocb(sizeof (ipsec_info_t), BPRI_HI);
	if (mp != NULL) {
		mp->b_datap->db_type = M_CTL;
		mp->b_wptr += sizeof (ipsec_info_t);
		kso = (keysock_out_t *)mp->b_rptr;
		kso->ks_out_type = KEYSOCK_OUT;
		kso->ks_out_len = sizeof (*kso);
		kso->ks_out_serial = serial;
	}

	return (mp);
}

/*
 * Perform an SADB_DUMP, spewing out every SA in an array of SA fanouts
 * to keysock.
 */
static int
sadb_dump_fanout(queue_t *pfkey_q, mblk_t *mp, minor_t serial, isaf_t *fanout,
    int num_entries, boolean_t do_peers, time_t active_time)
{
	int i, error = 0;
	mblk_t *original_answer;
	ipsa_t *walker;
	sadb_msg_t *samsg;
	time_t current;

	/*
	 * For each IPSA hash bucket do:
	 *	- Hold the mutex
	 *	- Walk each entry, doing an sadb_dump_deliver() on it.
	 */
	ASSERT(mp->b_cont != NULL);
	samsg = (sadb_msg_t *)mp->b_cont->b_rptr;

	original_answer = sadb_keysock_out(serial);
	if (original_answer == NULL)
		return (ENOMEM);

	current = gethrestime_sec();
	for (i = 0; i < num_entries; i++) {
		mutex_enter(&fanout[i].isaf_lock);
		for (walker = fanout[i].isaf_ipsa; walker != NULL;
		    walker = walker->ipsa_next) {
			if (!do_peers && walker->ipsa_haspeer)
				continue;
			if ((active_time != 0) &&
			    ((current - walker->ipsa_lastuse) > active_time))
				continue;
			error = sadb_dump_deliver(pfkey_q, original_answer,
			    walker, samsg);
			if (error == ENOBUFS) {
				mblk_t *new_original_answer;

				/* Ran out of dupb's.  Try a copyb. */
				new_original_answer = copyb(original_answer);
				if (new_original_answer == NULL) {
					error = ENOMEM;
				} else {
					freeb(original_answer);
					original_answer = new_original_answer;
					error = sadb_dump_deliver(pfkey_q,
					    original_answer, walker, samsg);
				}
			}
			if (error != 0)
				break;	/* out of for loop. */
		}
		mutex_exit(&fanout[i].isaf_lock);
		if (error != 0)
			break;	/* out of for loop. */
	}

	freeb(original_answer);
	return (error);
}

/*
 * Dump an entire SADB; outbound first, then inbound.
 */

int
sadb_dump(queue_t *pfkey_q, mblk_t *mp, keysock_in_t *ksi, sadb_t *sp)
{
	int error;
	time_t active_time = 0;
	sadb_x_edump_t *edump =
	    (sadb_x_edump_t *)ksi->ks_in_extv[SADB_X_EXT_EDUMP];

	if (edump != NULL) {
		active_time = edump->sadb_x_edump_timeout;
	}

	/* Dump outbound */
	error = sadb_dump_fanout(pfkey_q, mp, ksi->ks_in_serial, sp->sdb_of,
	    sp->sdb_hashsize, B_TRUE, active_time);
	if (error)
		return (error);

	/* Dump inbound */
	return (sadb_dump_fanout(pfkey_q, mp, ksi->ks_in_serial, sp->sdb_if,
	    sp->sdb_hashsize, B_FALSE, active_time));
}

/*
 * Generic sadb table walker.
 *
 * Call "walkfn" for each SA in each bucket in "table"; pass the
 * bucket, the entry and "cookie" to the callback function.
 * Take care to ensure that walkfn can delete the SA without screwing
 * up our traverse.
 *
 * The bucket is locked for the duration of the callback, both so that the
 * callback can just call sadb_unlinkassoc() when it wants to delete something,
 * and so that no new entries are added while we're walking the list.
 */
static void
sadb_walker(isaf_t *table, uint_t numentries,
    void (*walkfn)(isaf_t *head, ipsa_t *entry, void *cookie),
    void *cookie)
{
	int i;
	for (i = 0; i < numentries; i++) {
		ipsa_t *entry, *next;

		mutex_enter(&table[i].isaf_lock);

		for (entry = table[i].isaf_ipsa; entry != NULL;
		    entry = next) {
			next = entry->ipsa_next;
			(*walkfn)(&table[i], entry, cookie);
		}
		mutex_exit(&table[i].isaf_lock);
	}
}

/*
 * Call me to free up a security association fanout.  Use the forever
 * variable to indicate freeing up the SAs (forever == B_FALSE, e.g.
 * an SADB_FLUSH message), or destroying everything (forever == B_TRUE,
 * when a module is unloaded).
 */
static void
sadb_destroyer(isaf_t **tablep, uint_t numentries, boolean_t forever,
    boolean_t inbound)
{
	int i;
	isaf_t *table = *tablep;
	uint8_t protocol;
	ipsa_t *sa;
	netstackid_t sid;

	if (table == NULL)
		return;

	for (i = 0; i < numentries; i++) {
		mutex_enter(&table[i].isaf_lock);
		while ((sa = table[i].isaf_ipsa) != NULL) {
			if (inbound && cl_inet_deletespi &&
			    (sa->ipsa_state != IPSA_STATE_ACTIVE_ELSEWHERE) &&
			    (sa->ipsa_state != IPSA_STATE_IDLE)) {
				protocol = (sa->ipsa_type == SADB_SATYPE_AH) ?
				    IPPROTO_AH : IPPROTO_ESP;
				sid = sa->ipsa_netstack->netstack_stackid;
				cl_inet_deletespi(sid, protocol, sa->ipsa_spi,
				    NULL);
			}
			sadb_unlinkassoc(sa);
		}
		table[i].isaf_gen++;
		mutex_exit(&table[i].isaf_lock);
		if (forever)
			mutex_destroy(&(table[i].isaf_lock));
	}

	if (forever) {
		*tablep = NULL;
		kmem_free(table, numentries * sizeof (*table));
	}
}

/*
 * Entry points to sadb_destroyer().
 */
static void
sadb_flush(sadb_t *sp, netstack_t *ns)
{
	/*
	 * Flush out each bucket, one at a time.  Were it not for keysock's
	 * enforcement, there would be a subtlety where I could add on the
	 * heels of a flush.  With keysock's enforcement, however, this
	 * makes ESP's job easy.
	 */
	sadb_destroyer(&sp->sdb_of, sp->sdb_hashsize, B_FALSE, B_FALSE);
	sadb_destroyer(&sp->sdb_if, sp->sdb_hashsize, B_FALSE, B_TRUE);

	/* For each acquire, destroy it; leave the bucket mutex alone. */
	sadb_destroy_acqlist(&sp->sdb_acq, sp->sdb_hashsize, B_FALSE, ns);
}

static void
sadb_destroy(sadb_t *sp, netstack_t *ns)
{
	sadb_destroyer(&sp->sdb_of, sp->sdb_hashsize, B_TRUE, B_FALSE);
	sadb_destroyer(&sp->sdb_if, sp->sdb_hashsize, B_TRUE, B_TRUE);

	/* For each acquire, destroy it, including the bucket mutex. */
	sadb_destroy_acqlist(&sp->sdb_acq, sp->sdb_hashsize, B_TRUE, ns);

	ASSERT(sp->sdb_of == NULL);
	ASSERT(sp->sdb_if == NULL);
	ASSERT(sp->sdb_acq == NULL);
}

void
sadbp_flush(sadbp_t *spp, netstack_t *ns)
{
	sadb_flush(&spp->s_v4, ns);
	sadb_flush(&spp->s_v6, ns);
}

void
sadbp_destroy(sadbp_t *spp, netstack_t *ns)
{
	sadb_destroy(&spp->s_v4, ns);
	sadb_destroy(&spp->s_v6, ns);

	if (spp->s_satype == SADB_SATYPE_AH) {
		ipsec_stack_t *ipss = ns->netstack_ipsec;

		ip_drop_unregister(&ipss->ipsec_sadb_dropper);
	}
}


/*
 * Check hard vs. soft lifetimes.  If there's a reality mismatch (e.g.
 * soft lifetimes > hard lifetimes) return an appropriate diagnostic for
 * EINVAL.
 */
int
sadb_hardsoftchk(sadb_lifetime_t *hard, sadb_lifetime_t *soft,
    sadb_lifetime_t *idle)
{
	if (hard == NULL || soft == NULL)
		return (0);

	if (hard->sadb_lifetime_allocations != 0 &&
	    soft->sadb_lifetime_allocations != 0 &&
	    hard->sadb_lifetime_allocations < soft->sadb_lifetime_allocations)
		return (SADB_X_DIAGNOSTIC_ALLOC_HSERR);

	if (hard->sadb_lifetime_bytes != 0 &&
	    soft->sadb_lifetime_bytes != 0 &&
	    hard->sadb_lifetime_bytes < soft->sadb_lifetime_bytes)
		return (SADB_X_DIAGNOSTIC_BYTES_HSERR);

	if (hard->sadb_lifetime_addtime != 0 &&
	    soft->sadb_lifetime_addtime != 0 &&
	    hard->sadb_lifetime_addtime < soft->sadb_lifetime_addtime)
		return (SADB_X_DIAGNOSTIC_ADDTIME_HSERR);

	if (hard->sadb_lifetime_usetime != 0 &&
	    soft->sadb_lifetime_usetime != 0 &&
	    hard->sadb_lifetime_usetime < soft->sadb_lifetime_usetime)
		return (SADB_X_DIAGNOSTIC_USETIME_HSERR);

	if (idle != NULL) {
		if (hard->sadb_lifetime_addtime != 0 &&
		    idle->sadb_lifetime_addtime != 0 &&
		    hard->sadb_lifetime_addtime < idle->sadb_lifetime_addtime)
			return (SADB_X_DIAGNOSTIC_ADDTIME_HSERR);

		if (soft->sadb_lifetime_addtime != 0 &&
		    idle->sadb_lifetime_addtime != 0 &&
		    soft->sadb_lifetime_addtime < idle->sadb_lifetime_addtime)
			return (SADB_X_DIAGNOSTIC_ADDTIME_HSERR);

		if (hard->sadb_lifetime_usetime != 0 &&
		    idle->sadb_lifetime_usetime != 0 &&
		    hard->sadb_lifetime_usetime < idle->sadb_lifetime_usetime)
			return (SADB_X_DIAGNOSTIC_USETIME_HSERR);

		if (soft->sadb_lifetime_usetime != 0 &&
		    idle->sadb_lifetime_usetime != 0 &&
		    soft->sadb_lifetime_usetime < idle->sadb_lifetime_usetime)
			return (SADB_X_DIAGNOSTIC_USETIME_HSERR);
	}

	return (0);
}

/*
 * Sanity check sensitivity labels.
 *
 * For now, just reject labels on unlabeled systems.
 */
int
sadb_labelchk(keysock_in_t *ksi)
{
	if (!is_system_labeled()) {
		if (ksi->ks_in_extv[SADB_EXT_SENSITIVITY] != NULL)
			return (SADB_X_DIAGNOSTIC_BAD_LABEL);

		if (ksi->ks_in_extv[SADB_X_EXT_OUTER_SENS] != NULL)
			return (SADB_X_DIAGNOSTIC_BAD_LABEL);
	}

	return (0);
}

/*
 * Clone a security association for the purposes of inserting a single SA
 * into inbound and outbound tables respectively.  This function should only
 * be called from sadb_common_add().
 */
static ipsa_t *
sadb_cloneassoc(ipsa_t *ipsa)
{
	ipsa_t *newbie;
	boolean_t error = B_FALSE;

	ASSERT(MUTEX_NOT_HELD(&(ipsa->ipsa_lock)));

	newbie = kmem_alloc(sizeof (ipsa_t), KM_NOSLEEP);
	if (newbie == NULL)
		return (NULL);

	/* Copy over what we can. */
	*newbie = *ipsa;

	/* bzero and initialize locks, in case *_init() allocates... */
	mutex_init(&newbie->ipsa_lock, NULL, MUTEX_DEFAULT, NULL);

	if (newbie->ipsa_tsl != NULL)
		label_hold(newbie->ipsa_tsl);

	if (newbie->ipsa_otsl != NULL)
		label_hold(newbie->ipsa_otsl);

	/*
	 * While somewhat dain-bramaged, the most graceful way to
	 * recover from errors is to keep plowing through the
	 * allocations, and getting what I can.  It's easier to call
	 * sadb_freeassoc() on the stillborn clone when all the
	 * pointers aren't pointing to the parent's data.
	 */

	if (ipsa->ipsa_authkey != NULL) {
		newbie->ipsa_authkey = kmem_alloc(newbie->ipsa_authkeylen,
		    KM_NOSLEEP);
		if (newbie->ipsa_authkey == NULL) {
			error = B_TRUE;
		} else {
			bcopy(ipsa->ipsa_authkey, newbie->ipsa_authkey,
			    newbie->ipsa_authkeylen);

			newbie->ipsa_kcfauthkey.ck_data =
			    newbie->ipsa_authkey;
		}

		if (newbie->ipsa_amech.cm_param != NULL) {
			newbie->ipsa_amech.cm_param =
			    (char *)&newbie->ipsa_mac_len;
		}
	}

	if (ipsa->ipsa_encrkey != NULL) {
		newbie->ipsa_encrkey = kmem_alloc(newbie->ipsa_encrkeylen,
		    KM_NOSLEEP);
		if (newbie->ipsa_encrkey == NULL) {
			error = B_TRUE;
		} else {
			bcopy(ipsa->ipsa_encrkey, newbie->ipsa_encrkey,
			    newbie->ipsa_encrkeylen);

			newbie->ipsa_kcfencrkey.ck_data =
			    newbie->ipsa_encrkey;
		}
	}

	newbie->ipsa_authtmpl = NULL;
	newbie->ipsa_encrtmpl = NULL;
	newbie->ipsa_haspeer = B_TRUE;

	if (ipsa->ipsa_src_cid != NULL) {
		newbie->ipsa_src_cid = ipsa->ipsa_src_cid;
		IPSID_REFHOLD(ipsa->ipsa_src_cid);
	}

	if (ipsa->ipsa_dst_cid != NULL) {
		newbie->ipsa_dst_cid = ipsa->ipsa_dst_cid;
		IPSID_REFHOLD(ipsa->ipsa_dst_cid);
	}

	if (error) {
		sadb_freeassoc(newbie);
		return (NULL);
	}

	return (newbie);
}

/*
 * Initialize a SADB address extension at the address specified by addrext.
 * Return a pointer to the end of the new address extension.
 */
static uint8_t *
sadb_make_addr_ext(uint8_t *start, uint8_t *end, uint16_t exttype,
    sa_family_t af, uint32_t *addr, uint16_t port, uint8_t proto, int prefix)
{
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	uint8_t *cur = start;
	int addrext_len;
	int sin_len;
	sadb_address_t *addrext = (sadb_address_t *)cur;

	if (cur == NULL)
		return (NULL);

	cur += sizeof (*addrext);
	if (cur > end)
		return (NULL);

	addrext->sadb_address_proto = proto;
	addrext->sadb_address_prefixlen = prefix;
	addrext->sadb_address_reserved = 0;
	addrext->sadb_address_exttype = exttype;

	switch (af) {
	case AF_INET:
		sin = (struct sockaddr_in *)cur;
		sin_len = sizeof (*sin);
		cur += sin_len;
		if (cur > end)
			return (NULL);

		sin->sin_family = af;
		bzero(sin->sin_zero, sizeof (sin->sin_zero));
		sin->sin_port = port;
		IPSA_COPY_ADDR(&sin->sin_addr, addr, af);
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)cur;
		sin_len = sizeof (*sin6);
		cur += sin_len;
		if (cur > end)
			return (NULL);

		bzero(sin6, sizeof (*sin6));
		sin6->sin6_family = af;
		sin6->sin6_port = port;
		IPSA_COPY_ADDR(&sin6->sin6_addr, addr, af);
		break;
	}

	addrext_len = roundup(cur - start, sizeof (uint64_t));
	addrext->sadb_address_len = SADB_8TO64(addrext_len);

	cur = start + addrext_len;
	if (cur > end)
		cur = NULL;

	return (cur);
}

/*
 * Construct a key management cookie extension.
 */

static uint8_t *
sadb_make_kmc_ext(uint8_t *cur, uint8_t *end, uint32_t kmp, uint64_t kmc)
{
	sadb_x_kmc_t *kmcext = (sadb_x_kmc_t *)cur;

	if (cur == NULL)
		return (NULL);

	cur += sizeof (*kmcext);

	if (cur > end)
		return (NULL);

	kmcext->sadb_x_kmc_len = SADB_8TO64(sizeof (*kmcext));
	kmcext->sadb_x_kmc_exttype = SADB_X_EXT_KM_COOKIE;
	kmcext->sadb_x_kmc_proto = kmp;
	kmcext->sadb_x_kmc_cookie64 = kmc;

	return (cur);
}

/*
 * Given an original message header with sufficient space following it, and an
 * SA, construct a full PF_KEY message with all of the relevant extensions.
 * This is mostly used for SADB_GET, and SADB_DUMP.
 */
static mblk_t *
sadb_sa2msg(ipsa_t *ipsa, sadb_msg_t *samsg)
{
	int alloclen, addrsize, paddrsize, authsize, encrsize;
	int srcidsize, dstidsize, senslen, osenslen;
	sa_family_t fam, pfam;	/* Address family for SADB_EXT_ADDRESS */
				/* src/dst and proxy sockaddrs. */
	/*
	 * The following are pointers into the PF_KEY message this PF_KEY
	 * message creates.
	 */
	sadb_msg_t *newsamsg;
	sadb_sa_t *assoc;
	sadb_lifetime_t *lt;
	sadb_key_t *key;
	sadb_ident_t *ident;
	sadb_sens_t *sens;
	sadb_ext_t *walker;	/* For when we need a generic ext. pointer. */
	sadb_x_replay_ctr_t *repl_ctr;
	sadb_x_pair_t *pair_ext;

	mblk_t *mp;
	uint8_t *cur, *end;
	/* These indicate the presence of the above extension fields. */
	boolean_t soft = B_FALSE, hard = B_FALSE;
	boolean_t isrc = B_FALSE, idst = B_FALSE;
	boolean_t auth = B_FALSE, encr = B_FALSE;
	boolean_t sensinteg = B_FALSE, osensinteg = B_FALSE;
	boolean_t srcid = B_FALSE, dstid = B_FALSE;
	boolean_t idle;
	boolean_t paired;
	uint32_t otherspi;

	/* First off, figure out the allocation length for this message. */
	/*
	 * Constant stuff.  This includes base, SA, address (src, dst),
	 * and lifetime (current).
	 */
	alloclen = sizeof (sadb_msg_t) + sizeof (sadb_sa_t) +
	    sizeof (sadb_lifetime_t);

	fam = ipsa->ipsa_addrfam;
	switch (fam) {
	case AF_INET:
		addrsize = roundup(sizeof (struct sockaddr_in) +
		    sizeof (sadb_address_t), sizeof (uint64_t));
		break;
	case AF_INET6:
		addrsize = roundup(sizeof (struct sockaddr_in6) +
		    sizeof (sadb_address_t), sizeof (uint64_t));
		break;
	default:
		return (NULL);
	}
	/*
	 * Allocate TWO address extensions, for source and destination.
	 * (Thus, the * 2.)
	 */
	alloclen += addrsize * 2;
	if (ipsa->ipsa_flags & IPSA_F_NATT_REM)
		alloclen += addrsize;
	if (ipsa->ipsa_flags & IPSA_F_NATT_LOC)
		alloclen += addrsize;

	if (ipsa->ipsa_flags & IPSA_F_PAIRED) {
		paired = B_TRUE;
		alloclen += sizeof (sadb_x_pair_t);
		otherspi = ipsa->ipsa_otherspi;
	} else {
		paired = B_FALSE;
	}

	/* How 'bout other lifetimes? */
	if (ipsa->ipsa_softaddlt != 0 || ipsa->ipsa_softuselt != 0 ||
	    ipsa->ipsa_softbyteslt != 0 || ipsa->ipsa_softalloc != 0) {
		alloclen += sizeof (sadb_lifetime_t);
		soft = B_TRUE;
	}

	if (ipsa->ipsa_hardaddlt != 0 || ipsa->ipsa_harduselt != 0 ||
	    ipsa->ipsa_hardbyteslt != 0 || ipsa->ipsa_hardalloc != 0) {
		alloclen += sizeof (sadb_lifetime_t);
		hard = B_TRUE;
	}

	if (ipsa->ipsa_idleaddlt != 0 || ipsa->ipsa_idleuselt != 0) {
		alloclen += sizeof (sadb_lifetime_t);
		idle = B_TRUE;
	} else {
		idle = B_FALSE;
	}

	/* Inner addresses. */
	if (ipsa->ipsa_innerfam != 0) {
		pfam = ipsa->ipsa_innerfam;
		switch (pfam) {
		case AF_INET6:
			paddrsize = roundup(sizeof (struct sockaddr_in6) +
			    sizeof (sadb_address_t), sizeof (uint64_t));
			break;
		case AF_INET:
			paddrsize = roundup(sizeof (struct sockaddr_in) +
			    sizeof (sadb_address_t), sizeof (uint64_t));
			break;
		default:
			cmn_err(CE_PANIC,
			    "IPsec SADB: Proxy length failure.\n");
			break;
		}
		isrc = B_TRUE;
		idst = B_TRUE;
		alloclen += 2 * paddrsize;
	}

	/* For the following fields, assume that length != 0 ==> stuff */
	if (ipsa->ipsa_authkeylen != 0) {
		authsize = roundup(sizeof (sadb_key_t) + ipsa->ipsa_authkeylen,
		    sizeof (uint64_t));
		alloclen += authsize;
		auth = B_TRUE;
	}

	if (ipsa->ipsa_encrkeylen != 0) {
		encrsize = roundup(sizeof (sadb_key_t) + ipsa->ipsa_encrkeylen +
		    ipsa->ipsa_nonce_len, sizeof (uint64_t));
		alloclen += encrsize;
		encr = B_TRUE;
	} else {
		encr = B_FALSE;
	}

	if (ipsa->ipsa_tsl != NULL) {
		senslen = sadb_sens_len_from_label(ipsa->ipsa_tsl);
		alloclen += senslen;
		sensinteg = B_TRUE;
	}

	if (ipsa->ipsa_otsl != NULL) {
		osenslen = sadb_sens_len_from_label(ipsa->ipsa_otsl);
		alloclen += osenslen;
		osensinteg = B_TRUE;
	}

	/*
	 * Must use strlen() here for lengths.  Identities use NULL
	 * pointers to indicate their nonexistence.
	 */
	if (ipsa->ipsa_src_cid != NULL) {
		srcidsize = roundup(sizeof (sadb_ident_t) +
		    strlen(ipsa->ipsa_src_cid->ipsid_cid) + 1,
		    sizeof (uint64_t));
		alloclen += srcidsize;
		srcid = B_TRUE;
	}

	if (ipsa->ipsa_dst_cid != NULL) {
		dstidsize = roundup(sizeof (sadb_ident_t) +
		    strlen(ipsa->ipsa_dst_cid->ipsid_cid) + 1,
		    sizeof (uint64_t));
		alloclen += dstidsize;
		dstid = B_TRUE;
	}

	if ((ipsa->ipsa_kmp != 0) || (ipsa->ipsa_kmc != 0))
		alloclen += sizeof (sadb_x_kmc_t);

	if (ipsa->ipsa_replay != 0) {
		alloclen += sizeof (sadb_x_replay_ctr_t);
	}

	/* Make sure the allocation length is a multiple of 8 bytes. */
	ASSERT((alloclen & 0x7) == 0);

	/* XXX Possibly make it esballoc, with a bzero-ing free_ftn. */
	mp = allocb(alloclen, BPRI_HI);
	if (mp == NULL)
		return (NULL);
	bzero(mp->b_rptr, alloclen);

	mp->b_wptr += alloclen;
	end = mp->b_wptr;
	newsamsg = (sadb_msg_t *)mp->b_rptr;
	*newsamsg = *samsg;
	newsamsg->sadb_msg_len = (uint16_t)SADB_8TO64(alloclen);

	mutex_enter(&ipsa->ipsa_lock);	/* Since I'm grabbing SA fields... */

	newsamsg->sadb_msg_satype = ipsa->ipsa_type;

	assoc = (sadb_sa_t *)(newsamsg + 1);
	assoc->sadb_sa_len = SADB_8TO64(sizeof (*assoc));
	assoc->sadb_sa_exttype = SADB_EXT_SA;
	assoc->sadb_sa_spi = ipsa->ipsa_spi;
	assoc->sadb_sa_replay = ipsa->ipsa_replay_wsize;
	assoc->sadb_sa_state = ipsa->ipsa_state;
	assoc->sadb_sa_auth = ipsa->ipsa_auth_alg;
	assoc->sadb_sa_encrypt = ipsa->ipsa_encr_alg;
	assoc->sadb_sa_flags = ipsa->ipsa_flags;

	lt = (sadb_lifetime_t *)(assoc + 1);
	lt->sadb_lifetime_len = SADB_8TO64(sizeof (*lt));
	lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
	/* We do not support the concept. */
	lt->sadb_lifetime_allocations = 0;
	lt->sadb_lifetime_bytes = ipsa->ipsa_bytes;
	lt->sadb_lifetime_addtime = ipsa->ipsa_addtime;
	lt->sadb_lifetime_usetime = ipsa->ipsa_usetime;

	if (hard) {
		lt++;
		lt->sadb_lifetime_len = SADB_8TO64(sizeof (*lt));
		lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD;
		lt->sadb_lifetime_allocations = ipsa->ipsa_hardalloc;
		lt->sadb_lifetime_bytes = ipsa->ipsa_hardbyteslt;
		lt->sadb_lifetime_addtime = ipsa->ipsa_hardaddlt;
		lt->sadb_lifetime_usetime = ipsa->ipsa_harduselt;
	}

	if (soft) {
		lt++;
		lt->sadb_lifetime_len = SADB_8TO64(sizeof (*lt));
		lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT;
		lt->sadb_lifetime_allocations = ipsa->ipsa_softalloc;
		lt->sadb_lifetime_bytes = ipsa->ipsa_softbyteslt;
		lt->sadb_lifetime_addtime = ipsa->ipsa_softaddlt;
		lt->sadb_lifetime_usetime = ipsa->ipsa_softuselt;
	}

	if (idle) {
		lt++;
		lt->sadb_lifetime_len = SADB_8TO64(sizeof (*lt));
		lt->sadb_lifetime_exttype = SADB_X_EXT_LIFETIME_IDLE;
		lt->sadb_lifetime_addtime = ipsa->ipsa_idleaddlt;
		lt->sadb_lifetime_usetime = ipsa->ipsa_idleuselt;
	}

	cur = (uint8_t *)(lt + 1);

	/* NOTE:  Don't fill in ports here if we are a tunnel-mode SA. */
	cur = sadb_make_addr_ext(cur, end, SADB_EXT_ADDRESS_SRC, fam,
	    ipsa->ipsa_srcaddr, (!isrc && !idst) ? SA_SRCPORT(ipsa) : 0,
	    SA_PROTO(ipsa), 0);
	if (cur == NULL) {
		freemsg(mp);
		mp = NULL;
		goto bail;
	}

	cur = sadb_make_addr_ext(cur, end, SADB_EXT_ADDRESS_DST, fam,
	    ipsa->ipsa_dstaddr, (!isrc && !idst) ? SA_DSTPORT(ipsa) : 0,
	    SA_PROTO(ipsa), 0);
	if (cur == NULL) {
		freemsg(mp);
		mp = NULL;
		goto bail;
	}

	if (ipsa->ipsa_flags & IPSA_F_NATT_LOC) {
		cur = sadb_make_addr_ext(cur, end, SADB_X_EXT_ADDRESS_NATT_LOC,
		    fam, &ipsa->ipsa_natt_addr_loc, ipsa->ipsa_local_nat_port,
		    IPPROTO_UDP, 0);
		if (cur == NULL) {
			freemsg(mp);
			mp = NULL;
			goto bail;
		}
	}

	if (ipsa->ipsa_flags & IPSA_F_NATT_REM) {
		cur = sadb_make_addr_ext(cur, end, SADB_X_EXT_ADDRESS_NATT_REM,
		    fam, &ipsa->ipsa_natt_addr_rem, ipsa->ipsa_remote_nat_port,
		    IPPROTO_UDP, 0);
		if (cur == NULL) {
			freemsg(mp);
			mp = NULL;
			goto bail;
		}
	}

	/* If we are a tunnel-mode SA, fill in the inner-selectors. */
	if (isrc) {
		cur = sadb_make_addr_ext(cur, end, SADB_X_EXT_ADDRESS_INNER_SRC,
		    pfam, ipsa->ipsa_innersrc, SA_SRCPORT(ipsa),
		    SA_IPROTO(ipsa), ipsa->ipsa_innersrcpfx);
		if (cur == NULL) {
			freemsg(mp);
			mp = NULL;
			goto bail;
		}
	}

	if (idst) {
		cur = sadb_make_addr_ext(cur, end, SADB_X_EXT_ADDRESS_INNER_DST,
		    pfam, ipsa->ipsa_innerdst, SA_DSTPORT(ipsa),
		    SA_IPROTO(ipsa), ipsa->ipsa_innerdstpfx);
		if (cur == NULL) {
			freemsg(mp);
			mp = NULL;
			goto bail;
		}
	}

	if ((ipsa->ipsa_kmp != 0) || (ipsa->ipsa_kmc != 0)) {
		cur = sadb_make_kmc_ext(cur, end,
		    ipsa->ipsa_kmp, ipsa->ipsa_kmc);
		if (cur == NULL) {
			freemsg(mp);
			mp = NULL;
			goto bail;
		}
	}

	walker = (sadb_ext_t *)cur;
	if (auth) {
		key = (sadb_key_t *)walker;
		key->sadb_key_len = SADB_8TO64(authsize);
		key->sadb_key_exttype = SADB_EXT_KEY_AUTH;
		key->sadb_key_bits = ipsa->ipsa_authkeybits;
		key->sadb_key_reserved = 0;
		bcopy(ipsa->ipsa_authkey, key + 1, ipsa->ipsa_authkeylen);
		walker = (sadb_ext_t *)((uint64_t *)walker +
		    walker->sadb_ext_len);
	}

	if (encr) {
		uint8_t *buf_ptr;
		key = (sadb_key_t *)walker;
		key->sadb_key_len = SADB_8TO64(encrsize);
		key->sadb_key_exttype = SADB_EXT_KEY_ENCRYPT;
		key->sadb_key_bits = ipsa->ipsa_encrkeybits;
		key->sadb_key_reserved = ipsa->ipsa_saltbits;
		buf_ptr = (uint8_t *)(key + 1);
		bcopy(ipsa->ipsa_encrkey, buf_ptr, ipsa->ipsa_encrkeylen);
		if (ipsa->ipsa_salt != NULL) {
			buf_ptr += ipsa->ipsa_encrkeylen;
			bcopy(ipsa->ipsa_salt, buf_ptr, ipsa->ipsa_saltlen);
		}
		walker = (sadb_ext_t *)((uint64_t *)walker +
		    walker->sadb_ext_len);
	}

	if (srcid) {
		ident = (sadb_ident_t *)walker;
		ident->sadb_ident_len = SADB_8TO64(srcidsize);
		ident->sadb_ident_exttype = SADB_EXT_IDENTITY_SRC;
		ident->sadb_ident_type = ipsa->ipsa_src_cid->ipsid_type;
		ident->sadb_ident_id = 0;
		ident->sadb_ident_reserved = 0;
		(void) strcpy((char *)(ident + 1),
		    ipsa->ipsa_src_cid->ipsid_cid);
		walker = (sadb_ext_t *)((uint64_t *)walker +
		    walker->sadb_ext_len);
	}

	if (dstid) {
		ident = (sadb_ident_t *)walker;
		ident->sadb_ident_len = SADB_8TO64(dstidsize);
		ident->sadb_ident_exttype = SADB_EXT_IDENTITY_DST;
		ident->sadb_ident_type = ipsa->ipsa_dst_cid->ipsid_type;
		ident->sadb_ident_id = 0;
		ident->sadb_ident_reserved = 0;
		(void) strcpy((char *)(ident + 1),
		    ipsa->ipsa_dst_cid->ipsid_cid);
		walker = (sadb_ext_t *)((uint64_t *)walker +
		    walker->sadb_ext_len);
	}

	if (sensinteg) {
		sens = (sadb_sens_t *)walker;
		sadb_sens_from_label(sens, SADB_EXT_SENSITIVITY,
		    ipsa->ipsa_tsl, senslen);

		walker = (sadb_ext_t *)((uint64_t *)walker +
		    walker->sadb_ext_len);
	}

	if (osensinteg) {
		sens = (sadb_sens_t *)walker;

		sadb_sens_from_label(sens, SADB_X_EXT_OUTER_SENS,
		    ipsa->ipsa_otsl, osenslen);
		if (ipsa->ipsa_mac_exempt)
			sens->sadb_x_sens_flags = SADB_X_SENS_IMPLICIT;

		walker = (sadb_ext_t *)((uint64_t *)walker +
		    walker->sadb_ext_len);
	}

	if (paired) {
		pair_ext = (sadb_x_pair_t *)walker;

		pair_ext->sadb_x_pair_len = SADB_8TO64(sizeof (sadb_x_pair_t));
		pair_ext->sadb_x_pair_exttype = SADB_X_EXT_PAIR;
		pair_ext->sadb_x_pair_spi = otherspi;

		walker = (sadb_ext_t *)((uint64_t *)walker +
		    walker->sadb_ext_len);
	}

	if (ipsa->ipsa_replay != 0) {
		repl_ctr = (sadb_x_replay_ctr_t *)walker;
		repl_ctr->sadb_x_rc_len = SADB_8TO64(sizeof (*repl_ctr));
		repl_ctr->sadb_x_rc_exttype = SADB_X_EXT_REPLAY_VALUE;
		repl_ctr->sadb_x_rc_replay32 = ipsa->ipsa_replay;
		repl_ctr->sadb_x_rc_replay64 = 0;
		walker = (sadb_ext_t *)(repl_ctr + 1);
	}

bail:
	/* Pardon any delays... */
	mutex_exit(&ipsa->ipsa_lock);

	return (mp);
}

/*
 * Strip out key headers or unmarked headers (SADB_EXT_KEY_*, SADB_EXT_UNKNOWN)
 * and adjust base message accordingly.
 *
 * Assume message is pulled up in one piece of contiguous memory.
 *
 * Say if we start off with:
 *
 * +------+----+-------------+-----------+---------------+---------------+
 * | base | SA | source addr | dest addr | rsrvd. or key | soft lifetime |
 * +------+----+-------------+-----------+---------------+---------------+
 *
 * we will end up with
 *
 * +------+----+-------------+-----------+---------------+
 * | base | SA | source addr | dest addr | soft lifetime |
 * +------+----+-------------+-----------+---------------+
 */
static void
sadb_strip(sadb_msg_t *samsg)
{
	sadb_ext_t *ext;
	uint8_t *target = NULL;
	uint8_t *msgend;
	int sofar = SADB_8TO64(sizeof (*samsg));
	int copylen;

	ext = (sadb_ext_t *)(samsg + 1);
	msgend = (uint8_t *)samsg;
	msgend += SADB_64TO8(samsg->sadb_msg_len);
	while ((uint8_t *)ext < msgend) {
		if (ext->sadb_ext_type == SADB_EXT_RESERVED ||
		    ext->sadb_ext_type == SADB_EXT_KEY_AUTH ||
		    ext->sadb_ext_type == SADB_X_EXT_EDUMP ||
		    ext->sadb_ext_type == SADB_EXT_KEY_ENCRYPT) {
			/*
			 * Aha!  I found a header to be erased.
			 */

			if (target != NULL) {
				/*
				 * If I had a previous header to be erased,
				 * copy over it.  I can get away with just
				 * copying backwards because the target will
				 * always be 8 bytes behind the source.
				 */
				copylen = ((uint8_t *)ext) - (target +
				    SADB_64TO8(
				    ((sadb_ext_t *)target)->sadb_ext_len));
				ovbcopy(((uint8_t *)ext - copylen), target,
				    copylen);
				target += copylen;
				((sadb_ext_t *)target)->sadb_ext_len =
				    SADB_8TO64(((uint8_t *)ext) - target +
				    SADB_64TO8(ext->sadb_ext_len));
			} else {
				target = (uint8_t *)ext;
			}
		} else {
			sofar += ext->sadb_ext_len;
		}

		ext = (sadb_ext_t *)(((uint64_t *)ext) + ext->sadb_ext_len);
	}

	ASSERT((uint8_t *)ext == msgend);

	if (target != NULL) {
		copylen = ((uint8_t *)ext) - (target +
		    SADB_64TO8(((sadb_ext_t *)target)->sadb_ext_len));
		if (copylen != 0)
			ovbcopy(((uint8_t *)ext - copylen), target, copylen);
	}

	/* Adjust samsg. */
	samsg->sadb_msg_len = (uint16_t)sofar;

	/* Assume all of the rest is cleared by caller in sadb_pfkey_echo(). */
}

/*
 * AH needs to send an error to PF_KEY.  Assume mp points to an M_CTL
 * followed by an M_DATA with a PF_KEY message in it.  The serial of
 * the sending keysock instance is included.
 */
void
sadb_pfkey_error(queue_t *pfkey_q, mblk_t *mp, int error, int diagnostic,
    uint_t serial)
{
	mblk_t *msg = mp->b_cont;
	sadb_msg_t *samsg;
	keysock_out_t *kso;

	/*
	 * Enough functions call this to merit a NULL queue check.
	 */
	if (pfkey_q == NULL) {
		freemsg(mp);
		return;
	}

	ASSERT(msg != NULL);
	ASSERT((mp->b_wptr - mp->b_rptr) == sizeof (ipsec_info_t));
	ASSERT((msg->b_wptr - msg->b_rptr) >= sizeof (sadb_msg_t));
	samsg = (sadb_msg_t *)msg->b_rptr;
	kso = (keysock_out_t *)mp->b_rptr;

	kso->ks_out_type = KEYSOCK_OUT;
	kso->ks_out_len = sizeof (*kso);
	kso->ks_out_serial = serial;

	/*
	 * Only send the base message up in the event of an error.
	 * Don't worry about bzero()-ing, because it was probably bogus
	 * anyway.
	 */
	msg->b_wptr = msg->b_rptr + sizeof (*samsg);
	samsg = (sadb_msg_t *)msg->b_rptr;
	samsg->sadb_msg_len = SADB_8TO64(sizeof (*samsg));
	samsg->sadb_msg_errno = (uint8_t)error;
	if (diagnostic != SADB_X_DIAGNOSTIC_PRESET)
		samsg->sadb_x_msg_diagnostic = (uint16_t)diagnostic;

	putnext(pfkey_q, mp);
}

/*
 * Send a successful return packet back to keysock via the queue in pfkey_q.
 *
 * Often, an SA is associated with the reply message; it's passed in if needed,
 * and NULL if not.  BTW, that ipsa will have its refcnt appropriately held,
 * and the caller will release said refcnt.
 */
void
sadb_pfkey_echo(queue_t *pfkey_q, mblk_t *mp, sadb_msg_t *samsg,
    keysock_in_t *ksi, ipsa_t *ipsa)
{
	keysock_out_t *kso;
	mblk_t *mp1;
	sadb_msg_t *newsamsg;
	uint8_t *oldend;

	ASSERT((mp->b_cont != NULL) &&
	    ((void *)samsg == (void *)mp->b_cont->b_rptr) &&
	    ((void *)mp->b_rptr == (void *)ksi));

	switch (samsg->sadb_msg_type) {
	case SADB_ADD:
	case SADB_UPDATE:
	case SADB_X_UPDATEPAIR:
	case SADB_X_DELPAIR_STATE:
	case SADB_FLUSH:
	case SADB_DUMP:
		/*
		 * I have all of the message already.  I just need to strip
		 * out the keying material and echo the message back.
		 *
		 * NOTE: for SADB_DUMP, the function sadb_dump() did the
		 * work.  When DUMP reaches here, it should only be a base
		 * message.
		 */
	justecho:
		if (ksi->ks_in_extv[SADB_EXT_KEY_AUTH] != NULL ||
		    ksi->ks_in_extv[SADB_EXT_KEY_ENCRYPT] != NULL ||
		    ksi->ks_in_extv[SADB_X_EXT_EDUMP] != NULL) {
			sadb_strip(samsg);
			/* Assume PF_KEY message is contiguous. */
			ASSERT(mp->b_cont->b_cont == NULL);
			oldend = mp->b_cont->b_wptr;
			mp->b_cont->b_wptr = mp->b_cont->b_rptr +
			    SADB_64TO8(samsg->sadb_msg_len);
			bzero(mp->b_cont->b_wptr, oldend - mp->b_cont->b_wptr);
		}
		break;
	case SADB_GET:
		/*
		 * Do a lot of work here, because of the ipsa I just found.
		 * First construct the new PF_KEY message, then abandon
		 * the old one.
		 */
		mp1 = sadb_sa2msg(ipsa, samsg);
		if (mp1 == NULL) {
			sadb_pfkey_error(pfkey_q, mp, ENOMEM,
			    SADB_X_DIAGNOSTIC_NONE, ksi->ks_in_serial);
			return;
		}
		freemsg(mp->b_cont);
		mp->b_cont = mp1;
		break;
	case SADB_DELETE:
	case SADB_X_DELPAIR:
		if (ipsa == NULL)
			goto justecho;
		/*
		 * Because listening KMds may require more info, treat
		 * DELETE like a special case of GET.
		 */
		mp1 = sadb_sa2msg(ipsa, samsg);
		if (mp1 == NULL) {
			sadb_pfkey_error(pfkey_q, mp, ENOMEM,
			    SADB_X_DIAGNOSTIC_NONE, ksi->ks_in_serial);
			return;
		}
		newsamsg = (sadb_msg_t *)mp1->b_rptr;
		sadb_strip(newsamsg);
		oldend = mp1->b_wptr;
		mp1->b_wptr = mp1->b_rptr + SADB_64TO8(newsamsg->sadb_msg_len);
		bzero(mp1->b_wptr, oldend - mp1->b_wptr);
		freemsg(mp->b_cont);
		mp->b_cont = mp1;
		break;
	default:
		if (mp != NULL)
			freemsg(mp);
		return;
	}

	/* ksi is now null and void. */
	kso = (keysock_out_t *)ksi;
	kso->ks_out_type = KEYSOCK_OUT;
	kso->ks_out_len = sizeof (*kso);
	kso->ks_out_serial = ksi->ks_in_serial;
	/* We're ready to send... */
	putnext(pfkey_q, mp);
}

/*
 * Set up a global pfkey_q instance for AH, ESP, or some other consumer.
 */
void
sadb_keysock_hello(queue_t **pfkey_qp, queue_t *q, mblk_t *mp,
    void (*ager)(void *), void *agerarg, timeout_id_t *top, int satype)
{
	keysock_hello_ack_t *kha;
	queue_t *oldq;

	ASSERT(OTHERQ(q) != NULL);

	/*
	 * First, check atomically that I'm the first and only keysock
	 * instance.
	 *
	 * Use OTHERQ(q), because qreply(q, mp) == putnext(OTHERQ(q), mp),
	 * and I want this module to say putnext(*_pfkey_q, mp) for PF_KEY
	 * messages.
	 */

	oldq = atomic_cas_ptr((void **)pfkey_qp, NULL, OTHERQ(q));
	if (oldq != NULL) {
		ASSERT(oldq != q);
		cmn_err(CE_WARN, "Danger!  Multiple keysocks on top of %s.\n",
		    (satype == SADB_SATYPE_ESP)? "ESP" : "AH or other");
		freemsg(mp);
		return;
	}

	kha = (keysock_hello_ack_t *)mp->b_rptr;
	kha->ks_hello_len = sizeof (keysock_hello_ack_t);
	kha->ks_hello_type = KEYSOCK_HELLO_ACK;
	kha->ks_hello_satype = (uint8_t)satype;

	/*
	 * If we made it past the atomic_cas_ptr, then we have "exclusive"
	 * access to the timeout handle.  Fire it off after the default ager
	 * interval.
	 */
	*top = qtimeout(*pfkey_qp, ager, agerarg,
	    drv_usectohz(SADB_AGE_INTERVAL_DEFAULT * 1000));

	putnext(*pfkey_qp, mp);
}

/*
 * Normalize IPv4-mapped IPv6 addresses (and prefixes) as appropriate.
 *
 * Check addresses themselves for wildcard or multicast.
 * Check ire table for local/non-local/broadcast.
 */
int
sadb_addrcheck(queue_t *pfkey_q, mblk_t *mp, sadb_ext_t *ext, uint_t serial,
    netstack_t *ns)
{
	sadb_address_t *addr = (sadb_address_t *)ext;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	int diagnostic, type;
	boolean_t normalized = B_FALSE;

	ASSERT(ext != NULL);
	ASSERT((ext->sadb_ext_type == SADB_EXT_ADDRESS_SRC) ||
	    (ext->sadb_ext_type == SADB_EXT_ADDRESS_DST) ||
	    (ext->sadb_ext_type == SADB_X_EXT_ADDRESS_INNER_SRC) ||
	    (ext->sadb_ext_type == SADB_X_EXT_ADDRESS_INNER_DST) ||
	    (ext->sadb_ext_type == SADB_X_EXT_ADDRESS_NATT_LOC) ||
	    (ext->sadb_ext_type == SADB_X_EXT_ADDRESS_NATT_REM));

	/* Assign both sockaddrs, the compiler will do the right thing. */
	sin = (struct sockaddr_in *)(addr + 1);
	sin6 = (struct sockaddr_in6 *)(addr + 1);

	if (sin6->sin6_family == AF_INET6) {
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			/*
			 * Convert to an AF_INET sockaddr.  This means the
			 * return messages will have the extra space, but have
			 * AF_INET sockaddrs instead of AF_INET6.
			 *
			 * Yes, RFC 2367 isn't clear on what to do here w.r.t.
			 * mapped addresses, but since AF_INET6 ::ffff:<v4> is
			 * equal to AF_INET <v4>, it shouldn't be a huge
			 * problem.
			 */
			sin->sin_family = AF_INET;
			IN6_V4MAPPED_TO_INADDR(&sin6->sin6_addr,
			    &sin->sin_addr);
			bzero(&sin->sin_zero, sizeof (sin->sin_zero));
			normalized = B_TRUE;
		}
	} else if (sin->sin_family != AF_INET) {
		switch (ext->sadb_ext_type) {
		case SADB_EXT_ADDRESS_SRC:
			diagnostic = SADB_X_DIAGNOSTIC_BAD_SRC_AF;
			break;
		case SADB_EXT_ADDRESS_DST:
			diagnostic = SADB_X_DIAGNOSTIC_BAD_DST_AF;
			break;
		case SADB_X_EXT_ADDRESS_INNER_SRC:
			diagnostic = SADB_X_DIAGNOSTIC_BAD_PROXY_AF;
			break;
		case SADB_X_EXT_ADDRESS_INNER_DST:
			diagnostic = SADB_X_DIAGNOSTIC_BAD_INNER_DST_AF;
			break;
		case SADB_X_EXT_ADDRESS_NATT_LOC:
			diagnostic = SADB_X_DIAGNOSTIC_BAD_NATT_LOC_AF;
			break;
		case SADB_X_EXT_ADDRESS_NATT_REM:
			diagnostic = SADB_X_DIAGNOSTIC_BAD_NATT_REM_AF;
			break;
			/* There is no default, see above ASSERT. */
		}
bail:
		if (pfkey_q != NULL) {
			sadb_pfkey_error(pfkey_q, mp, EINVAL, diagnostic,
			    serial);
		} else {
			/*
			 * Scribble in sadb_msg that we got passed in.
			 * Overload "mp" to be an sadb_msg pointer.
			 */
			sadb_msg_t *samsg = (sadb_msg_t *)mp;

			samsg->sadb_msg_errno = EINVAL;
			samsg->sadb_x_msg_diagnostic = diagnostic;
		}
		return (KS_IN_ADDR_UNKNOWN);
	}

	if (ext->sadb_ext_type == SADB_X_EXT_ADDRESS_INNER_SRC ||
	    ext->sadb_ext_type == SADB_X_EXT_ADDRESS_INNER_DST) {
		/*
		 * We need only check for prefix issues.
		 */

		/* Set diagnostic now, in case we need it later. */
		diagnostic =
		    (ext->sadb_ext_type == SADB_X_EXT_ADDRESS_INNER_SRC) ?
		    SADB_X_DIAGNOSTIC_PREFIX_INNER_SRC :
		    SADB_X_DIAGNOSTIC_PREFIX_INNER_DST;

		if (normalized)
			addr->sadb_address_prefixlen -= 96;

		/*
		 * Verify and mask out inner-addresses based on prefix length.
		 */
		if (sin->sin_family == AF_INET) {
			if (addr->sadb_address_prefixlen > 32)
				goto bail;
			sin->sin_addr.s_addr &=
			    ip_plen_to_mask(addr->sadb_address_prefixlen);
		} else {
			in6_addr_t mask;

			ASSERT(sin->sin_family == AF_INET6);
			/*
			 * ip_plen_to_mask_v6() returns NULL if the value in
			 * question is out of range.
			 */
			if (ip_plen_to_mask_v6(addr->sadb_address_prefixlen,
			    &mask) == NULL)
				goto bail;
			sin6->sin6_addr.s6_addr32[0] &= mask.s6_addr32[0];
			sin6->sin6_addr.s6_addr32[1] &= mask.s6_addr32[1];
			sin6->sin6_addr.s6_addr32[2] &= mask.s6_addr32[2];
			sin6->sin6_addr.s6_addr32[3] &= mask.s6_addr32[3];
		}

		/* We don't care in these cases. */
		return (KS_IN_ADDR_DONTCARE);
	}

	if (sin->sin_family == AF_INET6) {
		/* Check the easy ones now. */
		if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
			return (KS_IN_ADDR_MBCAST);
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
			return (KS_IN_ADDR_UNSPEC);
		/*
		 * At this point, we're a unicast IPv6 address.
		 *
		 * XXX Zones alert -> me/notme decision needs to be tempered
		 * by what zone we're in when we go to zone-aware IPsec.
		 */
		if (ip_type_v6(&sin6->sin6_addr, ns->netstack_ip) ==
		    IRE_LOCAL) {
			/* Hey hey, it's local. */
*/ 1902 return (KS_IN_ADDR_ME); 1903 } 1904 } else { 1905 ASSERT(sin->sin_family == AF_INET); 1906 if (sin->sin_addr.s_addr == INADDR_ANY) 1907 return (KS_IN_ADDR_UNSPEC); 1908 if (CLASSD(sin->sin_addr.s_addr)) 1909 return (KS_IN_ADDR_MBCAST); 1910 /* 1911 * At this point we're a unicast or broadcast IPv4 address. 1912 * 1913 * Check if the address is IRE_BROADCAST or IRE_LOCAL. 1914 * 1915 * XXX Zones alert -> me/notme decision needs to be tempered 1916 * by what zone we're in when we go to zone-aware IPsec. 1917 */ 1918 type = ip_type_v4(sin->sin_addr.s_addr, ns->netstack_ip); 1919 switch (type) { 1920 case IRE_LOCAL: 1921 return (KS_IN_ADDR_ME); 1922 case IRE_BROADCAST: 1923 return (KS_IN_ADDR_MBCAST); 1924 } 1925 } 1926 1927 return (KS_IN_ADDR_NOTME); 1928 } 1929 1930 /* 1931 * Address normalizations and reality checks for inbound PF_KEY messages. 1932 * 1933 * For the case of src == unspecified AF_INET6, and dst == AF_INET, convert 1934 * the source to AF_INET. Do the same for the inner sources. 1935 */ 1936 boolean_t 1937 sadb_addrfix(keysock_in_t *ksi, queue_t *pfkey_q, mblk_t *mp, netstack_t *ns) 1938 { 1939 struct sockaddr_in *src, *isrc; 1940 struct sockaddr_in6 *dst, *idst; 1941 sadb_address_t *srcext, *dstext; 1942 uint16_t sport; 1943 sadb_ext_t **extv = ksi->ks_in_extv; 1944 int rc; 1945 1946 if (extv[SADB_EXT_ADDRESS_SRC] != NULL) { 1947 rc = sadb_addrcheck(pfkey_q, mp, extv[SADB_EXT_ADDRESS_SRC], 1948 ksi->ks_in_serial, ns); 1949 if (rc == KS_IN_ADDR_UNKNOWN) 1950 return (B_FALSE); 1951 if (rc == KS_IN_ADDR_MBCAST) { 1952 sadb_pfkey_error(pfkey_q, mp, EINVAL, 1953 SADB_X_DIAGNOSTIC_BAD_SRC, ksi->ks_in_serial); 1954 return (B_FALSE); 1955 } 1956 ksi->ks_in_srctype = rc; 1957 } 1958 1959 if (extv[SADB_EXT_ADDRESS_DST] != NULL) { 1960 rc = sadb_addrcheck(pfkey_q, mp, extv[SADB_EXT_ADDRESS_DST], 1961 ksi->ks_in_serial, ns); 1962 if (rc == KS_IN_ADDR_UNKNOWN) 1963 return (B_FALSE); 1964 if (rc == KS_IN_ADDR_UNSPEC) { 1965 sadb_pfkey_error(pfkey_q, mp, EINVAL, 1966 SADB_X_DIAGNOSTIC_BAD_DST, ksi->ks_in_serial); 1967 return (B_FALSE); 1968 } 1969 ksi->ks_in_dsttype = rc; 1970 } 1971 1972 /* 1973 * NAT-Traversal addrs are simple enough to not require all of 1974 * the checks in sadb_addrcheck(). Just normalize or reject if not 1975 * AF_INET. 1976 */ 1977 if (extv[SADB_X_EXT_ADDRESS_NATT_LOC] != NULL) { 1978 rc = sadb_addrcheck(pfkey_q, mp, 1979 extv[SADB_X_EXT_ADDRESS_NATT_LOC], ksi->ks_in_serial, ns); 1980 1981 /* 1982 * Local NAT-T addresses never use an IRE_LOCAL, so it should 1983 * always be NOTME, or UNSPEC (to handle both tunnel mode 1984 * AND local-port flexibility). 1985 */ 1986 if (rc != KS_IN_ADDR_NOTME && rc != KS_IN_ADDR_UNSPEC) { 1987 sadb_pfkey_error(pfkey_q, mp, EINVAL, 1988 SADB_X_DIAGNOSTIC_MALFORMED_NATT_LOC, 1989 ksi->ks_in_serial); 1990 return (B_FALSE); 1991 } 1992 src = (struct sockaddr_in *) 1993 (((sadb_address_t *)extv[SADB_X_EXT_ADDRESS_NATT_LOC]) + 1); 1994 if (src->sin_family != AF_INET) { 1995 sadb_pfkey_error(pfkey_q, mp, EINVAL, 1996 SADB_X_DIAGNOSTIC_BAD_NATT_LOC_AF, 1997 ksi->ks_in_serial); 1998 return (B_FALSE); 1999 } 2000 } 2001 2002 if (extv[SADB_X_EXT_ADDRESS_NATT_REM] != NULL) { 2003 rc = sadb_addrcheck(pfkey_q, mp, 2004 extv[SADB_X_EXT_ADDRESS_NATT_REM], ksi->ks_in_serial, ns); 2005 2006 /* 2007 * Remote NAT-T addresses never use an IRE_LOCAL, so it should 2008 * always be NOTME, or UNSPEC if it's a tunnel-mode SA. 
2009 */ 2010 if (rc != KS_IN_ADDR_NOTME && 2011 !(extv[SADB_X_EXT_ADDRESS_INNER_SRC] != NULL && 2012 rc == KS_IN_ADDR_UNSPEC)) { 2013 sadb_pfkey_error(pfkey_q, mp, EINVAL, 2014 SADB_X_DIAGNOSTIC_MALFORMED_NATT_REM, 2015 ksi->ks_in_serial); 2016 return (B_FALSE); 2017 } 2018 src = (struct sockaddr_in *) 2019 (((sadb_address_t *)extv[SADB_X_EXT_ADDRESS_NATT_REM]) + 1); 2020 if (src->sin_family != AF_INET) { 2021 sadb_pfkey_error(pfkey_q, mp, EINVAL, 2022 SADB_X_DIAGNOSTIC_BAD_NATT_REM_AF, 2023 ksi->ks_in_serial); 2024 return (B_FALSE); 2025 } 2026 } 2027 2028 if (extv[SADB_X_EXT_ADDRESS_INNER_SRC] != NULL) { 2029 if (extv[SADB_X_EXT_ADDRESS_INNER_DST] == NULL) { 2030 sadb_pfkey_error(pfkey_q, mp, EINVAL, 2031 SADB_X_DIAGNOSTIC_MISSING_INNER_DST, 2032 ksi->ks_in_serial); 2033 return (B_FALSE); 2034 } 2035 2036 if (sadb_addrcheck(pfkey_q, mp, 2037 extv[SADB_X_EXT_ADDRESS_INNER_DST], ksi->ks_in_serial, ns) 2038 == KS_IN_ADDR_UNKNOWN || 2039 sadb_addrcheck(pfkey_q, mp, 2040 extv[SADB_X_EXT_ADDRESS_INNER_SRC], ksi->ks_in_serial, ns) 2041 == KS_IN_ADDR_UNKNOWN) 2042 return (B_FALSE); 2043 2044 isrc = (struct sockaddr_in *) 2045 (((sadb_address_t *)extv[SADB_X_EXT_ADDRESS_INNER_SRC]) + 2046 1); 2047 idst = (struct sockaddr_in6 *) 2048 (((sadb_address_t *)extv[SADB_X_EXT_ADDRESS_INNER_DST]) + 2049 1); 2050 if (isrc->sin_family != idst->sin6_family) { 2051 sadb_pfkey_error(pfkey_q, mp, EINVAL, 2052 SADB_X_DIAGNOSTIC_INNER_AF_MISMATCH, 2053 ksi->ks_in_serial); 2054 return (B_FALSE); 2055 } 2056 } else if (extv[SADB_X_EXT_ADDRESS_INNER_DST] != NULL) { 2057 sadb_pfkey_error(pfkey_q, mp, EINVAL, 2058 SADB_X_DIAGNOSTIC_MISSING_INNER_SRC, 2059 ksi->ks_in_serial); 2060 return (B_FALSE); 2061 } else { 2062 isrc = NULL; /* For inner/outer port check below. */ 2063 } 2064 2065 dstext = (sadb_address_t *)extv[SADB_EXT_ADDRESS_DST]; 2066 srcext = (sadb_address_t *)extv[SADB_EXT_ADDRESS_SRC]; 2067 2068 if (dstext == NULL || srcext == NULL) 2069 return (B_TRUE); 2070 2071 dst = (struct sockaddr_in6 *)(dstext + 1); 2072 src = (struct sockaddr_in *)(srcext + 1); 2073 2074 if (isrc != NULL && 2075 (isrc->sin_port != 0 || idst->sin6_port != 0) && 2076 (src->sin_port != 0 || dst->sin6_port != 0)) { 2077 /* Can't set inner and outer ports in one SA. */ 2078 sadb_pfkey_error(pfkey_q, mp, EINVAL, 2079 SADB_X_DIAGNOSTIC_DUAL_PORT_SETS, 2080 ksi->ks_in_serial); 2081 return (B_FALSE); 2082 } 2083 2084 if (dst->sin6_family == src->sin_family) 2085 return (B_TRUE); 2086 2087 if (srcext->sadb_address_proto != dstext->sadb_address_proto) { 2088 if (srcext->sadb_address_proto == 0) { 2089 srcext->sadb_address_proto = dstext->sadb_address_proto; 2090 } else if (dstext->sadb_address_proto == 0) { 2091 dstext->sadb_address_proto = srcext->sadb_address_proto; 2092 } else { 2093 /* Inequal protocols, neither were 0. Report error. */ 2094 sadb_pfkey_error(pfkey_q, mp, EINVAL, 2095 SADB_X_DIAGNOSTIC_PROTO_MISMATCH, 2096 ksi->ks_in_serial); 2097 return (B_FALSE); 2098 } 2099 } 2100 2101 /* 2102 * With the exception of an unspec IPv6 source and an IPv4 2103 * destination, address families MUST me matched. 2104 */ 2105 if (src->sin_family == AF_INET || 2106 ksi->ks_in_srctype != KS_IN_ADDR_UNSPEC) { 2107 sadb_pfkey_error(pfkey_q, mp, EINVAL, 2108 SADB_X_DIAGNOSTIC_AF_MISMATCH, ksi->ks_in_serial); 2109 return (B_FALSE); 2110 } 2111 2112 /* 2113 * Convert "src" to AF_INET INADDR_ANY. We rely on sin_port being 2114 * in the same place for sockaddr_in and sockaddr_in6. 
2115 */ 2116 sport = src->sin_port; 2117 bzero(src, sizeof (*src)); 2118 src->sin_family = AF_INET; 2119 src->sin_port = sport; 2120 2121 return (B_TRUE); 2122 } 2123 2124 /* 2125 * Set the results in "addrtype", given an IRE as requested by 2126 * sadb_addrcheck(). 2127 */ 2128 int 2129 sadb_addrset(ire_t *ire) 2130 { 2131 if ((ire->ire_type & IRE_BROADCAST) || 2132 (ire->ire_ipversion == IPV4_VERSION && CLASSD(ire->ire_addr)) || 2133 (ire->ire_ipversion == IPV6_VERSION && 2134 IN6_IS_ADDR_MULTICAST(&(ire->ire_addr_v6)))) 2135 return (KS_IN_ADDR_MBCAST); 2136 if (ire->ire_type & (IRE_LOCAL | IRE_LOOPBACK)) 2137 return (KS_IN_ADDR_ME); 2138 return (KS_IN_ADDR_NOTME); 2139 } 2140 2141 /* 2142 * Match primitives.. 2143 * !!! TODO: short term: inner selectors 2144 * ipv6 scope id (ifindex) 2145 * longer term: zone id. sensitivity label. uid. 2146 */ 2147 boolean_t 2148 sadb_match_spi(ipsa_query_t *sq, ipsa_t *sa) 2149 { 2150 return (sq->spi == sa->ipsa_spi); 2151 } 2152 2153 boolean_t 2154 sadb_match_dst_v6(ipsa_query_t *sq, ipsa_t *sa) 2155 { 2156 return (IPSA_ARE_ADDR_EQUAL(sa->ipsa_dstaddr, sq->dstaddr, AF_INET6)); 2157 } 2158 2159 boolean_t 2160 sadb_match_src_v6(ipsa_query_t *sq, ipsa_t *sa) 2161 { 2162 return (IPSA_ARE_ADDR_EQUAL(sa->ipsa_srcaddr, sq->srcaddr, AF_INET6)); 2163 } 2164 2165 boolean_t 2166 sadb_match_dst_v4(ipsa_query_t *sq, ipsa_t *sa) 2167 { 2168 return (sq->dstaddr[0] == sa->ipsa_dstaddr[0]); 2169 } 2170 2171 boolean_t 2172 sadb_match_src_v4(ipsa_query_t *sq, ipsa_t *sa) 2173 { 2174 return (sq->srcaddr[0] == sa->ipsa_srcaddr[0]); 2175 } 2176 2177 boolean_t 2178 sadb_match_dstid(ipsa_query_t *sq, ipsa_t *sa) 2179 { 2180 return ((sa->ipsa_dst_cid != NULL) && 2181 (sq->didtype == sa->ipsa_dst_cid->ipsid_type) && 2182 (strcmp(sq->didstr, sa->ipsa_dst_cid->ipsid_cid) == 0)); 2183 2184 } 2185 boolean_t 2186 sadb_match_srcid(ipsa_query_t *sq, ipsa_t *sa) 2187 { 2188 return ((sa->ipsa_src_cid != NULL) && 2189 (sq->sidtype == sa->ipsa_src_cid->ipsid_type) && 2190 (strcmp(sq->sidstr, sa->ipsa_src_cid->ipsid_cid) == 0)); 2191 } 2192 2193 boolean_t 2194 sadb_match_kmc(ipsa_query_t *sq, ipsa_t *sa) 2195 { 2196 #define M(a, b) (((a) == 0) || ((b) == 0) || ((a) == (b))) 2197 2198 return (M(sq->kmc, sa->ipsa_kmc) && M(sq->kmp, sa->ipsa_kmp)); 2199 2200 #undef M 2201 } 2202 2203 /* 2204 * Common function which extracts several PF_KEY extensions for ease of 2205 * SADB matching. 2206 * 2207 * XXX TODO: weed out ipsa_query_t fields not used during matching 2208 * or afterwards? 
2209 */ 2210 int 2211 sadb_form_query(keysock_in_t *ksi, uint32_t req, uint32_t match, 2212 ipsa_query_t *sq, int *diagnostic) 2213 { 2214 int i; 2215 ipsa_match_fn_t *mfpp = &(sq->matchers[0]); 2216 2217 for (i = 0; i < IPSA_NMATCH; i++) 2218 sq->matchers[i] = NULL; 2219 2220 ASSERT((req & ~match) == 0); 2221 2222 sq->req = req; 2223 sq->dstext = (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_DST]; 2224 sq->srcext = (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_SRC]; 2225 sq->assoc = (sadb_sa_t *)ksi->ks_in_extv[SADB_EXT_SA]; 2226 2227 if ((req & IPSA_Q_DST) && (sq->dstext == NULL)) { 2228 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_DST; 2229 return (EINVAL); 2230 } 2231 if ((req & IPSA_Q_SRC) && (sq->srcext == NULL)) { 2232 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_SRC; 2233 return (EINVAL); 2234 } 2235 if ((req & IPSA_Q_SA) && (sq->assoc == NULL)) { 2236 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_SA; 2237 return (EINVAL); 2238 } 2239 2240 if (match & IPSA_Q_SA) { 2241 *mfpp++ = sadb_match_spi; 2242 sq->spi = sq->assoc->sadb_sa_spi; 2243 } 2244 2245 if (sq->dstext != NULL) 2246 sq->dst = (struct sockaddr_in *)(sq->dstext + 1); 2247 else { 2248 sq->dst = NULL; 2249 sq->dst6 = NULL; 2250 sq->dstaddr = NULL; 2251 } 2252 2253 if (sq->srcext != NULL) 2254 sq->src = (struct sockaddr_in *)(sq->srcext + 1); 2255 else { 2256 sq->src = NULL; 2257 sq->src6 = NULL; 2258 sq->srcaddr = NULL; 2259 } 2260 2261 if (sq->dst != NULL) 2262 sq->af = sq->dst->sin_family; 2263 else if (sq->src != NULL) 2264 sq->af = sq->src->sin_family; 2265 else 2266 sq->af = AF_INET; 2267 2268 if (sq->af == AF_INET6) { 2269 if ((match & IPSA_Q_DST) && (sq->dstext != NULL)) { 2270 *mfpp++ = sadb_match_dst_v6; 2271 sq->dst6 = (struct sockaddr_in6 *)sq->dst; 2272 sq->dstaddr = (uint32_t *)&(sq->dst6->sin6_addr); 2273 } else { 2274 match &= ~IPSA_Q_DST; 2275 sq->dstaddr = ALL_ZEROES_PTR; 2276 } 2277 2278 if ((match & IPSA_Q_SRC) && (sq->srcext != NULL)) { 2279 sq->src6 = (struct sockaddr_in6 *)(sq->srcext + 1); 2280 sq->srcaddr = (uint32_t *)&sq->src6->sin6_addr; 2281 if (sq->src6->sin6_family != AF_INET6) { 2282 *diagnostic = SADB_X_DIAGNOSTIC_AF_MISMATCH; 2283 return (EINVAL); 2284 } 2285 *mfpp++ = sadb_match_src_v6; 2286 } else { 2287 match &= ~IPSA_Q_SRC; 2288 sq->srcaddr = ALL_ZEROES_PTR; 2289 } 2290 } else { 2291 sq->src6 = sq->dst6 = NULL; 2292 if ((match & IPSA_Q_DST) && (sq->dstext != NULL)) { 2293 *mfpp++ = sadb_match_dst_v4; 2294 sq->dstaddr = (uint32_t *)&sq->dst->sin_addr; 2295 } else { 2296 match &= ~IPSA_Q_DST; 2297 sq->dstaddr = ALL_ZEROES_PTR; 2298 } 2299 if ((match & IPSA_Q_SRC) && (sq->srcext != NULL)) { 2300 sq->srcaddr = (uint32_t *)&sq->src->sin_addr; 2301 if (sq->src->sin_family != AF_INET) { 2302 *diagnostic = SADB_X_DIAGNOSTIC_AF_MISMATCH; 2303 return (EINVAL); 2304 } 2305 *mfpp++ = sadb_match_src_v4; 2306 } else { 2307 match &= ~IPSA_Q_SRC; 2308 sq->srcaddr = ALL_ZEROES_PTR; 2309 } 2310 } 2311 2312 sq->dstid = (sadb_ident_t *)ksi->ks_in_extv[SADB_EXT_IDENTITY_DST]; 2313 if ((match & IPSA_Q_DSTID) && (sq->dstid != NULL)) { 2314 sq->didstr = (char *)(sq->dstid + 1); 2315 sq->didtype = sq->dstid->sadb_ident_type; 2316 *mfpp++ = sadb_match_dstid; 2317 } 2318 2319 sq->srcid = (sadb_ident_t *)ksi->ks_in_extv[SADB_EXT_IDENTITY_SRC]; 2320 2321 if ((match & IPSA_Q_SRCID) && (sq->srcid != NULL)) { 2322 sq->sidstr = (char *)(sq->srcid + 1); 2323 sq->sidtype = sq->srcid->sadb_ident_type; 2324 *mfpp++ = sadb_match_srcid; 2325 } 2326 2327 sq->kmcext = (sadb_x_kmc_t *)ksi->ks_in_extv[SADB_X_EXT_KM_COOKIE]; 2328 
sq->kmc = 0; 2329 sq->kmp = 0; 2330 2331 if ((match & IPSA_Q_KMC) && (sq->kmcext)) { 2332 sq->kmp = sq->kmcext->sadb_x_kmc_proto; 2333 /* 2334 * Be liberal in what we receive. Special-case the IKEv1 2335 * cookie, which closed-source in.iked assumes is 32 bits. 2336 * Now that we store all 64 bits, we should pre-zero the 2337 * reserved field on behalf of closed-source in.iked. 2338 */ 2339 if (sq->kmp == SADB_X_KMP_IKE) { 2340 /* Just in case in.iked is misbehaving... */ 2341 sq->kmcext->sadb_x_kmc_reserved = 0; 2342 } 2343 sq->kmc = sq->kmcext->sadb_x_kmc_cookie64; 2344 *mfpp++ = sadb_match_kmc; 2345 } 2346 2347 if (match & (IPSA_Q_INBOUND|IPSA_Q_OUTBOUND)) { 2348 if (sq->af == AF_INET6) 2349 sq->sp = &sq->spp->s_v6; 2350 else 2351 sq->sp = &sq->spp->s_v4; 2352 } else { 2353 sq->sp = NULL; 2354 } 2355 2356 if (match & IPSA_Q_INBOUND) { 2357 sq->inhash = INBOUND_HASH(sq->sp, sq->assoc->sadb_sa_spi); 2358 sq->inbound = &sq->sp->sdb_if[sq->inhash]; 2359 } else { 2360 sq->inhash = 0; 2361 sq->inbound = NULL; 2362 } 2363 2364 if (match & IPSA_Q_OUTBOUND) { 2365 if (sq->af == AF_INET6) { 2366 sq->outhash = OUTBOUND_HASH_V6(sq->sp, *(sq->dstaddr)); 2367 } else { 2368 sq->outhash = OUTBOUND_HASH_V4(sq->sp, *(sq->dstaddr)); 2369 } 2370 sq->outbound = &sq->sp->sdb_of[sq->outhash]; 2371 } else { 2372 sq->outhash = 0; 2373 sq->outbound = NULL; 2374 } 2375 sq->match = match; 2376 return (0); 2377 } 2378 2379 /* 2380 * Match an initialized query structure with a security association; 2381 * return B_TRUE on a match, B_FALSE on a miss. 2382 * Applies match functions set up by sadb_form_query() until one returns false. 2383 */ 2384 boolean_t 2385 sadb_match_query(ipsa_query_t *sq, ipsa_t *sa) 2386 { 2387 ipsa_match_fn_t *mfpp = &(sq->matchers[0]); 2388 ipsa_match_fn_t mfp; 2389 2390 for (mfp = *mfpp++; mfp != NULL; mfp = *mfpp++) { 2391 if (!mfp(sq, sa)) 2392 return (B_FALSE); 2393 } 2394 return (B_TRUE); 2395 } 2396 2397 /* 2398 * Walker callback function to delete sa's based on src/dst address. 2399 * Assumes that we're called with *head locked, no other locks held; 2400 * Conveniently, and not coincidentally, this is both what sadb_walker 2401 * gives us and also what sadb_unlinkassoc expects. 2402 */ 2403 struct sadb_purge_state 2404 { 2405 ipsa_query_t sq; 2406 boolean_t inbnd; 2407 uint8_t sadb_sa_state; 2408 }; 2409 2410 static void 2411 sadb_purge_cb(isaf_t *head, ipsa_t *entry, void *cookie) 2412 { 2413 struct sadb_purge_state *ps = (struct sadb_purge_state *)cookie; 2414 2415 ASSERT(MUTEX_HELD(&head->isaf_lock)); 2416 2417 mutex_enter(&entry->ipsa_lock); 2418 2419 if (entry->ipsa_state == IPSA_STATE_LARVAL || 2420 !sadb_match_query(&ps->sq, entry)) { 2421 mutex_exit(&entry->ipsa_lock); 2422 return; 2423 } 2424 2425 if (ps->inbnd) { 2426 sadb_delete_cluster(entry); 2427 } 2428 entry->ipsa_state = IPSA_STATE_DEAD; 2429 (void) sadb_torch_assoc(head, entry); 2430 } 2431 2432 /* 2433 * Common code to purge an SA with a matching src or dst address. 2434 * Don't kill larval SA's in such a purge. 2435 */ 2436 int 2437 sadb_purge_sa(mblk_t *mp, keysock_in_t *ksi, sadb_t *sp, 2438 int *diagnostic, queue_t *pfkey_q) 2439 { 2440 struct sadb_purge_state ps; 2441 int error = sadb_form_query(ksi, 0, 2442 IPSA_Q_SRC|IPSA_Q_DST|IPSA_Q_SRCID|IPSA_Q_DSTID|IPSA_Q_KMC, 2443 &ps.sq, diagnostic); 2444 2445 if (error != 0) 2446 return (error); 2447 2448 /* 2449 * This is simple, crude, and effective. 
2450 * Unimplemented optimizations (TBD): 2451 * - we can limit how many places we search based on where we 2452 * think the SA is filed. 2453 * - if we get a dst address, we can hash based on dst addr to find 2454 * the correct bucket in the outbound table. 2455 */ 2456 ps.inbnd = B_TRUE; 2457 sadb_walker(sp->sdb_if, sp->sdb_hashsize, sadb_purge_cb, &ps); 2458 ps.inbnd = B_FALSE; 2459 sadb_walker(sp->sdb_of, sp->sdb_hashsize, sadb_purge_cb, &ps); 2460 2461 ASSERT(mp->b_cont != NULL); 2462 sadb_pfkey_echo(pfkey_q, mp, (sadb_msg_t *)mp->b_cont->b_rptr, ksi, 2463 NULL); 2464 return (0); 2465 } 2466 2467 static void 2468 sadb_delpair_state_one(isaf_t *head, ipsa_t *entry, void *cookie) 2469 { 2470 struct sadb_purge_state *ps = (struct sadb_purge_state *)cookie; 2471 isaf_t *inbound_bucket; 2472 ipsa_t *peer_assoc; 2473 ipsa_query_t *sq = &ps->sq; 2474 2475 ASSERT(MUTEX_HELD(&head->isaf_lock)); 2476 2477 mutex_enter(&entry->ipsa_lock); 2478 2479 if ((entry->ipsa_state != ps->sadb_sa_state) || 2480 ((sq->srcaddr != NULL) && 2481 !IPSA_ARE_ADDR_EQUAL(entry->ipsa_srcaddr, sq->srcaddr, sq->af))) { 2482 mutex_exit(&entry->ipsa_lock); 2483 return; 2484 } 2485 2486 /* 2487 * The isaf_t *, which is passed in , is always an outbound bucket, 2488 * and we are preserving the outbound-then-inbound hash-bucket lock 2489 * ordering. The sadb_walker() which triggers this function is called 2490 * only on the outbound fanout, and the corresponding inbound bucket 2491 * lock is safe to acquire here. 2492 */ 2493 2494 if (entry->ipsa_haspeer) { 2495 inbound_bucket = INBOUND_BUCKET(sq->sp, entry->ipsa_spi); 2496 mutex_enter(&inbound_bucket->isaf_lock); 2497 peer_assoc = ipsec_getassocbyspi(inbound_bucket, 2498 entry->ipsa_spi, entry->ipsa_srcaddr, 2499 entry->ipsa_dstaddr, entry->ipsa_addrfam); 2500 } else { 2501 inbound_bucket = INBOUND_BUCKET(sq->sp, entry->ipsa_otherspi); 2502 mutex_enter(&inbound_bucket->isaf_lock); 2503 peer_assoc = ipsec_getassocbyspi(inbound_bucket, 2504 entry->ipsa_otherspi, entry->ipsa_dstaddr, 2505 entry->ipsa_srcaddr, entry->ipsa_addrfam); 2506 } 2507 2508 entry->ipsa_state = IPSA_STATE_DEAD; 2509 (void) sadb_torch_assoc(head, entry); 2510 if (peer_assoc != NULL) { 2511 mutex_enter(&peer_assoc->ipsa_lock); 2512 peer_assoc->ipsa_state = IPSA_STATE_DEAD; 2513 (void) sadb_torch_assoc(inbound_bucket, peer_assoc); 2514 } 2515 mutex_exit(&inbound_bucket->isaf_lock); 2516 } 2517 2518 static int 2519 sadb_delpair_state(mblk_t *mp, keysock_in_t *ksi, sadbp_t *spp, 2520 int *diagnostic, queue_t *pfkey_q) 2521 { 2522 sadb_sa_t *assoc = (sadb_sa_t *)ksi->ks_in_extv[SADB_EXT_SA]; 2523 struct sadb_purge_state ps; 2524 int error; 2525 2526 ps.sq.spp = spp; /* XXX param */ 2527 2528 error = sadb_form_query(ksi, IPSA_Q_DST|IPSA_Q_SRC, 2529 IPSA_Q_SRC|IPSA_Q_DST|IPSA_Q_SRCID|IPSA_Q_DSTID|IPSA_Q_KMC, 2530 &ps.sq, diagnostic); 2531 if (error != 0) 2532 return (error); 2533 2534 ps.inbnd = B_FALSE; 2535 ps.sadb_sa_state = assoc->sadb_sa_state; 2536 sadb_walker(ps.sq.sp->sdb_of, ps.sq.sp->sdb_hashsize, 2537 sadb_delpair_state_one, &ps); 2538 2539 ASSERT(mp->b_cont != NULL); 2540 sadb_pfkey_echo(pfkey_q, mp, (sadb_msg_t *)mp->b_cont->b_rptr, 2541 ksi, NULL); 2542 return (0); 2543 } 2544 2545 /* 2546 * Common code to delete/get an SA. 
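 *
 * The sadb_msg_type argument steers the behaviour: SADB_X_DELPAIR_STATE
 * is handed off to sadb_delpair_state(); SADB_DELETE and SADB_X_DELPAIR
 * actually unlink the matched SA (DELPAIR takes its paired SA down as
 * well); anything else (e.g. a GET) leaves the SADB alone and simply
 * echoes the matched SA back to PF_KEY via sadb_pfkey_echo().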
2547 */ 2548 int 2549 sadb_delget_sa(mblk_t *mp, keysock_in_t *ksi, sadbp_t *spp, 2550 int *diagnostic, queue_t *pfkey_q, uint8_t sadb_msg_type) 2551 { 2552 ipsa_query_t sq; 2553 ipsa_t *echo_target = NULL; 2554 ipsap_t ipsapp; 2555 uint_t error = 0; 2556 2557 if (sadb_msg_type == SADB_X_DELPAIR_STATE) 2558 return (sadb_delpair_state(mp, ksi, spp, diagnostic, pfkey_q)); 2559 2560 sq.spp = spp; /* XXX param */ 2561 error = sadb_form_query(ksi, IPSA_Q_DST|IPSA_Q_SA, 2562 IPSA_Q_SRC|IPSA_Q_DST|IPSA_Q_SA|IPSA_Q_INBOUND|IPSA_Q_OUTBOUND, 2563 &sq, diagnostic); 2564 if (error != 0) 2565 return (error); 2566 2567 error = get_ipsa_pair(&sq, &ipsapp, diagnostic); 2568 if (error != 0) { 2569 return (error); 2570 } 2571 2572 echo_target = ipsapp.ipsap_sa_ptr; 2573 if (echo_target == NULL) 2574 echo_target = ipsapp.ipsap_psa_ptr; 2575 2576 if (sadb_msg_type == SADB_DELETE || sadb_msg_type == SADB_X_DELPAIR) { 2577 /* 2578 * Bucket locks will be required if SA is actually unlinked. 2579 * get_ipsa_pair() returns valid hash bucket pointers even 2580 * if it can't find a pair SA pointer. To prevent a potential 2581 * deadlock, always lock the outbound bucket before the inbound. 2582 */ 2583 if (ipsapp.in_inbound_table) { 2584 mutex_enter(&ipsapp.ipsap_pbucket->isaf_lock); 2585 mutex_enter(&ipsapp.ipsap_bucket->isaf_lock); 2586 } else { 2587 mutex_enter(&ipsapp.ipsap_bucket->isaf_lock); 2588 mutex_enter(&ipsapp.ipsap_pbucket->isaf_lock); 2589 } 2590 2591 if (ipsapp.ipsap_sa_ptr != NULL) { 2592 mutex_enter(&ipsapp.ipsap_sa_ptr->ipsa_lock); 2593 if (ipsapp.ipsap_sa_ptr->ipsa_flags & IPSA_F_INBOUND) { 2594 sadb_delete_cluster(ipsapp.ipsap_sa_ptr); 2595 } 2596 ipsapp.ipsap_sa_ptr->ipsa_state = IPSA_STATE_DEAD; 2597 (void) sadb_torch_assoc(ipsapp.ipsap_bucket, 2598 ipsapp.ipsap_sa_ptr); 2599 /* 2600 * sadb_torch_assoc() releases the ipsa_lock 2601 * and calls sadb_unlinkassoc() which does a 2602 * IPSA_REFRELE. 2603 */ 2604 } 2605 if (ipsapp.ipsap_psa_ptr != NULL) { 2606 mutex_enter(&ipsapp.ipsap_psa_ptr->ipsa_lock); 2607 if (sadb_msg_type == SADB_X_DELPAIR || 2608 ipsapp.ipsap_psa_ptr->ipsa_haspeer) { 2609 if (ipsapp.ipsap_psa_ptr->ipsa_flags & 2610 IPSA_F_INBOUND) { 2611 sadb_delete_cluster 2612 (ipsapp.ipsap_psa_ptr); 2613 } 2614 ipsapp.ipsap_psa_ptr->ipsa_state = 2615 IPSA_STATE_DEAD; 2616 (void) sadb_torch_assoc(ipsapp.ipsap_pbucket, 2617 ipsapp.ipsap_psa_ptr); 2618 } else { 2619 /* 2620 * Only half of the "pair" has been deleted. 2621 * Update the remaining SA and remove references 2622 * to its pair SA, which is now gone. 2623 */ 2624 ipsapp.ipsap_psa_ptr->ipsa_otherspi = 0; 2625 ipsapp.ipsap_psa_ptr->ipsa_flags &= 2626 ~IPSA_F_PAIRED; 2627 mutex_exit(&ipsapp.ipsap_psa_ptr->ipsa_lock); 2628 } 2629 } else if (sadb_msg_type == SADB_X_DELPAIR) { 2630 *diagnostic = SADB_X_DIAGNOSTIC_PAIR_SA_NOTFOUND; 2631 error = ESRCH; 2632 } 2633 mutex_exit(&ipsapp.ipsap_bucket->isaf_lock); 2634 mutex_exit(&ipsapp.ipsap_pbucket->isaf_lock); 2635 } 2636 2637 ASSERT(mp->b_cont != NULL); 2638 2639 if (error == 0) 2640 sadb_pfkey_echo(pfkey_q, mp, (sadb_msg_t *) 2641 mp->b_cont->b_rptr, ksi, echo_target); 2642 2643 destroy_ipsa_pair(&ipsapp); 2644 2645 return (error); 2646 } 2647 2648 /* 2649 * This function takes a sadb_sa_t and finds the ipsa_t structure 2650 * and the isaf_t (hash bucket) that its stored under. If the security 2651 * association has a peer, the ipsa_t structure and bucket for that security 2652 * association are also searched for. The "pair" of ipsa_t's and isaf_t's 2653 * are returned as a ipsap_t. 
2654 * 2655 * The hash buckets are returned for convenience, if the calling function 2656 * needs to use the hash bucket locks, say to remove the SA's, it should 2657 * take care to observe the convention of locking outbound bucket then 2658 * inbound bucket. The flag in_inbound_table provides direction. 2659 * 2660 * Note that a "pair" is defined as one (but not both) of the following: 2661 * 2662 * A security association which has a soft reference to another security 2663 * association via its SPI. 2664 * 2665 * A security association that is not obviously "inbound" or "outbound" so 2666 * it appears in both hash tables, the "peer" being the same security 2667 * association in the other hash table. 2668 * 2669 * This function will return NULL if the ipsa_t can't be found in the 2670 * inbound or outbound hash tables (not found). If only one ipsa_t is 2671 * found, the pair ipsa_t will be NULL. Both isaf_t values are valid 2672 * provided at least one ipsa_t is found. 2673 */ 2674 static int 2675 get_ipsa_pair(ipsa_query_t *sq, ipsap_t *ipsapp, int *diagnostic) 2676 { 2677 uint32_t pair_srcaddr[IPSA_MAX_ADDRLEN]; 2678 uint32_t pair_dstaddr[IPSA_MAX_ADDRLEN]; 2679 uint32_t pair_spi; 2680 2681 init_ipsa_pair(ipsapp); 2682 2683 ipsapp->in_inbound_table = B_FALSE; 2684 2685 /* Lock down both buckets. */ 2686 mutex_enter(&sq->outbound->isaf_lock); 2687 mutex_enter(&sq->inbound->isaf_lock); 2688 2689 if (sq->assoc->sadb_sa_flags & IPSA_F_INBOUND) { 2690 ipsapp->ipsap_sa_ptr = ipsec_getassocbyspi(sq->inbound, 2691 sq->assoc->sadb_sa_spi, sq->srcaddr, sq->dstaddr, sq->af); 2692 if (ipsapp->ipsap_sa_ptr != NULL) { 2693 ipsapp->ipsap_bucket = sq->inbound; 2694 ipsapp->ipsap_pbucket = sq->outbound; 2695 ipsapp->in_inbound_table = B_TRUE; 2696 } else { 2697 ipsapp->ipsap_sa_ptr = ipsec_getassocbyspi(sq->outbound, 2698 sq->assoc->sadb_sa_spi, sq->srcaddr, sq->dstaddr, 2699 sq->af); 2700 ipsapp->ipsap_bucket = sq->outbound; 2701 ipsapp->ipsap_pbucket = sq->inbound; 2702 } 2703 } else { 2704 /* IPSA_F_OUTBOUND is set *or* no directions flags set. */ 2705 ipsapp->ipsap_sa_ptr = 2706 ipsec_getassocbyspi(sq->outbound, 2707 sq->assoc->sadb_sa_spi, sq->srcaddr, sq->dstaddr, sq->af); 2708 if (ipsapp->ipsap_sa_ptr != NULL) { 2709 ipsapp->ipsap_bucket = sq->outbound; 2710 ipsapp->ipsap_pbucket = sq->inbound; 2711 } else { 2712 ipsapp->ipsap_sa_ptr = ipsec_getassocbyspi(sq->inbound, 2713 sq->assoc->sadb_sa_spi, sq->srcaddr, sq->dstaddr, 2714 sq->af); 2715 ipsapp->ipsap_bucket = sq->inbound; 2716 ipsapp->ipsap_pbucket = sq->outbound; 2717 if (ipsapp->ipsap_sa_ptr != NULL) 2718 ipsapp->in_inbound_table = B_TRUE; 2719 } 2720 } 2721 2722 if (ipsapp->ipsap_sa_ptr == NULL) { 2723 mutex_exit(&sq->outbound->isaf_lock); 2724 mutex_exit(&sq->inbound->isaf_lock); 2725 *diagnostic = SADB_X_DIAGNOSTIC_SA_NOTFOUND; 2726 return (ESRCH); 2727 } 2728 2729 if ((ipsapp->ipsap_sa_ptr->ipsa_state == IPSA_STATE_LARVAL) && 2730 ipsapp->in_inbound_table) { 2731 mutex_exit(&sq->outbound->isaf_lock); 2732 mutex_exit(&sq->inbound->isaf_lock); 2733 return (0); 2734 } 2735 2736 mutex_enter(&ipsapp->ipsap_sa_ptr->ipsa_lock); 2737 if (ipsapp->ipsap_sa_ptr->ipsa_haspeer) { 2738 /* 2739 * haspeer implies no sa_pairing, look for same spi 2740 * in other hashtable. 
2741 */ 2742 ipsapp->ipsap_psa_ptr = 2743 ipsec_getassocbyspi(ipsapp->ipsap_pbucket, 2744 sq->assoc->sadb_sa_spi, sq->srcaddr, sq->dstaddr, sq->af); 2745 mutex_exit(&ipsapp->ipsap_sa_ptr->ipsa_lock); 2746 mutex_exit(&sq->outbound->isaf_lock); 2747 mutex_exit(&sq->inbound->isaf_lock); 2748 return (0); 2749 } 2750 pair_spi = ipsapp->ipsap_sa_ptr->ipsa_otherspi; 2751 IPSA_COPY_ADDR(&pair_srcaddr, 2752 ipsapp->ipsap_sa_ptr->ipsa_srcaddr, sq->af); 2753 IPSA_COPY_ADDR(&pair_dstaddr, 2754 ipsapp->ipsap_sa_ptr->ipsa_dstaddr, sq->af); 2755 mutex_exit(&ipsapp->ipsap_sa_ptr->ipsa_lock); 2756 mutex_exit(&sq->inbound->isaf_lock); 2757 mutex_exit(&sq->outbound->isaf_lock); 2758 2759 if (pair_spi == 0) { 2760 ASSERT(ipsapp->ipsap_bucket != NULL); 2761 ASSERT(ipsapp->ipsap_pbucket != NULL); 2762 return (0); 2763 } 2764 2765 /* found sa in outbound sadb, peer should be inbound */ 2766 2767 if (ipsapp->in_inbound_table) { 2768 /* Found SA in inbound table, pair will be in outbound. */ 2769 if (sq->af == AF_INET6) { 2770 ipsapp->ipsap_pbucket = OUTBOUND_BUCKET_V6(sq->sp, 2771 *(uint32_t *)pair_srcaddr); 2772 } else { 2773 ipsapp->ipsap_pbucket = OUTBOUND_BUCKET_V4(sq->sp, 2774 *(uint32_t *)pair_srcaddr); 2775 } 2776 } else { 2777 ipsapp->ipsap_pbucket = INBOUND_BUCKET(sq->sp, pair_spi); 2778 } 2779 mutex_enter(&ipsapp->ipsap_pbucket->isaf_lock); 2780 ipsapp->ipsap_psa_ptr = ipsec_getassocbyspi(ipsapp->ipsap_pbucket, 2781 pair_spi, pair_dstaddr, pair_srcaddr, sq->af); 2782 mutex_exit(&ipsapp->ipsap_pbucket->isaf_lock); 2783 ASSERT(ipsapp->ipsap_bucket != NULL); 2784 ASSERT(ipsapp->ipsap_pbucket != NULL); 2785 return (0); 2786 } 2787 2788 /* 2789 * Perform NAT-traversal cached checksum offset calculations here. 2790 */ 2791 static void 2792 sadb_nat_calculations(ipsa_t *newbie, sadb_address_t *natt_loc_ext, 2793 sadb_address_t *natt_rem_ext, uint32_t *src_addr_ptr, 2794 uint32_t *dst_addr_ptr) 2795 { 2796 struct sockaddr_in *natt_loc, *natt_rem; 2797 uint32_t *natt_loc_ptr = NULL, *natt_rem_ptr = NULL; 2798 uint32_t running_sum = 0; 2799 2800 #define DOWN_SUM(x) (x) = ((x) & 0xFFFF) + ((x) >> 16) 2801 2802 if (natt_rem_ext != NULL) { 2803 uint32_t l_src; 2804 uint32_t l_rem; 2805 2806 natt_rem = (struct sockaddr_in *)(natt_rem_ext + 1); 2807 2808 /* Ensured by sadb_addrfix(). */ 2809 ASSERT(natt_rem->sin_family == AF_INET); 2810 2811 natt_rem_ptr = (uint32_t *)(&natt_rem->sin_addr); 2812 newbie->ipsa_remote_nat_port = natt_rem->sin_port; 2813 l_src = *src_addr_ptr; 2814 l_rem = *natt_rem_ptr; 2815 2816 /* Instead of IPSA_COPY_ADDR(), just copy first 32 bits. */ 2817 newbie->ipsa_natt_addr_rem = *natt_rem_ptr; 2818 2819 l_src = ntohl(l_src); 2820 DOWN_SUM(l_src); 2821 DOWN_SUM(l_src); 2822 l_rem = ntohl(l_rem); 2823 DOWN_SUM(l_rem); 2824 DOWN_SUM(l_rem); 2825 2826 /* 2827 * We're 1's complement for checksums, so check for wraparound 2828 * here. 2829 */ 2830 if (l_rem > l_src) 2831 l_src--; 2832 2833 running_sum += l_src - l_rem; 2834 2835 DOWN_SUM(running_sum); 2836 DOWN_SUM(running_sum); 2837 } 2838 2839 if (natt_loc_ext != NULL) { 2840 natt_loc = (struct sockaddr_in *)(natt_loc_ext + 1); 2841 2842 /* Ensured by sadb_addrfix(). */ 2843 ASSERT(natt_loc->sin_family == AF_INET); 2844 2845 natt_loc_ptr = (uint32_t *)(&natt_loc->sin_addr); 2846 newbie->ipsa_local_nat_port = natt_loc->sin_port; 2847 2848 /* Instead of IPSA_COPY_ADDR(), just copy first 32 bits. 
*/ 2849 newbie->ipsa_natt_addr_loc = *natt_loc_ptr; 2850 2851 /* 2852 * NAT-T port agility means we may have natt_loc_ext, but 2853 * only for a local-port change. 2854 */ 2855 if (natt_loc->sin_addr.s_addr != INADDR_ANY) { 2856 uint32_t l_dst = ntohl(*dst_addr_ptr); 2857 uint32_t l_loc = ntohl(*natt_loc_ptr); 2858 2859 DOWN_SUM(l_loc); 2860 DOWN_SUM(l_loc); 2861 DOWN_SUM(l_dst); 2862 DOWN_SUM(l_dst); 2863 2864 /* 2865 * We're 1's complement for checksums, so check for 2866 * wraparound here. 2867 */ 2868 if (l_loc > l_dst) 2869 l_dst--; 2870 2871 running_sum += l_dst - l_loc; 2872 DOWN_SUM(running_sum); 2873 DOWN_SUM(running_sum); 2874 } 2875 } 2876 2877 newbie->ipsa_inbound_cksum = running_sum; 2878 #undef DOWN_SUM 2879 } 2880 2881 /* 2882 * This function is called from consumers that need to insert a fully-grown 2883 * security association into its tables. This function takes into account that 2884 * SAs can be "inbound", "outbound", or "both". The "primary" and "secondary" 2885 * hash bucket parameters are set in order of what the SA will be most of the 2886 * time. (For example, an SA with an unspecified source, and a multicast 2887 * destination will primarily be an outbound SA. OTOH, if that destination 2888 * is unicast for this node, then the SA will primarily be inbound.) 2889 * 2890 * It takes a lot of parameters because even if clone is B_FALSE, this needs 2891 * to check both buckets for purposes of collision. 2892 * 2893 * Return 0 upon success. Return various errnos (ENOMEM, EEXIST) for 2894 * various error conditions. We may need to set samsg->sadb_x_msg_diagnostic 2895 * with additional diagnostic information because there is at least one EINVAL 2896 * case here. 2897 */ 2898 int 2899 sadb_common_add(queue_t *pfkey_q, mblk_t *mp, sadb_msg_t *samsg, 2900 keysock_in_t *ksi, isaf_t *primary, isaf_t *secondary, 2901 ipsa_t *newbie, boolean_t clone, boolean_t is_inbound, int *diagnostic, 2902 netstack_t *ns, sadbp_t *spp) 2903 { 2904 ipsa_t *newbie_clone = NULL, *scratch; 2905 ipsap_t ipsapp; 2906 sadb_sa_t *assoc = (sadb_sa_t *)ksi->ks_in_extv[SADB_EXT_SA]; 2907 sadb_address_t *srcext = 2908 (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_SRC]; 2909 sadb_address_t *dstext = 2910 (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_DST]; 2911 sadb_address_t *isrcext = 2912 (sadb_address_t *)ksi->ks_in_extv[SADB_X_EXT_ADDRESS_INNER_SRC]; 2913 sadb_address_t *idstext = 2914 (sadb_address_t *)ksi->ks_in_extv[SADB_X_EXT_ADDRESS_INNER_DST]; 2915 sadb_x_kmc_t *kmcext = 2916 (sadb_x_kmc_t *)ksi->ks_in_extv[SADB_X_EXT_KM_COOKIE]; 2917 sadb_key_t *akey = (sadb_key_t *)ksi->ks_in_extv[SADB_EXT_KEY_AUTH]; 2918 sadb_key_t *ekey = (sadb_key_t *)ksi->ks_in_extv[SADB_EXT_KEY_ENCRYPT]; 2919 sadb_sens_t *sens = 2920 (sadb_sens_t *)ksi->ks_in_extv[SADB_EXT_SENSITIVITY]; 2921 sadb_sens_t *osens = 2922 (sadb_sens_t *)ksi->ks_in_extv[SADB_X_EXT_OUTER_SENS]; 2923 sadb_x_pair_t *pair_ext = 2924 (sadb_x_pair_t *)ksi->ks_in_extv[SADB_X_EXT_PAIR]; 2925 sadb_x_replay_ctr_t *replayext = 2926 (sadb_x_replay_ctr_t *)ksi->ks_in_extv[SADB_X_EXT_REPLAY_VALUE]; 2927 uint8_t protocol = 2928 (samsg->sadb_msg_satype == SADB_SATYPE_AH) ? 
IPPROTO_AH:IPPROTO_ESP; 2929 int salt_offset; 2930 uint8_t *buf_ptr; 2931 struct sockaddr_in *src, *dst, *isrc, *idst; 2932 struct sockaddr_in6 *src6, *dst6, *isrc6, *idst6; 2933 sadb_lifetime_t *soft = 2934 (sadb_lifetime_t *)ksi->ks_in_extv[SADB_EXT_LIFETIME_SOFT]; 2935 sadb_lifetime_t *hard = 2936 (sadb_lifetime_t *)ksi->ks_in_extv[SADB_EXT_LIFETIME_HARD]; 2937 sadb_lifetime_t *idle = 2938 (sadb_lifetime_t *)ksi->ks_in_extv[SADB_X_EXT_LIFETIME_IDLE]; 2939 sa_family_t af; 2940 int error = 0; 2941 boolean_t isupdate = (newbie != NULL); 2942 uint32_t *src_addr_ptr, *dst_addr_ptr, *isrc_addr_ptr, *idst_addr_ptr; 2943 ipsec_stack_t *ipss = ns->netstack_ipsec; 2944 ip_stack_t *ipst = ns->netstack_ip; 2945 ipsec_alginfo_t *alg; 2946 int rcode; 2947 boolean_t async = B_FALSE; 2948 2949 init_ipsa_pair(&ipsapp); 2950 2951 if (srcext == NULL) { 2952 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_SRC; 2953 return (EINVAL); 2954 } 2955 if (dstext == NULL) { 2956 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_DST; 2957 return (EINVAL); 2958 } 2959 if (assoc == NULL) { 2960 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_SA; 2961 return (EINVAL); 2962 } 2963 2964 src = (struct sockaddr_in *)(srcext + 1); 2965 src6 = (struct sockaddr_in6 *)(srcext + 1); 2966 dst = (struct sockaddr_in *)(dstext + 1); 2967 dst6 = (struct sockaddr_in6 *)(dstext + 1); 2968 if (isrcext != NULL) { 2969 isrc = (struct sockaddr_in *)(isrcext + 1); 2970 isrc6 = (struct sockaddr_in6 *)(isrcext + 1); 2971 ASSERT(idstext != NULL); 2972 idst = (struct sockaddr_in *)(idstext + 1); 2973 idst6 = (struct sockaddr_in6 *)(idstext + 1); 2974 } else { 2975 isrc = NULL; 2976 isrc6 = NULL; 2977 } 2978 2979 af = src->sin_family; 2980 2981 if (af == AF_INET) { 2982 src_addr_ptr = (uint32_t *)&src->sin_addr; 2983 dst_addr_ptr = (uint32_t *)&dst->sin_addr; 2984 } else { 2985 ASSERT(af == AF_INET6); 2986 src_addr_ptr = (uint32_t *)&src6->sin6_addr; 2987 dst_addr_ptr = (uint32_t *)&dst6->sin6_addr; 2988 } 2989 2990 if (!isupdate && (clone == B_TRUE || is_inbound == B_TRUE) && 2991 cl_inet_checkspi && 2992 (assoc->sadb_sa_state != SADB_X_SASTATE_ACTIVE_ELSEWHERE)) { 2993 rcode = cl_inet_checkspi(ns->netstack_stackid, protocol, 2994 assoc->sadb_sa_spi, NULL); 2995 if (rcode == -1) { 2996 return (EEXIST); 2997 } 2998 } 2999 3000 /* 3001 * Check to see if the new SA will be cloned AND paired. The 3002 * reason a SA will be cloned is the source or destination addresses 3003 * are not specific enough to determine if the SA goes in the outbound 3004 * or the inbound hash table, so its cloned and put in both. If 3005 * the SA is paired, it's soft linked to another SA for the other 3006 * direction. Keeping track and looking up SA's that are direction 3007 * unspecific and linked is too hard. 3008 */ 3009 if (clone && (pair_ext != NULL)) { 3010 *diagnostic = SADB_X_DIAGNOSTIC_PAIR_INAPPROPRIATE; 3011 return (EINVAL); 3012 } 3013 3014 if (!isupdate) { 3015 newbie = sadb_makelarvalassoc(assoc->sadb_sa_spi, 3016 src_addr_ptr, dst_addr_ptr, af, ns); 3017 if (newbie == NULL) 3018 return (ENOMEM); 3019 } 3020 3021 mutex_enter(&newbie->ipsa_lock); 3022 3023 if (isrc != NULL) { 3024 if (isrc->sin_family == AF_INET) { 3025 if (srcext->sadb_address_proto != IPPROTO_ENCAP) { 3026 if (srcext->sadb_address_proto != 0) { 3027 /* 3028 * Mismatched outer-packet protocol 3029 * and inner-packet address family. 
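 *
 * (For example, an inner AF_INET selector only makes sense
 * with an outer sadb_address_proto of IPPROTO_ENCAP, i.e.
 * IPv4-in-IP, just as the AF_INET6 branch below insists on
 * IPPROTO_IPV6; an unspecified (zero) protocol is simply
 * filled in.)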
3030 */ 3031 mutex_exit(&newbie->ipsa_lock); 3032 error = EPROTOTYPE; 3033 *diagnostic = 3034 SADB_X_DIAGNOSTIC_INNER_AF_MISMATCH; 3035 goto error; 3036 } else { 3037 /* Fill in with explicit protocol. */ 3038 srcext->sadb_address_proto = 3039 IPPROTO_ENCAP; 3040 dstext->sadb_address_proto = 3041 IPPROTO_ENCAP; 3042 } 3043 } 3044 isrc_addr_ptr = (uint32_t *)&isrc->sin_addr; 3045 idst_addr_ptr = (uint32_t *)&idst->sin_addr; 3046 } else { 3047 ASSERT(isrc->sin_family == AF_INET6); 3048 if (srcext->sadb_address_proto != IPPROTO_IPV6) { 3049 if (srcext->sadb_address_proto != 0) { 3050 /* 3051 * Mismatched outer-packet protocol 3052 * and inner-packet address family. 3053 */ 3054 mutex_exit(&newbie->ipsa_lock); 3055 error = EPROTOTYPE; 3056 *diagnostic = 3057 SADB_X_DIAGNOSTIC_INNER_AF_MISMATCH; 3058 goto error; 3059 } else { 3060 /* Fill in with explicit protocol. */ 3061 srcext->sadb_address_proto = 3062 IPPROTO_IPV6; 3063 dstext->sadb_address_proto = 3064 IPPROTO_IPV6; 3065 } 3066 } 3067 isrc_addr_ptr = (uint32_t *)&isrc6->sin6_addr; 3068 idst_addr_ptr = (uint32_t *)&idst6->sin6_addr; 3069 } 3070 newbie->ipsa_innerfam = isrc->sin_family; 3071 3072 IPSA_COPY_ADDR(newbie->ipsa_innersrc, isrc_addr_ptr, 3073 newbie->ipsa_innerfam); 3074 IPSA_COPY_ADDR(newbie->ipsa_innerdst, idst_addr_ptr, 3075 newbie->ipsa_innerfam); 3076 newbie->ipsa_innersrcpfx = isrcext->sadb_address_prefixlen; 3077 newbie->ipsa_innerdstpfx = idstext->sadb_address_prefixlen; 3078 3079 /* Unique value uses inner-ports for Tunnel Mode... */ 3080 newbie->ipsa_unique_id = SA_UNIQUE_ID(isrc->sin_port, 3081 idst->sin_port, dstext->sadb_address_proto, 3082 idstext->sadb_address_proto); 3083 newbie->ipsa_unique_mask = SA_UNIQUE_MASK(isrc->sin_port, 3084 idst->sin_port, dstext->sadb_address_proto, 3085 idstext->sadb_address_proto); 3086 } else { 3087 /* ... and outer-ports for Transport Mode. 
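 *
 * Either way, if the resulting mask is non-zero the SA is flagged
 * IPSA_F_UNIQUE just below, i.e. it is bound to the specific
 * port/protocol tuple encoded in ipsa_unique_id.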
*/ 3088 newbie->ipsa_unique_id = SA_UNIQUE_ID(src->sin_port, 3089 dst->sin_port, dstext->sadb_address_proto, 0); 3090 newbie->ipsa_unique_mask = SA_UNIQUE_MASK(src->sin_port, 3091 dst->sin_port, dstext->sadb_address_proto, 0); 3092 } 3093 if (newbie->ipsa_unique_mask != (uint64_t)0) 3094 newbie->ipsa_flags |= IPSA_F_UNIQUE; 3095 3096 sadb_nat_calculations(newbie, 3097 (sadb_address_t *)ksi->ks_in_extv[SADB_X_EXT_ADDRESS_NATT_LOC], 3098 (sadb_address_t *)ksi->ks_in_extv[SADB_X_EXT_ADDRESS_NATT_REM], 3099 src_addr_ptr, dst_addr_ptr); 3100 3101 newbie->ipsa_type = samsg->sadb_msg_satype; 3102 3103 ASSERT((assoc->sadb_sa_state == SADB_SASTATE_MATURE) || 3104 (assoc->sadb_sa_state == SADB_X_SASTATE_ACTIVE_ELSEWHERE)); 3105 newbie->ipsa_auth_alg = assoc->sadb_sa_auth; 3106 newbie->ipsa_encr_alg = assoc->sadb_sa_encrypt; 3107 3108 newbie->ipsa_flags |= assoc->sadb_sa_flags; 3109 if (newbie->ipsa_flags & SADB_X_SAFLAGS_NATT_LOC && 3110 ksi->ks_in_extv[SADB_X_EXT_ADDRESS_NATT_LOC] == NULL) { 3111 mutex_exit(&newbie->ipsa_lock); 3112 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_NATT_LOC; 3113 error = EINVAL; 3114 goto error; 3115 } 3116 if (newbie->ipsa_flags & SADB_X_SAFLAGS_NATT_REM && 3117 ksi->ks_in_extv[SADB_X_EXT_ADDRESS_NATT_REM] == NULL) { 3118 mutex_exit(&newbie->ipsa_lock); 3119 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_NATT_REM; 3120 error = EINVAL; 3121 goto error; 3122 } 3123 if (newbie->ipsa_flags & SADB_X_SAFLAGS_TUNNEL && 3124 ksi->ks_in_extv[SADB_X_EXT_ADDRESS_INNER_SRC] == NULL) { 3125 mutex_exit(&newbie->ipsa_lock); 3126 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_INNER_SRC; 3127 error = EINVAL; 3128 goto error; 3129 } 3130 /* 3131 * If unspecified source address, force replay_wsize to 0. 3132 * This is because an SA that has multiple sources of secure 3133 * traffic cannot enforce a replay counter w/o synchronizing the 3134 * senders. 3135 */ 3136 if (ksi->ks_in_srctype != KS_IN_ADDR_UNSPEC) 3137 newbie->ipsa_replay_wsize = assoc->sadb_sa_replay; 3138 else 3139 newbie->ipsa_replay_wsize = 0; 3140 3141 newbie->ipsa_addtime = gethrestime_sec(); 3142 3143 if (kmcext != NULL) { 3144 newbie->ipsa_kmp = kmcext->sadb_x_kmc_proto; 3145 /* 3146 * Be liberal in what we receive. Special-case the IKEv1 3147 * cookie, which closed-source in.iked assumes is 32 bits. 3148 * Now that we store all 64 bits, we should pre-zero the 3149 * reserved field on behalf of closed-source in.iked. 3150 */ 3151 if (newbie->ipsa_kmp == SADB_X_KMP_IKE) { 3152 /* Just in case in.iked is misbehaving... */ 3153 kmcext->sadb_x_kmc_reserved = 0; 3154 } 3155 newbie->ipsa_kmc = kmcext->sadb_x_kmc_cookie64; 3156 } 3157 3158 /* 3159 * XXX CURRENT lifetime checks MAY BE needed for an UPDATE. 3160 * The spec says that one can update current lifetimes, but 3161 * that seems impractical, especially in the larval-to-mature 3162 * update that this function performs. 
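 *
 * The lifetime extensions honored below are all deltas: for example,
 * a soft sadb_lifetime_addtime of 3600 arms the soft expiry one hour
 * after ipsa_addtime (the idle case spells the same computation out
 * explicitly as ipsa_addtime + ipsa_idleaddlt).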
3163 */ 3164 if (soft != NULL) { 3165 newbie->ipsa_softaddlt = soft->sadb_lifetime_addtime; 3166 newbie->ipsa_softuselt = soft->sadb_lifetime_usetime; 3167 newbie->ipsa_softbyteslt = soft->sadb_lifetime_bytes; 3168 newbie->ipsa_softalloc = soft->sadb_lifetime_allocations; 3169 SET_EXPIRE(newbie, softaddlt, softexpiretime); 3170 } 3171 if (hard != NULL) { 3172 newbie->ipsa_hardaddlt = hard->sadb_lifetime_addtime; 3173 newbie->ipsa_harduselt = hard->sadb_lifetime_usetime; 3174 newbie->ipsa_hardbyteslt = hard->sadb_lifetime_bytes; 3175 newbie->ipsa_hardalloc = hard->sadb_lifetime_allocations; 3176 SET_EXPIRE(newbie, hardaddlt, hardexpiretime); 3177 } 3178 if (idle != NULL) { 3179 newbie->ipsa_idleaddlt = idle->sadb_lifetime_addtime; 3180 newbie->ipsa_idleuselt = idle->sadb_lifetime_usetime; 3181 newbie->ipsa_idleexpiretime = newbie->ipsa_addtime + 3182 newbie->ipsa_idleaddlt; 3183 newbie->ipsa_idletime = newbie->ipsa_idleaddlt; 3184 } 3185 3186 newbie->ipsa_authtmpl = NULL; 3187 newbie->ipsa_encrtmpl = NULL; 3188 3189 #ifdef IPSEC_LATENCY_TEST 3190 if (akey != NULL && newbie->ipsa_auth_alg != SADB_AALG_NONE) { 3191 #else 3192 if (akey != NULL) { 3193 #endif 3194 async = (ipss->ipsec_algs_exec_mode[IPSEC_ALG_AUTH] == 3195 IPSEC_ALGS_EXEC_ASYNC); 3196 3197 newbie->ipsa_authkeybits = akey->sadb_key_bits; 3198 newbie->ipsa_authkeylen = SADB_1TO8(akey->sadb_key_bits); 3199 /* In case we have to round up to the next byte... */ 3200 if ((akey->sadb_key_bits & 0x7) != 0) 3201 newbie->ipsa_authkeylen++; 3202 newbie->ipsa_authkey = kmem_alloc(newbie->ipsa_authkeylen, 3203 KM_NOSLEEP); 3204 if (newbie->ipsa_authkey == NULL) { 3205 error = ENOMEM; 3206 mutex_exit(&newbie->ipsa_lock); 3207 goto error; 3208 } 3209 bcopy(akey + 1, newbie->ipsa_authkey, newbie->ipsa_authkeylen); 3210 bzero(akey + 1, newbie->ipsa_authkeylen); 3211 3212 /* 3213 * Pre-initialize the kernel crypto framework key 3214 * structure. 3215 */ 3216 newbie->ipsa_kcfauthkey.ck_format = CRYPTO_KEY_RAW; 3217 newbie->ipsa_kcfauthkey.ck_length = newbie->ipsa_authkeybits; 3218 newbie->ipsa_kcfauthkey.ck_data = newbie->ipsa_authkey; 3219 3220 rw_enter(&ipss->ipsec_alg_lock, RW_READER); 3221 alg = ipss->ipsec_alglists[IPSEC_ALG_AUTH] 3222 [newbie->ipsa_auth_alg]; 3223 if (alg != NULL && ALG_VALID(alg)) { 3224 newbie->ipsa_amech.cm_type = alg->alg_mech_type; 3225 newbie->ipsa_amech.cm_param = 3226 (char *)&newbie->ipsa_mac_len; 3227 newbie->ipsa_amech.cm_param_len = sizeof (size_t); 3228 newbie->ipsa_mac_len = (size_t)alg->alg_datalen; 3229 } else { 3230 newbie->ipsa_amech.cm_type = CRYPTO_MECHANISM_INVALID; 3231 } 3232 error = ipsec_create_ctx_tmpl(newbie, IPSEC_ALG_AUTH); 3233 rw_exit(&ipss->ipsec_alg_lock); 3234 if (error != 0) { 3235 mutex_exit(&newbie->ipsa_lock); 3236 /* 3237 * An error here indicates that alg is the wrong type 3238 * (IE: not authentication) or its not in the alg tables 3239 * created by ipsecalgs(1m), or Kcf does not like the 3240 * parameters passed in with this algorithm, which is 3241 * probably a coding error! 
3242 */ 3243 *diagnostic = SADB_X_DIAGNOSTIC_BAD_CTX; 3244 3245 goto error; 3246 } 3247 } 3248 3249 if (ekey != NULL) { 3250 rw_enter(&ipss->ipsec_alg_lock, RW_READER); 3251 async = async || (ipss->ipsec_algs_exec_mode[IPSEC_ALG_ENCR] == 3252 IPSEC_ALGS_EXEC_ASYNC); 3253 alg = ipss->ipsec_alglists[IPSEC_ALG_ENCR] 3254 [newbie->ipsa_encr_alg]; 3255 3256 if (alg != NULL && ALG_VALID(alg)) { 3257 newbie->ipsa_emech.cm_type = alg->alg_mech_type; 3258 newbie->ipsa_datalen = alg->alg_datalen; 3259 if (alg->alg_flags & ALG_FLAG_COUNTERMODE) 3260 newbie->ipsa_flags |= IPSA_F_COUNTERMODE; 3261 3262 if (alg->alg_flags & ALG_FLAG_COMBINED) { 3263 newbie->ipsa_flags |= IPSA_F_COMBINED; 3264 newbie->ipsa_mac_len = alg->alg_icvlen; 3265 } 3266 3267 if (alg->alg_flags & ALG_FLAG_CCM) 3268 newbie->ipsa_noncefunc = ccm_params_init; 3269 else if (alg->alg_flags & ALG_FLAG_GCM) 3270 newbie->ipsa_noncefunc = gcm_params_init; 3271 else newbie->ipsa_noncefunc = cbc_params_init; 3272 3273 newbie->ipsa_saltlen = alg->alg_saltlen; 3274 newbie->ipsa_saltbits = SADB_8TO1(newbie->ipsa_saltlen); 3275 newbie->ipsa_iv_len = alg->alg_ivlen; 3276 newbie->ipsa_nonce_len = newbie->ipsa_saltlen + 3277 newbie->ipsa_iv_len; 3278 newbie->ipsa_emech.cm_param = NULL; 3279 newbie->ipsa_emech.cm_param_len = 0; 3280 } else { 3281 newbie->ipsa_emech.cm_type = CRYPTO_MECHANISM_INVALID; 3282 } 3283 rw_exit(&ipss->ipsec_alg_lock); 3284 3285 /* 3286 * The byte stream following the sadb_key_t is made up of: 3287 * key bytes, [salt bytes], [IV initial value] 3288 * All of these have variable length. The IV is typically 3289 * randomly generated by this function and not passed in. 3290 * By supporting the injection of a known IV, the whole 3291 * IPsec subsystem and the underlying crypto subsystem 3292 * can be tested with known test vectors. 3293 * 3294 * The keying material has been checked by ext_check() 3295 * and ipsec_valid_key_size(), after removing salt/IV 3296 * bits, whats left is the encryption key. If this is too 3297 * short, ipsec_create_ctx_tmpl() will fail and the SA 3298 * won't get created. 3299 * 3300 * set ipsa_encrkeylen to length of key only. 3301 */ 3302 newbie->ipsa_encrkeybits = ekey->sadb_key_bits; 3303 newbie->ipsa_encrkeybits -= ekey->sadb_key_reserved; 3304 newbie->ipsa_encrkeybits -= newbie->ipsa_saltbits; 3305 newbie->ipsa_encrkeylen = SADB_1TO8(newbie->ipsa_encrkeybits); 3306 3307 /* In case we have to round up to the next byte... */ 3308 if ((ekey->sadb_key_bits & 0x7) != 0) 3309 newbie->ipsa_encrkeylen++; 3310 3311 newbie->ipsa_encrkey = kmem_alloc(newbie->ipsa_encrkeylen, 3312 KM_NOSLEEP); 3313 if (newbie->ipsa_encrkey == NULL) { 3314 error = ENOMEM; 3315 mutex_exit(&newbie->ipsa_lock); 3316 goto error; 3317 } 3318 3319 buf_ptr = (uint8_t *)(ekey + 1); 3320 bcopy(buf_ptr, newbie->ipsa_encrkey, newbie->ipsa_encrkeylen); 3321 3322 if (newbie->ipsa_flags & IPSA_F_COMBINED) { 3323 /* 3324 * Combined mode algs need a nonce. Copy the salt and 3325 * IV into a buffer. The ipsa_nonce is a pointer into 3326 * this buffer, some bytes at the start of the buffer 3327 * may be unused, depends on the salt length. The IV 3328 * is 64 bit aligned so it can be incremented as a 3329 * uint64_t. Zero out key in samsg_t before freeing. 
3330 */ 3331 3332 newbie->ipsa_nonce_buf = kmem_alloc( 3333 sizeof (ipsec_nonce_t), KM_NOSLEEP); 3334 if (newbie->ipsa_nonce_buf == NULL) { 3335 error = ENOMEM; 3336 mutex_exit(&newbie->ipsa_lock); 3337 goto error; 3338 } 3339 /* 3340 * Initialize nonce and salt pointers to point 3341 * to the nonce buffer. This is just in case we get 3342 * bad data, the pointers will be valid, the data 3343 * won't be. 3344 * 3345 * See sadb.h for layout of nonce. 3346 */ 3347 newbie->ipsa_iv = &newbie->ipsa_nonce_buf->iv; 3348 newbie->ipsa_salt = (uint8_t *)newbie->ipsa_nonce_buf; 3349 newbie->ipsa_nonce = newbie->ipsa_salt; 3350 if (newbie->ipsa_saltlen != 0) { 3351 salt_offset = MAXSALTSIZE - 3352 newbie->ipsa_saltlen; 3353 newbie->ipsa_salt = (uint8_t *) 3354 &newbie->ipsa_nonce_buf->salt[salt_offset]; 3355 newbie->ipsa_nonce = newbie->ipsa_salt; 3356 buf_ptr += newbie->ipsa_encrkeylen; 3357 bcopy(buf_ptr, newbie->ipsa_salt, 3358 newbie->ipsa_saltlen); 3359 } 3360 /* 3361 * The IV for CCM/GCM mode increments, it should not 3362 * repeat. Get a random value for the IV, make a 3363 * copy, the SA will expire when/if the IV ever 3364 * wraps back to the initial value. If an Initial IV 3365 * is passed in via PF_KEY, save this in the SA. 3366 * Initialising IV for inbound is pointless as its 3367 * taken from the inbound packet. 3368 */ 3369 if (!is_inbound) { 3370 if (ekey->sadb_key_reserved != 0) { 3371 buf_ptr += newbie->ipsa_saltlen; 3372 bcopy(buf_ptr, (uint8_t *)newbie-> 3373 ipsa_iv, SADB_1TO8(ekey-> 3374 sadb_key_reserved)); 3375 } else { 3376 (void) random_get_pseudo_bytes( 3377 (uint8_t *)newbie->ipsa_iv, 3378 newbie->ipsa_iv_len); 3379 } 3380 newbie->ipsa_iv_softexpire = 3381 (*newbie->ipsa_iv) << 9; 3382 newbie->ipsa_iv_hardexpire = *newbie->ipsa_iv; 3383 } 3384 } 3385 bzero((ekey + 1), SADB_1TO8(ekey->sadb_key_bits)); 3386 3387 /* 3388 * Pre-initialize the kernel crypto framework key 3389 * structure. 3390 */ 3391 newbie->ipsa_kcfencrkey.ck_format = CRYPTO_KEY_RAW; 3392 newbie->ipsa_kcfencrkey.ck_length = newbie->ipsa_encrkeybits; 3393 newbie->ipsa_kcfencrkey.ck_data = newbie->ipsa_encrkey; 3394 3395 rw_enter(&ipss->ipsec_alg_lock, RW_READER); 3396 error = ipsec_create_ctx_tmpl(newbie, IPSEC_ALG_ENCR); 3397 rw_exit(&ipss->ipsec_alg_lock); 3398 if (error != 0) { 3399 mutex_exit(&newbie->ipsa_lock); 3400 /* See above for error explanation. */ 3401 *diagnostic = SADB_X_DIAGNOSTIC_BAD_CTX; 3402 goto error; 3403 } 3404 } 3405 3406 if (async) 3407 newbie->ipsa_flags |= IPSA_F_ASYNC; 3408 3409 /* 3410 * Ptrs to processing functions. 3411 */ 3412 if (newbie->ipsa_type == SADB_SATYPE_ESP) 3413 ipsecesp_init_funcs(newbie); 3414 else 3415 ipsecah_init_funcs(newbie); 3416 ASSERT(newbie->ipsa_output_func != NULL && 3417 newbie->ipsa_input_func != NULL); 3418 3419 /* 3420 * Certificate ID stuff. 3421 */ 3422 if (ksi->ks_in_extv[SADB_EXT_IDENTITY_SRC] != NULL) { 3423 sadb_ident_t *id = 3424 (sadb_ident_t *)ksi->ks_in_extv[SADB_EXT_IDENTITY_SRC]; 3425 3426 /* 3427 * Can assume strlen() will return okay because ext_check() in 3428 * keysock.c prepares the string for us. 
3429 */ 3430 newbie->ipsa_src_cid = ipsid_lookup(id->sadb_ident_type, 3431 (char *)(id+1), ns); 3432 if (newbie->ipsa_src_cid == NULL) { 3433 error = ENOMEM; 3434 mutex_exit(&newbie->ipsa_lock); 3435 goto error; 3436 } 3437 } 3438 3439 if (ksi->ks_in_extv[SADB_EXT_IDENTITY_DST] != NULL) { 3440 sadb_ident_t *id = 3441 (sadb_ident_t *)ksi->ks_in_extv[SADB_EXT_IDENTITY_DST]; 3442 3443 /* 3444 * Can assume strlen() will return okay because ext_check() in 3445 * keysock.c prepares the string for us. 3446 */ 3447 newbie->ipsa_dst_cid = ipsid_lookup(id->sadb_ident_type, 3448 (char *)(id+1), ns); 3449 if (newbie->ipsa_dst_cid == NULL) { 3450 error = ENOMEM; 3451 mutex_exit(&newbie->ipsa_lock); 3452 goto error; 3453 } 3454 } 3455 3456 /* 3457 * sensitivity label handling code: 3458 * Convert sens + bitmap into cred_t, and associate it 3459 * with the new SA. 3460 */ 3461 if (sens != NULL) { 3462 uint64_t *bitmap = (uint64_t *)(sens + 1); 3463 3464 newbie->ipsa_tsl = sadb_label_from_sens(sens, bitmap); 3465 } 3466 3467 /* 3468 * Likewise for outer sensitivity. 3469 */ 3470 if (osens != NULL) { 3471 uint64_t *bitmap = (uint64_t *)(osens + 1); 3472 ts_label_t *tsl, *effective_tsl; 3473 uint32_t *peer_addr_ptr; 3474 zoneid_t zoneid = GLOBAL_ZONEID; 3475 zone_t *zone; 3476 3477 peer_addr_ptr = is_inbound ? src_addr_ptr : dst_addr_ptr; 3478 3479 tsl = sadb_label_from_sens(osens, bitmap); 3480 newbie->ipsa_mac_exempt = CONN_MAC_DEFAULT; 3481 3482 if (osens->sadb_x_sens_flags & SADB_X_SENS_IMPLICIT) { 3483 newbie->ipsa_mac_exempt = CONN_MAC_IMPLICIT; 3484 } 3485 3486 error = tsol_check_dest(tsl, peer_addr_ptr, 3487 (af == AF_INET6)?IPV6_VERSION:IPV4_VERSION, 3488 newbie->ipsa_mac_exempt, B_TRUE, &effective_tsl); 3489 if (error != 0) { 3490 label_rele(tsl); 3491 mutex_exit(&newbie->ipsa_lock); 3492 goto error; 3493 } 3494 3495 if (effective_tsl != NULL) { 3496 label_rele(tsl); 3497 tsl = effective_tsl; 3498 } 3499 3500 newbie->ipsa_otsl = tsl; 3501 3502 zone = zone_find_by_label(tsl); 3503 if (zone != NULL) { 3504 zoneid = zone->zone_id; 3505 zone_rele(zone); 3506 } 3507 /* 3508 * For exclusive stacks we set the zoneid to zero to operate 3509 * as if in the global zone for tsol_compute_label_v4/v6 3510 */ 3511 if (ipst->ips_netstack->netstack_stackid != GLOBAL_NETSTACKID) 3512 zoneid = GLOBAL_ZONEID; 3513 3514 if (af == AF_INET6) { 3515 error = tsol_compute_label_v6(tsl, zoneid, 3516 (in6_addr_t *)peer_addr_ptr, 3517 newbie->ipsa_opt_storage, ipst); 3518 } else { 3519 error = tsol_compute_label_v4(tsl, zoneid, 3520 *peer_addr_ptr, newbie->ipsa_opt_storage, ipst); 3521 } 3522 if (error != 0) { 3523 mutex_exit(&newbie->ipsa_lock); 3524 goto error; 3525 } 3526 } 3527 3528 3529 if (replayext != NULL) { 3530 if ((replayext->sadb_x_rc_replay32 == 0) && 3531 (replayext->sadb_x_rc_replay64 != 0)) { 3532 error = EOPNOTSUPP; 3533 *diagnostic = SADB_X_DIAGNOSTIC_INVALID_REPLAY; 3534 mutex_exit(&newbie->ipsa_lock); 3535 goto error; 3536 } 3537 newbie->ipsa_replay = replayext->sadb_x_rc_replay32; 3538 } 3539 3540 /* now that the SA has been updated, set its new state */ 3541 newbie->ipsa_state = assoc->sadb_sa_state; 3542 3543 if (clone) { 3544 newbie->ipsa_haspeer = B_TRUE; 3545 } else { 3546 if (!is_inbound) { 3547 lifetime_fuzz(newbie); 3548 } 3549 } 3550 /* 3551 * The less locks I hold when doing an insertion and possible cloning, 3552 * the better! 
3553 */ 3554 mutex_exit(&newbie->ipsa_lock); 3555 3556 if (clone) { 3557 newbie_clone = sadb_cloneassoc(newbie); 3558 3559 if (newbie_clone == NULL) { 3560 error = ENOMEM; 3561 goto error; 3562 } 3563 } 3564 3565 /* 3566 * Enter the bucket locks. The order of entry is outbound, 3567 * inbound. We map "primary" and "secondary" into outbound and inbound 3568 * based on the destination address type. If the destination address 3569 * type is for a node that isn't mine (or potentially mine), the 3570 * "primary" bucket is the outbound one. 3571 */ 3572 if (!is_inbound) { 3573 /* primary == outbound */ 3574 mutex_enter(&primary->isaf_lock); 3575 mutex_enter(&secondary->isaf_lock); 3576 } else { 3577 /* primary == inbound */ 3578 mutex_enter(&secondary->isaf_lock); 3579 mutex_enter(&primary->isaf_lock); 3580 } 3581 3582 /* 3583 * sadb_insertassoc() doesn't increment the reference 3584 * count. We therefore have to increment the 3585 * reference count one more time to reflect the 3586 * pointers of the table that reference this SA. 3587 */ 3588 IPSA_REFHOLD(newbie); 3589 3590 if (isupdate) { 3591 /* 3592 * Unlink from larval holding cell in the "inbound" fanout. 3593 */ 3594 ASSERT(newbie->ipsa_linklock == &primary->isaf_lock || 3595 newbie->ipsa_linklock == &secondary->isaf_lock); 3596 sadb_unlinkassoc(newbie); 3597 } 3598 3599 mutex_enter(&newbie->ipsa_lock); 3600 error = sadb_insertassoc(newbie, primary); 3601 mutex_exit(&newbie->ipsa_lock); 3602 3603 if (error != 0) { 3604 /* 3605 * Since sadb_insertassoc() failed, we must decrement the 3606 * refcount again so the cleanup code will actually free 3607 * the offending SA. 3608 */ 3609 IPSA_REFRELE(newbie); 3610 goto error_unlock; 3611 } 3612 3613 if (newbie_clone != NULL) { 3614 mutex_enter(&newbie_clone->ipsa_lock); 3615 error = sadb_insertassoc(newbie_clone, secondary); 3616 mutex_exit(&newbie_clone->ipsa_lock); 3617 if (error != 0) { 3618 /* Collision in secondary table. */ 3619 sadb_unlinkassoc(newbie); /* This does REFRELE. */ 3620 goto error_unlock; 3621 } 3622 IPSA_REFHOLD(newbie_clone); 3623 } else { 3624 ASSERT(primary != secondary); 3625 scratch = ipsec_getassocbyspi(secondary, newbie->ipsa_spi, 3626 ALL_ZEROES_PTR, newbie->ipsa_dstaddr, af); 3627 if (scratch != NULL) { 3628 /* Collision in secondary table. */ 3629 sadb_unlinkassoc(newbie); /* This does REFRELE. */ 3630 /* Set the error, since ipsec_getassocbyspi() can't. */ 3631 error = EEXIST; 3632 goto error_unlock; 3633 } 3634 } 3635 3636 /* OKAY! So let's do some reality check assertions. */ 3637 3638 ASSERT(MUTEX_NOT_HELD(&newbie->ipsa_lock)); 3639 ASSERT(newbie_clone == NULL || 3640 (MUTEX_NOT_HELD(&newbie_clone->ipsa_lock))); 3641 3642 error_unlock: 3643 3644 /* 3645 * We can exit the locks in any order. Only entrance needs to 3646 * follow any protocol. 3647 */ 3648 mutex_exit(&secondary->isaf_lock); 3649 mutex_exit(&primary->isaf_lock); 3650 3651 if (pair_ext != NULL && error == 0) { 3652 /* update pair_spi if it exists. 
*/ 3653 ipsa_query_t sq; 3654 3655 sq.spp = spp; /* XXX param */ 3656 error = sadb_form_query(ksi, IPSA_Q_DST, IPSA_Q_SRC|IPSA_Q_DST| 3657 IPSA_Q_SA|IPSA_Q_INBOUND|IPSA_Q_OUTBOUND, &sq, diagnostic); 3658 if (error) 3659 return (error); 3660 3661 error = get_ipsa_pair(&sq, &ipsapp, diagnostic); 3662 3663 if (error != 0) 3664 goto error; 3665 3666 if (ipsapp.ipsap_psa_ptr != NULL) { 3667 *diagnostic = SADB_X_DIAGNOSTIC_PAIR_ALREADY; 3668 error = EINVAL; 3669 } else { 3670 /* update_pairing() sets diagnostic */ 3671 error = update_pairing(&ipsapp, &sq, ksi, diagnostic); 3672 } 3673 } 3674 /* Common error point for this routine. */ 3675 error: 3676 if (newbie != NULL) { 3677 if (error != 0) { 3678 /* This SA is broken, let the reaper clean up. */ 3679 mutex_enter(&newbie->ipsa_lock); 3680 newbie->ipsa_state = IPSA_STATE_DEAD; 3681 newbie->ipsa_hardexpiretime = 1; 3682 mutex_exit(&newbie->ipsa_lock); 3683 } 3684 IPSA_REFRELE(newbie); 3685 } 3686 if (newbie_clone != NULL) { 3687 IPSA_REFRELE(newbie_clone); 3688 } 3689 3690 if (error == 0) { 3691 /* 3692 * Construct favorable PF_KEY return message and send to 3693 * keysock. Update the flags in the original keysock message 3694 * to reflect the actual flags in the new SA. 3695 * (Q: Do I need to pass "newbie"? If I do, 3696 * make sure to REFHOLD, call, then REFRELE.) 3697 */ 3698 assoc->sadb_sa_flags = newbie->ipsa_flags; 3699 sadb_pfkey_echo(pfkey_q, mp, samsg, ksi, NULL); 3700 } 3701 3702 destroy_ipsa_pair(&ipsapp); 3703 return (error); 3704 } 3705 3706 /* 3707 * Set the time of first use for a security association. Update any 3708 * expiration times as a result. 3709 */ 3710 void 3711 sadb_set_usetime(ipsa_t *assoc) 3712 { 3713 time_t snapshot = gethrestime_sec(); 3714 3715 mutex_enter(&assoc->ipsa_lock); 3716 assoc->ipsa_lastuse = snapshot; 3717 assoc->ipsa_idleexpiretime = snapshot + assoc->ipsa_idletime; 3718 3719 /* 3720 * Caller does check usetime before calling me usually, and 3721 * double-checking is better than a mutex_enter/exit hit. 3722 */ 3723 if (assoc->ipsa_usetime == 0) { 3724 /* 3725 * This is redundant for outbound SA's, as 3726 * ipsec_getassocbyconn() sets the IPSA_F_USED flag already. 3727 * Inbound SAs, however, have no such protection. 3728 */ 3729 assoc->ipsa_flags |= IPSA_F_USED; 3730 assoc->ipsa_usetime = snapshot; 3731 3732 /* 3733 * After setting the use time, see if we have a use lifetime 3734 * that would cause the actual SA expiration time to shorten. 3735 */ 3736 UPDATE_EXPIRE(assoc, softuselt, softexpiretime); 3737 UPDATE_EXPIRE(assoc, harduselt, hardexpiretime); 3738 } 3739 mutex_exit(&assoc->ipsa_lock); 3740 } 3741 3742 /* 3743 * Send up a PF_KEY expire message for this association. 3744 */ 3745 static void 3746 sadb_expire_assoc(queue_t *pfkey_q, ipsa_t *assoc) 3747 { 3748 mblk_t *mp, *mp1; 3749 int alloclen, af; 3750 sadb_msg_t *samsg; 3751 sadb_lifetime_t *current, *expire; 3752 sadb_sa_t *saext; 3753 uint8_t *end; 3754 boolean_t tunnel_mode; 3755 3756 ASSERT(MUTEX_HELD(&assoc->ipsa_lock)); 3757 3758 /* Don't bother sending if there's no queue. 
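 (This can happen, e.g., if the consumer's PF_KEY queue has not been plumbed yet.)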
*/ 3759 if (pfkey_q == NULL) 3760 return; 3761 3762 mp = sadb_keysock_out(0); 3763 if (mp == NULL) { 3764 /* cmn_err(CE_WARN, */ 3765 /* "sadb_expire_assoc: Can't allocate KEYSOCK_OUT.\n"); */ 3766 return; 3767 } 3768 3769 alloclen = sizeof (*samsg) + sizeof (*current) + sizeof (*expire) + 3770 2 * sizeof (sadb_address_t) + sizeof (*saext); 3771 3772 af = assoc->ipsa_addrfam; 3773 switch (af) { 3774 case AF_INET: 3775 alloclen += 2 * sizeof (struct sockaddr_in); 3776 break; 3777 case AF_INET6: 3778 alloclen += 2 * sizeof (struct sockaddr_in6); 3779 break; 3780 default: 3781 /* Won't happen unless there's a kernel bug. */ 3782 freeb(mp); 3783 cmn_err(CE_WARN, 3784 "sadb_expire_assoc: Unknown address length.\n"); 3785 return; 3786 } 3787 3788 tunnel_mode = (assoc->ipsa_flags & IPSA_F_TUNNEL); 3789 if (tunnel_mode) { 3790 alloclen += 2 * sizeof (sadb_address_t); 3791 switch (assoc->ipsa_innerfam) { 3792 case AF_INET: 3793 alloclen += 2 * sizeof (struct sockaddr_in); 3794 break; 3795 case AF_INET6: 3796 alloclen += 2 * sizeof (struct sockaddr_in6); 3797 break; 3798 default: 3799 /* Won't happen unless there's a kernel bug. */ 3800 freeb(mp); 3801 cmn_err(CE_WARN, "sadb_expire_assoc: " 3802 "Unknown inner address length.\n"); 3803 return; 3804 } 3805 } 3806 3807 mp->b_cont = allocb(alloclen, BPRI_HI); 3808 if (mp->b_cont == NULL) { 3809 freeb(mp); 3810 /* cmn_err(CE_WARN, */ 3811 /* "sadb_expire_assoc: Can't allocate message.\n"); */ 3812 return; 3813 } 3814 3815 mp1 = mp; 3816 mp = mp->b_cont; 3817 end = mp->b_wptr + alloclen; 3818 3819 samsg = (sadb_msg_t *)mp->b_wptr; 3820 mp->b_wptr += sizeof (*samsg); 3821 samsg->sadb_msg_version = PF_KEY_V2; 3822 samsg->sadb_msg_type = SADB_EXPIRE; 3823 samsg->sadb_msg_errno = 0; 3824 samsg->sadb_msg_satype = assoc->ipsa_type; 3825 samsg->sadb_msg_len = SADB_8TO64(alloclen); 3826 samsg->sadb_msg_reserved = 0; 3827 samsg->sadb_msg_seq = 0; 3828 samsg->sadb_msg_pid = 0; 3829 3830 saext = (sadb_sa_t *)mp->b_wptr; 3831 mp->b_wptr += sizeof (*saext); 3832 saext->sadb_sa_len = SADB_8TO64(sizeof (*saext)); 3833 saext->sadb_sa_exttype = SADB_EXT_SA; 3834 saext->sadb_sa_spi = assoc->ipsa_spi; 3835 saext->sadb_sa_replay = assoc->ipsa_replay_wsize; 3836 saext->sadb_sa_state = assoc->ipsa_state; 3837 saext->sadb_sa_auth = assoc->ipsa_auth_alg; 3838 saext->sadb_sa_encrypt = assoc->ipsa_encr_alg; 3839 saext->sadb_sa_flags = assoc->ipsa_flags; 3840 3841 current = (sadb_lifetime_t *)mp->b_wptr; 3842 mp->b_wptr += sizeof (sadb_lifetime_t); 3843 current->sadb_lifetime_len = SADB_8TO64(sizeof (*current)); 3844 current->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT; 3845 /* We do not support the concept. 
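 (The current allocation count is always reported as zero.)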
*/ 3846 current->sadb_lifetime_allocations = 0; 3847 current->sadb_lifetime_bytes = assoc->ipsa_bytes; 3848 current->sadb_lifetime_addtime = assoc->ipsa_addtime; 3849 current->sadb_lifetime_usetime = assoc->ipsa_usetime; 3850 3851 expire = (sadb_lifetime_t *)mp->b_wptr; 3852 mp->b_wptr += sizeof (*expire); 3853 expire->sadb_lifetime_len = SADB_8TO64(sizeof (*expire)); 3854 3855 if (assoc->ipsa_state == IPSA_STATE_DEAD) { 3856 expire->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD; 3857 expire->sadb_lifetime_allocations = assoc->ipsa_hardalloc; 3858 expire->sadb_lifetime_bytes = assoc->ipsa_hardbyteslt; 3859 expire->sadb_lifetime_addtime = assoc->ipsa_hardaddlt; 3860 expire->sadb_lifetime_usetime = assoc->ipsa_harduselt; 3861 } else if (assoc->ipsa_state == IPSA_STATE_DYING) { 3862 expire->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT; 3863 expire->sadb_lifetime_allocations = assoc->ipsa_softalloc; 3864 expire->sadb_lifetime_bytes = assoc->ipsa_softbyteslt; 3865 expire->sadb_lifetime_addtime = assoc->ipsa_softaddlt; 3866 expire->sadb_lifetime_usetime = assoc->ipsa_softuselt; 3867 } else { 3868 ASSERT(assoc->ipsa_state == IPSA_STATE_MATURE); 3869 expire->sadb_lifetime_exttype = SADB_X_EXT_LIFETIME_IDLE; 3870 expire->sadb_lifetime_allocations = 0; 3871 expire->sadb_lifetime_bytes = 0; 3872 expire->sadb_lifetime_addtime = assoc->ipsa_idleaddlt; 3873 expire->sadb_lifetime_usetime = assoc->ipsa_idleuselt; 3874 } 3875 3876 mp->b_wptr = sadb_make_addr_ext(mp->b_wptr, end, SADB_EXT_ADDRESS_SRC, 3877 af, assoc->ipsa_srcaddr, tunnel_mode ? 0 : SA_SRCPORT(assoc), 3878 SA_PROTO(assoc), 0); 3879 ASSERT(mp->b_wptr != NULL); 3880 3881 mp->b_wptr = sadb_make_addr_ext(mp->b_wptr, end, SADB_EXT_ADDRESS_DST, 3882 af, assoc->ipsa_dstaddr, tunnel_mode ? 0 : SA_DSTPORT(assoc), 3883 SA_PROTO(assoc), 0); 3884 ASSERT(mp->b_wptr != NULL); 3885 3886 if (tunnel_mode) { 3887 mp->b_wptr = sadb_make_addr_ext(mp->b_wptr, end, 3888 SADB_X_EXT_ADDRESS_INNER_SRC, assoc->ipsa_innerfam, 3889 assoc->ipsa_innersrc, SA_SRCPORT(assoc), SA_IPROTO(assoc), 3890 assoc->ipsa_innersrcpfx); 3891 ASSERT(mp->b_wptr != NULL); 3892 mp->b_wptr = sadb_make_addr_ext(mp->b_wptr, end, 3893 SADB_X_EXT_ADDRESS_INNER_DST, assoc->ipsa_innerfam, 3894 assoc->ipsa_innerdst, SA_DSTPORT(assoc), SA_IPROTO(assoc), 3895 assoc->ipsa_innerdstpfx); 3896 ASSERT(mp->b_wptr != NULL); 3897 } 3898 3899 /* Can just putnext, we're ready to go! */ 3900 putnext(pfkey_q, mp1); 3901 } 3902 3903 /* 3904 * "Age" the SA with the number of bytes that was used to protect traffic. 3905 * Send an SADB_EXPIRE message if appropriate. Return B_TRUE if there was 3906 * enough "charge" left in the SA to protect the data. Return B_FALSE 3907 * otherwise. (If B_FALSE is returned, the association either was, or became 3908 * DEAD.) 3909 */ 3910 boolean_t 3911 sadb_age_bytes(queue_t *pfkey_q, ipsa_t *assoc, uint64_t bytes, 3912 boolean_t sendmsg) 3913 { 3914 boolean_t rc = B_TRUE; 3915 uint64_t newtotal; 3916 3917 mutex_enter(&assoc->ipsa_lock); 3918 newtotal = assoc->ipsa_bytes + bytes; 3919 if (assoc->ipsa_hardbyteslt != 0 && 3920 newtotal >= assoc->ipsa_hardbyteslt) { 3921 if (assoc->ipsa_state != IPSA_STATE_DEAD) { 3922 sadb_delete_cluster(assoc); 3923 /* 3924 * Send EXPIRE message to PF_KEY. May wish to pawn 3925 * this off on another non-interrupt thread. Also 3926 * unlink this SA immediately. 
3927 */ 3928 assoc->ipsa_state = IPSA_STATE_DEAD; 3929 if (sendmsg) 3930 sadb_expire_assoc(pfkey_q, assoc); 3931 /* 3932 * Set non-zero expiration time so sadb_age_assoc() 3933 * will work when reaping. 3934 */ 3935 assoc->ipsa_hardexpiretime = (time_t)1; 3936 } /* Else someone beat me to it! */ 3937 rc = B_FALSE; 3938 } else if (assoc->ipsa_softbyteslt != 0 && 3939 (newtotal >= assoc->ipsa_softbyteslt)) { 3940 if (assoc->ipsa_state < IPSA_STATE_DYING) { 3941 /* 3942 * Send EXPIRE message to PF_KEY. May wish to pawn 3943 * this off on another non-interrupt thread. 3944 */ 3945 assoc->ipsa_state = IPSA_STATE_DYING; 3946 assoc->ipsa_bytes = newtotal; 3947 if (sendmsg) 3948 sadb_expire_assoc(pfkey_q, assoc); 3949 } /* Else someone beat me to it! */ 3950 } 3951 if (rc == B_TRUE) 3952 assoc->ipsa_bytes = newtotal; 3953 mutex_exit(&assoc->ipsa_lock); 3954 return (rc); 3955 } 3956 3957 /* 3958 * "Torch" an individual SA. Returns NULL, so it can be tail-called from 3959 * sadb_age_assoc(). 3960 */ 3961 static ipsa_t * 3962 sadb_torch_assoc(isaf_t *head, ipsa_t *sa) 3963 { 3964 ASSERT(MUTEX_HELD(&head->isaf_lock)); 3965 ASSERT(MUTEX_HELD(&sa->ipsa_lock)); 3966 ASSERT(sa->ipsa_state == IPSA_STATE_DEAD); 3967 3968 /* 3969 * Force cached SAs to be revalidated.. 3970 */ 3971 head->isaf_gen++; 3972 3973 mutex_exit(&sa->ipsa_lock); 3974 sadb_unlinkassoc(sa); 3975 3976 return (NULL); 3977 } 3978 3979 /* 3980 * Do various SA-is-idle activities depending on delta (the number of idle 3981 * seconds on the SA) and/or other properties of the SA. 3982 * 3983 * Return B_TRUE if I've sent a packet, because I have to drop the 3984 * association's mutex before sending a packet out the wire. 3985 */ 3986 /* ARGSUSED */ 3987 static boolean_t 3988 sadb_idle_activities(ipsa_t *assoc, time_t delta, boolean_t inbound) 3989 { 3990 ipsecesp_stack_t *espstack = assoc->ipsa_netstack->netstack_ipsecesp; 3991 int nat_t_interval = espstack->ipsecesp_nat_keepalive_interval; 3992 3993 ASSERT(MUTEX_HELD(&assoc->ipsa_lock)); 3994 3995 if (!inbound && (assoc->ipsa_flags & IPSA_F_NATT_LOC) && 3996 delta >= nat_t_interval && 3997 gethrestime_sec() - assoc->ipsa_last_nat_t_ka >= nat_t_interval) { 3998 ASSERT(assoc->ipsa_type == SADB_SATYPE_ESP); 3999 assoc->ipsa_last_nat_t_ka = gethrestime_sec(); 4000 mutex_exit(&assoc->ipsa_lock); 4001 ipsecesp_send_keepalive(assoc); 4002 return (B_TRUE); 4003 } 4004 return (B_FALSE); 4005 } 4006 4007 /* 4008 * Return "assoc" if haspeer is true and I send an expire. This allows 4009 * the consumers' aging functions to tidy up an expired SA's peer. 4010 */ 4011 static ipsa_t * 4012 sadb_age_assoc(isaf_t *head, queue_t *pfkey_q, ipsa_t *assoc, 4013 time_t current, int reap_delay, boolean_t inbound) 4014 { 4015 ipsa_t *retval = NULL; 4016 boolean_t dropped_mutex = B_FALSE; 4017 4018 ASSERT(MUTEX_HELD(&head->isaf_lock)); 4019 4020 mutex_enter(&assoc->ipsa_lock); 4021 4022 if (((assoc->ipsa_state == IPSA_STATE_LARVAL) || 4023 ((assoc->ipsa_state == IPSA_STATE_IDLE) || 4024 (assoc->ipsa_state == IPSA_STATE_ACTIVE_ELSEWHERE) && 4025 (assoc->ipsa_hardexpiretime != 0))) && 4026 (assoc->ipsa_hardexpiretime <= current)) { 4027 assoc->ipsa_state = IPSA_STATE_DEAD; 4028 return (sadb_torch_assoc(head, assoc)); 4029 } 4030 4031 /* 4032 * Check lifetimes. Fortunately, SA setup is done 4033 * such that there are only two times to look at, 4034 * softexpiretime, and hardexpiretime. 4035 * 4036 * Check hard first. 
4037 */ 4038 4039 if (assoc->ipsa_hardexpiretime != 0 && 4040 assoc->ipsa_hardexpiretime <= current) { 4041 if (assoc->ipsa_state == IPSA_STATE_DEAD) 4042 return (sadb_torch_assoc(head, assoc)); 4043 4044 if (inbound) { 4045 sadb_delete_cluster(assoc); 4046 } 4047 4048 /* 4049 * Send SADB_EXPIRE with hard lifetime, delay for unlinking. 4050 */ 4051 assoc->ipsa_state = IPSA_STATE_DEAD; 4052 if (assoc->ipsa_haspeer || assoc->ipsa_otherspi != 0) { 4053 /* 4054 * If the SA is paired or peered with another, put 4055 * a copy on a list which can be processed later; the 4056 * pair/peer SA needs to be updated so they both die 4057 * at the same time. 4058 * 4059 * If I return assoc, I have to bump up its reference 4060 * count to keep with the ipsa_t reference count 4061 * semantics. 4062 */ 4063 IPSA_REFHOLD(assoc); 4064 retval = assoc; 4065 } 4066 sadb_expire_assoc(pfkey_q, assoc); 4067 assoc->ipsa_hardexpiretime = current + reap_delay; 4068 } else if (assoc->ipsa_softexpiretime != 0 && 4069 assoc->ipsa_softexpiretime <= current && 4070 assoc->ipsa_state < IPSA_STATE_DYING) { 4071 /* 4072 * Send EXPIRE message to PF_KEY. May wish to pawn 4073 * this off on another non-interrupt thread. 4074 */ 4075 assoc->ipsa_state = IPSA_STATE_DYING; 4076 if (assoc->ipsa_haspeer) { 4077 /* 4078 * If the SA has a peer, update the peer's state 4079 * on SOFT_EXPIRE; this is mostly to prevent two 4080 * expire messages from effectively the same SA. 4081 * 4082 * Don't care about paired SA's; they can (and should) 4083 * be able to soft expire at different times. 4084 * 4085 * If I return assoc, I have to bump up its 4086 * reference count to keep with the ipsa_t reference 4087 * count semantics. 4088 */ 4089 IPSA_REFHOLD(assoc); 4090 retval = assoc; 4091 } 4092 sadb_expire_assoc(pfkey_q, assoc); 4093 } else if (assoc->ipsa_idletime != 0 && 4094 assoc->ipsa_idleexpiretime <= current) { 4095 if (assoc->ipsa_state == IPSA_STATE_ACTIVE_ELSEWHERE) { 4096 assoc->ipsa_state = IPSA_STATE_IDLE; 4097 } 4098 4099 /* 4100 * Need to handle Mature case 4101 */ 4102 if (assoc->ipsa_state == IPSA_STATE_MATURE) { 4103 sadb_expire_assoc(pfkey_q, assoc); 4104 } 4105 } else { 4106 /* Check idle time activities. */ 4107 dropped_mutex = sadb_idle_activities(assoc, 4108 current - assoc->ipsa_lastuse, inbound); 4109 } 4110 4111 if (!dropped_mutex) 4112 mutex_exit(&assoc->ipsa_lock); 4113 return (retval); 4114 } 4115 4116 /* 4117 * Called by a consumer protocol to do their dirty work of reaping dead 4118 * Security Associations. 4119 * 4120 * NOTE: sadb_age_assoc() marks expired SA's as DEAD but only removes 4121 * SA's that are already marked DEAD, so expired SA's are only reaped 4122 * the second time sadb_ager() runs. 4123 */ 4124 void 4125 sadb_ager(sadb_t *sp, queue_t *pfkey_q, int reap_delay, netstack_t *ns) 4126 { 4127 int i; 4128 isaf_t *bucket; 4129 ipsa_t *assoc, *spare; 4130 iacqf_t *acqlist; 4131 ipsacq_t *acqrec, *spareacq; 4132 templist_t *haspeerlist, *newbie; 4133 /* Snapshot current time now. */ 4134 time_t current = gethrestime_sec(); 4135 haspeerlist = NULL; 4136 4137 /* 4138 * Do my dirty work. This includes aging real entries, aging 4139 * larvals, and aging outstanding ACQUIREs. 4140 * 4141 * I hope I don't tie up resources for too long. 4142 */ 4143 4144 /* Age acquires.
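 Records that have outlived their ipsacq_expire time are torn down here via sadb_destroy_acquire().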
*/ 4145 4146 for (i = 0; i < sp->sdb_hashsize; i++) { 4147 acqlist = &sp->sdb_acq[i]; 4148 mutex_enter(&acqlist->iacqf_lock); 4149 for (acqrec = acqlist->iacqf_ipsacq; acqrec != NULL; 4150 acqrec = spareacq) { 4151 spareacq = acqrec->ipsacq_next; 4152 if (current > acqrec->ipsacq_expire) 4153 sadb_destroy_acquire(acqrec, ns); 4154 } 4155 mutex_exit(&acqlist->iacqf_lock); 4156 } 4157 4158 /* Age inbound associations. */ 4159 for (i = 0; i < sp->sdb_hashsize; i++) { 4160 bucket = &(sp->sdb_if[i]); 4161 mutex_enter(&bucket->isaf_lock); 4162 for (assoc = bucket->isaf_ipsa; assoc != NULL; 4163 assoc = spare) { 4164 spare = assoc->ipsa_next; 4165 if (sadb_age_assoc(bucket, pfkey_q, assoc, current, 4166 reap_delay, B_TRUE) != NULL) { 4167 /* 4168 * Put SA's which have a peer or SA's which 4169 * are paired on a list for processing after 4170 * all the hash tables have been walked. 4171 * 4172 * sadb_age_assoc() increments the refcnt, 4173 * effectively doing an IPSA_REFHOLD(). 4174 */ 4175 newbie = kmem_alloc(sizeof (*newbie), 4176 KM_NOSLEEP); 4177 if (newbie == NULL) { 4178 /* 4179 * Don't forget to REFRELE(). 4180 */ 4181 IPSA_REFRELE(assoc); 4182 continue; /* for loop... */ 4183 } 4184 newbie->next = haspeerlist; 4185 newbie->ipsa = assoc; 4186 haspeerlist = newbie; 4187 } 4188 } 4189 mutex_exit(&bucket->isaf_lock); 4190 } 4191 4192 age_pair_peer_list(haspeerlist, sp, B_FALSE); 4193 haspeerlist = NULL; 4194 4195 /* Age outbound associations. */ 4196 for (i = 0; i < sp->sdb_hashsize; i++) { 4197 bucket = &(sp->sdb_of[i]); 4198 mutex_enter(&bucket->isaf_lock); 4199 for (assoc = bucket->isaf_ipsa; assoc != NULL; 4200 assoc = spare) { 4201 spare = assoc->ipsa_next; 4202 if (sadb_age_assoc(bucket, pfkey_q, assoc, current, 4203 reap_delay, B_FALSE) != NULL) { 4204 /* 4205 * sadb_age_assoc() increments the refcnt, 4206 * effectively doing an IPSA_REFHOLD(). 4207 */ 4208 newbie = kmem_alloc(sizeof (*newbie), 4209 KM_NOSLEEP); 4210 if (newbie == NULL) { 4211 /* 4212 * Don't forget to REFRELE(). 4213 */ 4214 IPSA_REFRELE(assoc); 4215 continue; /* for loop... */ 4216 } 4217 newbie->next = haspeerlist; 4218 newbie->ipsa = assoc; 4219 haspeerlist = newbie; 4220 } 4221 } 4222 mutex_exit(&bucket->isaf_lock); 4223 } 4224 4225 age_pair_peer_list(haspeerlist, sp, B_TRUE); 4226 4227 /* 4228 * Run a GC pass to clean out dead identities. 4229 */ 4230 ipsid_gc(ns); 4231 } 4232 4233 /* 4234 * Figure out when to reschedule the ager. 4235 */ 4236 timeout_id_t 4237 sadb_retimeout(hrtime_t begin, queue_t *pfkey_q, void (*ager)(void *), 4238 void *agerarg, uint_t *intp, uint_t intmax, short mid) 4239 { 4240 hrtime_t end = gethrtime(); 4241 uint_t interval = *intp; /* "interval" is in ms. */ 4242 4243 /* 4244 * See how long this took. If it took too long, increase the 4245 * aging interval. 4246 */ 4247 if ((end - begin) > MSEC2NSEC(interval)) { 4248 if (interval >= intmax) { 4249 /* XXX Rate limit this? Or recommend flush? */ 4250 (void) strlog(mid, 0, 0, SL_ERROR | SL_WARN, 4251 "Too many SA's to age out in %d msec.\n", 4252 intmax); 4253 } else { 4254 /* Double by shifting by one bit. */ 4255 interval <<= 1; 4256 interval = min(interval, intmax); 4257 } 4258 } else if ((end - begin) <= (MSEC2NSEC(interval) / 2) && 4259 interval > SADB_AGE_INTERVAL_DEFAULT) { 4260 /* 4261 * If I took less than half of the interval, then I should 4262 * ratchet the interval back down. Never automatically 4263 * shift below the default aging interval. 
4264 * 4265 * NOTE:This even overrides manual setting of the age 4266 * interval using NDD to lower the setting past the 4267 * default. In other words, if you set the interval 4268 * lower than the default, and your SADB gets too big, 4269 * the interval will only self-lower back to the default. 4270 */ 4271 /* Halve by shifting one bit. */ 4272 interval >>= 1; 4273 interval = max(interval, SADB_AGE_INTERVAL_DEFAULT); 4274 } 4275 *intp = interval; 4276 return (qtimeout(pfkey_q, ager, agerarg, 4277 drv_usectohz(interval * (MICROSEC / MILLISEC)))); 4278 } 4279 4280 4281 /* 4282 * Update the lifetime values of an SA. This is the path an SADB_UPDATE 4283 * message takes when updating a MATURE or DYING SA. 4284 */ 4285 static void 4286 sadb_update_lifetimes(ipsa_t *assoc, sadb_lifetime_t *hard, 4287 sadb_lifetime_t *soft, sadb_lifetime_t *idle, boolean_t outbound) 4288 { 4289 mutex_enter(&assoc->ipsa_lock); 4290 4291 /* 4292 * XXX RFC 2367 mentions how an SADB_EXT_LIFETIME_CURRENT can be 4293 * passed in during an update message. We currently don't handle 4294 * these. 4295 */ 4296 4297 if (hard != NULL) { 4298 if (hard->sadb_lifetime_bytes != 0) 4299 assoc->ipsa_hardbyteslt = hard->sadb_lifetime_bytes; 4300 if (hard->sadb_lifetime_usetime != 0) 4301 assoc->ipsa_harduselt = hard->sadb_lifetime_usetime; 4302 if (hard->sadb_lifetime_addtime != 0) 4303 assoc->ipsa_hardaddlt = hard->sadb_lifetime_addtime; 4304 if (assoc->ipsa_hardaddlt != 0) { 4305 assoc->ipsa_hardexpiretime = 4306 assoc->ipsa_addtime + assoc->ipsa_hardaddlt; 4307 } 4308 if (assoc->ipsa_harduselt != 0 && 4309 assoc->ipsa_flags & IPSA_F_USED) { 4310 UPDATE_EXPIRE(assoc, harduselt, hardexpiretime); 4311 } 4312 if (hard->sadb_lifetime_allocations != 0) 4313 assoc->ipsa_hardalloc = hard->sadb_lifetime_allocations; 4314 } 4315 4316 if (soft != NULL) { 4317 if (soft->sadb_lifetime_bytes != 0) { 4318 if (soft->sadb_lifetime_bytes > 4319 assoc->ipsa_hardbyteslt) { 4320 assoc->ipsa_softbyteslt = 4321 assoc->ipsa_hardbyteslt; 4322 } else { 4323 assoc->ipsa_softbyteslt = 4324 soft->sadb_lifetime_bytes; 4325 } 4326 } 4327 if (soft->sadb_lifetime_usetime != 0) { 4328 if (soft->sadb_lifetime_usetime > 4329 assoc->ipsa_harduselt) { 4330 assoc->ipsa_softuselt = 4331 assoc->ipsa_harduselt; 4332 } else { 4333 assoc->ipsa_softuselt = 4334 soft->sadb_lifetime_usetime; 4335 } 4336 } 4337 if (soft->sadb_lifetime_addtime != 0) { 4338 if (soft->sadb_lifetime_addtime > 4339 assoc->ipsa_hardexpiretime) { 4340 assoc->ipsa_softexpiretime = 4341 assoc->ipsa_hardexpiretime; 4342 } else { 4343 assoc->ipsa_softaddlt = 4344 soft->sadb_lifetime_addtime; 4345 } 4346 } 4347 if (assoc->ipsa_softaddlt != 0) { 4348 assoc->ipsa_softexpiretime = 4349 assoc->ipsa_addtime + assoc->ipsa_softaddlt; 4350 } 4351 if (assoc->ipsa_softuselt != 0 && 4352 assoc->ipsa_flags & IPSA_F_USED) { 4353 UPDATE_EXPIRE(assoc, softuselt, softexpiretime); 4354 } 4355 if (outbound && assoc->ipsa_softexpiretime != 0) { 4356 if (assoc->ipsa_state == IPSA_STATE_MATURE) 4357 lifetime_fuzz(assoc); 4358 } 4359 4360 if (soft->sadb_lifetime_allocations != 0) 4361 assoc->ipsa_softalloc = soft->sadb_lifetime_allocations; 4362 } 4363 4364 if (idle != NULL) { 4365 time_t current = gethrestime_sec(); 4366 if ((assoc->ipsa_idleexpiretime <= current) && 4367 (assoc->ipsa_idleaddlt == idle->sadb_lifetime_addtime)) { 4368 assoc->ipsa_idleexpiretime = 4369 current + assoc->ipsa_idleaddlt; 4370 } 4371 if (idle->sadb_lifetime_addtime != 0) 4372 assoc->ipsa_idleaddlt = idle->sadb_lifetime_addtime; 4373 if 
(idle->sadb_lifetime_usetime != 0) 4374 assoc->ipsa_idleuselt = idle->sadb_lifetime_usetime; 4375 if (assoc->ipsa_idleaddlt != 0) { 4376 assoc->ipsa_idleexpiretime = 4377 current + idle->sadb_lifetime_addtime; 4378 assoc->ipsa_idletime = idle->sadb_lifetime_addtime; 4379 } 4380 if (assoc->ipsa_idleuselt != 0) { 4381 if (assoc->ipsa_idletime != 0) { 4382 assoc->ipsa_idletime = min(assoc->ipsa_idletime, 4383 assoc->ipsa_idleuselt); 4384 assoc->ipsa_idleexpiretime = 4385 current + assoc->ipsa_idletime; 4386 } else { 4387 assoc->ipsa_idleexpiretime = 4388 current + assoc->ipsa_idleuselt; 4389 assoc->ipsa_idletime = assoc->ipsa_idleuselt; 4390 } 4391 } 4392 } 4393 mutex_exit(&assoc->ipsa_lock); 4394 } 4395 4396 static int 4397 sadb_update_state(ipsa_t *assoc, uint_t new_state, mblk_t **ipkt_lst) 4398 { 4399 int rcode = 0; 4400 time_t current = gethrestime_sec(); 4401 4402 mutex_enter(&assoc->ipsa_lock); 4403 4404 switch (new_state) { 4405 case SADB_X_SASTATE_ACTIVE_ELSEWHERE: 4406 if (assoc->ipsa_state == SADB_X_SASTATE_IDLE) { 4407 assoc->ipsa_state = IPSA_STATE_ACTIVE_ELSEWHERE; 4408 assoc->ipsa_idleexpiretime = 4409 current + assoc->ipsa_idletime; 4410 } 4411 break; 4412 case SADB_X_SASTATE_IDLE: 4413 if (assoc->ipsa_state == SADB_X_SASTATE_ACTIVE_ELSEWHERE) { 4414 assoc->ipsa_state = IPSA_STATE_IDLE; 4415 assoc->ipsa_idleexpiretime = 4416 current + assoc->ipsa_idletime; 4417 } else { 4418 rcode = EINVAL; 4419 } 4420 break; 4421 4422 case SADB_X_SASTATE_ACTIVE: 4423 if (assoc->ipsa_state != SADB_X_SASTATE_IDLE) { 4424 rcode = EINVAL; 4425 break; 4426 } 4427 assoc->ipsa_state = IPSA_STATE_MATURE; 4428 assoc->ipsa_idleexpiretime = current + assoc->ipsa_idletime; 4429 4430 if (ipkt_lst == NULL) { 4431 break; 4432 } 4433 4434 if (assoc->ipsa_bpkt_head != NULL) { 4435 *ipkt_lst = assoc->ipsa_bpkt_head; 4436 assoc->ipsa_bpkt_head = assoc->ipsa_bpkt_tail = NULL; 4437 assoc->ipsa_mblkcnt = 0; 4438 } else { 4439 *ipkt_lst = NULL; 4440 } 4441 break; 4442 default: 4443 rcode = EINVAL; 4444 break; 4445 } 4446 4447 mutex_exit(&assoc->ipsa_lock); 4448 return (rcode); 4449 } 4450 4451 /* 4452 * Check a proposed KMC update for sanity. 4453 */ 4454 static int 4455 sadb_check_kmc(ipsa_query_t *sq, ipsa_t *sa, int *diagnostic) 4456 { 4457 uint32_t kmp = sq->kmp; 4458 uint64_t kmc = sq->kmc; 4459 4460 if (sa == NULL) 4461 return (0); 4462 4463 if (sa->ipsa_state == IPSA_STATE_DEAD) 4464 return (ESRCH); /* DEAD == Not there, in this case. */ 4465 4466 if ((kmp != 0) && (sa->ipsa_kmp != 0) && (sa->ipsa_kmp != kmp)) { 4467 *diagnostic = SADB_X_DIAGNOSTIC_DUPLICATE_KMP; 4468 return (EINVAL); 4469 } 4470 4471 /* Allow IKEv2 KMCs to update the kmc value for rekeying */ 4472 if ((kmp != SADB_X_KMP_IKEV2) && (kmc != 0) && (sa->ipsa_kmc != 0) && 4473 (sa->ipsa_kmc != kmc)) { 4474 *diagnostic = SADB_X_DIAGNOSTIC_DUPLICATE_KMC; 4475 return (EINVAL); 4476 } 4477 4478 return (0); 4479 } 4480 4481 /* 4482 * Actually update the KMC info. 4483 */ 4484 static void 4485 sadb_update_kmc(ipsa_query_t *sq, ipsa_t *sa) 4486 { 4487 uint32_t kmp = sq->kmp; 4488 uint64_t kmc = sq->kmc; 4489 4490 if (kmp != 0) 4491 sa->ipsa_kmp = kmp; 4492 if (kmc != 0) 4493 sa->ipsa_kmc = kmc; 4494 } 4495 4496 /* 4497 * Common code to update an SA. 
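 * Handles SADB_UPDATE and SADB_X_UPDATEPAIR; an update of a LARVAL SA is
 * handed back to the protocol's add_sa function instead.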
4498 */ 4499 4500 int 4501 sadb_update_sa(mblk_t *mp, keysock_in_t *ksi, mblk_t **ipkt_lst, 4502 sadbp_t *spp, int *diagnostic, queue_t *pfkey_q, 4503 int (*add_sa_func)(mblk_t *, keysock_in_t *, int *, netstack_t *), 4504 netstack_t *ns, uint8_t sadb_msg_type) 4505 { 4506 sadb_key_t *akey = (sadb_key_t *)ksi->ks_in_extv[SADB_EXT_KEY_AUTH]; 4507 sadb_key_t *ekey = (sadb_key_t *)ksi->ks_in_extv[SADB_EXT_KEY_ENCRYPT]; 4508 sadb_x_replay_ctr_t *replext = 4509 (sadb_x_replay_ctr_t *)ksi->ks_in_extv[SADB_X_EXT_REPLAY_VALUE]; 4510 sadb_lifetime_t *soft = 4511 (sadb_lifetime_t *)ksi->ks_in_extv[SADB_EXT_LIFETIME_SOFT]; 4512 sadb_lifetime_t *hard = 4513 (sadb_lifetime_t *)ksi->ks_in_extv[SADB_EXT_LIFETIME_HARD]; 4514 sadb_lifetime_t *idle = 4515 (sadb_lifetime_t *)ksi->ks_in_extv[SADB_X_EXT_LIFETIME_IDLE]; 4516 sadb_x_pair_t *pair_ext = 4517 (sadb_x_pair_t *)ksi->ks_in_extv[SADB_X_EXT_PAIR]; 4518 ipsa_t *echo_target = NULL; 4519 ipsap_t ipsapp; 4520 ipsa_query_t sq; 4521 time_t current = gethrestime_sec(); 4522 4523 sq.spp = spp; /* XXX param */ 4524 int error = sadb_form_query(ksi, IPSA_Q_SRC|IPSA_Q_DST|IPSA_Q_SA, 4525 IPSA_Q_SRC|IPSA_Q_DST|IPSA_Q_SA|IPSA_Q_INBOUND|IPSA_Q_OUTBOUND| 4526 IPSA_Q_KMC, 4527 &sq, diagnostic); 4528 4529 if (error != 0) 4530 return (error); 4531 4532 error = get_ipsa_pair(&sq, &ipsapp, diagnostic); 4533 if (error != 0) 4534 return (error); 4535 4536 if (ipsapp.ipsap_psa_ptr == NULL && ipsapp.ipsap_sa_ptr != NULL) { 4537 if (ipsapp.ipsap_sa_ptr->ipsa_state == IPSA_STATE_LARVAL) { 4538 /* 4539 * REFRELE the target and let the add_sa_func() 4540 * deal with updating a larval SA. 4541 */ 4542 destroy_ipsa_pair(&ipsapp); 4543 return (add_sa_func(mp, ksi, diagnostic, ns)); 4544 } 4545 } 4546 4547 /* 4548 * At this point we have an UPDATE to a MATURE SA. There should 4549 * not be any keying material present. 4550 */ 4551 if (akey != NULL) { 4552 *diagnostic = SADB_X_DIAGNOSTIC_AKEY_PRESENT; 4553 error = EINVAL; 4554 goto bail; 4555 } 4556 if (ekey != NULL) { 4557 *diagnostic = SADB_X_DIAGNOSTIC_EKEY_PRESENT; 4558 error = EINVAL; 4559 goto bail; 4560 } 4561 4562 if (sq.assoc->sadb_sa_state == SADB_X_SASTATE_ACTIVE_ELSEWHERE) { 4563 if (ipsapp.ipsap_sa_ptr != NULL && 4564 ipsapp.ipsap_sa_ptr->ipsa_state == IPSA_STATE_IDLE) { 4565 if ((error = sadb_update_state(ipsapp.ipsap_sa_ptr, 4566 sq.assoc->sadb_sa_state, NULL)) != 0) { 4567 *diagnostic = SADB_X_DIAGNOSTIC_BAD_SASTATE; 4568 goto bail; 4569 } 4570 } 4571 if (ipsapp.ipsap_psa_ptr != NULL && 4572 ipsapp.ipsap_psa_ptr->ipsa_state == IPSA_STATE_IDLE) { 4573 if ((error = sadb_update_state(ipsapp.ipsap_psa_ptr, 4574 sq.assoc->sadb_sa_state, NULL)) != 0) { 4575 *diagnostic = SADB_X_DIAGNOSTIC_BAD_SASTATE; 4576 goto bail; 4577 } 4578 } 4579 } 4580 if (sq.assoc->sadb_sa_state == SADB_X_SASTATE_ACTIVE) { 4581 if (ipsapp.ipsap_sa_ptr != NULL) { 4582 error = sadb_update_state(ipsapp.ipsap_sa_ptr, 4583 sq.assoc->sadb_sa_state, 4584 (ipsapp.ipsap_sa_ptr->ipsa_flags & 4585 IPSA_F_INBOUND) ? ipkt_lst : NULL); 4586 if (error) { 4587 *diagnostic = SADB_X_DIAGNOSTIC_BAD_SASTATE; 4588 goto bail; 4589 } 4590 } 4591 if (ipsapp.ipsap_psa_ptr != NULL) { 4592 error = sadb_update_state(ipsapp.ipsap_psa_ptr, 4593 sq.assoc->sadb_sa_state, 4594 (ipsapp.ipsap_psa_ptr->ipsa_flags & 4595 IPSA_F_INBOUND) ? 
ipkt_lst : NULL); 4596 if (error) { 4597 *diagnostic = SADB_X_DIAGNOSTIC_BAD_SASTATE; 4598 goto bail; 4599 } 4600 } 4601 sadb_pfkey_echo(pfkey_q, mp, (sadb_msg_t *)mp->b_cont->b_rptr, 4602 ksi, echo_target); 4603 goto bail; 4604 } 4605 4606 /* 4607 * Reality checks for updates of active associations. 4608 * Sundry first-pass UPDATE-specific reality checks. 4609 * Have to do the checks here, because it's after the add_sa code. 4610 * XXX STATS : logging/stats here? 4611 */ 4612 4613 if (!((sq.assoc->sadb_sa_state == SADB_SASTATE_MATURE) || 4614 (sq.assoc->sadb_sa_state == SADB_X_SASTATE_ACTIVE_ELSEWHERE))) { 4615 *diagnostic = SADB_X_DIAGNOSTIC_BAD_SASTATE; 4616 error = EINVAL; 4617 goto bail; 4618 } 4619 if (sq.assoc->sadb_sa_flags & ~spp->s_updateflags) { 4620 *diagnostic = SADB_X_DIAGNOSTIC_BAD_SAFLAGS; 4621 error = EINVAL; 4622 goto bail; 4623 } 4624 if (ksi->ks_in_extv[SADB_EXT_LIFETIME_CURRENT] != NULL) { 4625 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_LIFETIME; 4626 error = EOPNOTSUPP; 4627 goto bail; 4628 } 4629 4630 if ((*diagnostic = sadb_hardsoftchk(hard, soft, idle)) != 0) { 4631 error = EINVAL; 4632 goto bail; 4633 } 4634 4635 if ((*diagnostic = sadb_labelchk(ksi)) != 0) 4636 return (EINVAL); 4637 4638 error = sadb_check_kmc(&sq, ipsapp.ipsap_sa_ptr, diagnostic); 4639 if (error != 0) 4640 goto bail; 4641 4642 error = sadb_check_kmc(&sq, ipsapp.ipsap_psa_ptr, diagnostic); 4643 if (error != 0) 4644 goto bail; 4645 4646 4647 if (ipsapp.ipsap_sa_ptr != NULL) { 4648 /* 4649 * Do not allow replay value change for MATURE or LARVAL SA. 4650 */ 4651 4652 if ((replext != NULL) && 4653 ((ipsapp.ipsap_sa_ptr->ipsa_state == IPSA_STATE_LARVAL) || 4654 (ipsapp.ipsap_sa_ptr->ipsa_state == IPSA_STATE_MATURE))) { 4655 *diagnostic = SADB_X_DIAGNOSTIC_BAD_SASTATE; 4656 error = EINVAL; 4657 goto bail; 4658 } 4659 } 4660 4661 4662 if (ipsapp.ipsap_sa_ptr != NULL) { 4663 sadb_update_lifetimes(ipsapp.ipsap_sa_ptr, hard, soft, 4664 idle, B_TRUE); 4665 sadb_update_kmc(&sq, ipsapp.ipsap_sa_ptr); 4666 if ((replext != NULL) && 4667 (ipsapp.ipsap_sa_ptr->ipsa_replay_wsize != 0)) { 4668 /* 4669 * If an inbound SA, update the replay counter 4670 * and check off all the other sequence number 4671 */ 4672 if (ksi->ks_in_dsttype == KS_IN_ADDR_ME) { 4673 if (!sadb_replay_check(ipsapp.ipsap_sa_ptr, 4674 replext->sadb_x_rc_replay32)) { 4675 *diagnostic = 4676 SADB_X_DIAGNOSTIC_INVALID_REPLAY; 4677 error = EINVAL; 4678 goto bail; 4679 } 4680 mutex_enter(&ipsapp.ipsap_sa_ptr->ipsa_lock); 4681 ipsapp.ipsap_sa_ptr->ipsa_idleexpiretime = 4682 current + 4683 ipsapp.ipsap_sa_ptr->ipsa_idletime; 4684 mutex_exit(&ipsapp.ipsap_sa_ptr->ipsa_lock); 4685 } else { 4686 mutex_enter(&ipsapp.ipsap_sa_ptr->ipsa_lock); 4687 ipsapp.ipsap_sa_ptr->ipsa_replay = 4688 replext->sadb_x_rc_replay32; 4689 ipsapp.ipsap_sa_ptr->ipsa_idleexpiretime = 4690 current + 4691 ipsapp.ipsap_sa_ptr->ipsa_idletime; 4692 mutex_exit(&ipsapp.ipsap_sa_ptr->ipsa_lock); 4693 } 4694 } 4695 } 4696 4697 if (sadb_msg_type == SADB_X_UPDATEPAIR) { 4698 if (ipsapp.ipsap_psa_ptr != NULL) { 4699 sadb_update_lifetimes(ipsapp.ipsap_psa_ptr, hard, soft, 4700 idle, B_FALSE); 4701 sadb_update_kmc(&sq, ipsapp.ipsap_psa_ptr); 4702 } else { 4703 *diagnostic = SADB_X_DIAGNOSTIC_PAIR_SA_NOTFOUND; 4704 error = ESRCH; 4705 goto bail; 4706 } 4707 } 4708 4709 if (pair_ext != NULL) 4710 error = update_pairing(&ipsapp, &sq, ksi, diagnostic); 4711 4712 if (error == 0) 4713 sadb_pfkey_echo(pfkey_q, mp, (sadb_msg_t *)mp->b_cont->b_rptr, 4714 ksi, echo_target); 4715 bail: 4716 4717 
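/* Release the SA pair obtained via get_ipsa_pair(). */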
destroy_ipsa_pair(&ipsapp); 4718 4719 return (error); 4720 } 4721 4722 4723 static int 4724 update_pairing(ipsap_t *ipsapp, ipsa_query_t *sq, keysock_in_t *ksi, 4725 int *diagnostic) 4726 { 4727 sadb_sa_t *assoc = (sadb_sa_t *)ksi->ks_in_extv[SADB_EXT_SA]; 4728 sadb_x_pair_t *pair_ext = 4729 (sadb_x_pair_t *)ksi->ks_in_extv[SADB_X_EXT_PAIR]; 4730 int error = 0; 4731 ipsap_t oipsapp; 4732 boolean_t undo_pair = B_FALSE; 4733 uint32_t ipsa_flags; 4734 4735 if (pair_ext->sadb_x_pair_spi == 0 || pair_ext->sadb_x_pair_spi == 4736 assoc->sadb_sa_spi) { 4737 *diagnostic = SADB_X_DIAGNOSTIC_PAIR_INAPPROPRIATE; 4738 return (EINVAL); 4739 } 4740 4741 /* 4742 * Assume for now that the spi value provided in the SADB_UPDATE 4743 * message was valid; update the SA with its pair spi value. 4744 * If the spi turns out to be bogus or the SA no longer exists 4745 * then this will be detected when the reverse update is made 4746 * below. 4747 */ 4748 mutex_enter(&ipsapp->ipsap_sa_ptr->ipsa_lock); 4749 ipsapp->ipsap_sa_ptr->ipsa_flags |= IPSA_F_PAIRED; 4750 ipsapp->ipsap_sa_ptr->ipsa_otherspi = pair_ext->sadb_x_pair_spi; 4751 mutex_exit(&ipsapp->ipsap_sa_ptr->ipsa_lock); 4752 4753 /* 4754 * After updating the ipsa_otherspi element of the SA, get_ipsa_pair() 4755 * should now return pointers to the SA *AND* its pair; if this is not 4756 * the case, the "otherspi" either did not exist or was deleted. Also 4757 * check that "otherspi" is not already paired. If everything looks 4758 * good, complete the update. IPSA_REFRELE the first pair_pointer 4759 * after this update to ensure it's not deleted until we are done. 4760 */ 4761 error = get_ipsa_pair(sq, &oipsapp, diagnostic); 4762 if (error != 0) { 4763 /* 4764 * This should never happen; the calling function still has 4765 * IPSA_REFHELD on the SA we just updated. 4766 */ 4767 return (error); /* XXX EINVAL instead of ESRCH? */ 4768 } 4769 4770 if (oipsapp.ipsap_psa_ptr == NULL) { 4771 *diagnostic = SADB_X_DIAGNOSTIC_PAIR_INAPPROPRIATE; 4772 error = EINVAL; 4773 undo_pair = B_TRUE; 4774 } else { 4775 ipsa_flags = oipsapp.ipsap_psa_ptr->ipsa_flags; 4776 if ((oipsapp.ipsap_psa_ptr->ipsa_state == IPSA_STATE_DEAD) || 4777 (oipsapp.ipsap_psa_ptr->ipsa_state == IPSA_STATE_DYING)) { 4778 /* It's dead, Jim! */ 4779 *diagnostic = SADB_X_DIAGNOSTIC_PAIR_INAPPROPRIATE; 4780 undo_pair = B_TRUE; 4781 } else if ((ipsa_flags & (IPSA_F_OUTBOUND | IPSA_F_INBOUND)) == 4782 (IPSA_F_OUTBOUND | IPSA_F_INBOUND)) { 4783 /* This SA is in both hashtables. */ 4784 *diagnostic = SADB_X_DIAGNOSTIC_PAIR_INAPPROPRIATE; 4785 undo_pair = B_TRUE; 4786 } else if (ipsa_flags & IPSA_F_PAIRED) { 4787 /* This SA is already paired with another. */ 4788 *diagnostic = SADB_X_DIAGNOSTIC_PAIR_ALREADY; 4789 undo_pair = B_TRUE; 4790 } 4791 } 4792 4793 if (undo_pair) { 4794 /* The pair SA does not exist. */ 4795 mutex_enter(&ipsapp->ipsap_sa_ptr->ipsa_lock); 4796 ipsapp->ipsap_sa_ptr->ipsa_flags &= ~IPSA_F_PAIRED; 4797 ipsapp->ipsap_sa_ptr->ipsa_otherspi = 0; 4798 mutex_exit(&ipsapp->ipsap_sa_ptr->ipsa_lock); 4799 } else { 4800 mutex_enter(&oipsapp.ipsap_psa_ptr->ipsa_lock); 4801 oipsapp.ipsap_psa_ptr->ipsa_otherspi = assoc->sadb_sa_spi; 4802 oipsapp.ipsap_psa_ptr->ipsa_flags |= IPSA_F_PAIRED; 4803 mutex_exit(&oipsapp.ipsap_psa_ptr->ipsa_lock); 4804 } 4805 4806 destroy_ipsa_pair(&oipsapp); 4807 return (error); 4808 } 4809 4810 /* 4811 * The following functions deal with ACQUIRE LISTS. An ACQUIRE list is 4812 * a list of outstanding SADB_ACQUIRE messages.
If ipsec_getassocbyconn() fails 4813 * for an outbound datagram, that datagram is queued up on an ACQUIRE record, 4814 * and an SADB_ACQUIRE message is sent up. Presumably, a user-space key 4815 * management daemon will process the ACQUIRE, use a SADB_GETSPI to reserve 4816 * an SPI value and a larval SA, then SADB_UPDATE the larval SA, and ADD the 4817 * other direction's SA. 4818 */ 4819 4820 /* 4821 * Check the ACQUIRE lists. If there's an existing ACQUIRE record, 4822 * grab it, lock it, and return it. Otherwise return NULL. 4823 * 4824 * XXX MLS number of arguments getting unwieldy here 4825 */ 4826 static ipsacq_t * 4827 sadb_checkacquire(iacqf_t *bucket, ipsec_action_t *ap, ipsec_policy_t *pp, 4828 uint32_t *src, uint32_t *dst, uint32_t *isrc, uint32_t *idst, 4829 uint64_t unique_id, ts_label_t *tsl) 4830 { 4831 ipsacq_t *walker; 4832 sa_family_t fam; 4833 uint32_t blank_address[4] = {0, 0, 0, 0}; 4834 4835 if (isrc == NULL) { 4836 ASSERT(idst == NULL); 4837 isrc = idst = blank_address; 4838 } 4839 4840 /* 4841 * Scan list for duplicates. Check for UNIQUE, src/dest, policy. 4842 * 4843 * XXX May need search for duplicates based on other things too! 4844 */ 4845 for (walker = bucket->iacqf_ipsacq; walker != NULL; 4846 walker = walker->ipsacq_next) { 4847 mutex_enter(&walker->ipsacq_lock); 4848 fam = walker->ipsacq_addrfam; 4849 if (IPSA_ARE_ADDR_EQUAL(dst, walker->ipsacq_dstaddr, fam) && 4850 IPSA_ARE_ADDR_EQUAL(src, walker->ipsacq_srcaddr, fam) && 4851 ip_addr_match((uint8_t *)isrc, walker->ipsacq_innersrcpfx, 4852 (in6_addr_t *)walker->ipsacq_innersrc) && 4853 ip_addr_match((uint8_t *)idst, walker->ipsacq_innerdstpfx, 4854 (in6_addr_t *)walker->ipsacq_innerdst) && 4855 (ap == walker->ipsacq_act) && 4856 (pp == walker->ipsacq_policy) && 4857 /* XXX do deep compares of ap/pp? */ 4858 (unique_id == walker->ipsacq_unique_id) && 4859 (ipsec_label_match(tsl, walker->ipsacq_tsl))) 4860 break; /* everything matched */ 4861 mutex_exit(&walker->ipsacq_lock); 4862 } 4863 4864 return (walker); 4865 } 4866 4867 /* 4868 * Generate an SADB_ACQUIRE base message mblk, including KEYSOCK_OUT metadata. 4869 * In other words, this will return, upon success, a two-mblk chain. 4870 */ 4871 static inline mblk_t * 4872 sadb_acquire_msg_base(minor_t serial, uint8_t satype, uint32_t seq, pid_t pid) 4873 { 4874 mblk_t *mp; 4875 sadb_msg_t *samsg; 4876 4877 mp = sadb_keysock_out(serial); 4878 if (mp == NULL) 4879 return (NULL); 4880 mp->b_cont = allocb(sizeof (sadb_msg_t), BPRI_HI); 4881 if (mp->b_cont == NULL) { 4882 freeb(mp); 4883 return (NULL); 4884 } 4885 4886 samsg = (sadb_msg_t *)mp->b_cont->b_rptr; 4887 mp->b_cont->b_wptr += sizeof (*samsg); 4888 samsg->sadb_msg_version = PF_KEY_V2; 4889 samsg->sadb_msg_type = SADB_ACQUIRE; 4890 samsg->sadb_msg_errno = 0; 4891 samsg->sadb_msg_reserved = 0; 4892 samsg->sadb_msg_satype = satype; 4893 samsg->sadb_msg_seq = seq; 4894 samsg->sadb_msg_pid = pid; 4895 4896 return (mp); 4897 } 4898 4899 /* 4900 * Generate address and TX/MLS sensitivity label PF_KEY extensions that are 4901 * common to both regular and extended ACQUIREs. 
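 * Returns an mblk holding the extensions, or NULL on failure.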
4902 */ 4903 static mblk_t * 4904 sadb_acquire_msg_common(ipsec_selector_t *sel, ipsec_policy_t *pp, 4905 ipsec_action_t *ap, boolean_t tunnel_mode, ts_label_t *tsl, 4906 sadb_sens_t *sens) 4907 { 4908 size_t len; 4909 mblk_t *mp; 4910 uint8_t *start, *cur, *end; 4911 uint32_t *saddrptr, *daddrptr; 4912 sa_family_t af; 4913 ipsec_action_t *oldap; 4914 ipsec_selkey_t *ipsl; 4915 uint8_t proto, pfxlen; 4916 uint16_t lport, rport; 4917 int senslen = 0; 4918 4919 /* 4920 * Get action pointer set if it isn't already. 4921 */ 4922 oldap = ap; 4923 if (pp != NULL) { 4924 ap = pp->ipsp_act; 4925 if (ap == NULL) 4926 ap = oldap; 4927 } 4928 4929 /* 4930 * Biggest-case scenario: 4931 * 4x (sadb_address_t + struct sockaddr_in6) 4932 * (src, dst, isrc, idst) 4933 * (COMING SOON, 6x, because of triggering-packet contents.) 4934 * sadb_x_kmc_t 4935 * sadb_sens_t 4936 * And wiggle room for label bitvectors. Luckily there are 4937 * programmatic ways to find it. 4938 */ 4939 len = 4 * (sizeof (sadb_address_t) + sizeof (struct sockaddr_in6)); 4940 4941 /* Figure out full and proper length of sensitivity labels. */ 4942 if (sens != NULL) { 4943 ASSERT(tsl == NULL); 4944 senslen = SADB_64TO8(sens->sadb_sens_len); 4945 } else if (tsl != NULL) { 4946 senslen = sadb_sens_len_from_label(tsl); 4947 } 4948 #ifdef DEBUG 4949 else { 4950 ASSERT(senslen == 0); 4951 } 4952 #endif /* DEBUG */ 4953 len += senslen; 4954 4955 mp = allocb(len, BPRI_HI); 4956 if (mp == NULL) 4957 return (NULL); 4958 4959 start = mp->b_rptr; 4960 end = start + len; 4961 cur = start; 4962 4963 /* 4964 * Address extensions first, from most-recently-defined to least. 4965 * (This should immediately trigger surprise or verify robustness on 4966 * older apps, like in.iked.) 4967 */ 4968 if (tunnel_mode) { 4969 /* 4970 * Form inner address extensions based NOT on the inner 4971 * selectors (i.e. the packet data), but on the policy's 4972 * selector key (i.e. the policy's selector information). 4973 * 4974 * NOTE: The position of IPv4 and IPv6 addresses is the 4975 * same in ipsec_selkey_t (unless the compiler does very 4976 * strange things with unions, consult your local C language 4977 * lawyer for details). 4978 */ 4979 ASSERT(pp != NULL); 4980 4981 ipsl = &(pp->ipsp_sel->ipsl_key); 4982 if (ipsl->ipsl_valid & IPSL_IPV4) { 4983 af = AF_INET; 4984 ASSERT(sel->ips_protocol == IPPROTO_ENCAP); 4985 ASSERT(!(ipsl->ipsl_valid & IPSL_IPV6)); 4986 } else { 4987 af = AF_INET6; 4988 ASSERT(sel->ips_protocol == IPPROTO_IPV6); 4989 ASSERT(ipsl->ipsl_valid & IPSL_IPV6); 4990 } 4991 4992 if (ipsl->ipsl_valid & IPSL_LOCAL_ADDR) { 4993 saddrptr = (uint32_t *)(&ipsl->ipsl_local); 4994 pfxlen = ipsl->ipsl_local_pfxlen; 4995 } else { 4996 saddrptr = (uint32_t *)(&ipv6_all_zeros); 4997 pfxlen = 0; 4998 } 4999 /* XXX What about ICMP type/code? */ 5000 lport = (ipsl->ipsl_valid & IPSL_LOCAL_PORT) ? 5001 ipsl->ipsl_lport : 0; 5002 proto = (ipsl->ipsl_valid & IPSL_PROTOCOL) ? 5003 ipsl->ipsl_proto : 0; 5004 5005 cur = sadb_make_addr_ext(cur, end, SADB_X_EXT_ADDRESS_INNER_SRC, 5006 af, saddrptr, lport, proto, pfxlen); 5007 if (cur == NULL) { 5008 freeb(mp); 5009 return (NULL); 5010 } 5011 5012 if (ipsl->ipsl_valid & IPSL_REMOTE_ADDR) { 5013 daddrptr = (uint32_t *)(&ipsl->ipsl_remote); 5014 pfxlen = ipsl->ipsl_remote_pfxlen; 5015 } else { 5016 daddrptr = (uint32_t *)(&ipv6_all_zeros); 5017 pfxlen = 0; 5018 } 5019 /* XXX What about ICMP type/code? */ 5020 rport = (ipsl->ipsl_valid & IPSL_REMOTE_PORT) ? 
5021 ipsl->ipsl_rport : 0; 5022 5023 cur = sadb_make_addr_ext(cur, end, SADB_X_EXT_ADDRESS_INNER_DST, 5024 af, daddrptr, rport, proto, pfxlen); 5025 if (cur == NULL) { 5026 freeb(mp); 5027 return (NULL); 5028 } 5029 /* 5030 * TODO - if we go to 3884's dream of transport mode IP-in-IP 5031 * _with_ inner-packet address selectors, we'll need to further 5032 * distinguish tunnel mode here. For now, having inner 5033 * addresses and/or ports is sufficient. 5034 * 5035 * Meanwhile, whack proto/ports to reflect IP-in-IP for the 5036 * outer addresses. 5037 */ 5038 proto = sel->ips_protocol; /* Either _ENCAP or _IPV6 */ 5039 lport = rport = 0; 5040 } else if ((ap != NULL) && (!ap->ipa_want_unique)) { 5041 /* 5042 * For cases when the policy calls out specific ports (or not). 5043 */ 5044 proto = 0; 5045 lport = 0; 5046 rport = 0; 5047 if (pp != NULL) { 5048 ipsl = &(pp->ipsp_sel->ipsl_key); 5049 if (ipsl->ipsl_valid & IPSL_PROTOCOL) 5050 proto = ipsl->ipsl_proto; 5051 if (ipsl->ipsl_valid & IPSL_REMOTE_PORT) 5052 rport = ipsl->ipsl_rport; 5053 if (ipsl->ipsl_valid & IPSL_LOCAL_PORT) 5054 lport = ipsl->ipsl_lport; 5055 } 5056 } else { 5057 /* 5058 * For require-unique-SA policies. 5059 */ 5060 proto = sel->ips_protocol; 5061 lport = sel->ips_local_port; 5062 rport = sel->ips_remote_port; 5063 } 5064 5065 /* 5066 * Regular addresses. These are outer-packet ones for tunnel mode. 5067 * Or for transport mode, the regulard address & port information. 5068 */ 5069 af = sel->ips_isv4 ? AF_INET : AF_INET6; 5070 5071 /* 5072 * NOTE: The position of IPv4 and IPv6 addresses is the same in 5073 * ipsec_selector_t. 5074 */ 5075 cur = sadb_make_addr_ext(cur, end, SADB_EXT_ADDRESS_SRC, af, 5076 (uint32_t *)(&sel->ips_local_addr_v6), lport, proto, 0); 5077 if (cur == NULL) { 5078 freeb(mp); 5079 return (NULL); 5080 } 5081 5082 cur = sadb_make_addr_ext(cur, end, SADB_EXT_ADDRESS_DST, af, 5083 (uint32_t *)(&sel->ips_remote_addr_v6), rport, proto, 0); 5084 if (cur == NULL) { 5085 freeb(mp); 5086 return (NULL); 5087 } 5088 5089 /* 5090 * If present, generate a sensitivity label. 5091 */ 5092 if (cur + senslen > end) { 5093 freeb(mp); 5094 return (NULL); 5095 } 5096 if (sens != NULL) { 5097 /* Explicit sadb_sens_t, usually from inverse-ACQUIRE. */ 5098 bcopy(sens, cur, senslen); 5099 } else if (tsl != NULL) { 5100 /* Generate sadb_sens_t from ACQUIRE source. */ 5101 sadb_sens_from_label((sadb_sens_t *)cur, SADB_EXT_SENSITIVITY, 5102 tsl, senslen); 5103 } 5104 #ifdef DEBUG 5105 else { 5106 ASSERT(senslen == 0); 5107 } 5108 #endif /* DEBUG */ 5109 cur += senslen; 5110 mp->b_wptr = cur; 5111 5112 return (mp); 5113 } 5114 5115 /* 5116 * Generate a regular ACQUIRE's proposal extension and KMC information.. 5117 */ 5118 static mblk_t * 5119 sadb_acquire_prop(ipsec_action_t *ap, netstack_t *ns, boolean_t do_esp) 5120 { 5121 ipsec_stack_t *ipss = ns->netstack_ipsec; 5122 ipsecesp_stack_t *espstack = ns->netstack_ipsecesp; 5123 ipsecah_stack_t *ahstack = ns->netstack_ipsecah; 5124 mblk_t *mp = NULL; 5125 sadb_prop_t *prop; 5126 sadb_comb_t *comb; 5127 ipsec_action_t *walker; 5128 int ncombs, allocsize, ealgid, aalgid, aminbits, amaxbits, eminbits, 5129 emaxbits, esaltlen, replay; 5130 uint64_t softbytes, hardbytes, softaddtime, hardaddtime, softusetime, 5131 hardusetime; 5132 uint64_t kmc = 0; 5133 uint32_t kmp = 0; 5134 5135 /* 5136 * Since it's an rwlock read, AND writing to the IPsec algorithms is 5137 * rare, just acquire it once up top, and drop it upon return. 
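 * Every exit path goes through the "bail" label below, which drops the lock.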
5138 */ 5139 rw_enter(&ipss->ipsec_alg_lock, RW_READER); 5140 if (do_esp) { 5141 uint64_t num_aalgs, num_ealgs; 5142 5143 if (espstack->esp_kstats == NULL) 5144 goto bail; 5145 5146 num_aalgs = ipss->ipsec_nalgs[IPSEC_ALG_AUTH]; 5147 num_ealgs = ipss->ipsec_nalgs[IPSEC_ALG_ENCR]; 5148 if (num_ealgs == 0) 5149 goto bail; /* IPsec not loaded yet, apparently. */ 5150 num_aalgs++; /* No-auth or self-auth-crypto ESP. */ 5151 5152 /* Use netstack's maximum loaded algorithms... */ 5153 ncombs = num_ealgs * num_aalgs; 5154 replay = espstack->ipsecesp_replay_size; 5155 } else { 5156 if (ahstack->ah_kstats == NULL) 5157 goto bail; 5158 5159 ncombs = ipss->ipsec_nalgs[IPSEC_ALG_AUTH]; 5160 5161 if (ncombs == 0) 5162 goto bail; /* IPsec not loaded yet, apparently. */ 5163 replay = ahstack->ipsecah_replay_size; 5164 } 5165 5166 allocsize = sizeof (*prop) + ncombs * sizeof (*comb) + 5167 sizeof (sadb_x_kmc_t); 5168 mp = allocb(allocsize, BPRI_HI); 5169 if (mp == NULL) 5170 goto bail; 5171 prop = (sadb_prop_t *)mp->b_rptr; 5172 mp->b_wptr += sizeof (*prop); 5173 comb = (sadb_comb_t *)mp->b_wptr; 5174 /* Decrement allocsize, if it goes to or below 0, stop. */ 5175 allocsize -= sizeof (*prop); 5176 prop->sadb_prop_exttype = SADB_EXT_PROPOSAL; 5177 prop->sadb_prop_len = SADB_8TO64(sizeof (*prop)); 5178 *(uint32_t *)(&prop->sadb_prop_replay) = 0; /* Quick zero-out! */ 5179 prop->sadb_prop_replay = replay; 5180 5181 /* 5182 * Based upon algorithm properties, and what-not, prioritize a 5183 * proposal, based on the ordering of the ESP algorithms in the 5184 * alternatives in the policy rule or socket that was placed 5185 * in the acquire record. 5186 * 5187 * For each action in policy list 5188 * Add combination. 5189 * I should not hit it, but if I've hit limit, return. 5190 */ 5191 5192 for (walker = ap; walker != NULL; walker = walker->ipa_next) { 5193 ipsec_alginfo_t *ealg, *aalg; 5194 ipsec_prot_t *prot; 5195 5196 if (walker->ipa_act.ipa_type != IPSEC_POLICY_APPLY) 5197 continue; 5198 5199 prot = &walker->ipa_act.ipa_apply; 5200 if (walker->ipa_act.ipa_apply.ipp_km_proto != 0) 5201 kmp = walker->ipa_act.ipa_apply.ipp_km_proto; 5202 if (walker->ipa_act.ipa_apply.ipp_km_cookie != 0) 5203 kmc = walker->ipa_act.ipa_apply.ipp_km_cookie; 5204 if (walker->ipa_act.ipa_apply.ipp_replay_depth) { 5205 prop->sadb_prop_replay = 5206 walker->ipa_act.ipa_apply.ipp_replay_depth; 5207 } 5208 5209 if (do_esp) { 5210 if (!prot->ipp_use_esp) 5211 continue; 5212 5213 if (prot->ipp_esp_auth_alg != 0) { 5214 aalg = ipss->ipsec_alglists[IPSEC_ALG_AUTH] 5215 [prot->ipp_esp_auth_alg]; 5216 if (aalg == NULL || !ALG_VALID(aalg)) 5217 continue; 5218 } else 5219 aalg = NULL; 5220 5221 ASSERT(prot->ipp_encr_alg > 0); 5222 ealg = ipss->ipsec_alglists[IPSEC_ALG_ENCR] 5223 [prot->ipp_encr_alg]; 5224 if (ealg == NULL || !ALG_VALID(ealg)) 5225 continue; 5226 5227 /* 5228 * These may want to come from policy rule.. 5229 */ 5230 softbytes = espstack->ipsecesp_default_soft_bytes; 5231 hardbytes = espstack->ipsecesp_default_hard_bytes; 5232 softaddtime = espstack->ipsecesp_default_soft_addtime; 5233 hardaddtime = espstack->ipsecesp_default_hard_addtime; 5234 softusetime = espstack->ipsecesp_default_soft_usetime; 5235 hardusetime = espstack->ipsecesp_default_hard_usetime; 5236 } else { 5237 if (!prot->ipp_use_ah) 5238 continue; 5239 ealg = NULL; 5240 aalg = ipss->ipsec_alglists[IPSEC_ALG_AUTH] 5241 [prot->ipp_auth_alg]; 5242 if (aalg == NULL || !ALG_VALID(aalg)) 5243 continue; 5244 5245 /* 5246 * These may want to come from policy rule.. 
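 * (For now the per-stack AH defaults are used.)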
5247 */ 5248 softbytes = ahstack->ipsecah_default_soft_bytes; 5249 hardbytes = ahstack->ipsecah_default_hard_bytes; 5250 softaddtime = ahstack->ipsecah_default_soft_addtime; 5251 hardaddtime = ahstack->ipsecah_default_hard_addtime; 5252 softusetime = ahstack->ipsecah_default_soft_usetime; 5253 hardusetime = ahstack->ipsecah_default_hard_usetime; 5254 } 5255 5256 if (ealg == NULL) { 5257 ealgid = eminbits = emaxbits = esaltlen = 0; 5258 } else { 5259 ealgid = ealg->alg_id; 5260 eminbits = 5261 MAX(prot->ipp_espe_minbits, ealg->alg_ef_minbits); 5262 emaxbits = 5263 MIN(prot->ipp_espe_maxbits, ealg->alg_ef_maxbits); 5264 esaltlen = ealg->alg_saltlen; 5265 } 5266 5267 if (aalg == NULL) { 5268 aalgid = aminbits = amaxbits = 0; 5269 } else { 5270 aalgid = aalg->alg_id; 5271 aminbits = MAX(prot->ipp_espa_minbits, 5272 aalg->alg_ef_minbits); 5273 amaxbits = MIN(prot->ipp_espa_maxbits, 5274 aalg->alg_ef_maxbits); 5275 } 5276 5277 comb->sadb_comb_flags = 0; 5278 comb->sadb_comb_reserved = 0; 5279 comb->sadb_comb_encrypt = ealgid; 5280 comb->sadb_comb_encrypt_minbits = eminbits; 5281 comb->sadb_comb_encrypt_maxbits = emaxbits; 5282 comb->sadb_x_comb_encrypt_saltbits = SADB_8TO1(esaltlen); 5283 comb->sadb_comb_auth = aalgid; 5284 comb->sadb_comb_auth_minbits = aminbits; 5285 comb->sadb_comb_auth_maxbits = amaxbits; 5286 comb->sadb_comb_soft_allocations = 0; 5287 comb->sadb_comb_hard_allocations = 0; 5288 comb->sadb_comb_soft_bytes = softbytes; 5289 comb->sadb_comb_hard_bytes = hardbytes; 5290 comb->sadb_comb_soft_addtime = softaddtime; 5291 comb->sadb_comb_hard_addtime = hardaddtime; 5292 comb->sadb_comb_soft_usetime = softusetime; 5293 comb->sadb_comb_hard_usetime = hardusetime; 5294 5295 prop->sadb_prop_len += SADB_8TO64(sizeof (*comb)); 5296 mp->b_wptr += sizeof (*comb); 5297 allocsize -= sizeof (*comb); 5298 /* Should never dip BELOW sizeof (KM cookie extension). */ 5299 ASSERT3S(allocsize, >=, sizeof (sadb_x_kmc_t)); 5300 if (allocsize <= sizeof (sadb_x_kmc_t)) 5301 break; /* out of space.. */ 5302 comb++; 5303 } 5304 5305 /* Don't include KMC extension if there's no room. */ 5306 if (((kmp != 0) || (kmc != 0)) && allocsize >= sizeof (sadb_x_kmc_t)) { 5307 if (sadb_make_kmc_ext(mp->b_wptr, 5308 mp->b_wptr + sizeof (sadb_x_kmc_t), kmp, kmc) == NULL) { 5309 freeb(mp); 5310 mp = NULL; 5311 goto bail; 5312 } 5313 mp->b_wptr += sizeof (sadb_x_kmc_t); 5314 prop->sadb_prop_len += SADB_8TO64(sizeof (sadb_x_kmc_t)); 5315 } 5316 5317 bail: 5318 rw_exit(&ipss->ipsec_alg_lock); 5319 return (mp); 5320 } 5321 5322 /* 5323 * Generate an extended ACQUIRE's extended-proposal extension. 5324 */ 5325 static mblk_t * 5326 sadb_acquire_extended_prop(ipsec_action_t *ap, netstack_t *ns) 5327 { 5328 sadb_prop_t *eprop; 5329 uint8_t *cur, *end; 5330 mblk_t *mp; 5331 int allocsize, numecombs = 0, numalgdescs = 0; 5332 uint32_t kmp = 0, replay = 0; 5333 uint64_t kmc = 0; 5334 ipsec_action_t *walker; 5335 5336 allocsize = sizeof (*eprop); 5337 5338 /* 5339 * Going to walk through the action list twice. Once for allocation 5340 * measurement, and once for actual construction. 
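 * The first pass counts ecombs and algorithm descriptors so the proposal
 * mblk can be sized exactly before it is filled in.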
5341 */ 5342 for (walker = ap; walker != NULL; walker = walker->ipa_next) { 5343 ipsec_prot_t *ipp; 5344 5345 /* 5346 * Skip non-IPsec policies 5347 */ 5348 if (walker->ipa_act.ipa_type != IPSEC_ACT_APPLY) 5349 continue; 5350 5351 ipp = &walker->ipa_act.ipa_apply; 5352 5353 if (walker->ipa_act.ipa_apply.ipp_km_proto) 5354 kmp = ipp->ipp_km_proto; 5355 if (walker->ipa_act.ipa_apply.ipp_km_cookie) 5356 kmc = ipp->ipp_km_cookie; 5357 if (walker->ipa_act.ipa_apply.ipp_replay_depth) 5358 replay = ipp->ipp_replay_depth; 5359 5360 if (ipp->ipp_use_ah) 5361 numalgdescs++; 5362 if (ipp->ipp_use_esp) { 5363 numalgdescs++; 5364 if (ipp->ipp_use_espa) 5365 numalgdescs++; 5366 } 5367 5368 numecombs++; 5369 } 5370 ASSERT(numecombs > 0); 5371 5372 allocsize += numecombs * sizeof (sadb_x_ecomb_t) + 5373 numalgdescs * sizeof (sadb_x_algdesc_t) + sizeof (sadb_x_kmc_t); 5374 mp = allocb(allocsize, BPRI_HI); 5375 if (mp == NULL) 5376 return (NULL); 5377 eprop = (sadb_prop_t *)mp->b_rptr; 5378 end = mp->b_rptr + allocsize; 5379 cur = mp->b_rptr + sizeof (*eprop); 5380 5381 eprop->sadb_prop_exttype = SADB_X_EXT_EPROP; 5382 eprop->sadb_x_prop_ereserved = 0; 5383 eprop->sadb_x_prop_numecombs = 0; 5384 *(uint32_t *)(&eprop->sadb_prop_replay) = 0; /* Quick zero-out! */ 5385 /* Pick ESP's replay default if need be. */ 5386 eprop->sadb_prop_replay = (replay == 0) ? 5387 ns->netstack_ipsecesp->ipsecesp_replay_size : replay; 5388 5389 /* This time, walk through and actually allocate. */ 5390 for (walker = ap; walker != NULL; walker = walker->ipa_next) { 5391 /* 5392 * Skip non-IPsec policies 5393 */ 5394 if (walker->ipa_act.ipa_type != IPSEC_ACT_APPLY) 5395 continue; 5396 cur = sadb_action_to_ecomb(cur, end, walker, ns); 5397 if (cur == NULL) { 5398 /* NOTE: inverse-ACQUIRE should note this as ENOMEM. */ 5399 freeb(mp); 5400 return (NULL); 5401 } 5402 eprop->sadb_x_prop_numecombs++; 5403 } 5404 5405 ASSERT(end - cur >= sizeof (sadb_x_kmc_t)); 5406 if ((kmp != 0) || (kmc != 0)) { 5407 cur = sadb_make_kmc_ext(cur, end, kmp, kmc); 5408 if (cur == NULL) { 5409 freeb(mp); 5410 return (NULL); 5411 } 5412 } 5413 mp->b_wptr = cur; 5414 eprop->sadb_prop_len = SADB_8TO64(cur - mp->b_rptr); 5415 5416 return (mp); 5417 } 5418 5419 /* 5420 * For this mblk, insert a new acquire record. Assume bucket contains addrs 5421 * of all of the same length. Give up (and drop) if memory 5422 * cannot be allocated for a new one; otherwise, invoke callback to 5423 * send the acquire up.. 5424 * 5425 * In cases where we need both AH and ESP, add the SA to the ESP ACQUIRE 5426 * list. The ah_add_sa_finish() routines can look at the packet's attached 5427 * attributes and handle this case specially. 
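 * The datagram is consumed here: it is either queued on the ACQUIRE
 * record or dropped.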
5428 */ 5429 void 5430 sadb_acquire(mblk_t *datamp, ip_xmit_attr_t *ixa, boolean_t need_ah, 5431 boolean_t need_esp) 5432 { 5433 mblk_t *asyncmp, *regular, *extended, *common, *prop, *eprop; 5434 sadbp_t *spp; 5435 sadb_t *sp; 5436 ipsacq_t *newbie; 5437 iacqf_t *bucket; 5438 ipha_t *ipha = (ipha_t *)datamp->b_rptr; 5439 ip6_t *ip6h = (ip6_t *)datamp->b_rptr; 5440 uint32_t *src, *dst, *isrc, *idst; 5441 ipsec_policy_t *pp = ixa->ixa_ipsec_policy; 5442 ipsec_action_t *ap = ixa->ixa_ipsec_action; 5443 sa_family_t af; 5444 int hashoffset; 5445 uint32_t seq; 5446 uint64_t unique_id = 0; 5447 boolean_t tunnel_mode = (ixa->ixa_flags & IXAF_IPSEC_TUNNEL) != 0; 5448 ts_label_t *tsl; 5449 netstack_t *ns = ixa->ixa_ipst->ips_netstack; 5450 ipsec_stack_t *ipss = ns->netstack_ipsec; 5451 ipsecesp_stack_t *espstack = ns->netstack_ipsecesp; 5452 ipsecah_stack_t *ahstack = ns->netstack_ipsecah; 5453 ipsec_selector_t sel; 5454 queue_t *q; 5455 5456 ASSERT((pp != NULL) || (ap != NULL)); 5457 5458 ASSERT(need_ah || need_esp); 5459 5460 /* Assign sadb pointers */ 5461 if (need_esp) { 5462 /* 5463 * ESP happens first if we need both AH and ESP. 5464 */ 5465 spp = &espstack->esp_sadb; 5466 } else { 5467 spp = &ahstack->ah_sadb; 5468 } 5469 sp = (ixa->ixa_flags & IXAF_IS_IPV4) ? &spp->s_v4 : &spp->s_v6; 5470 5471 if (is_system_labeled()) 5472 tsl = ixa->ixa_tsl; 5473 else 5474 tsl = NULL; 5475 5476 if (ap == NULL) 5477 ap = pp->ipsp_act; 5478 ASSERT(ap != NULL); 5479 5480 if (ap->ipa_act.ipa_apply.ipp_use_unique || tunnel_mode) 5481 unique_id = SA_FORM_UNIQUE_ID(ixa); 5482 5483 /* 5484 * Set up an ACQUIRE record. 5485 * 5486 * Immediately, make sure the ACQUIRE sequence number doesn't slip 5487 * below the lowest point allowed in the kernel. (In other words, 5488 * make sure the high bit on the sequence number is set.) 5489 */ 5490 5491 seq = keysock_next_seq(ns) | IACQF_LOWEST_SEQ; 5492 5493 if (IPH_HDR_VERSION(ipha) == IP_VERSION) { 5494 src = (uint32_t *)&ipha->ipha_src; 5495 dst = (uint32_t *)&ipha->ipha_dst; 5496 af = AF_INET; 5497 hashoffset = OUTBOUND_HASH_V4(sp, ipha->ipha_dst); 5498 ASSERT(ixa->ixa_flags & IXAF_IS_IPV4); 5499 } else { 5500 ASSERT(IPH_HDR_VERSION(ipha) == IPV6_VERSION); 5501 src = (uint32_t *)&ip6h->ip6_src; 5502 dst = (uint32_t *)&ip6h->ip6_dst; 5503 af = AF_INET6; 5504 hashoffset = OUTBOUND_HASH_V6(sp, ip6h->ip6_dst); 5505 ASSERT(!(ixa->ixa_flags & IXAF_IS_IPV4)); 5506 } 5507 5508 if (tunnel_mode) { 5509 if (pp == NULL) { 5510 /* 5511 * Tunnel mode with no policy pointer means this is a 5512 * reflected ICMP (like a ECHO REQUEST) that came in 5513 * with self-encapsulated protection. Until we better 5514 * support this, drop the packet. 5515 */ 5516 ip_drop_packet(datamp, B_FALSE, NULL, 5517 DROPPER(ipss, ipds_spd_got_selfencap), 5518 &ipss->ipsec_spd_dropper); 5519 return; 5520 } 5521 /* Snag inner addresses. */ 5522 isrc = ixa->ixa_ipsec_insrc; 5523 idst = ixa->ixa_ipsec_indst; 5524 } else { 5525 isrc = idst = NULL; 5526 } 5527 5528 /* 5529 * Check buckets to see if there is an existing entry. If so, 5530 * grab it. sadb_checkacquire locks newbie if found. 5531 */ 5532 bucket = &(sp->sdb_acq[hashoffset]); 5533 mutex_enter(&bucket->iacqf_lock); 5534 newbie = sadb_checkacquire(bucket, ap, pp, src, dst, isrc, idst, 5535 unique_id, tsl); 5536 5537 if (newbie == NULL) { 5538 /* 5539 * Otherwise, allocate a new one. 
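 * The new record is linked at the head of the bucket and left locked,
 * just as sadb_checkacquire() leaves an existing record.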
5540 */ 5541 newbie = kmem_zalloc(sizeof (*newbie), KM_NOSLEEP); 5542 if (newbie == NULL) { 5543 mutex_exit(&bucket->iacqf_lock); 5544 ip_drop_packet(datamp, B_FALSE, NULL, 5545 DROPPER(ipss, ipds_sadb_acquire_nomem), 5546 &ipss->ipsec_sadb_dropper); 5547 return; 5548 } 5549 newbie->ipsacq_policy = pp; 5550 if (pp != NULL) { 5551 IPPOL_REFHOLD(pp); 5552 } 5553 IPACT_REFHOLD(ap); 5554 newbie->ipsacq_act = ap; 5555 newbie->ipsacq_linklock = &bucket->iacqf_lock; 5556 newbie->ipsacq_next = bucket->iacqf_ipsacq; 5557 newbie->ipsacq_ptpn = &bucket->iacqf_ipsacq; 5558 if (newbie->ipsacq_next != NULL) 5559 newbie->ipsacq_next->ipsacq_ptpn = &newbie->ipsacq_next; 5560 5561 bucket->iacqf_ipsacq = newbie; 5562 mutex_init(&newbie->ipsacq_lock, NULL, MUTEX_DEFAULT, NULL); 5563 mutex_enter(&newbie->ipsacq_lock); 5564 } 5565 5566 /* 5567 * XXX MLS does it actually help us to drop the bucket lock here? 5568 * we have inserted a half-built, locked acquire record into the 5569 * bucket. any competing thread will now be able to lock the bucket 5570 * to scan it, but will immediately pile up on the new acquire 5571 * record's lock; I don't think we gain anything here other than to 5572 * disperse blame for lock contention. 5573 * 5574 * we might be able to dispense with acquire record locks entirely.. 5575 * just use the bucket locks.. 5576 */ 5577 5578 mutex_exit(&bucket->iacqf_lock); 5579 5580 /* 5581 * This assert looks silly for now, but we may need to enter newbie's 5582 * mutex during a search. 5583 */ 5584 ASSERT(MUTEX_HELD(&newbie->ipsacq_lock)); 5585 5586 /* 5587 * Make the ip_xmit_attr_t into something we can queue. 5588 * If no memory it frees datamp. 5589 */ 5590 asyncmp = ip_xmit_attr_to_mblk(ixa); 5591 if (asyncmp != NULL) 5592 linkb(asyncmp, datamp); 5593 5594 /* Queue up packet. Use b_next. */ 5595 5596 if (asyncmp == NULL) { 5597 /* Statistics for allocation failure */ 5598 if (ixa->ixa_flags & IXAF_IS_IPV4) { 5599 BUMP_MIB(&ixa->ixa_ipst->ips_ip_mib, 5600 ipIfStatsOutDiscards); 5601 } else { 5602 BUMP_MIB(&ixa->ixa_ipst->ips_ip6_mib, 5603 ipIfStatsOutDiscards); 5604 } 5605 ip_drop_output("No memory for asyncmp", datamp, NULL); 5606 freemsg(datamp); 5607 /* 5608 * The acquire record will be freed quickly if it's new 5609 * (ipsacq_expire == 0), and will proceed as if no packet 5610 * showed up if not. 5611 */ 5612 mutex_exit(&newbie->ipsacq_lock); 5613 return; 5614 } else if (newbie->ipsacq_numpackets == 0) { 5615 /* First one. */ 5616 newbie->ipsacq_mp = asyncmp; 5617 newbie->ipsacq_numpackets = 1; 5618 newbie->ipsacq_expire = gethrestime_sec(); 5619 /* 5620 * Extended ACQUIRE with both AH+ESP will use ESP's timeout 5621 * value. 5622 */ 5623 newbie->ipsacq_expire += *spp->s_acquire_timeout; 5624 newbie->ipsacq_seq = seq; 5625 newbie->ipsacq_addrfam = af; 5626 5627 newbie->ipsacq_srcport = ixa->ixa_ipsec_src_port; 5628 newbie->ipsacq_dstport = ixa->ixa_ipsec_dst_port; 5629 newbie->ipsacq_icmp_type = ixa->ixa_ipsec_icmp_type; 5630 newbie->ipsacq_icmp_code = ixa->ixa_ipsec_icmp_code; 5631 if (tunnel_mode) { 5632 newbie->ipsacq_inneraddrfam = ixa->ixa_ipsec_inaf; 5633 newbie->ipsacq_proto = ixa->ixa_ipsec_inaf == AF_INET6 ? 
5634 IPPROTO_IPV6 : IPPROTO_ENCAP; 5635 newbie->ipsacq_innersrcpfx = ixa->ixa_ipsec_insrcpfx; 5636 newbie->ipsacq_innerdstpfx = ixa->ixa_ipsec_indstpfx; 5637 IPSA_COPY_ADDR(newbie->ipsacq_innersrc, 5638 ixa->ixa_ipsec_insrc, ixa->ixa_ipsec_inaf); 5639 IPSA_COPY_ADDR(newbie->ipsacq_innerdst, 5640 ixa->ixa_ipsec_indst, ixa->ixa_ipsec_inaf); 5641 } else { 5642 newbie->ipsacq_proto = ixa->ixa_ipsec_proto; 5643 } 5644 newbie->ipsacq_unique_id = unique_id; 5645 5646 if (tsl != NULL) { 5647 label_hold(tsl); 5648 newbie->ipsacq_tsl = tsl; 5649 } 5650 } else { 5651 /* Scan to the end of the list & insert. */ 5652 mblk_t *lastone = newbie->ipsacq_mp; 5653 5654 while (lastone->b_next != NULL) 5655 lastone = lastone->b_next; 5656 lastone->b_next = asyncmp; 5657 if (newbie->ipsacq_numpackets++ == ipsacq_maxpackets) { 5658 newbie->ipsacq_numpackets = ipsacq_maxpackets; 5659 lastone = newbie->ipsacq_mp; 5660 newbie->ipsacq_mp = lastone->b_next; 5661 lastone->b_next = NULL; 5662 5663 /* Freeing the async message */ 5664 lastone = ip_xmit_attr_free_mblk(lastone); 5665 ip_drop_packet(lastone, B_FALSE, NULL, 5666 DROPPER(ipss, ipds_sadb_acquire_toofull), 5667 &ipss->ipsec_sadb_dropper); 5668 } else { 5669 IP_ACQUIRE_STAT(ipss, qhiwater, 5670 newbie->ipsacq_numpackets); 5671 } 5672 } 5673 5674 /* 5675 * Reset addresses. Set them to the most recently added mblk chain, 5676 * so that the address pointers in the acquire record will point 5677 * at an mblk still attached to the acquire list. 5678 */ 5679 5680 newbie->ipsacq_srcaddr = src; 5681 newbie->ipsacq_dstaddr = dst; 5682 5683 /* 5684 * If the acquire record has more than one queued packet, we've 5685 * already sent an ACQUIRE, and don't need to repeat ourself. 5686 */ 5687 if (newbie->ipsacq_seq != seq || newbie->ipsacq_numpackets > 1) { 5688 /* I have an acquire outstanding already! */ 5689 mutex_exit(&newbie->ipsacq_lock); 5690 return; 5691 } 5692 5693 if (need_esp) { 5694 ESP_BUMP_STAT(espstack, acquire_requests); 5695 q = espstack->esp_pfkey_q; 5696 } else { 5697 /* 5698 * Two cases get us here: 5699 * 1.) AH-only policy. 5700 * 5701 * 2.) A continuation of an AH+ESP policy, and this is the 5702 * post-ESP, AH-needs-to-send-a-regular-ACQUIRE case. 5703 * (i.e. called from esp_do_outbound_ah().) 5704 */ 5705 AH_BUMP_STAT(ahstack, acquire_requests); 5706 q = ahstack->ah_pfkey_q; 5707 } 5708 5709 /* 5710 * Get selectors and other policy-expression bits needed for an 5711 * ACQUIRE. 5712 */ 5713 bzero(&sel, sizeof (sel)); 5714 sel.ips_isv4 = (ixa->ixa_flags & IXAF_IS_IPV4) != 0; 5715 if (tunnel_mode) { 5716 sel.ips_protocol = (ixa->ixa_ipsec_inaf == AF_INET) ? 5717 IPPROTO_ENCAP : IPPROTO_IPV6; 5718 } else { 5719 sel.ips_protocol = ixa->ixa_ipsec_proto; 5720 sel.ips_local_port = ixa->ixa_ipsec_src_port; 5721 sel.ips_remote_port = ixa->ixa_ipsec_dst_port; 5722 } 5723 sel.ips_icmp_type = ixa->ixa_ipsec_icmp_type; 5724 sel.ips_icmp_code = ixa->ixa_ipsec_icmp_code; 5725 sel.ips_is_icmp_inv_acq = 0; 5726 if (af == AF_INET) { 5727 sel.ips_local_addr_v4 = ipha->ipha_src; 5728 sel.ips_remote_addr_v4 = ipha->ipha_dst; 5729 } else { 5730 sel.ips_local_addr_v6 = ip6h->ip6_src; 5731 sel.ips_remote_addr_v6 = ip6h->ip6_dst; 5732 } 5733 5734 5735 /* 5736 * 1. Generate addresses, kmc, and sensitivity. These are "common" 5737 * and should be an mblk pointed to by common. TBD -- eventually it 5738 * will include triggering packet contents as more address extensions. 5739 * 5740 * 2. Generate ACQUIRE & KEYSOCK_OUT and single-protocol proposal. 
5741 * These are "regular" and "prop". String regular->b_cont->b_cont = 5742 * common, common->b_cont = prop. 5743 * 5744 * 3. If extended register got turned on, generate EXT_ACQUIRE & 5745 * KEYSOCK_OUT and multi-protocol eprop. These are "extended" and 5746 * "eprop". String extended->b_cont->b_cont = dupb(common) and 5747 * extended->b_cont->b_cont->b_cont = prop. 5748 * 5749 * 4. Deliver: putnext(q, regular) and if there, putnext(q, extended). 5750 */ 5751 5752 regular = extended = prop = eprop = NULL; 5753 5754 common = sadb_acquire_msg_common(&sel, pp, ap, tunnel_mode, tsl, NULL); 5755 if (common == NULL) 5756 goto bail; 5757 5758 regular = sadb_acquire_msg_base(0, (need_esp ? 5759 SADB_SATYPE_ESP : SADB_SATYPE_AH), newbie->ipsacq_seq, 0); 5760 if (regular == NULL) 5761 goto bail; 5762 5763 /* 5764 * Pardon the boolean cleverness. At least one of need_* must be true. 5765 * If they are equal, it's an AH & ESP policy and ESP needs to go 5766 * first. If they aren't, just check the contents of need_esp. 5767 */ 5768 prop = sadb_acquire_prop(ap, ns, need_esp); 5769 if (prop == NULL) 5770 goto bail; 5771 5772 /* Link the parts together. */ 5773 regular->b_cont->b_cont = common; 5774 common->b_cont = prop; 5775 /* 5776 * Prop is now linked, so don't freemsg() it if the extended 5777 * construction goes off the rails. 5778 */ 5779 prop = NULL; 5780 5781 ((sadb_msg_t *)(regular->b_cont->b_rptr))->sadb_msg_len = 5782 SADB_8TO64(msgsize(regular->b_cont)); 5783 5784 /* 5785 * If we need an extended ACQUIRE, build it here. 5786 */ 5787 if (keysock_extended_reg(ns)) { 5788 /* NOTE: "common" still points to what we need. */ 5789 extended = sadb_acquire_msg_base(0, 0, newbie->ipsacq_seq, 0); 5790 if (extended == NULL) { 5791 common = NULL; 5792 goto bail; 5793 } 5794 5795 extended->b_cont->b_cont = dupb(common); 5796 common = NULL; 5797 if (extended->b_cont->b_cont == NULL) 5798 goto bail; 5799 5800 eprop = sadb_acquire_extended_prop(ap, ns); 5801 if (eprop == NULL) 5802 goto bail; 5803 extended->b_cont->b_cont->b_cont = eprop; 5804 5805 ((sadb_msg_t *)(extended->b_cont->b_rptr))->sadb_msg_len = 5806 SADB_8TO64(msgsize(extended->b_cont)); 5807 } 5808 5809 /* So we don't hold a lock across putnext()... */ 5810 mutex_exit(&newbie->ipsacq_lock); 5811 5812 if (extended != NULL) 5813 putnext(q, extended); 5814 ASSERT(regular != NULL); 5815 putnext(q, regular); 5816 return; 5817 5818 bail: 5819 /* Make this acquire record go away quickly... */ 5820 newbie->ipsacq_expire = 0; 5821 /* Exploit freemsg(NULL) being legal for fun & profit. */ 5822 freemsg(common); 5823 freemsg(prop); 5824 freemsg(extended); 5825 freemsg(regular); 5826 mutex_exit(&newbie->ipsacq_lock); 5827 } 5828 5829 /* 5830 * Unlink and free an acquire record. 5831 */ 5832 void 5833 sadb_destroy_acquire(ipsacq_t *acqrec, netstack_t *ns) 5834 { 5835 mblk_t *mp; 5836 ipsec_stack_t *ipss = ns->netstack_ipsec; 5837 5838 ASSERT(MUTEX_HELD(acqrec->ipsacq_linklock)); 5839 5840 if (acqrec->ipsacq_policy != NULL) { 5841 IPPOL_REFRELE(acqrec->ipsacq_policy); 5842 } 5843 if (acqrec->ipsacq_act != NULL) { 5844 IPACT_REFRELE(acqrec->ipsacq_act); 5845 } 5846 5847 /* Unlink */ 5848 *(acqrec->ipsacq_ptpn) = acqrec->ipsacq_next; 5849 if (acqrec->ipsacq_next != NULL) 5850 acqrec->ipsacq_next->ipsacq_ptpn = acqrec->ipsacq_ptpn; 5851 5852 if (acqrec->ipsacq_tsl != NULL) { 5853 label_rele(acqrec->ipsacq_tsl); 5854 acqrec->ipsacq_tsl = NULL; 5855 } 5856 5857 /* 5858 * Free hanging mp's. 5859 * 5860 * XXX Instead of freemsg(), perhaps use IPSEC_REQ_FAILED. 
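 *
 * For reference, the pending chain being torn down looks roughly like
 * this (a sketch, assuming two queued packets):
 *
 *	ipsacq_mp -> [attr mblk] --b_next--> [attr mblk] --> NULL
 *	                  |                       |
 *	               b_cont                  b_cont
 *	                  |                       |
 *	             [data mblk]             [data mblk]
 *
 * which is why each element is run through ip_xmit_attr_free_mblk()
 * before the surviving data mblk is handed to ip_drop_packet() below.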
5861 */ 5862 5863 mutex_enter(&acqrec->ipsacq_lock); 5864 while (acqrec->ipsacq_mp != NULL) { 5865 mp = acqrec->ipsacq_mp; 5866 acqrec->ipsacq_mp = mp->b_next; 5867 mp->b_next = NULL; 5868 /* Freeing the async message */ 5869 mp = ip_xmit_attr_free_mblk(mp); 5870 ip_drop_packet(mp, B_FALSE, NULL, 5871 DROPPER(ipss, ipds_sadb_acquire_timeout), 5872 &ipss->ipsec_sadb_dropper); 5873 } 5874 mutex_exit(&acqrec->ipsacq_lock); 5875 5876 /* Free */ 5877 mutex_destroy(&acqrec->ipsacq_lock); 5878 kmem_free(acqrec, sizeof (*acqrec)); 5879 } 5880 5881 /* 5882 * Destroy an acquire list fanout. 5883 */ 5884 static void 5885 sadb_destroy_acqlist(iacqf_t **listp, uint_t numentries, boolean_t forever, 5886 netstack_t *ns) 5887 { 5888 int i; 5889 iacqf_t *list = *listp; 5890 5891 if (list == NULL) 5892 return; 5893 5894 for (i = 0; i < numentries; i++) { 5895 mutex_enter(&(list[i].iacqf_lock)); 5896 while (list[i].iacqf_ipsacq != NULL) 5897 sadb_destroy_acquire(list[i].iacqf_ipsacq, ns); 5898 mutex_exit(&(list[i].iacqf_lock)); 5899 if (forever) 5900 mutex_destroy(&(list[i].iacqf_lock)); 5901 } 5902 5903 if (forever) { 5904 *listp = NULL; 5905 kmem_free(list, numentries * sizeof (*list)); 5906 } 5907 } 5908 5909 /* 5910 * Create an algorithm descriptor for an extended ACQUIRE. Filter crypto 5911 * framework's view of reality vs. IPsec's. EF's wins, BTW. 5912 */ 5913 static uint8_t * 5914 sadb_new_algdesc(uint8_t *start, uint8_t *limit, 5915 sadb_x_ecomb_t *ecomb, uint8_t satype, uint8_t algtype, 5916 uint8_t alg, uint16_t minbits, uint16_t maxbits, ipsec_stack_t *ipss) 5917 { 5918 uint8_t *cur = start; 5919 ipsec_alginfo_t *algp; 5920 sadb_x_algdesc_t *algdesc = (sadb_x_algdesc_t *)cur; 5921 5922 cur += sizeof (*algdesc); 5923 if (cur >= limit) 5924 return (NULL); 5925 5926 ecomb->sadb_x_ecomb_numalgs++; 5927 5928 /* 5929 * Normalize vs. crypto framework's limits. This way, you can specify 5930 * a stronger policy, and when the framework loads a stronger version, 5931 * you can just keep plowing w/o rewhacking your SPD. 5932 */ 5933 rw_enter(&ipss->ipsec_alg_lock, RW_READER); 5934 algp = ipss->ipsec_alglists[(algtype == SADB_X_ALGTYPE_AUTH) ? 5935 IPSEC_ALG_AUTH : IPSEC_ALG_ENCR][alg]; 5936 if (algp == NULL) { 5937 rw_exit(&ipss->ipsec_alg_lock); 5938 return (NULL); /* Algorithm doesn't exist. Fail gracefully. */ 5939 } 5940 if (minbits < algp->alg_ef_minbits) 5941 minbits = algp->alg_ef_minbits; 5942 if (maxbits > algp->alg_ef_maxbits) 5943 maxbits = algp->alg_ef_maxbits; 5944 rw_exit(&ipss->ipsec_alg_lock); 5945 5946 algdesc->sadb_x_algdesc_saltbits = SADB_8TO1(algp->alg_saltlen); 5947 algdesc->sadb_x_algdesc_satype = satype; 5948 algdesc->sadb_x_algdesc_algtype = algtype; 5949 algdesc->sadb_x_algdesc_alg = alg; 5950 algdesc->sadb_x_algdesc_minbits = minbits; 5951 algdesc->sadb_x_algdesc_maxbits = maxbits; 5952 5953 return (cur); 5954 } 5955 5956 /* 5957 * Convert the given ipsec_action_t into an ecomb starting at *ecomb 5958 * which must fit before *limit 5959 * 5960 * return NULL if we ran out of room or a pointer to the end of the ecomb. 
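 *
 * For example (a sketch only), an ESP action of "encr aes auth sha1"
 * would be rendered as:
 *
 *	[sadb_x_ecomb_t, sadb_x_ecomb_numalgs = 2]
 *	[sadb_x_algdesc_t: ESP / AUTH  / sha1, min..max bits]
 *	[sadb_x_algdesc_t: ESP / CRYPT / aes,  min..max bits]
 *
 * with "cur" bumped past each structure and compared against "limit"
 * before anything is written, which is where the NULL return comes from.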
5961 */ 5962 static uint8_t * 5963 sadb_action_to_ecomb(uint8_t *start, uint8_t *limit, ipsec_action_t *act, 5964 netstack_t *ns) 5965 { 5966 uint8_t *cur = start; 5967 sadb_x_ecomb_t *ecomb = (sadb_x_ecomb_t *)cur; 5968 ipsec_prot_t *ipp; 5969 ipsec_stack_t *ipss = ns->netstack_ipsec; 5970 5971 cur += sizeof (*ecomb); 5972 if (cur >= limit) 5973 return (NULL); 5974 5975 ASSERT(act->ipa_act.ipa_type == IPSEC_ACT_APPLY); 5976 5977 ipp = &act->ipa_act.ipa_apply; 5978 5979 ecomb->sadb_x_ecomb_numalgs = 0; 5980 ecomb->sadb_x_ecomb_reserved = 0; 5981 ecomb->sadb_x_ecomb_reserved2 = 0; 5982 /* 5983 * No limits on allocations, since we really don't support that 5984 * concept currently. 5985 */ 5986 ecomb->sadb_x_ecomb_soft_allocations = 0; 5987 ecomb->sadb_x_ecomb_hard_allocations = 0; 5988 5989 /* 5990 * XXX TBD: Policy or global parameters will eventually be 5991 * able to fill in some of these. 5992 */ 5993 ecomb->sadb_x_ecomb_flags = 0; 5994 ecomb->sadb_x_ecomb_soft_bytes = 0; 5995 ecomb->sadb_x_ecomb_hard_bytes = 0; 5996 ecomb->sadb_x_ecomb_soft_addtime = 0; 5997 ecomb->sadb_x_ecomb_hard_addtime = 0; 5998 ecomb->sadb_x_ecomb_soft_usetime = 0; 5999 ecomb->sadb_x_ecomb_hard_usetime = 0; 6000 6001 if (ipp->ipp_use_ah) { 6002 cur = sadb_new_algdesc(cur, limit, ecomb, 6003 SADB_SATYPE_AH, SADB_X_ALGTYPE_AUTH, ipp->ipp_auth_alg, 6004 ipp->ipp_ah_minbits, ipp->ipp_ah_maxbits, ipss); 6005 if (cur == NULL) 6006 return (NULL); 6007 ipsecah_fill_defs(ecomb, ns); 6008 } 6009 6010 if (ipp->ipp_use_esp) { 6011 if (ipp->ipp_use_espa) { 6012 cur = sadb_new_algdesc(cur, limit, ecomb, 6013 SADB_SATYPE_ESP, SADB_X_ALGTYPE_AUTH, 6014 ipp->ipp_esp_auth_alg, 6015 ipp->ipp_espa_minbits, 6016 ipp->ipp_espa_maxbits, ipss); 6017 if (cur == NULL) 6018 return (NULL); 6019 } 6020 6021 cur = sadb_new_algdesc(cur, limit, ecomb, 6022 SADB_SATYPE_ESP, SADB_X_ALGTYPE_CRYPT, 6023 ipp->ipp_encr_alg, 6024 ipp->ipp_espe_minbits, 6025 ipp->ipp_espe_maxbits, ipss); 6026 if (cur == NULL) 6027 return (NULL); 6028 /* Fill in lifetimes if and only if AH didn't already... */ 6029 if (!ipp->ipp_use_ah) 6030 ipsecesp_fill_defs(ecomb, ns); 6031 } 6032 6033 return (cur); 6034 } 6035 6036 #include <sys/tsol/label_macro.h> /* XXX should not need this */ 6037 6038 /* 6039 * From a cred_t, construct a sensitivity label extension 6040 * 6041 * We send up a fixed-size sensitivity label bitmap, and are perhaps 6042 * overly chummy with the underlying data structures here. 6043 */ 6044 6045 /* ARGSUSED */ 6046 int 6047 sadb_sens_len_from_label(ts_label_t *tsl) 6048 { 6049 int baselen = sizeof (sadb_sens_t) + _C_LEN * 4; 6050 return (roundup(baselen, sizeof (uint64_t))); 6051 } 6052 6053 void 6054 sadb_sens_from_label(sadb_sens_t *sens, int exttype, ts_label_t *tsl, 6055 int senslen) 6056 { 6057 uint8_t *bitmap; 6058 bslabel_t *sl; 6059 6060 /* LINTED */ 6061 ASSERT((_C_LEN & 1) == 0); 6062 ASSERT((senslen & 7) == 0); 6063 6064 sl = label2bslabel(tsl); 6065 6066 sens->sadb_sens_exttype = exttype; 6067 sens->sadb_sens_len = SADB_8TO64(senslen); 6068 6069 sens->sadb_sens_dpd = tsl->tsl_doi; 6070 sens->sadb_sens_sens_level = LCLASS(sl); 6071 sens->sadb_sens_integ_level = 0; /* TBD */ 6072 sens->sadb_sens_sens_len = _C_LEN >> 1; 6073 sens->sadb_sens_integ_len = 0; /* TBD */ 6074 sens->sadb_x_sens_flags = 0; 6075 6076 bitmap = (uint8_t *)(sens + 1); 6077 bcopy(&(((_bslabel_impl_t *)sl)->compartments), bitmap, _C_LEN * 4); 6078 } 6079 6080 /* 6081 * Okay, how do we report errors/invalid labels from this? 
6082 * With a special designated "not a label" cred_t ? 6083 */ 6084 /* ARGSUSED */ 6085 ts_label_t * 6086 sadb_label_from_sens(sadb_sens_t *sens, uint64_t *bitmap) 6087 { 6088 int bitmap_len = SADB_64TO8(sens->sadb_sens_sens_len); 6089 bslabel_t sl; 6090 ts_label_t *tsl; 6091 6092 if (sens->sadb_sens_integ_level != 0) 6093 return (NULL); 6094 if (sens->sadb_sens_integ_len != 0) 6095 return (NULL); 6096 if (bitmap_len > _C_LEN * 4) 6097 return (NULL); 6098 6099 bsllow(&sl); 6100 LCLASS_SET((_bslabel_impl_t *)&sl, sens->sadb_sens_sens_level); 6101 bcopy(bitmap, &((_bslabel_impl_t *)&sl)->compartments, 6102 bitmap_len); 6103 6104 tsl = labelalloc(&sl, sens->sadb_sens_dpd, KM_NOSLEEP); 6105 if (tsl == NULL) 6106 return (NULL); 6107 6108 if (sens->sadb_x_sens_flags & SADB_X_SENS_UNLABELED) 6109 tsl->tsl_flags |= TSLF_UNLABELED; 6110 return (tsl); 6111 } 6112 6113 /* End XXX label-library-leakage */ 6114 6115 /* 6116 * Given an SADB_GETSPI message, find an appropriately ranged SA and 6117 * allocate an SA. If there are message improprieties, return (ipsa_t *)-1. 6118 * If there was a memory allocation error, return NULL. (Assume NULL != 6119 * (ipsa_t *)-1). 6120 * 6121 * master_spi is passed in host order. 6122 */ 6123 ipsa_t * 6124 sadb_getspi(keysock_in_t *ksi, uint32_t master_spi, int *diagnostic, 6125 netstack_t *ns, uint_t sa_type) 6126 { 6127 sadb_address_t *src = 6128 (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_SRC], 6129 *dst = (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_DST]; 6130 sadb_spirange_t *range = 6131 (sadb_spirange_t *)ksi->ks_in_extv[SADB_EXT_SPIRANGE]; 6132 struct sockaddr_in *ssa, *dsa; 6133 struct sockaddr_in6 *ssa6, *dsa6; 6134 uint32_t *srcaddr, *dstaddr; 6135 sa_family_t af; 6136 uint32_t add, min, max; 6137 uint8_t protocol = 6138 (sa_type == SADB_SATYPE_AH) ? IPPROTO_AH : IPPROTO_ESP; 6139 6140 if (src == NULL) { 6141 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_SRC; 6142 return ((ipsa_t *)-1); 6143 } 6144 if (dst == NULL) { 6145 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_DST; 6146 return ((ipsa_t *)-1); 6147 } 6148 if (range == NULL) { 6149 *diagnostic = SADB_X_DIAGNOSTIC_MISSING_RANGE; 6150 return ((ipsa_t *)-1); 6151 } 6152 6153 min = ntohl(range->sadb_spirange_min); 6154 max = ntohl(range->sadb_spirange_max); 6155 dsa = (struct sockaddr_in *)(dst + 1); 6156 dsa6 = (struct sockaddr_in6 *)dsa; 6157 6158 ssa = (struct sockaddr_in *)(src + 1); 6159 ssa6 = (struct sockaddr_in6 *)ssa; 6160 ASSERT(dsa->sin_family == ssa->sin_family); 6161 6162 srcaddr = ALL_ZEROES_PTR; 6163 af = dsa->sin_family; 6164 switch (af) { 6165 case AF_INET: 6166 if (src != NULL) 6167 srcaddr = (uint32_t *)(&ssa->sin_addr); 6168 dstaddr = (uint32_t *)(&dsa->sin_addr); 6169 break; 6170 case AF_INET6: 6171 if (src != NULL) 6172 srcaddr = (uint32_t *)(&ssa6->sin6_addr); 6173 dstaddr = (uint32_t *)(&dsa6->sin6_addr); 6174 break; 6175 default: 6176 *diagnostic = SADB_X_DIAGNOSTIC_BAD_DST_AF; 6177 return ((ipsa_t *)-1); 6178 } 6179 6180 if (master_spi < min || master_spi > max) { 6181 /* Return a random value in the range. */ 6182 if (cl_inet_getspi) { 6183 cl_inet_getspi(ns->netstack_stackid, protocol, 6184 (uint8_t *)&add, sizeof (add), NULL); 6185 } else { 6186 (void) random_get_pseudo_bytes((uint8_t *)&add, 6187 sizeof (add)); 6188 } 6189 master_spi = min + (add % (max - min + 1)); 6190 } 6191 6192 /* 6193 * Since master_spi is passed in host order, we need to htonl() it 6194 * for the purposes of creating a new SA. 
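 *
 * A worked example of the range logic above (numbers are illustrative):
 * with sadb_spirange_min = 0x1000 and sadb_spirange_max = 0x1fff, a
 * master_spi of 0 is out of range, so a random 32-bit "add" is drawn and
 * the SPI becomes min + (add % (max - min + 1)), i.e. something in
 * [0x1000, 0x1fff]; a master_spi already inside the range is used as-is,
 * and either way it is htonl()ed just below before the larval SA is built.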
6195 */ 6196 return (sadb_makelarvalassoc(htonl(master_spi), srcaddr, dstaddr, af, 6197 ns)); 6198 } 6199 6200 /* 6201 * 6202 * Locate an ACQUIRE and nuke it. If I have an samsg that's larger than the 6203 * base header, just ignore it. Otherwise, lock down the whole ACQUIRE list 6204 * and scan for the sequence number in question. I may wish to accept an 6205 * address pair with it, for easier searching. 6206 * 6207 * Caller frees the message, so we don't have to here. 6208 * 6209 * NOTE: The pfkey_q parameter may be used in the future for ACQUIRE 6210 * failures. 6211 */ 6212 /* ARGSUSED */ 6213 void 6214 sadb_in_acquire(sadb_msg_t *samsg, sadbp_t *sp, queue_t *pfkey_q, 6215 netstack_t *ns) 6216 { 6217 int i; 6218 ipsacq_t *acqrec; 6219 iacqf_t *bucket; 6220 6221 /* 6222 * I only accept the base header for this! 6223 * Though to be honest, requiring the dst address would help 6224 * immensely. 6225 * 6226 * XXX There are already cases where I can get the dst address. 6227 */ 6228 if (samsg->sadb_msg_len > SADB_8TO64(sizeof (*samsg))) 6229 return; 6230 6231 /* 6232 * Using the samsg->sadb_msg_seq, find the ACQUIRE record, delete it, 6233 * (and in the future send a message to IP with the appropriate error 6234 * number). 6235 * 6236 * Q: Do I want to reject if pid != 0? 6237 */ 6238 6239 for (i = 0; i < sp->s_v4.sdb_hashsize; i++) { 6240 bucket = &sp->s_v4.sdb_acq[i]; 6241 mutex_enter(&bucket->iacqf_lock); 6242 for (acqrec = bucket->iacqf_ipsacq; acqrec != NULL; 6243 acqrec = acqrec->ipsacq_next) { 6244 if (samsg->sadb_msg_seq == acqrec->ipsacq_seq) 6245 break; /* for acqrec... loop. */ 6246 } 6247 if (acqrec != NULL) 6248 break; /* for i = 0... loop. */ 6249 6250 mutex_exit(&bucket->iacqf_lock); 6251 } 6252 6253 if (acqrec == NULL) { 6254 for (i = 0; i < sp->s_v6.sdb_hashsize; i++) { 6255 bucket = &sp->s_v6.sdb_acq[i]; 6256 mutex_enter(&bucket->iacqf_lock); 6257 for (acqrec = bucket->iacqf_ipsacq; acqrec != NULL; 6258 acqrec = acqrec->ipsacq_next) { 6259 if (samsg->sadb_msg_seq == acqrec->ipsacq_seq) 6260 break; /* for acqrec... loop. */ 6261 } 6262 if (acqrec != NULL) 6263 break; /* for i = 0... loop. */ 6264 6265 mutex_exit(&bucket->iacqf_lock); 6266 } 6267 } 6268 6269 6270 if (acqrec == NULL) 6271 return; 6272 6273 /* 6274 * What do I do with the errno and IP? I may need mp's services a 6275 * little more. See sadb_destroy_acquire() for future directions 6276 * beyond free the mblk chain on the acquire record. 6277 */ 6278 6279 ASSERT(&bucket->iacqf_lock == acqrec->ipsacq_linklock); 6280 sadb_destroy_acquire(acqrec, ns); 6281 /* Have to exit mutex here, because of breaking out of for loop. */ 6282 mutex_exit(&bucket->iacqf_lock); 6283 } 6284 6285 /* 6286 * The following functions work with the replay windows of an SA. They assume 6287 * the ipsa->ipsa_replay_arr is an array of uint64_t, and that the bit vector 6288 * represents the highest sequence number packet received, and back 6289 * (ipsa->ipsa_replay_wsize) packets. 6290 */ 6291 6292 /* 6293 * Is the replay bit set? 6294 */ 6295 static boolean_t 6296 ipsa_is_replay_set(ipsa_t *ipsa, uint32_t offset) 6297 { 6298 uint64_t bit = (uint64_t)1 << (uint64_t)(offset & 63); 6299 6300 return ((bit & ipsa->ipsa_replay_arr[offset >> 6]) ? B_TRUE : B_FALSE); 6301 } 6302 6303 /* 6304 * Shift the bits of the replay window over. 
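 *
 * Worked example (sketch): with a 128-bit window (two uint64_t words) and
 * shift == 3, jump is ((3 - 1) >> 6) + 1 == 1.  The loop runs from the
 * high word down, so word[1] is shifted left by 3 first, then it picks up
 * word[0] >> 61 (the three bits being carried across), and finally
 * word[0] is shifted left by 3.  Bit 0 is left for the caller
 * (sadb_replay_check()) to set for the new highest sequence number seen.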
6305 */ 6306 static void 6307 ipsa_shift_replay(ipsa_t *ipsa, uint32_t shift) 6308 { 6309 int i; 6310 int jump = ((shift - 1) >> 6) + 1; 6311 6312 if (shift == 0) 6313 return; 6314 6315 for (i = (ipsa->ipsa_replay_wsize - 1) >> 6; i >= 0; i--) { 6316 if (i + jump <= (ipsa->ipsa_replay_wsize - 1) >> 6) { 6317 ipsa->ipsa_replay_arr[i + jump] |= 6318 ipsa->ipsa_replay_arr[i] >> (64 - (shift & 63)); 6319 } 6320 ipsa->ipsa_replay_arr[i] <<= shift; 6321 } 6322 } 6323 6324 /* 6325 * Set a bit in the bit vector. 6326 */ 6327 static void 6328 ipsa_set_replay(ipsa_t *ipsa, uint32_t offset) 6329 { 6330 uint64_t bit = (uint64_t)1 << (uint64_t)(offset & 63); 6331 6332 ipsa->ipsa_replay_arr[offset >> 6] |= bit; 6333 } 6334 6335 #define SADB_MAX_REPLAY_VALUE 0xffffffff 6336 6337 /* 6338 * Assume caller has NOT done ntohl() already on seq. Check to see 6339 * if replay sequence number "seq" has been seen already. 6340 */ 6341 boolean_t 6342 sadb_replay_check(ipsa_t *ipsa, uint32_t seq) 6343 { 6344 boolean_t rc; 6345 uint32_t diff; 6346 6347 if (ipsa->ipsa_replay_wsize == 0) 6348 return (B_TRUE); 6349 6350 /* 6351 * NOTE: I've already checked for 0 on the wire in sadb_replay_peek(). 6352 */ 6353 6354 /* Convert sequence number into host order before holding the mutex. */ 6355 seq = ntohl(seq); 6356 6357 mutex_enter(&ipsa->ipsa_lock); 6358 6359 /* Initialize inbound SA's ipsa_replay field to last one received. */ 6360 if (ipsa->ipsa_replay == 0) 6361 ipsa->ipsa_replay = 1; 6362 6363 if (seq > ipsa->ipsa_replay) { 6364 /* 6365 * I have received a new "highest value received". Shift 6366 * the replay window over. 6367 */ 6368 diff = seq - ipsa->ipsa_replay; 6369 if (diff < ipsa->ipsa_replay_wsize) { 6370 /* In replay window, shift bits over. */ 6371 ipsa_shift_replay(ipsa, diff); 6372 } else { 6373 /* WAY FAR AHEAD, clear bits and start again. */ 6374 bzero(ipsa->ipsa_replay_arr, 6375 sizeof (ipsa->ipsa_replay_arr)); 6376 } 6377 ipsa_set_replay(ipsa, 0); 6378 ipsa->ipsa_replay = seq; 6379 rc = B_TRUE; 6380 goto done; 6381 } 6382 diff = ipsa->ipsa_replay - seq; 6383 if (diff >= ipsa->ipsa_replay_wsize || ipsa_is_replay_set(ipsa, diff)) { 6384 rc = B_FALSE; 6385 goto done; 6386 } 6387 /* Set this packet as seen. */ 6388 ipsa_set_replay(ipsa, diff); 6389 6390 rc = B_TRUE; 6391 done: 6392 mutex_exit(&ipsa->ipsa_lock); 6393 return (rc); 6394 } 6395 6396 /* 6397 * "Peek" and see if we should even bother going through the effort of 6398 * running an authentication check on the sequence number passed in. 6399 * this takes into account packets that are below the replay window, 6400 * and collisions with already replayed packets. Return B_TRUE if it 6401 * is okay to proceed, B_FALSE if this packet should be dropped immediately. 6402 * Assume same byte-ordering as sadb_replay_check. 6403 */ 6404 boolean_t 6405 sadb_replay_peek(ipsa_t *ipsa, uint32_t seq) 6406 { 6407 boolean_t rc = B_FALSE; 6408 uint32_t diff; 6409 6410 if (ipsa->ipsa_replay_wsize == 0) 6411 return (B_TRUE); 6412 6413 /* 6414 * 0 is 0, regardless of byte order... :) 6415 * 6416 * If I get 0 on the wire (and there is a replay window) then the 6417 * sender most likely wrapped. This ipsa may need to be marked or 6418 * something. 
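 *
 * Concrete sketch of the checks below (illustrative numbers): with
 * ipsa_replay == 1000 and a 128-packet window, any seq of 871 or less is
 * below the window and is rejected outright; a seq of 990 is inside the
 * window and passes only if bit (1000 - 990) is not already set; and any
 * seq above 1000 passes, since it will advance the window.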
6419 */ 6420 if (seq == 0) 6421 return (B_FALSE); 6422 6423 seq = ntohl(seq); 6424 mutex_enter(&ipsa->ipsa_lock); 6425 if (seq < ipsa->ipsa_replay - ipsa->ipsa_replay_wsize && 6426 ipsa->ipsa_replay >= ipsa->ipsa_replay_wsize) 6427 goto done; 6428 6429 /* 6430 * If I've hit 0xffffffff, then quite honestly, I don't need to 6431 * bother with formalities. I'm not accepting any more packets 6432 * on this SA. 6433 */ 6434 if (ipsa->ipsa_replay == SADB_MAX_REPLAY_VALUE) { 6435 /* 6436 * Since we're already holding the lock, update the 6437 * expire time ala. sadb_replay_delete() and return. 6438 */ 6439 ipsa->ipsa_hardexpiretime = (time_t)1; 6440 goto done; 6441 } 6442 6443 if (seq <= ipsa->ipsa_replay) { 6444 /* 6445 * This seq is in the replay window. I'm not below it, 6446 * because I already checked for that above! 6447 */ 6448 diff = ipsa->ipsa_replay - seq; 6449 if (ipsa_is_replay_set(ipsa, diff)) 6450 goto done; 6451 } 6452 /* Else return B_TRUE, I'm going to advance the window. */ 6453 6454 rc = B_TRUE; 6455 done: 6456 mutex_exit(&ipsa->ipsa_lock); 6457 return (rc); 6458 } 6459 6460 /* 6461 * Delete a single SA. 6462 * 6463 * For now, use the quick-and-dirty trick of making the association's 6464 * hard-expire lifetime (time_t)1, ensuring deletion by the *_ager(). 6465 */ 6466 void 6467 sadb_replay_delete(ipsa_t *assoc) 6468 { 6469 mutex_enter(&assoc->ipsa_lock); 6470 assoc->ipsa_hardexpiretime = (time_t)1; 6471 mutex_exit(&assoc->ipsa_lock); 6472 } 6473 6474 /* 6475 * Special front-end to ipsec_rl_strlog() dealing with SA failure. 6476 * this is designed to take only a format string with "* %x * %s *", so 6477 * that "spi" is printed first, then "addr" is converted using inet_pton(). 6478 * 6479 * This is abstracted out to save the stack space for only when inet_pton() 6480 * is called. Make sure "spi" is in network order; it usually is when this 6481 * would get called. 6482 */ 6483 void 6484 ipsec_assocfailure(short mid, short sid, char level, ushort_t sl, char *fmt, 6485 uint32_t spi, void *addr, int af, netstack_t *ns) 6486 { 6487 char buf[INET6_ADDRSTRLEN]; 6488 6489 ASSERT(af == AF_INET6 || af == AF_INET); 6490 6491 ipsec_rl_strlog(ns, mid, sid, level, sl, fmt, ntohl(spi), 6492 inet_ntop(af, addr, buf, sizeof (buf))); 6493 } 6494 6495 /* 6496 * Fills in a reference to the policy, if any, from the conn, in *ppp 6497 */ 6498 static void 6499 ipsec_conn_pol(ipsec_selector_t *sel, conn_t *connp, ipsec_policy_t **ppp) 6500 { 6501 ipsec_policy_t *pp; 6502 ipsec_latch_t *ipl = connp->conn_latch; 6503 6504 if ((ipl != NULL) && (connp->conn_ixa->ixa_ipsec_policy != NULL)) { 6505 pp = connp->conn_ixa->ixa_ipsec_policy; 6506 IPPOL_REFHOLD(pp); 6507 } else { 6508 pp = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, sel, 6509 connp->conn_netstack); 6510 } 6511 *ppp = pp; 6512 } 6513 6514 /* 6515 * The following functions scan through active conn_t structures 6516 * and return a reference to the best-matching policy it can find. 6517 * Caller must release the reference. 
6518 */ 6519 static void 6520 ipsec_udp_pol(ipsec_selector_t *sel, ipsec_policy_t **ppp, ip_stack_t *ipst) 6521 { 6522 connf_t *connfp; 6523 conn_t *connp = NULL; 6524 ipsec_selector_t portonly; 6525 6526 bzero((void *)&portonly, sizeof (portonly)); 6527 6528 if (sel->ips_local_port == 0) 6529 return; 6530 6531 connfp = &ipst->ips_ipcl_udp_fanout[IPCL_UDP_HASH(sel->ips_local_port, 6532 ipst)]; 6533 mutex_enter(&connfp->connf_lock); 6534 6535 if (sel->ips_isv4) { 6536 connp = connfp->connf_head; 6537 while (connp != NULL) { 6538 if (IPCL_UDP_MATCH(connp, sel->ips_local_port, 6539 sel->ips_local_addr_v4, sel->ips_remote_port, 6540 sel->ips_remote_addr_v4)) 6541 break; 6542 connp = connp->conn_next; 6543 } 6544 6545 if (connp == NULL) { 6546 /* Try port-only match in IPv6. */ 6547 portonly.ips_local_port = sel->ips_local_port; 6548 sel = &portonly; 6549 } 6550 } 6551 6552 if (connp == NULL) { 6553 connp = connfp->connf_head; 6554 while (connp != NULL) { 6555 if (IPCL_UDP_MATCH_V6(connp, sel->ips_local_port, 6556 sel->ips_local_addr_v6, sel->ips_remote_port, 6557 sel->ips_remote_addr_v6)) 6558 break; 6559 connp = connp->conn_next; 6560 } 6561 6562 if (connp == NULL) { 6563 mutex_exit(&connfp->connf_lock); 6564 return; 6565 } 6566 } 6567 6568 CONN_INC_REF(connp); 6569 mutex_exit(&connfp->connf_lock); 6570 6571 ipsec_conn_pol(sel, connp, ppp); 6572 CONN_DEC_REF(connp); 6573 } 6574 6575 static conn_t * 6576 ipsec_find_listen_conn(uint16_t *pptr, ipsec_selector_t *sel, ip_stack_t *ipst) 6577 { 6578 connf_t *connfp; 6579 conn_t *connp = NULL; 6580 const in6_addr_t *v6addrmatch = &sel->ips_local_addr_v6; 6581 6582 if (sel->ips_local_port == 0) 6583 return (NULL); 6584 6585 connfp = &ipst->ips_ipcl_bind_fanout[ 6586 IPCL_BIND_HASH(sel->ips_local_port, ipst)]; 6587 mutex_enter(&connfp->connf_lock); 6588 6589 if (sel->ips_isv4) { 6590 connp = connfp->connf_head; 6591 while (connp != NULL) { 6592 if (IPCL_BIND_MATCH(connp, IPPROTO_TCP, 6593 sel->ips_local_addr_v4, pptr[1])) 6594 break; 6595 connp = connp->conn_next; 6596 } 6597 6598 if (connp == NULL) { 6599 /* Match to all-zeroes. */ 6600 v6addrmatch = &ipv6_all_zeros; 6601 } 6602 } 6603 6604 if (connp == NULL) { 6605 connp = connfp->connf_head; 6606 while (connp != NULL) { 6607 if (IPCL_BIND_MATCH_V6(connp, IPPROTO_TCP, 6608 *v6addrmatch, pptr[1])) 6609 break; 6610 connp = connp->conn_next; 6611 } 6612 6613 if (connp == NULL) { 6614 mutex_exit(&connfp->connf_lock); 6615 return (NULL); 6616 } 6617 } 6618 6619 CONN_INC_REF(connp); 6620 mutex_exit(&connfp->connf_lock); 6621 return (connp); 6622 } 6623 6624 static void 6625 ipsec_tcp_pol(ipsec_selector_t *sel, ipsec_policy_t **ppp, ip_stack_t *ipst) 6626 { 6627 connf_t *connfp; 6628 conn_t *connp; 6629 uint32_t ports; 6630 uint16_t *pptr = (uint16_t *)&ports; 6631 6632 /* 6633 * Find TCP state in the following order: 6634 * 1.) Connected conns. 6635 * 2.) Listeners. 6636 * 6637 * Even though #2 will be the common case for inbound traffic, only 6638 * following this order insures correctness. 6639 */ 6640 6641 if (sel->ips_local_port == 0) 6642 return; 6643 6644 /* 6645 * 0 should be fport, 1 should be lport. SRC is the local one here. 6646 * See ipsec_construct_inverse_acquire() for details. 
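 *
 * Sketch of what happens below: the two 16-bit ports are packed into the
 * single 32-bit "ports" key (pptr[0] = foreign/remote, pptr[1] = local),
 * which is the form IPCL_CONN_HASH() and IPCL_CONN_MATCH() expect; only
 * if no connected conn_t matches do we fall back to the bind (listener)
 * hash via ipsec_find_listen_conn().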
6647 */ 6648 pptr[0] = sel->ips_remote_port; 6649 pptr[1] = sel->ips_local_port; 6650 6651 connfp = &ipst->ips_ipcl_conn_fanout[ 6652 IPCL_CONN_HASH(sel->ips_remote_addr_v4, ports, ipst)]; 6653 mutex_enter(&connfp->connf_lock); 6654 connp = connfp->connf_head; 6655 6656 if (sel->ips_isv4) { 6657 while (connp != NULL) { 6658 if (IPCL_CONN_MATCH(connp, IPPROTO_TCP, 6659 sel->ips_remote_addr_v4, sel->ips_local_addr_v4, 6660 ports)) 6661 break; 6662 connp = connp->conn_next; 6663 } 6664 } else { 6665 while (connp != NULL) { 6666 if (IPCL_CONN_MATCH_V6(connp, IPPROTO_TCP, 6667 sel->ips_remote_addr_v6, sel->ips_local_addr_v6, 6668 ports)) 6669 break; 6670 connp = connp->conn_next; 6671 } 6672 } 6673 6674 if (connp != NULL) { 6675 CONN_INC_REF(connp); 6676 mutex_exit(&connfp->connf_lock); 6677 } else { 6678 mutex_exit(&connfp->connf_lock); 6679 6680 /* Try the listen hash. */ 6681 if ((connp = ipsec_find_listen_conn(pptr, sel, ipst)) == NULL) 6682 return; 6683 } 6684 6685 ipsec_conn_pol(sel, connp, ppp); 6686 CONN_DEC_REF(connp); 6687 } 6688 6689 static void 6690 ipsec_sctp_pol(ipsec_selector_t *sel, ipsec_policy_t **ppp, 6691 ip_stack_t *ipst) 6692 { 6693 conn_t *connp; 6694 uint32_t ports; 6695 uint16_t *pptr = (uint16_t *)&ports; 6696 6697 /* 6698 * Find SCP state in the following order: 6699 * 1.) Connected conns. 6700 * 2.) Listeners. 6701 * 6702 * Even though #2 will be the common case for inbound traffic, only 6703 * following this order insures correctness. 6704 */ 6705 6706 if (sel->ips_local_port == 0) 6707 return; 6708 6709 /* 6710 * 0 should be fport, 1 should be lport. SRC is the local one here. 6711 * See ipsec_construct_inverse_acquire() for details. 6712 */ 6713 pptr[0] = sel->ips_remote_port; 6714 pptr[1] = sel->ips_local_port; 6715 6716 /* 6717 * For labeled systems, there's no need to check the 6718 * label here. It's known to be good as we checked 6719 * before allowing the connection to become bound. 6720 */ 6721 if (sel->ips_isv4) { 6722 in6_addr_t src, dst; 6723 6724 IN6_IPADDR_TO_V4MAPPED(sel->ips_remote_addr_v4, &dst); 6725 IN6_IPADDR_TO_V4MAPPED(sel->ips_local_addr_v4, &src); 6726 connp = sctp_find_conn(&dst, &src, ports, ALL_ZONES, 6727 0, ipst->ips_netstack->netstack_sctp); 6728 } else { 6729 connp = sctp_find_conn(&sel->ips_remote_addr_v6, 6730 &sel->ips_local_addr_v6, ports, ALL_ZONES, 6731 0, ipst->ips_netstack->netstack_sctp); 6732 } 6733 if (connp == NULL) 6734 return; 6735 ipsec_conn_pol(sel, connp, ppp); 6736 CONN_DEC_REF(connp); 6737 } 6738 6739 /* 6740 * Fill in a query for the SPD (in "sel") using two PF_KEY address extensions. 6741 * Returns 0 or errno, and always sets *diagnostic to something appropriate 6742 * to PF_KEY. 6743 * 6744 * NOTE: For right now, this function (and ipsec_selector_t for that matter), 6745 * ignore prefix lengths in the address extension. Since we match on first- 6746 * entered policies, this shouldn't matter. Also, since we normalize prefix- 6747 * set addresses to mask out the lower bits, we should get a suitable search 6748 * key for the SPD anyway. This is the function to change if the assumption 6749 * about suitable search keys is wrong. 
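 *
 * For example (hypothetical addresses): a query carrying IPv4 TCP
 * endpoints SRC 10.1.1.1:5000 and DST 10.1.1.2:80 fills in
 * ips_local_addr_v4/ips_local_port from the SRC extension and
 * ips_remote_addr_v4/ips_remote_port from DST, with ips_isv4 set; for
 * ICMP/ICMPv6 the port fields are left zero and ips_is_icmp_inv_acq is
 * set instead.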
6750 */ 6751 static int 6752 ipsec_get_inverse_acquire_sel(ipsec_selector_t *sel, sadb_address_t *srcext, 6753 sadb_address_t *dstext, int *diagnostic) 6754 { 6755 struct sockaddr_in *src, *dst; 6756 struct sockaddr_in6 *src6, *dst6; 6757 6758 *diagnostic = 0; 6759 6760 bzero(sel, sizeof (*sel)); 6761 sel->ips_protocol = srcext->sadb_address_proto; 6762 dst = (struct sockaddr_in *)(dstext + 1); 6763 if (dst->sin_family == AF_INET6) { 6764 dst6 = (struct sockaddr_in6 *)dst; 6765 src6 = (struct sockaddr_in6 *)(srcext + 1); 6766 if (src6->sin6_family != AF_INET6) { 6767 *diagnostic = SADB_X_DIAGNOSTIC_AF_MISMATCH; 6768 return (EINVAL); 6769 } 6770 sel->ips_remote_addr_v6 = dst6->sin6_addr; 6771 sel->ips_local_addr_v6 = src6->sin6_addr; 6772 if (sel->ips_protocol == IPPROTO_ICMPV6) { 6773 sel->ips_is_icmp_inv_acq = 1; 6774 } else { 6775 sel->ips_remote_port = dst6->sin6_port; 6776 sel->ips_local_port = src6->sin6_port; 6777 } 6778 sel->ips_isv4 = B_FALSE; 6779 } else { 6780 src = (struct sockaddr_in *)(srcext + 1); 6781 if (src->sin_family != AF_INET) { 6782 *diagnostic = SADB_X_DIAGNOSTIC_AF_MISMATCH; 6783 return (EINVAL); 6784 } 6785 sel->ips_remote_addr_v4 = dst->sin_addr.s_addr; 6786 sel->ips_local_addr_v4 = src->sin_addr.s_addr; 6787 if (sel->ips_protocol == IPPROTO_ICMP) { 6788 sel->ips_is_icmp_inv_acq = 1; 6789 } else { 6790 sel->ips_remote_port = dst->sin_port; 6791 sel->ips_local_port = src->sin_port; 6792 } 6793 sel->ips_isv4 = B_TRUE; 6794 } 6795 return (0); 6796 } 6797 6798 /* 6799 * We have encapsulation. 6800 * - Lookup tun_t by address and look for an associated 6801 * tunnel policy 6802 * - If there are inner selectors 6803 * - check ITPF_P_TUNNEL and ITPF_P_ACTIVE 6804 * - Look up tunnel policy based on selectors 6805 * - Else 6806 * - Sanity check the negotation 6807 * - If appropriate, fall through to global policy 6808 */ 6809 static int 6810 ipsec_tun_pol(ipsec_selector_t *sel, ipsec_policy_t **ppp, 6811 sadb_address_t *innsrcext, sadb_address_t *inndstext, ipsec_tun_pol_t *itp, 6812 int *diagnostic) 6813 { 6814 int err; 6815 ipsec_policy_head_t *polhead; 6816 6817 *diagnostic = 0; 6818 6819 /* Check for inner selectors and act appropriately */ 6820 6821 if (innsrcext != NULL) { 6822 /* Inner selectors present */ 6823 ASSERT(inndstext != NULL); 6824 if ((itp == NULL) || 6825 (itp->itp_flags & (ITPF_P_ACTIVE | ITPF_P_TUNNEL)) != 6826 (ITPF_P_ACTIVE | ITPF_P_TUNNEL)) { 6827 /* 6828 * If inner packet selectors, we must have negotiate 6829 * tunnel and active policy. If the tunnel has 6830 * transport-mode policy set on it, or has no policy, 6831 * fail. 6832 */ 6833 return (ENOENT); 6834 } else { 6835 /* 6836 * Reset "sel" to indicate inner selectors. Pass 6837 * inner PF_KEY address extensions for this to happen. 6838 */ 6839 if ((err = ipsec_get_inverse_acquire_sel(sel, 6840 innsrcext, inndstext, diagnostic)) != 0) 6841 return (err); 6842 /* 6843 * Now look for a tunnel policy based on those inner 6844 * selectors. (Common code is below.) 6845 */ 6846 } 6847 } else { 6848 /* No inner selectors present */ 6849 if ((itp == NULL) || !(itp->itp_flags & ITPF_P_ACTIVE)) { 6850 /* 6851 * Transport mode negotiation with no tunnel policy 6852 * configured - return to indicate a global policy 6853 * check is needed. 6854 */ 6855 return (0); 6856 } else if (itp->itp_flags & ITPF_P_TUNNEL) { 6857 /* Tunnel mode set with no inner selectors. 
*/ 6858 return (ENOENT); 6859 } 6860 /* 6861 * Else, this is a tunnel policy configured with ifconfig(1m) 6862 * or "negotiate transport" with ipsecconf(1m). We have an 6863 * itp with policy set based on any match, so don't bother 6864 * changing fields in "sel". 6865 */ 6866 } 6867 6868 ASSERT(itp != NULL); 6869 polhead = itp->itp_policy; 6870 ASSERT(polhead != NULL); 6871 rw_enter(&polhead->iph_lock, RW_READER); 6872 *ppp = ipsec_find_policy_head(NULL, polhead, IPSEC_TYPE_INBOUND, sel); 6873 rw_exit(&polhead->iph_lock); 6874 6875 /* 6876 * Don't default to global if we didn't find a matching policy entry. 6877 * Instead, send ENOENT, just like if we hit a transport-mode tunnel. 6878 */ 6879 if (*ppp == NULL) 6880 return (ENOENT); 6881 6882 return (0); 6883 } 6884 6885 /* 6886 * For sctp conn_faddr is the primary address, hence this is of limited 6887 * use for sctp. 6888 */ 6889 static void 6890 ipsec_oth_pol(ipsec_selector_t *sel, ipsec_policy_t **ppp, 6891 ip_stack_t *ipst) 6892 { 6893 boolean_t isv4 = sel->ips_isv4; 6894 connf_t *connfp; 6895 conn_t *connp; 6896 6897 if (isv4) { 6898 connfp = &ipst->ips_ipcl_proto_fanout_v4[sel->ips_protocol]; 6899 } else { 6900 connfp = &ipst->ips_ipcl_proto_fanout_v6[sel->ips_protocol]; 6901 } 6902 6903 mutex_enter(&connfp->connf_lock); 6904 for (connp = connfp->connf_head; connp != NULL; 6905 connp = connp->conn_next) { 6906 if (isv4) { 6907 if ((connp->conn_laddr_v4 == INADDR_ANY || 6908 connp->conn_laddr_v4 == sel->ips_local_addr_v4) && 6909 (connp->conn_faddr_v4 == INADDR_ANY || 6910 connp->conn_faddr_v4 == sel->ips_remote_addr_v4)) 6911 break; 6912 } else { 6913 if ((IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6) || 6914 IN6_ARE_ADDR_EQUAL(&connp->conn_laddr_v6, 6915 &sel->ips_local_addr_v6)) && 6916 (IN6_IS_ADDR_UNSPECIFIED(&connp->conn_faddr_v6) || 6917 IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6, 6918 &sel->ips_remote_addr_v6))) 6919 break; 6920 } 6921 } 6922 if (connp == NULL) { 6923 mutex_exit(&connfp->connf_lock); 6924 return; 6925 } 6926 6927 CONN_INC_REF(connp); 6928 mutex_exit(&connfp->connf_lock); 6929 6930 ipsec_conn_pol(sel, connp, ppp); 6931 CONN_DEC_REF(connp); 6932 } 6933 6934 /* 6935 * Construct an inverse ACQUIRE reply based on: 6936 * 6937 * 1.) Current global policy. 6938 * 2.) An conn_t match depending on what all was passed in the extv[]. 6939 * 3.) A tunnel's policy head. 6940 * ... 6941 * N.) Other stuff TBD (e.g. identities) 6942 * 6943 * If there is an error, set sadb_msg_errno and sadb_x_msg_diagnostic 6944 * in this function so the caller can extract them where appropriately. 6945 * 6946 * The SRC address is the local one - just like an outbound ACQUIRE message. 6947 * 6948 * XXX MLS: key management supplies a label which we just reflect back up 6949 * again. clearly we need to involve the label in the rest of the checks. 
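 *
 * On success the reply is a short mblk chain, roughly:
 *
 *	[sadb_msg base]
 *	  b_cont -> [addresses (and label) from sadb_acquire_msg_common()]
 *	    b_cont -> [extended proposal from sadb_acquire_extended_prop()]
 *
 * with sadb_msg_len recomputed over the whole chain.  The KEYSOCK_OUT
 * mblk that sadb_acquire_msg_base() normally prepends is freed here,
 * since the caller constructs its own.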
6950 */ 6951 mblk_t * 6952 ipsec_construct_inverse_acquire(sadb_msg_t *samsg, sadb_ext_t *extv[], 6953 netstack_t *ns) 6954 { 6955 int err; 6956 int diagnostic; 6957 sadb_address_t *srcext = (sadb_address_t *)extv[SADB_EXT_ADDRESS_SRC], 6958 *dstext = (sadb_address_t *)extv[SADB_EXT_ADDRESS_DST], 6959 *innsrcext = (sadb_address_t *)extv[SADB_X_EXT_ADDRESS_INNER_SRC], 6960 *inndstext = (sadb_address_t *)extv[SADB_X_EXT_ADDRESS_INNER_DST]; 6961 sadb_sens_t *sens = (sadb_sens_t *)extv[SADB_EXT_SENSITIVITY]; 6962 struct sockaddr_in6 *src, *dst; 6963 struct sockaddr_in6 *isrc, *idst; 6964 ipsec_tun_pol_t *itp = NULL; 6965 ipsec_policy_t *pp = NULL; 6966 ipsec_selector_t sel, isel; 6967 mblk_t *retmp = NULL; 6968 ip_stack_t *ipst = ns->netstack_ip; 6969 6970 6971 /* Normalize addresses */ 6972 if (sadb_addrcheck(NULL, (mblk_t *)samsg, (sadb_ext_t *)srcext, 0, ns) 6973 == KS_IN_ADDR_UNKNOWN) { 6974 err = EINVAL; 6975 diagnostic = SADB_X_DIAGNOSTIC_BAD_SRC; 6976 goto bail; 6977 } 6978 src = (struct sockaddr_in6 *)(srcext + 1); 6979 if (sadb_addrcheck(NULL, (mblk_t *)samsg, (sadb_ext_t *)dstext, 0, ns) 6980 == KS_IN_ADDR_UNKNOWN) { 6981 err = EINVAL; 6982 diagnostic = SADB_X_DIAGNOSTIC_BAD_DST; 6983 goto bail; 6984 } 6985 dst = (struct sockaddr_in6 *)(dstext + 1); 6986 if (src->sin6_family != dst->sin6_family) { 6987 err = EINVAL; 6988 diagnostic = SADB_X_DIAGNOSTIC_AF_MISMATCH; 6989 goto bail; 6990 } 6991 6992 /* Check for tunnel mode and act appropriately */ 6993 if (innsrcext != NULL) { 6994 if (inndstext == NULL) { 6995 err = EINVAL; 6996 diagnostic = SADB_X_DIAGNOSTIC_MISSING_INNER_DST; 6997 goto bail; 6998 } 6999 if (sadb_addrcheck(NULL, (mblk_t *)samsg, 7000 (sadb_ext_t *)innsrcext, 0, ns) == KS_IN_ADDR_UNKNOWN) { 7001 err = EINVAL; 7002 diagnostic = SADB_X_DIAGNOSTIC_MALFORMED_INNER_SRC; 7003 goto bail; 7004 } 7005 isrc = (struct sockaddr_in6 *)(innsrcext + 1); 7006 if (sadb_addrcheck(NULL, (mblk_t *)samsg, 7007 (sadb_ext_t *)inndstext, 0, ns) == KS_IN_ADDR_UNKNOWN) { 7008 err = EINVAL; 7009 diagnostic = SADB_X_DIAGNOSTIC_MALFORMED_INNER_DST; 7010 goto bail; 7011 } 7012 idst = (struct sockaddr_in6 *)(inndstext + 1); 7013 if (isrc->sin6_family != idst->sin6_family) { 7014 err = EINVAL; 7015 diagnostic = SADB_X_DIAGNOSTIC_INNER_AF_MISMATCH; 7016 goto bail; 7017 } 7018 if (isrc->sin6_family != AF_INET && 7019 isrc->sin6_family != AF_INET6) { 7020 err = EINVAL; 7021 diagnostic = SADB_X_DIAGNOSTIC_BAD_INNER_SRC_AF; 7022 goto bail; 7023 } 7024 } else if (inndstext != NULL) { 7025 err = EINVAL; 7026 diagnostic = SADB_X_DIAGNOSTIC_MISSING_INNER_SRC; 7027 goto bail; 7028 } 7029 7030 /* Get selectors first, based on outer addresses */ 7031 err = ipsec_get_inverse_acquire_sel(&sel, srcext, dstext, &diagnostic); 7032 if (err != 0) 7033 goto bail; 7034 7035 /* Check for tunnel mode mismatches. */ 7036 if (innsrcext != NULL && 7037 ((isrc->sin6_family == AF_INET && 7038 sel.ips_protocol != IPPROTO_ENCAP && sel.ips_protocol != 0) || 7039 (isrc->sin6_family == AF_INET6 && 7040 sel.ips_protocol != IPPROTO_IPV6 && sel.ips_protocol != 0))) { 7041 err = EPROTOTYPE; 7042 goto bail; 7043 } 7044 7045 /* 7046 * Okay, we have the addresses and other selector information. 7047 * Let's first find a conn... 
7048  */
7049 	pp = NULL;
7050 	switch (sel.ips_protocol) {
7051 	case IPPROTO_TCP:
7052 		ipsec_tcp_pol(&sel, &pp, ipst);
7053 		break;
7054 	case IPPROTO_UDP:
7055 		ipsec_udp_pol(&sel, &pp, ipst);
7056 		break;
7057 	case IPPROTO_SCTP:
7058 		ipsec_sctp_pol(&sel, &pp, ipst);
7059 		break;
7060 	case IPPROTO_ENCAP:
7061 	case IPPROTO_IPV6:
7062 		/*
7063 		 * Assume sel.ips_remote_addr_* has the right address at
7064 		 * that exact position.
7065 		 */
7066 		itp = itp_get_byaddr((uint32_t *)(&sel.ips_local_addr_v6),
7067 		    (uint32_t *)(&sel.ips_remote_addr_v6), src->sin6_family,
7068 		    ipst);
7069 
7070 		if (innsrcext == NULL) {
7071 			/*
7072 			 * Transport-mode tunnel, make sure we fake out isel
7073 			 * to contain something based on the outer protocol.
7074 			 */
7075 			bzero(&isel, sizeof (isel));
7076 			isel.ips_isv4 = (sel.ips_protocol == IPPROTO_ENCAP);
7077 		} /* Else isel is initialized by ipsec_tun_pol(). */
7078 		err = ipsec_tun_pol(&isel, &pp, innsrcext, inndstext, itp,
7079 		    &diagnostic);
7080 		/*
7081 		 * NOTE: isel isn't used for now, but in RFC 430x IPsec, it
7082 		 * may be.
7083 		 */
7084 		if (err != 0)
7085 			goto bail;
7086 		break;
7087 	default:
7088 		ipsec_oth_pol(&sel, &pp, ipst);
7089 		break;
7090 	}
7091 
7092 	/*
7093 	 * If we didn't find a matching conn_t or other policy head, take a
7094 	 * look in the global policy.
7095 	 */
7096 	if (pp == NULL) {
7097 		pp = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, NULL, &sel, ns);
7098 		if (pp == NULL) {
7099 			/* There's no global policy. */
7100 			err = ENOENT;
7101 			diagnostic = 0;
7102 			goto bail;
7103 		}
7104 	}
7105 
7106 	ASSERT(pp != NULL);
7107 	retmp = sadb_acquire_msg_base(0, 0, samsg->sadb_msg_seq,
7108 	    samsg->sadb_msg_pid);
7109 	if (retmp != NULL) {
7110 		/* Remove KEYSOCK_OUT, because caller constructs it instead. */
7111 		mblk_t *kso = retmp;
7112 
7113 		retmp = retmp->b_cont;
7114 		freeb(kso);
7115 		/* Append addresses... */
7116 		retmp->b_cont = sadb_acquire_msg_common(&sel, pp, NULL,
7117 		    (itp != NULL && (itp->itp_flags & ITPF_P_TUNNEL)), NULL,
7118 		    sens);
7119 		/* ... and the policy result, but only if the addresses fit. */
7120 		if (retmp->b_cont != NULL) {
7121 			retmp->b_cont->b_cont =
7122 			    sadb_acquire_extended_prop(pp->ipsp_act, ns);
7123 		}
7124 		if (retmp->b_cont == NULL ||
7125 		    retmp->b_cont->b_cont == NULL) {
7126 			freemsg(retmp);
7127 			retmp = NULL;
7128 		} else {
7129 			((sadb_msg_t *)retmp->b_rptr)->sadb_msg_len =
7130 			    SADB_8TO64(msgsize(retmp));
7131 		}
7132 	}
7133 
7134 	if (pp != NULL) {
7135 		IPPOL_REFRELE(pp);
7136 	}
7137 	ASSERT(err == 0 && diagnostic == 0);
7138 	if (retmp == NULL)
7139 		err = ENOMEM;
7140 bail:
7141 	if (itp != NULL) {
7142 		ITP_REFRELE(itp, ns);
7143 	}
7144 	samsg->sadb_msg_errno = (uint8_t)err;
7145 	samsg->sadb_x_msg_diagnostic = (uint16_t)diagnostic;
7146 	return (retmp);
7147 }
7148 
7149 /*
7150 * ipsa_lpkt is a one-element queue, only manipulated by the next two
7151 * functions. They have to hold the ipsa_lock because of potential races
7152 * between key management using SADB_UPDATE, and inbound packets that may
7153 * queue up on the larval SA (hence the 'l' in "lpkt").
7154 */
7155 
7156 /*
7157 * sadb_set_lpkt:
7158 *
7159 * Returns the passed-in packet if the SA is no longer larval.
7160 *
7161 * Returns NULL if the SA is larval, and needs to be swapped into the SA for
7162 * processing after an SADB_UPDATE.
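 *
 * Usage sketch (inbound AH/ESP path, illustrative):
 *
 *	mp = sadb_set_lpkt(assoc, mp, ira);
 *	if (mp == NULL)
 *		return;		(packet parked on the larval SA)
 *
 * A non-NULL return hands the packet straight back because the SA matured
 * in the meantime; a packet already parked on the larval SA is dropped as
 * "inlarval_replace" when a newer one takes its place.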
7163 */ 7164 mblk_t * 7165 sadb_set_lpkt(ipsa_t *ipsa, mblk_t *npkt, ip_recv_attr_t *ira) 7166 { 7167 mblk_t *opkt; 7168 7169 mutex_enter(&ipsa->ipsa_lock); 7170 opkt = ipsa->ipsa_lpkt; 7171 if (ipsa->ipsa_state == IPSA_STATE_LARVAL) { 7172 /* 7173 * Consume npkt and place it in the LARVAL SA's inbound 7174 * packet slot. 7175 */ 7176 mblk_t *attrmp; 7177 7178 attrmp = ip_recv_attr_to_mblk(ira); 7179 if (attrmp == NULL) { 7180 ill_t *ill = ira->ira_ill; 7181 7182 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 7183 ip_drop_input("ipIfStatsInDiscards", npkt, ill); 7184 freemsg(npkt); 7185 opkt = NULL; 7186 } else { 7187 ASSERT(attrmp->b_cont == NULL); 7188 attrmp->b_cont = npkt; 7189 ipsa->ipsa_lpkt = attrmp; 7190 } 7191 npkt = NULL; 7192 } else { 7193 /* 7194 * If not larval, we lost the race. NOTE: ipsa_lpkt may still 7195 * have been non-NULL in the non-larval case, because of 7196 * inbound packets arriving prior to sadb_common_add() 7197 * transferring the SA completely out of larval state, but 7198 * after lpkt was grabbed by the AH/ESP-specific add routines. 7199 * We should clear the old ipsa_lpkt in this case to make sure 7200 * that it doesn't linger on the now-MATURE IPsec SA, or get 7201 * picked up as an out-of-order packet. 7202 */ 7203 ipsa->ipsa_lpkt = NULL; 7204 } 7205 mutex_exit(&ipsa->ipsa_lock); 7206 7207 if (opkt != NULL) { 7208 ipsec_stack_t *ipss; 7209 7210 ipss = ira->ira_ill->ill_ipst->ips_netstack->netstack_ipsec; 7211 opkt = ip_recv_attr_free_mblk(opkt); 7212 ip_drop_packet(opkt, B_TRUE, ira->ira_ill, 7213 DROPPER(ipss, ipds_sadb_inlarval_replace), 7214 &ipss->ipsec_sadb_dropper); 7215 } 7216 return (npkt); 7217 } 7218 7219 /* 7220 * sadb_clear_lpkt: Atomically clear ipsa->ipsa_lpkt and return the 7221 * previous value. 7222 */ 7223 mblk_t * 7224 sadb_clear_lpkt(ipsa_t *ipsa) 7225 { 7226 mblk_t *opkt; 7227 7228 mutex_enter(&ipsa->ipsa_lock); 7229 opkt = ipsa->ipsa_lpkt; 7230 ipsa->ipsa_lpkt = NULL; 7231 mutex_exit(&ipsa->ipsa_lock); 7232 return (opkt); 7233 } 7234 7235 /* 7236 * Buffer a packet that's in IDLE state as set by Solaris Clustering. 7237 */ 7238 void 7239 sadb_buf_pkt(ipsa_t *ipsa, mblk_t *bpkt, ip_recv_attr_t *ira) 7240 { 7241 netstack_t *ns = ira->ira_ill->ill_ipst->ips_netstack; 7242 ipsec_stack_t *ipss = ns->netstack_ipsec; 7243 in6_addr_t *srcaddr = (in6_addr_t *)(&ipsa->ipsa_srcaddr); 7244 in6_addr_t *dstaddr = (in6_addr_t *)(&ipsa->ipsa_dstaddr); 7245 mblk_t *mp; 7246 7247 ASSERT(ipsa->ipsa_state == IPSA_STATE_IDLE); 7248 7249 if (cl_inet_idlesa == NULL) { 7250 ip_drop_packet(bpkt, B_TRUE, ira->ira_ill, 7251 DROPPER(ipss, ipds_sadb_inidle_overflow), 7252 &ipss->ipsec_sadb_dropper); 7253 return; 7254 } 7255 7256 cl_inet_idlesa(ns->netstack_stackid, 7257 (ipsa->ipsa_type == SADB_SATYPE_AH) ? 
IPPROTO_AH : IPPROTO_ESP, 7258 ipsa->ipsa_spi, ipsa->ipsa_addrfam, *srcaddr, *dstaddr, NULL); 7259 7260 mp = ip_recv_attr_to_mblk(ira); 7261 if (mp == NULL) { 7262 ip_drop_packet(bpkt, B_TRUE, ira->ira_ill, 7263 DROPPER(ipss, ipds_sadb_inidle_overflow), 7264 &ipss->ipsec_sadb_dropper); 7265 return; 7266 } 7267 linkb(mp, bpkt); 7268 7269 mutex_enter(&ipsa->ipsa_lock); 7270 ipsa->ipsa_mblkcnt++; 7271 if (ipsa->ipsa_bpkt_head == NULL) { 7272 ipsa->ipsa_bpkt_head = ipsa->ipsa_bpkt_tail = bpkt; 7273 } else { 7274 ipsa->ipsa_bpkt_tail->b_next = bpkt; 7275 ipsa->ipsa_bpkt_tail = bpkt; 7276 if (ipsa->ipsa_mblkcnt > SADB_MAX_IDLEPKTS) { 7277 mblk_t *tmp; 7278 7279 tmp = ipsa->ipsa_bpkt_head; 7280 ipsa->ipsa_bpkt_head = ipsa->ipsa_bpkt_head->b_next; 7281 tmp = ip_recv_attr_free_mblk(tmp); 7282 ip_drop_packet(tmp, B_TRUE, NULL, 7283 DROPPER(ipss, ipds_sadb_inidle_overflow), 7284 &ipss->ipsec_sadb_dropper); 7285 ipsa->ipsa_mblkcnt --; 7286 } 7287 } 7288 mutex_exit(&ipsa->ipsa_lock); 7289 } 7290 7291 /* 7292 * Stub function that taskq_dispatch() invokes to take the mblk (in arg) 7293 * and put into STREAMS again. 7294 */ 7295 void 7296 sadb_clear_buf_pkt(void *ipkt) 7297 { 7298 mblk_t *tmp, *buf_pkt; 7299 ip_recv_attr_t iras; 7300 7301 buf_pkt = (mblk_t *)ipkt; 7302 7303 while (buf_pkt != NULL) { 7304 mblk_t *data_mp; 7305 7306 tmp = buf_pkt->b_next; 7307 buf_pkt->b_next = NULL; 7308 7309 data_mp = buf_pkt->b_cont; 7310 buf_pkt->b_cont = NULL; 7311 if (!ip_recv_attr_from_mblk(buf_pkt, &iras)) { 7312 /* The ill or ip_stack_t disappeared on us. */ 7313 ip_drop_input("ip_recv_attr_from_mblk", data_mp, NULL); 7314 freemsg(data_mp); 7315 } else { 7316 ip_input_post_ipsec(data_mp, &iras); 7317 } 7318 ira_cleanup(&iras, B_TRUE); 7319 buf_pkt = tmp; 7320 } 7321 } 7322 /* 7323 * Walker callback used by sadb_alg_update() to free/create crypto 7324 * context template when a crypto software provider is removed or 7325 * added. 7326 */ 7327 7328 struct sadb_update_alg_state { 7329 ipsec_algtype_t alg_type; 7330 uint8_t alg_id; 7331 boolean_t is_added; 7332 boolean_t async_auth; 7333 boolean_t async_encr; 7334 }; 7335 7336 static void 7337 sadb_alg_update_cb(isaf_t *head, ipsa_t *entry, void *cookie) 7338 { 7339 struct sadb_update_alg_state *update_state = 7340 (struct sadb_update_alg_state *)cookie; 7341 crypto_ctx_template_t *ctx_tmpl = NULL; 7342 7343 ASSERT(MUTEX_HELD(&head->isaf_lock)); 7344 7345 if (entry->ipsa_state == IPSA_STATE_LARVAL) 7346 return; 7347 7348 mutex_enter(&entry->ipsa_lock); 7349 7350 if ((entry->ipsa_encr_alg != SADB_EALG_NONE && entry->ipsa_encr_alg != 7351 SADB_EALG_NULL && update_state->async_encr) || 7352 (entry->ipsa_auth_alg != SADB_AALG_NONE && 7353 update_state->async_auth)) { 7354 entry->ipsa_flags |= IPSA_F_ASYNC; 7355 } else { 7356 entry->ipsa_flags &= ~IPSA_F_ASYNC; 7357 } 7358 7359 switch (update_state->alg_type) { 7360 case IPSEC_ALG_AUTH: 7361 if (entry->ipsa_auth_alg == update_state->alg_id) 7362 ctx_tmpl = &entry->ipsa_authtmpl; 7363 break; 7364 case IPSEC_ALG_ENCR: 7365 if (entry->ipsa_encr_alg == update_state->alg_id) 7366 ctx_tmpl = &entry->ipsa_encrtmpl; 7367 break; 7368 default: 7369 ctx_tmpl = NULL; 7370 } 7371 7372 if (ctx_tmpl == NULL) { 7373 mutex_exit(&entry->ipsa_lock); 7374 return; 7375 } 7376 7377 /* 7378 * The context template of the SA may be affected by the change 7379 * of crypto provider. 
7380 */ 7381 if (update_state->is_added) { 7382 /* create the context template if not already done */ 7383 if (*ctx_tmpl == NULL) { 7384 (void) ipsec_create_ctx_tmpl(entry, 7385 update_state->alg_type); 7386 } 7387 } else { 7388 /* 7389 * The crypto provider was removed. If the context template 7390 * exists but it is no longer valid, free it. 7391 */ 7392 if (*ctx_tmpl != NULL) 7393 ipsec_destroy_ctx_tmpl(entry, update_state->alg_type); 7394 } 7395 7396 mutex_exit(&entry->ipsa_lock); 7397 } 7398 7399 /* 7400 * Invoked by IP when an software crypto provider has been updated, or if 7401 * the crypto synchrony changes. The type and id of the corresponding 7402 * algorithm is passed as argument. The type is set to ALL in the case of 7403 * a synchrony change. 7404 * 7405 * is_added is B_TRUE if the provider was added, B_FALSE if it was 7406 * removed. The function updates the SADB and free/creates the 7407 * context templates associated with SAs if needed. 7408 */ 7409 7410 #define SADB_ALG_UPDATE_WALK(sadb, table) \ 7411 sadb_walker((sadb).table, (sadb).sdb_hashsize, sadb_alg_update_cb, \ 7412 &update_state) 7413 7414 void 7415 sadb_alg_update(ipsec_algtype_t alg_type, uint8_t alg_id, boolean_t is_added, 7416 netstack_t *ns) 7417 { 7418 struct sadb_update_alg_state update_state; 7419 ipsecah_stack_t *ahstack = ns->netstack_ipsecah; 7420 ipsecesp_stack_t *espstack = ns->netstack_ipsecesp; 7421 ipsec_stack_t *ipss = ns->netstack_ipsec; 7422 7423 update_state.alg_type = alg_type; 7424 update_state.alg_id = alg_id; 7425 update_state.is_added = is_added; 7426 update_state.async_auth = ipss->ipsec_algs_exec_mode[IPSEC_ALG_AUTH] == 7427 IPSEC_ALGS_EXEC_ASYNC; 7428 update_state.async_encr = ipss->ipsec_algs_exec_mode[IPSEC_ALG_ENCR] == 7429 IPSEC_ALGS_EXEC_ASYNC; 7430 7431 if (alg_type == IPSEC_ALG_AUTH || alg_type == IPSEC_ALG_ALL) { 7432 /* walk the AH tables only for auth. algorithm changes */ 7433 SADB_ALG_UPDATE_WALK(ahstack->ah_sadb.s_v4, sdb_of); 7434 SADB_ALG_UPDATE_WALK(ahstack->ah_sadb.s_v4, sdb_if); 7435 SADB_ALG_UPDATE_WALK(ahstack->ah_sadb.s_v6, sdb_of); 7436 SADB_ALG_UPDATE_WALK(ahstack->ah_sadb.s_v6, sdb_if); 7437 } 7438 7439 /* walk the ESP tables */ 7440 SADB_ALG_UPDATE_WALK(espstack->esp_sadb.s_v4, sdb_of); 7441 SADB_ALG_UPDATE_WALK(espstack->esp_sadb.s_v4, sdb_if); 7442 SADB_ALG_UPDATE_WALK(espstack->esp_sadb.s_v6, sdb_of); 7443 SADB_ALG_UPDATE_WALK(espstack->esp_sadb.s_v6, sdb_if); 7444 } 7445 7446 /* 7447 * Creates a context template for the specified SA. This function 7448 * is called when an SA is created and when a context template needs 7449 * to be created due to a change of software provider. 
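 *
 * Usage sketch (not a specific call site): a caller that has just
 * installed key material would do, roughly,
 *
 *	rw_enter(&ipss->ipsec_alg_lock, RW_READER);
 *	mutex_enter(&sa->ipsa_lock);
 *	(void) ipsec_create_ctx_tmpl(sa, IPSEC_ALG_AUTH);
 *	(void) ipsec_create_ctx_tmpl(sa, IPSEC_ALG_ENCR);
 *	mutex_exit(&sa->ipsa_lock);
 *	rw_exit(&ipss->ipsec_alg_lock);
 *
 * which is why the ASSERTs below insist on both locks being held.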
7450 */ 7451 int 7452 ipsec_create_ctx_tmpl(ipsa_t *sa, ipsec_algtype_t alg_type) 7453 { 7454 ipsec_alginfo_t *alg; 7455 crypto_mechanism_t mech; 7456 crypto_key_t *key; 7457 crypto_ctx_template_t *sa_tmpl; 7458 int rv; 7459 ipsec_stack_t *ipss = sa->ipsa_netstack->netstack_ipsec; 7460 7461 ASSERT(RW_READ_HELD(&ipss->ipsec_alg_lock)); 7462 ASSERT(MUTEX_HELD(&sa->ipsa_lock)); 7463 7464 /* get pointers to the algorithm info, context template, and key */ 7465 switch (alg_type) { 7466 case IPSEC_ALG_AUTH: 7467 key = &sa->ipsa_kcfauthkey; 7468 sa_tmpl = &sa->ipsa_authtmpl; 7469 alg = ipss->ipsec_alglists[alg_type][sa->ipsa_auth_alg]; 7470 break; 7471 case IPSEC_ALG_ENCR: 7472 key = &sa->ipsa_kcfencrkey; 7473 sa_tmpl = &sa->ipsa_encrtmpl; 7474 alg = ipss->ipsec_alglists[alg_type][sa->ipsa_encr_alg]; 7475 break; 7476 default: 7477 alg = NULL; 7478 } 7479 7480 if (alg == NULL || !ALG_VALID(alg)) 7481 return (EINVAL); 7482 7483 /* initialize the mech info structure for the framework */ 7484 ASSERT(alg->alg_mech_type != CRYPTO_MECHANISM_INVALID); 7485 mech.cm_type = alg->alg_mech_type; 7486 mech.cm_param = NULL; 7487 mech.cm_param_len = 0; 7488 7489 /* create a new context template */ 7490 rv = crypto_create_ctx_template(&mech, key, sa_tmpl, KM_NOSLEEP); 7491 7492 /* 7493 * CRYPTO_MECH_NOT_SUPPORTED can be returned if only hardware 7494 * providers are available for that mechanism. In that case 7495 * we don't fail, and will generate the context template from 7496 * the framework callback when a software provider for that 7497 * mechanism registers. 7498 * 7499 * The context template is assigned the special value 7500 * IPSEC_CTX_TMPL_ALLOC if the allocation failed due to a 7501 * lack of memory. No attempt will be made to use 7502 * the context template if it is set to this value. 7503 */ 7504 if (rv == CRYPTO_HOST_MEMORY) { 7505 *sa_tmpl = IPSEC_CTX_TMPL_ALLOC; 7506 } else if (rv != CRYPTO_SUCCESS) { 7507 *sa_tmpl = NULL; 7508 if (rv != CRYPTO_MECH_NOT_SUPPORTED) 7509 return (EINVAL); 7510 } 7511 7512 return (0); 7513 } 7514 7515 /* 7516 * Destroy the context template of the specified algorithm type 7517 * of the specified SA. Must be called while holding the SA lock. 7518 */ 7519 void 7520 ipsec_destroy_ctx_tmpl(ipsa_t *sa, ipsec_algtype_t alg_type) 7521 { 7522 ASSERT(MUTEX_HELD(&sa->ipsa_lock)); 7523 7524 if (alg_type == IPSEC_ALG_AUTH) { 7525 if (sa->ipsa_authtmpl == IPSEC_CTX_TMPL_ALLOC) 7526 sa->ipsa_authtmpl = NULL; 7527 else if (sa->ipsa_authtmpl != NULL) { 7528 crypto_destroy_ctx_template(sa->ipsa_authtmpl); 7529 sa->ipsa_authtmpl = NULL; 7530 } 7531 } else { 7532 ASSERT(alg_type == IPSEC_ALG_ENCR); 7533 if (sa->ipsa_encrtmpl == IPSEC_CTX_TMPL_ALLOC) 7534 sa->ipsa_encrtmpl = NULL; 7535 else if (sa->ipsa_encrtmpl != NULL) { 7536 crypto_destroy_ctx_template(sa->ipsa_encrtmpl); 7537 sa->ipsa_encrtmpl = NULL; 7538 } 7539 } 7540 } 7541 7542 /* 7543 * Use the kernel crypto framework to check the validity of a key received 7544 * via keysock. Returns 0 if the key is OK, -1 otherwise. 
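* When the check fails, *diag is set to the matching SADB_X_DIAGNOSTIC_* code where one applies (bad algorithm, bad key length, or weak key) so the caller can report it via PF_KEY.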
7545 */ 7546 int 7547 ipsec_check_key(crypto_mech_type_t mech_type, sadb_key_t *sadb_key, 7548 boolean_t is_auth, int *diag) 7549 { 7550 crypto_mechanism_t mech; 7551 crypto_key_t crypto_key; 7552 int crypto_rc; 7553 7554 mech.cm_type = mech_type; 7555 mech.cm_param = NULL; 7556 mech.cm_param_len = 0; 7557 7558 crypto_key.ck_format = CRYPTO_KEY_RAW; 7559 crypto_key.ck_data = sadb_key + 1; 7560 crypto_key.ck_length = sadb_key->sadb_key_bits; 7561 7562 crypto_rc = crypto_key_check(&mech, &crypto_key); 7563 7564 switch (crypto_rc) { 7565 case CRYPTO_SUCCESS: 7566 return (0); 7567 case CRYPTO_MECHANISM_INVALID: 7568 case CRYPTO_MECH_NOT_SUPPORTED: 7569 *diag = is_auth ? SADB_X_DIAGNOSTIC_BAD_AALG : 7570 SADB_X_DIAGNOSTIC_BAD_EALG; 7571 break; 7572 case CRYPTO_KEY_SIZE_RANGE: 7573 *diag = is_auth ? SADB_X_DIAGNOSTIC_BAD_AKEYBITS : 7574 SADB_X_DIAGNOSTIC_BAD_EKEYBITS; 7575 break; 7576 case CRYPTO_WEAK_KEY: 7577 *diag = is_auth ? SADB_X_DIAGNOSTIC_WEAK_AKEY : 7578 SADB_X_DIAGNOSTIC_WEAK_EKEY; 7579 break; 7580 } 7581 7582 return (-1); 7583 } 7584 7585 /* 7586 * Whack options in the outer IP header when ipsec changes the outer label 7587 * 7588 * This is inelegant and really could use refactoring. 7589 */ 7590 mblk_t * 7591 sadb_whack_label_v4(mblk_t *mp, ipsa_t *assoc, kstat_named_t *counter, 7592 ipdropper_t *dropper) 7593 { 7594 int delta; 7595 int plen; 7596 dblk_t *db; 7597 int hlen; 7598 uint8_t *opt_storage = assoc->ipsa_opt_storage; 7599 ipha_t *ipha = (ipha_t *)mp->b_rptr; 7600 7601 plen = ntohs(ipha->ipha_length); 7602 7603 delta = tsol_remove_secopt(ipha, MBLKL(mp)); 7604 mp->b_wptr += delta; 7605 plen += delta; 7606 7607 /* XXX XXX code copied from tsol_check_label */ 7608 7609 /* Make sure we have room for the worst-case addition */ 7610 hlen = IPH_HDR_LENGTH(ipha) + opt_storage[IPOPT_OLEN]; 7611 hlen = (hlen + 3) & ~3; 7612 if (hlen > IP_MAX_HDR_LENGTH) 7613 hlen = IP_MAX_HDR_LENGTH; 7614 hlen -= IPH_HDR_LENGTH(ipha); 7615 7616 db = mp->b_datap; 7617 if ((db->db_ref != 1) || (mp->b_wptr + hlen > db->db_lim)) { 7618 int copylen; 7619 mblk_t *new_mp; 7620 7621 /* allocate enough to be meaningful, but not *too* much */ 7622 copylen = MBLKL(mp); 7623 if (copylen > 256) 7624 copylen = 256; 7625 new_mp = allocb_tmpl(hlen + copylen + 7626 (mp->b_rptr - mp->b_datap->db_base), mp); 7627 7628 if (new_mp == NULL) { 7629 ip_drop_packet(mp, B_FALSE, NULL, counter, dropper); 7630 return (NULL); 7631 } 7632 7633 /* keep the bias */ 7634 new_mp->b_rptr += mp->b_rptr - mp->b_datap->db_base; 7635 new_mp->b_wptr = new_mp->b_rptr + copylen; 7636 bcopy(mp->b_rptr, new_mp->b_rptr, copylen); 7637 new_mp->b_cont = mp; 7638 if ((mp->b_rptr += copylen) >= mp->b_wptr) { 7639 new_mp->b_cont = mp->b_cont; 7640 freeb(mp); 7641 } 7642 mp = new_mp; 7643 ipha = (ipha_t *)mp->b_rptr; 7644 } 7645 7646 delta = tsol_prepend_option(assoc->ipsa_opt_storage, ipha, MBLKL(mp)); 7647 7648 ASSERT(delta != -1); 7649 7650 plen += delta; 7651 mp->b_wptr += delta; 7652 7653 /* 7654 * Paranoia 7655 */ 7656 db = mp->b_datap; 7657 7658 ASSERT3P(mp->b_wptr, <=, db->db_lim); 7659 ASSERT3P(mp->b_rptr, <=, db->db_lim); 7660 7661 ASSERT3P(mp->b_wptr, >=, db->db_base); 7662 ASSERT3P(mp->b_rptr, >=, db->db_base); 7663 /* End paranoia */ 7664 7665 ipha->ipha_length = htons(plen); 7666 7667 return (mp); 7668 } 7669 7670 mblk_t * 7671 sadb_whack_label_v6(mblk_t *mp, ipsa_t *assoc, kstat_named_t *counter, 7672 ipdropper_t *dropper) 7673 { 7674 int delta; 7675 int plen; 7676 dblk_t *db; 7677 int hlen; 7678 uint8_t *opt_storage = 
assoc->ipsa_opt_storage; 7679 uint_t sec_opt_len; /* label option length not including type, len */ 7680 ip6_t *ip6h = (ip6_t *)mp->b_rptr; 7681 7682 plen = ntohs(ip6h->ip6_plen); 7683 7684 delta = tsol_remove_secopt_v6(ip6h, MBLKL(mp)); 7685 mp->b_wptr += delta; 7686 plen += delta; 7687 7688 /* XXX XXX code copied from tsol_check_label_v6 */ 7689 /* 7690 * Make sure we have room for the worst-case addition. Add 2 bytes for 7691 * the hop-by-hop ext header's next header and length fields. Add 7692 * another 2 bytes for the label option type, len and then round 7693 * up to the next 8-byte multiple. 7694 */ 7695 sec_opt_len = opt_storage[1]; 7696 7697 db = mp->b_datap; 7698 hlen = (4 + sec_opt_len + 7) & ~7; 7699 7700 if ((db->db_ref != 1) || (mp->b_wptr + hlen > db->db_lim)) { 7701 int copylen; 7702 mblk_t *new_mp; 7703 uint16_t hdr_len; 7704 7705 hdr_len = ip_hdr_length_v6(mp, ip6h); 7706 /* 7707 * Allocate enough to be meaningful, but not *too* much. 7708 * Also all the IPv6 extension headers must be in the same mblk 7709 */ 7710 copylen = MBLKL(mp); 7711 if (copylen > 256) 7712 copylen = 256; 7713 if (copylen < hdr_len) 7714 copylen = hdr_len; 7715 new_mp = allocb_tmpl(hlen + copylen + 7716 (mp->b_rptr - mp->b_datap->db_base), mp); 7717 if (new_mp == NULL) { 7718 ip_drop_packet(mp, B_FALSE, NULL, counter, dropper); 7719 return (NULL); 7720 } 7721 7722 /* keep the bias */ 7723 new_mp->b_rptr += mp->b_rptr - mp->b_datap->db_base; 7724 new_mp->b_wptr = new_mp->b_rptr + copylen; 7725 bcopy(mp->b_rptr, new_mp->b_rptr, copylen); 7726 new_mp->b_cont = mp; 7727 if ((mp->b_rptr += copylen) >= mp->b_wptr) { 7728 new_mp->b_cont = mp->b_cont; 7729 freeb(mp); 7730 } 7731 mp = new_mp; 7732 ip6h = (ip6_t *)mp->b_rptr; 7733 } 7734 7735 delta = tsol_prepend_option_v6(assoc->ipsa_opt_storage, 7736 ip6h, MBLKL(mp)); 7737 7738 ASSERT(delta != -1); 7739 7740 plen += delta; 7741 mp->b_wptr += delta; 7742 7743 /* 7744 * Paranoia 7745 */ 7746 db = mp->b_datap; 7747 7748 ASSERT3P(mp->b_wptr, <=, db->db_lim); 7749 ASSERT3P(mp->b_rptr, <=, db->db_lim); 7750 7751 ASSERT3P(mp->b_wptr, >=, db->db_base); 7752 ASSERT3P(mp->b_rptr, >=, db->db_base); 7753 /* End paranoia */ 7754 7755 ip6h->ip6_plen = htons(plen); 7756 7757 return (mp); 7758 } 7759 7760 /* Whack the labels and update ip_xmit_attr_t as needed */ 7761 mblk_t * 7762 sadb_whack_label(mblk_t *mp, ipsa_t *assoc, ip_xmit_attr_t *ixa, 7763 kstat_named_t *counter, ipdropper_t *dropper) 7764 { 7765 int adjust; 7766 int iplen; 7767 7768 if (ixa->ixa_flags & IXAF_IS_IPV4) { 7769 ipha_t *ipha = (ipha_t *)mp->b_rptr; 7770 7771 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION); 7772 iplen = ntohs(ipha->ipha_length); 7773 mp = sadb_whack_label_v4(mp, assoc, counter, dropper); 7774 if (mp == NULL) 7775 return (NULL); 7776 7777 ipha = (ipha_t *)mp->b_rptr; 7778 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION); 7779 adjust = (int)ntohs(ipha->ipha_length) - iplen; 7780 } else { 7781 ip6_t *ip6h = (ip6_t *)mp->b_rptr; 7782 7783 ASSERT(IPH_HDR_VERSION(ip6h) == IPV6_VERSION); 7784 iplen = ntohs(ip6h->ip6_plen); 7785 mp = sadb_whack_label_v6(mp, assoc, counter, dropper); 7786 if (mp == NULL) 7787 return (NULL); 7788 7789 ip6h = (ip6_t *)mp->b_rptr; 7790 ASSERT(IPH_HDR_VERSION(ip6h) == IPV6_VERSION); 7791 adjust = (int)ntohs(ip6h->ip6_plen) - iplen; 7792 } 7793 ixa->ixa_pktlen += adjust; 7794 ixa->ixa_ip_hdr_length += adjust; 7795 return (mp); 7796 } 7797 7798 /* 7799 * If this is an outgoing SA then add some fuzz to the 7800 * SOFT EXPIRE time. 
The reason for this is to stop 7801 * peers trying to renegotiate SOFT expiring SA's at 7802 * the same time. The amount of fuzz needs to be at 7803 * least 8 seconds, which is the typical interval 7804 * between sadb_ager() runs, although this is only a guide as it 7805 * self-tunes. 7806 */ 7807 static void 7808 lifetime_fuzz(ipsa_t *assoc) 7809 { 7810 uint8_t rnd; 7811 7812 if (assoc->ipsa_softaddlt == 0) 7813 return; 7814 7815 (void) random_get_pseudo_bytes(&rnd, sizeof (rnd)); 7816 rnd = (rnd & 0xF) + 8; 7817 assoc->ipsa_softexpiretime -= rnd; 7818 assoc->ipsa_softaddlt -= rnd; 7819 } 7820 7821 static void 7822 destroy_ipsa_pair(ipsap_t *ipsapp) 7823 { 7824 /* 7825 * Because of the multi-line macro nature of IPSA_REFRELE, keep 7826 * them in { }. 7827 */ 7828 if (ipsapp->ipsap_sa_ptr != NULL) { 7829 IPSA_REFRELE(ipsapp->ipsap_sa_ptr); 7830 } 7831 if (ipsapp->ipsap_psa_ptr != NULL) { 7832 IPSA_REFRELE(ipsapp->ipsap_psa_ptr); 7833 } 7834 init_ipsa_pair(ipsapp); 7835 } 7836 7837 static void 7838 init_ipsa_pair(ipsap_t *ipsapp) 7839 { 7840 ipsapp->ipsap_bucket = NULL; 7841 ipsapp->ipsap_sa_ptr = NULL; 7842 ipsapp->ipsap_pbucket = NULL; 7843 ipsapp->ipsap_psa_ptr = NULL; 7844 } 7845 7846 /* 7847 * The sadb_ager() function walks through the hash tables of SA's and ages 7848 * them; if an SA expires as a result, it's marked as DEAD and will be reaped 7849 * the next time sadb_ager() runs. SA's which are paired or have a peer (the same 7850 * SA appears in both the inbound and outbound tables because it's not possible 7851 * to determine its direction) are placed on a list when they expire. This is 7852 * to ensure that pair/peer SA's are reaped at the same time, even if they 7853 * expire at different times. 7854 * 7855 * This function is called twice by sadb_ager(), once after processing the 7856 * inbound table, then again after processing the outbound table. 7857 */ 7858 void 7859 age_pair_peer_list(templist_t *haspeerlist, sadb_t *sp, boolean_t outbound) 7860 { 7861 templist_t *listptr; 7862 int outhash; 7863 isaf_t *bucket; 7864 boolean_t haspeer; 7865 ipsa_t *peer_assoc, *dying; 7866 /* 7867 * Haspeer cases will contain both IPv4 and IPv6. This code 7868 * is address independent. 7869 */ 7870 while (haspeerlist != NULL) { 7871 /* "dying" contains the SA that has a peer. */ 7872 dying = haspeerlist->ipsa; 7873 haspeer = (dying->ipsa_haspeer); 7874 listptr = haspeerlist; 7875 haspeerlist = listptr->next; 7876 kmem_free(listptr, sizeof (*listptr)); 7877 /* 7878 * Pick peer bucket based on addrfam. 7879 */ 7880 if (outbound) { 7881 if (haspeer) 7882 bucket = INBOUND_BUCKET(sp, dying->ipsa_spi); 7883 else 7884 bucket = INBOUND_BUCKET(sp, 7885 dying->ipsa_otherspi); 7886 } else { /* inbound */ 7887 if (haspeer) { 7888 if (dying->ipsa_addrfam == AF_INET6) { 7889 outhash = OUTBOUND_HASH_V6(sp, 7890 *((in6_addr_t *)&dying-> 7891 ipsa_dstaddr)); 7892 } else { 7893 outhash = OUTBOUND_HASH_V4(sp, 7894 *((ipaddr_t *)&dying-> 7895 ipsa_dstaddr)); 7896 } 7897 } else if (dying->ipsa_addrfam == AF_INET6) { 7898 outhash = OUTBOUND_HASH_V6(sp, 7899 *((in6_addr_t *)&dying-> 7900 ipsa_srcaddr)); 7901 } else { 7902 outhash = OUTBOUND_HASH_V4(sp, 7903 *((ipaddr_t *)&dying-> 7904 ipsa_srcaddr)); 7905 } 7906 bucket = &(sp->sdb_of[outhash]); 7907 } 7908 7909 mutex_enter(&bucket->isaf_lock); 7910 /* 7911 * "haspeer" SA's have the same src/dst address ordering, 7912 * "paired" SA's have the src/dst addresses reversed.
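* A peer is therefore looked up with the same (SPI, src, dst) tuple, while a paired SA is looked up using ipsa_otherspi with the addresses swapped.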
7913 */ 7914 if (haspeer) { 7915 peer_assoc = ipsec_getassocbyspi(bucket, 7916 dying->ipsa_spi, dying->ipsa_srcaddr, 7917 dying->ipsa_dstaddr, dying->ipsa_addrfam); 7918 } else { 7919 peer_assoc = ipsec_getassocbyspi(bucket, 7920 dying->ipsa_otherspi, dying->ipsa_dstaddr, 7921 dying->ipsa_srcaddr, dying->ipsa_addrfam); 7922 } 7923 7924 mutex_exit(&bucket->isaf_lock); 7925 if (peer_assoc != NULL) { 7926 mutex_enter(&peer_assoc->ipsa_lock); 7927 mutex_enter(&dying->ipsa_lock); 7928 if (!haspeer) { 7929 /* 7930 * Only SA's which have a "peer" or are 7931 * "paired" end up on this list, so this 7932 * must be a "paired" SA; update the flags 7933 * to break the pair. 7934 */ 7935 peer_assoc->ipsa_otherspi = 0; 7936 peer_assoc->ipsa_flags &= ~IPSA_F_PAIRED; 7937 dying->ipsa_otherspi = 0; 7938 dying->ipsa_flags &= ~IPSA_F_PAIRED; 7939 } 7940 if (haspeer || outbound) { 7941 /* 7942 * Update the state of the "inbound" SA when 7943 * the "outbound" SA has expired. Don't update 7944 * the "outbound" SA when the "inbound" SA 7945 * expires because setting ipsa_hardexpiretime 7946 * below will cause this to happen. 7947 */ 7948 peer_assoc->ipsa_state = dying->ipsa_state; 7949 } 7950 if (dying->ipsa_state == IPSA_STATE_DEAD) 7951 peer_assoc->ipsa_hardexpiretime = 1; 7952 7953 mutex_exit(&dying->ipsa_lock); 7954 mutex_exit(&peer_assoc->ipsa_lock); 7955 IPSA_REFRELE(peer_assoc); 7956 } 7957 IPSA_REFRELE(dying); 7958 } 7959 } 7960 7961 /* 7962 * Ensure that the IV used for CCM mode never repeats. The IV should 7963 * only be updated by this function. Also check to see if the IV 7964 * is about to wrap and generate a SOFT Expire. This function is only 7965 * called for outgoing packets; the IV for incoming packets is taken 7966 * from the wire. If the outgoing SA needs to be expired, update 7967 * the matching incoming SA. 7968 */ 7969 boolean_t 7970 update_iv(uint8_t *iv_ptr, queue_t *pfkey_q, ipsa_t *assoc, 7971 ipsecesp_stack_t *espstack) 7972 { 7973 boolean_t rc = B_TRUE; 7974 isaf_t *inbound_bucket; 7975 sadb_t *sp; 7976 ipsa_t *pair_sa = NULL; 7977 int sa_new_state = 0; 7978 7979 /* For non-counter modes, the IV is random data. */ 7980 if (!(assoc->ipsa_flags & IPSA_F_COUNTERMODE)) { 7981 (void) random_get_pseudo_bytes(iv_ptr, assoc->ipsa_iv_len); 7982 return (rc); 7983 } 7984 7985 mutex_enter(&assoc->ipsa_lock); 7986 7987 (*assoc->ipsa_iv)++; 7988 7989 if (*assoc->ipsa_iv == assoc->ipsa_iv_hardexpire) { 7990 sa_new_state = IPSA_STATE_DEAD; 7991 rc = B_FALSE; 7992 } else if (*assoc->ipsa_iv == assoc->ipsa_iv_softexpire) { 7993 if (assoc->ipsa_state != IPSA_STATE_DYING) { 7994 /* 7995 * This SA may have already been expired when its 7996 * PAIR_SA expired. 7997 */ 7998 sa_new_state = IPSA_STATE_DYING; 7999 } 8000 } 8001 if (sa_new_state) { 8002 /* 8003 * If there is a state change, we need to update this SA 8004 * and its "pair"; we can find the bucket for the "pair" SA 8005 * while holding the ipsa_t mutex, but we won't actually 8006 * update anything until the ipsa_t mutex has been released 8007 * for _this_ SA.
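* The pair SA is looked up and updated further below, once this SA's lock has been dropped.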
8008 */ 8009 assoc->ipsa_state = sa_new_state; 8010 if (assoc->ipsa_addrfam == AF_INET6) { 8011 sp = &espstack->esp_sadb.s_v6; 8012 } else { 8013 sp = &espstack->esp_sadb.s_v4; 8014 } 8015 inbound_bucket = INBOUND_BUCKET(sp, assoc->ipsa_otherspi); 8016 sadb_expire_assoc(pfkey_q, assoc); 8017 } 8018 if (rc == B_TRUE) 8019 bcopy(assoc->ipsa_iv, iv_ptr, assoc->ipsa_iv_len); 8020 8021 mutex_exit(&assoc->ipsa_lock); 8022 8023 if (sa_new_state) { 8024 /* Find the inbound SA, need to lock hash bucket. */ 8025 mutex_enter(&inbound_bucket->isaf_lock); 8026 pair_sa = ipsec_getassocbyspi(inbound_bucket, 8027 assoc->ipsa_otherspi, assoc->ipsa_dstaddr, 8028 assoc->ipsa_srcaddr, assoc->ipsa_addrfam); 8029 mutex_exit(&inbound_bucket->isaf_lock); 8030 if (pair_sa != NULL) { 8031 mutex_enter(&pair_sa->ipsa_lock); 8032 pair_sa->ipsa_state = sa_new_state; 8033 mutex_exit(&pair_sa->ipsa_lock); 8034 IPSA_REFRELE(pair_sa); 8035 } 8036 } 8037 8038 return (rc); 8039 } 8040 8041 void 8042 ccm_params_init(ipsa_t *assoc, uchar_t *esph, uint_t data_len, uchar_t *iv_ptr, 8043 ipsa_cm_mech_t *cm_mech, crypto_data_t *crypto_data) 8044 { 8045 uchar_t *nonce; 8046 crypto_mechanism_t *combined_mech; 8047 CK_AES_CCM_PARAMS *params; 8048 8049 combined_mech = (crypto_mechanism_t *)cm_mech; 8050 params = (CK_AES_CCM_PARAMS *)(combined_mech + 1); 8051 nonce = (uchar_t *)(params + 1); 8052 params->ulMACSize = assoc->ipsa_mac_len; 8053 params->ulNonceSize = assoc->ipsa_nonce_len; 8054 params->ulAuthDataSize = sizeof (esph_t); 8055 params->ulDataSize = data_len; 8056 params->nonce = nonce; 8057 params->authData = esph; 8058 8059 cm_mech->combined_mech.cm_type = assoc->ipsa_emech.cm_type; 8060 cm_mech->combined_mech.cm_param_len = sizeof (CK_AES_CCM_PARAMS); 8061 cm_mech->combined_mech.cm_param = (caddr_t)params; 8062 /* See gcm_params_init() for comments. */ 8063 bcopy(assoc->ipsa_nonce, nonce, assoc->ipsa_saltlen); 8064 nonce += assoc->ipsa_saltlen; 8065 bcopy(iv_ptr, nonce, assoc->ipsa_iv_len); 8066 crypto_data->cd_miscdata = NULL; 8067 } 8068 8069 /* ARGSUSED */ 8070 void 8071 cbc_params_init(ipsa_t *assoc, uchar_t *esph, uint_t data_len, uchar_t *iv_ptr, 8072 ipsa_cm_mech_t *cm_mech, crypto_data_t *crypto_data) 8073 { 8074 cm_mech->combined_mech.cm_type = assoc->ipsa_emech.cm_type; 8075 cm_mech->combined_mech.cm_param_len = 0; 8076 cm_mech->combined_mech.cm_param = NULL; 8077 crypto_data->cd_miscdata = (char *)iv_ptr; 8078 } 8079 8080 /* ARGSUSED */ 8081 void 8082 gcm_params_init(ipsa_t *assoc, uchar_t *esph, uint_t data_len, uchar_t *iv_ptr, 8083 ipsa_cm_mech_t *cm_mech, crypto_data_t *crypto_data) 8084 { 8085 uchar_t *nonce; 8086 crypto_mechanism_t *combined_mech; 8087 CK_AES_GCM_PARAMS *params; 8088 8089 combined_mech = (crypto_mechanism_t *)cm_mech; 8090 params = (CK_AES_GCM_PARAMS *)(combined_mech + 1); 8091 nonce = (uchar_t *)(params + 1); 8092 8093 params->pIv = nonce; 8094 params->ulIvLen = assoc->ipsa_nonce_len; 8095 params->ulIvBits = SADB_8TO1(assoc->ipsa_nonce_len); 8096 params->pAAD = esph; 8097 params->ulAADLen = sizeof (esph_t); 8098 params->ulTagBits = SADB_8TO1(assoc->ipsa_mac_len); 8099 8100 cm_mech->combined_mech.cm_type = assoc->ipsa_emech.cm_type; 8101 cm_mech->combined_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS); 8102 cm_mech->combined_mech.cm_param = (caddr_t)params; 8103 /* 8104 * Create the nonce, which is made up of the salt and the IV. 8105 * Copy the salt from the SA and the IV from the packet. 
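* (For AES-GCM in ESP the nonce is typically a 4-byte salt followed by an 8-byte IV; the lengths used here come from ipsa_saltlen and ipsa_iv_len.)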
8106 * For inbound packets we copy the IV from the packet because it 8107 * was set by the sending system. For outbound packets we also copy the IV 8108 * from the packet rather than from the SA, because the IV in the SA may be 8109 * changed by another thread; the IV in the packet was created while holding the SA's mutex. 8110 */ 8111 bcopy(assoc->ipsa_nonce, nonce, assoc->ipsa_saltlen); 8112 nonce += assoc->ipsa_saltlen; 8113 bcopy(iv_ptr, nonce, assoc->ipsa_iv_len); 8114 crypto_data->cd_miscdata = NULL; 8115 } 8116