/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/strsun.h>
#include <sys/stropts.h>
#include <sys/zone.h>
#include <sys/vnode.h>
#include <sys/sysmacros.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/mkdev.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/suntpi.h>
#include <sys/policy.h>
#include <sys/dls.h>

#include <sys/socket.h>
#include <netinet/in.h>
#include <net/pfkeyv2.h>
#include <net/pfpolicy.h>

#include <inet/common.h>
#include <netinet/ip6.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <inet/mi.h>
#include <inet/proto_set.h>
#include <inet/nd.h>
#include <inet/ip_if.h>
#include <inet/optcom.h>
#include <inet/ipsec_info.h>
#include <inet/ipsec_impl.h>
#include <inet/spdsock.h>
#include <inet/sadb.h>
#include <inet/iptun.h>
#include <inet/iptun/iptun_impl.h>

#include <sys/isa_defs.h>

#include <c2/audit.h>

/*
 * This is a transport provider for the PF_POLICY IPsec policy
 * management socket, which provides a management interface into the
 * SPD, allowing policy rules to be added, deleted, and queried.
 *
 * This effectively replaces the old private SIOC*IPSECONFIG ioctls
 * with an extensible interface which will hopefully be public some
 * day.
 *
 * See <net/pfpolicy.h> for more details on the protocol.
 *
 * We link against drv/ip and call directly into it to manipulate the
 * SPD; see ipsec_impl.h for the policy data structures and spd.c for
 * the code which maintains them.
 *
 * The MT model of this is QPAIR with the addition of some explicit
 * locking to protect system-wide policy data structures.
 */

static vmem_t *spdsock_vmem;		/* for minor numbers. */

#define	ALIGNED64(x) IS_P2ALIGNED((x), sizeof (uint64_t))

/* Default structure copied into T_INFO_ACK messages (from rts.c...) */
static struct T_info_ack spdsock_g_t_info_ack = {
	T_INFO_ACK,
	T_INFINITE,	/* TSDU_size. Maximum size messages. */
	T_INVALID,	/* ETSDU_size. No expedited data. */
	T_INVALID,	/* CDATA_size. No connect data. */
	T_INVALID,	/* DDATA_size. No disconnect data. */
	0,		/* ADDR_size. */
	0,		/* OPT_size. No user-settable options */
	64 * 1024,	/* TIDU_size. spdsock allows maximum size messages. */
	T_COTS,		/* SERV_type. spdsock supports connection oriented. */
	TS_UNBND,	/* CURRENT_state. This is set from spdsock_state. */
	(XPG4_1)	/* Provider flags */
};

/* Named Dispatch Parameter Management Structure */
typedef struct spdsockparam_s {
	uint_t	spdsock_param_min;
	uint_t	spdsock_param_max;
	uint_t	spdsock_param_value;
	char	*spdsock_param_name;
} spdsockparam_t;

/*
 * Table of NDD variables supported by spdsock. These are loaded into
 * spdsock_g_nd in spdsock_init_nd.
 * All of these are alterable, within the min/max values given, at run time.
 */
static	spdsockparam_t	lcl_param_arr[] = {
	/* min	max	value	name */
	{ 4096, 65536, 8192, "spdsock_xmit_hiwat"},
	{ 0, 65536, 1024, "spdsock_xmit_lowat"},
	{ 4096, 65536, 8192, "spdsock_recv_hiwat"},
	{ 65536, 1024*1024*1024, 256*1024, "spdsock_max_buf"},
	{ 0, 3, 0, "spdsock_debug"},
};
#define	spds_xmit_hiwat	spds_params[0].spdsock_param_value
#define	spds_xmit_lowat	spds_params[1].spdsock_param_value
#define	spds_recv_hiwat	spds_params[2].spdsock_param_value
#define	spds_max_buf	spds_params[3].spdsock_param_value
#define	spds_debug	spds_params[4].spdsock_param_value

#define	ss0dbg(a)	printf a
/* NOTE:  != 0 instead of > 0 so lint doesn't complain. */
#define	ss1dbg(spds, a)	if (spds->spds_debug != 0) printf a
#define	ss2dbg(spds, a)	if (spds->spds_debug > 1) printf a
#define	ss3dbg(spds, a)	if (spds->spds_debug > 2) printf a

#define	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph) { \
	ASSERT(RW_READ_HELD(&(iph)->iph_lock)); \
	(ss)->spdsock_dump_head = (iph); \
	(ss)->spdsock_dump_gen = (iph)->iph_gen; \
	(ss)->spdsock_dump_cur_type = 0; \
	(ss)->spdsock_dump_cur_af = IPSEC_AF_V4; \
	(ss)->spdsock_dump_cur_rule = NULL; \
	(ss)->spdsock_dump_count = 0; \
	(ss)->spdsock_dump_cur_chain = 0; \
}

static int spdsock_close(queue_t *);
static int spdsock_open(queue_t *, dev_t *, int, int, cred_t *);
static void spdsock_wput(queue_t *, mblk_t *);
static void spdsock_wsrv(queue_t *);
static void spdsock_rsrv(queue_t *);
static void *spdsock_stack_init(netstackid_t stackid, netstack_t *ns);
static void spdsock_stack_fini(netstackid_t stackid, void *arg);
static void spdsock_loadcheck(void *);
static void spdsock_merge_algs(spd_stack_t *);
static void spdsock_flush_one(ipsec_policy_head_t *, netstack_t *);
static mblk_t *spdsock_dump_next_record(spdsock_t *);

static struct module_info info = {
	5138, "spdsock", 1, INFPSZ, 512, 128
};

static struct qinit rinit = {
	NULL, (pfi_t)spdsock_rsrv, spdsock_open, spdsock_close,
	NULL, &info
};

static struct qinit winit = {
	(pfi_t)spdsock_wput, (pfi_t)spdsock_wsrv, NULL, NULL, NULL, &info
};

struct streamtab spdsockinfo = {
	&rinit, &winit
};

/* mapping from alg type to protocol number, as per RFC 2407 */
static const uint_t algproto[] = {
	PROTO_IPSEC_AH,
	PROTO_IPSEC_ESP,
};

#define	NALGPROTOS	(sizeof (algproto) / sizeof (algproto[0]))

/* mapping from kernel exec mode to spdsock exec mode */
static const uint_t execmodes[] = {
	SPD_ALG_EXEC_MODE_SYNC,
	SPD_ALG_EXEC_MODE_ASYNC
};

#define	NEXECMODES	(sizeof (execmodes) / sizeof (execmodes[0]))

#define	ALL_ACTIVE_POLHEADS ((ipsec_policy_head_t *)-1)
#define	ALL_INACTIVE_POLHEADS ((ipsec_policy_head_t *)-2)

#define	ITP_NAME(itp) (itp != NULL ? itp->itp_name : NULL)
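
/*
 * The two routines below are the NDD get/set callbacks for the parameter
 * table above; spdsock_param_register() hands them to nd_load().  Reads
 * and updates of the parameter values are serialized by spds_param_lock.
 */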

/* ARGSUSED */
static int
spdsock_param_get(q, mp, cp, cr)
	queue_t	*q;
	mblk_t	*mp;
	caddr_t	cp;
	cred_t *cr;
{
	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
	uint_t value;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;

	mutex_enter(&spds->spds_param_lock);
	value = spdsockpa->spdsock_param_value;
	mutex_exit(&spds->spds_param_lock);

	(void) mi_mpprintf(mp, "%u", value);
	return (0);
}

/* This routine sets an NDD variable in a spdsockparam_t structure. */
/* ARGSUSED */
static int
spdsock_param_set(q, mp, value, cp, cr)
	queue_t	*q;
	mblk_t	*mp;
	char	*value;
	caddr_t	cp;
	cred_t *cr;
{
	ulong_t	new_value;
	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;

	/* Convert the value from a string into a long integer. */
	if (ddi_strtoul(value, NULL, 10, &new_value) != 0)
		return (EINVAL);

	mutex_enter(&spds->spds_param_lock);
	/*
	 * Fail the request if the new value does not lie within the
	 * required bounds.
	 */
	if (new_value < spdsockpa->spdsock_param_min ||
	    new_value > spdsockpa->spdsock_param_max) {
		mutex_exit(&spds->spds_param_lock);
		return (EINVAL);
	}

	/* Set the new value */
	spdsockpa->spdsock_param_value = new_value;
	mutex_exit(&spds->spds_param_lock);

	return (0);
}

/*
 * Initialize at module load time
 */
boolean_t
spdsock_ddi_init(void)
{
	spdsock_max_optsize = optcom_max_optsize(
	    spdsock_opt_obj.odb_opt_des_arr, spdsock_opt_obj.odb_opt_arr_cnt);

	spdsock_vmem = vmem_create("spdsock", (void *)1, MAXMIN, 1,
	    NULL, NULL, NULL, 1, VM_SLEEP | VMC_IDENTIFIER);

	/*
	 * We want to be informed each time a stack is created or
	 * destroyed in the kernel, so we can maintain the
	 * set of spd_stack_t's.
	 */
	netstack_register(NS_SPDSOCK, spdsock_stack_init, NULL,
	    spdsock_stack_fini);

	return (B_TRUE);
}

/*
 * Walk through the param array specified registering each element with the
 * named dispatch handler.
 */
static boolean_t
spdsock_param_register(IDP *ndp, spdsockparam_t *ssp, int cnt)
{
	for (; cnt-- > 0; ssp++) {
		if (ssp->spdsock_param_name != NULL &&
		    ssp->spdsock_param_name[0]) {
			if (!nd_load(ndp,
			    ssp->spdsock_param_name,
			    spdsock_param_get, spdsock_param_set,
			    (caddr_t)ssp)) {
				nd_free(ndp);
				return (B_FALSE);
			}
		}
	}
	return (B_TRUE);
}

/*
 * Initialize for each stack instance
 */
/* ARGSUSED */
static void *
spdsock_stack_init(netstackid_t stackid, netstack_t *ns)
{
	spd_stack_t	*spds;
	spdsockparam_t	*ssp;

	spds = (spd_stack_t *)kmem_zalloc(sizeof (*spds), KM_SLEEP);
	spds->spds_netstack = ns;

	ASSERT(spds->spds_g_nd == NULL);

	ssp = (spdsockparam_t *)kmem_alloc(sizeof (lcl_param_arr), KM_SLEEP);
	spds->spds_params = ssp;
	bcopy(lcl_param_arr, ssp, sizeof (lcl_param_arr));

	(void) spdsock_param_register(&spds->spds_g_nd, ssp,
	    A_CNT(lcl_param_arr));

	mutex_init(&spds->spds_param_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spds->spds_alg_lock, NULL, MUTEX_DEFAULT, NULL);

	return (spds);
}

void
spdsock_ddi_destroy(void)
{
	vmem_destroy(spdsock_vmem);

	netstack_unregister(NS_SPDSOCK);
}

/* ARGSUSED */
static void
spdsock_stack_fini(netstackid_t stackid, void *arg)
{
	spd_stack_t *spds = (spd_stack_t *)arg;

	freemsg(spds->spds_mp_algs);
	mutex_destroy(&spds->spds_param_lock);
	mutex_destroy(&spds->spds_alg_lock);
	nd_free(&spds->spds_g_nd);
	kmem_free(spds->spds_params, sizeof (lcl_param_arr));
	spds->spds_params = NULL;

	kmem_free(spds, sizeof (*spds));
}

/*
 * NOTE: large quantities of this should be shared with keysock.
 * Would be nice to combine some of this into a common module, but
 * not possible given time pressures.
 */

/*
 * High-level reality checking of extensions.
 */
/* ARGSUSED */ /* XXX */
static boolean_t
ext_check(spd_ext_t *ext)
{
	spd_if_t *tunname = (spd_if_t *)ext;
	int i;
	char *idstr;

	if (ext->spd_ext_type == SPD_EXT_TUN_NAME) {
		/* (NOTE:  Modified from SADB_EXT_IDENTITY..) */

		/*
		 * Make sure the strings in these identities are
		 * null-terminated.  Let's "proactively" null-terminate the
		 * string at the last byte if it's not terminated sooner.
		 */
		i = SPD_64TO8(tunname->spd_if_len) - sizeof (spd_if_t);
		idstr = (char *)(tunname + 1);
		while (*idstr != '\0' && i > 0) {
			i--;
			idstr++;
		}
		if (i == 0) {
			/*
			 * I.e., if the bozo user didn't NULL-terminate the
			 * string...
			 */
			idstr--;
			*idstr = '\0';
		}
	}
	return (B_TRUE);	/* For now... */
}



/* Return values for spdsock_get_ext(). */
#define	KGE_OK	0
#define	KGE_DUP	1
#define	KGE_UNK	2
#define	KGE_LEN	3
#define	KGE_CHK	4

/*
 * Parse basic extension headers and return in the passed-in pointer vector.
 * Return values include:
 *
 *	KGE_OK	Everything's nice and parsed out.
 *		If there are no extensions, place NULL in extv[0].
 *	KGE_DUP	There is a duplicate extension.
 *		First instance in appropriate bin.  First duplicate in
 *		extv[0].
 *	KGE_UNK	Unknown extension type encountered.  extv[0] contains
 *		unknown header.
 *	KGE_LEN	Extension length error.
 *	KGE_CHK	High-level reality check failed on specific extension.
 *
 * My apologies for some of the pointer arithmetic in here.  I'm thinking
 * like an assembly programmer, yet trying to make the compiler happy.
 */
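
/*
 * For illustration only -- consumers of this parser index the vector by
 * extension type, roughly:
 *
 *	spd_ext_t *extv[SPD_EXT_MAX + 1];
 *
 *	if (spdsock_get_ext(extv, spmsg, msgsize) != KGE_OK)
 *		(bail with spdsock_diag())
 *	rule = (struct spd_rule *)extv[SPD_EXT_RULE];	(may be NULL)
 */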
static int
spdsock_get_ext(spd_ext_t *extv[], spd_msg_t *basehdr, uint_t msgsize)
{
	bzero(extv, sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));

	/* Use extv[0] as the "current working pointer". */

	extv[0] = (spd_ext_t *)(basehdr + 1);

	while (extv[0] < (spd_ext_t *)(((uint8_t *)basehdr) + msgsize)) {
		/* Check for unknown headers. */
		if (extv[0]->spd_ext_type == 0 ||
		    extv[0]->spd_ext_type > SPD_EXT_MAX)
			return (KGE_UNK);

		/*
		 * Check length.  Use uint64_t because extlen is in units
		 * of 64-bit words.  If length goes beyond the msgsize,
		 * return an error.  (Zero length also qualifies here.)
		 */
		if (extv[0]->spd_ext_len == 0 ||
		    (void *)((uint64_t *)extv[0] + extv[0]->spd_ext_len) >
		    (void *)((uint8_t *)basehdr + msgsize))
			return (KGE_LEN);

		/* Check for redundant headers. */
		if (extv[extv[0]->spd_ext_type] != NULL)
			return (KGE_DUP);

		/*
		 * Reality check the extension if possible at the spdsock
		 * level.
		 */
		if (!ext_check(extv[0]))
			return (KGE_CHK);

		/* If I make it here, assign the appropriate bin. */
		extv[extv[0]->spd_ext_type] = extv[0];

		/* Advance pointer (See above for uint64_t ptr reasoning.) */
		extv[0] = (spd_ext_t *)
		    ((uint64_t *)extv[0] + extv[0]->spd_ext_len);
	}

	/* Everything's cool. */

	/*
	 * If extv[0] == NULL, then there are no extension headers in this
	 * message.  Ensure that this is the case.
	 */
	if (extv[0] == (spd_ext_t *)(basehdr + 1))
		extv[0] = NULL;

	return (KGE_OK);
}

static const int bad_ext_diag[] = {
	SPD_DIAGNOSTIC_MALFORMED_LCLPORT,
	SPD_DIAGNOSTIC_MALFORMED_REMPORT,
	SPD_DIAGNOSTIC_MALFORMED_PROTO,
	SPD_DIAGNOSTIC_MALFORMED_LCLADDR,
	SPD_DIAGNOSTIC_MALFORMED_REMADDR,
	SPD_DIAGNOSTIC_MALFORMED_ACTION,
	SPD_DIAGNOSTIC_MALFORMED_RULE,
	SPD_DIAGNOSTIC_MALFORMED_RULESET,
	SPD_DIAGNOSTIC_MALFORMED_ICMP_TYPECODE
};

static const int dup_ext_diag[] = {
	SPD_DIAGNOSTIC_DUPLICATE_LCLPORT,
	SPD_DIAGNOSTIC_DUPLICATE_REMPORT,
	SPD_DIAGNOSTIC_DUPLICATE_PROTO,
	SPD_DIAGNOSTIC_DUPLICATE_LCLADDR,
	SPD_DIAGNOSTIC_DUPLICATE_REMADDR,
	SPD_DIAGNOSTIC_DUPLICATE_ACTION,
	SPD_DIAGNOSTIC_DUPLICATE_RULE,
	SPD_DIAGNOSTIC_DUPLICATE_RULESET,
	SPD_DIAGNOSTIC_DUPLICATE_ICMP_TYPECODE
};

/*
 * Transmit a PF_POLICY error message to the instance either pointed to
 * by ks, the instance with serial number serial, or more, depending.
 *
 * The faulty message (or a reasonable facsimile thereof) is in mp.
 * This function will free mp or recycle it for delivery, thereby causing
 * the stream head to free it.
 */
static void
spdsock_error(queue_t *q, mblk_t *mp, int error, int diagnostic)
{
	spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

	ASSERT(mp->b_datap->db_type == M_DATA);

	if (spmsg->spd_msg_type < SPD_MIN ||
	    spmsg->spd_msg_type > SPD_MAX)
		spmsg->spd_msg_type = SPD_RESERVED;

	/*
	 * Strip out extension headers.
	 */
	ASSERT(mp->b_rptr + sizeof (*spmsg) <= mp->b_datap->db_lim);
	mp->b_wptr = mp->b_rptr + sizeof (*spmsg);
	spmsg->spd_msg_len = SPD_8TO64(sizeof (spd_msg_t));
	spmsg->spd_msg_errno = (uint8_t)error;
	spmsg->spd_msg_diagnostic = (uint16_t)diagnostic;

	qreply(q, mp);
}

static void
spdsock_diag(queue_t *q, mblk_t *mp, int diagnostic)
{
	spdsock_error(q, mp, EINVAL, diagnostic);
}

static void
spd_echo(queue_t *q, mblk_t *mp)
{
	qreply(q, mp);
}

/*
 * Do NOT consume a reference to itp.
 */
/*ARGSUSED*/
static void
spdsock_flush_node(ipsec_tun_pol_t *itp, void *cookie, netstack_t *ns)
{
	boolean_t active = (boolean_t)cookie;
	ipsec_policy_head_t *iph;

	iph = active ? itp->itp_policy : itp->itp_inactive;
	IPPH_REFHOLD(iph);
	mutex_enter(&itp->itp_lock);
	spdsock_flush_one(iph, ns);
	if (active)
		itp->itp_flags &= ~ITPF_PFLAGS;
	else
		itp->itp_flags &= ~ITPF_IFLAGS;
	mutex_exit(&itp->itp_lock);
}

/*
 * Clear out one polhead.
 */
static void
spdsock_flush_one(ipsec_policy_head_t *iph, netstack_t *ns)
{
	rw_enter(&iph->iph_lock, RW_WRITER);
	ipsec_polhead_flush(iph, ns);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ns);
}

static void
spdsock_flush(queue_t *q, ipsec_policy_head_t *iph, ipsec_tun_pol_t *itp,
    mblk_t *mp)
{
	boolean_t active;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (iph != ALL_ACTIVE_POLHEADS && iph != ALL_INACTIVE_POLHEADS) {
		spdsock_flush_one(iph, ns);
		if (audit_active) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLUSH, cr, ns,
			    ITP_NAME(itp), active, 0, cpid);
		}
	} else {
		active = (iph == ALL_ACTIVE_POLHEADS);

		/* First flush the global policy. */
		spdsock_flush_one(active ? ipsec_system_policy(ns) :
		    ipsec_inactive_policy(ns), ns);
		if (audit_active) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_FLUSH, cr, ns, NULL,
			    active, 0, cpid);
		}
		/* Then flush every tunnel's appropriate one. */
		itp_walk(spdsock_flush_node, (void *)active, ns);
		if (audit_active) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_FLUSH, cr, ns,
			    "all tunnels", active, 0, cpid);
		}
	}

	spd_echo(q, mp);
}

static boolean_t
spdsock_ext_to_sel(spd_ext_t **extv, ipsec_selkey_t *sel, int *diag)
{
	bzero(sel, sizeof (*sel));

	if (extv[SPD_EXT_PROTO] != NULL) {
		struct spd_proto *pr =
		    (struct spd_proto *)extv[SPD_EXT_PROTO];
		sel->ipsl_proto = pr->spd_proto_number;
		sel->ipsl_valid |= IPSL_PROTOCOL;
	}
	if (extv[SPD_EXT_LCLPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_LCLPORT];
		sel->ipsl_lport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_LOCAL_PORT;
	}
	if (extv[SPD_EXT_REMPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_REMPORT];
		sel->ipsl_rport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_REMOTE_PORT;
	}

	if (extv[SPD_EXT_ICMP_TYPECODE] != NULL) {
		struct spd_typecode *tc =
		    (struct spd_typecode *)extv[SPD_EXT_ICMP_TYPECODE];

		sel->ipsl_valid |= IPSL_ICMP_TYPE;
		sel->ipsl_icmp_type = tc->spd_typecode_type;
		if (tc->spd_typecode_type_end < tc->spd_typecode_type)
			sel->ipsl_icmp_type_end = tc->spd_typecode_type;
		else
			sel->ipsl_icmp_type_end = tc->spd_typecode_type_end;

		if (tc->spd_typecode_code != 255) {
			sel->ipsl_valid |= IPSL_ICMP_CODE;
			sel->ipsl_icmp_code = tc->spd_typecode_code;
			if (tc->spd_typecode_code_end < tc->spd_typecode_code)
				sel->ipsl_icmp_code_end = tc->spd_typecode_code;
			else
				sel->ipsl_icmp_code_end =
				    tc->spd_typecode_code_end;
		}
	}
#define	ADDR2SEL(sel, extv, field, pfield, extn, bit) \
	if ((extv)[(extn)] != NULL) { \
		uint_t addrlen; \
		struct spd_address *ap = \
		    (struct spd_address *)((extv)[(extn)]); \
		addrlen = (ap->spd_address_af == AF_INET6) ? \
		    IPV6_ADDR_LEN : IP_ADDR_LEN; \
		if (SPD_64TO8(ap->spd_address_len) < \
		    (addrlen + sizeof (*ap))) { \
			*diag = SPD_DIAGNOSTIC_BAD_ADDR_LEN; \
			return (B_FALSE); \
		} \
		bcopy((ap+1), &((sel)->field), addrlen); \
		(sel)->pfield = ap->spd_address_prefixlen; \
		(sel)->ipsl_valid |= (bit); \
		(sel)->ipsl_valid |= (ap->spd_address_af == AF_INET6) ? \
		    IPSL_IPV6 : IPSL_IPV4; \
	}

	ADDR2SEL(sel, extv, ipsl_local, ipsl_local_pfxlen,
	    SPD_EXT_LCLADDR, IPSL_LOCAL_ADDR);
	ADDR2SEL(sel, extv, ipsl_remote, ipsl_remote_pfxlen,
	    SPD_EXT_REMADDR, IPSL_REMOTE_ADDR);

	if ((sel->ipsl_valid & (IPSL_IPV6|IPSL_IPV4)) ==
	    (IPSL_IPV6|IPSL_IPV4)) {
		*diag = SPD_DIAGNOSTIC_MIXED_AF;
		return (B_FALSE);
	}

#undef ADDR2SEL

	return (B_TRUE);
}

static boolean_t
spd_convert_type(uint32_t type, ipsec_act_t *act)
{
	switch (type) {
	case SPD_ACTTYPE_DROP:
		act->ipa_type = IPSEC_ACT_DISCARD;
		return (B_TRUE);

	case SPD_ACTTYPE_PASS:
		act->ipa_type = IPSEC_ACT_CLEAR;
		return (B_TRUE);

	case SPD_ACTTYPE_IPSEC:
		act->ipa_type = IPSEC_ACT_APPLY;
		return (B_TRUE);
	}
	return (B_FALSE);
}

static boolean_t
spd_convert_flags(uint32_t flags, ipsec_act_t *act)
{
	/*
	 * Note use of !! for boolean canonicalization.
	 */
	act->ipa_apply.ipp_use_ah = !!(flags & SPD_APPLY_AH);
	act->ipa_apply.ipp_use_esp = !!(flags & SPD_APPLY_ESP);
	act->ipa_apply.ipp_use_espa = !!(flags & SPD_APPLY_ESPA);
	act->ipa_apply.ipp_use_se = !!(flags & SPD_APPLY_SE);
	act->ipa_apply.ipp_use_unique = !!(flags & SPD_APPLY_UNIQUE);
	return (B_TRUE);
}

static void
spdsock_reset_act(ipsec_act_t *act)
{
	bzero(act, sizeof (*act));
	act->ipa_apply.ipp_espe_maxbits = IPSEC_MAX_KEYBITS;
	act->ipa_apply.ipp_espa_maxbits = IPSEC_MAX_KEYBITS;
	act->ipa_apply.ipp_ah_maxbits = IPSEC_MAX_KEYBITS;
}

/*
 * Sanity check action against reality, and shrink-wrap key sizes..
 */
static boolean_t
spdsock_check_action(ipsec_act_t *act, boolean_t tunnel_polhead, int *diag,
    spd_stack_t *spds)
{
	if (tunnel_polhead && act->ipa_apply.ipp_use_unique) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	if ((act->ipa_type != IPSEC_ACT_APPLY) &&
	    (act->ipa_apply.ipp_use_ah ||
	    act->ipa_apply.ipp_use_esp ||
	    act->ipa_apply.ipp_use_espa ||
	    act->ipa_apply.ipp_use_se ||
	    act->ipa_apply.ipp_use_unique)) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	if ((act->ipa_type == IPSEC_ACT_APPLY) &&
	    !act->ipa_apply.ipp_use_ah &&
	    !act->ipa_apply.ipp_use_esp) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	return (ipsec_check_action(act, diag, spds->spds_netstack));
}

/*
 * We may be short a few error checks here..
 */
static boolean_t
spdsock_ext_to_actvec(spd_ext_t **extv, ipsec_act_t **actpp, uint_t *nactp,
    int *diag, spd_stack_t *spds)
{
	struct spd_ext_actions *sactp =
	    (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
	ipsec_act_t act, *actp, *endactp;
	struct spd_attribute *attrp, *endattrp;
	uint64_t *endp;
	int nact;
	boolean_t tunnel_polhead;

	tunnel_polhead = (extv[SPD_EXT_TUN_NAME] != NULL &&
	    (((struct spd_rule *)extv[SPD_EXT_RULE])->spd_rule_flags &
	    SPD_RULE_FLAG_TUNNEL));

	*actpp = NULL;
	*nactp = 0;

	if (sactp == NULL) {
		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
		return (B_FALSE);
	}

	/*
	 * Parse the "action" extension and convert into an action chain.
	 */

	nact = sactp->spd_actions_count;

	endp = (uint64_t *)sactp;
	endp += sactp->spd_actions_len;
	endattrp = (struct spd_attribute *)endp;

	actp = kmem_alloc(sizeof (*actp) * nact, KM_NOSLEEP);
	if (actp == NULL) {
		*diag = SPD_DIAGNOSTIC_ADD_NO_MEM;
		return (B_FALSE);
	}
	*actpp = actp;
	*nactp = nact;
	endactp = actp + nact;

	spdsock_reset_act(&act);
	attrp = (struct spd_attribute *)(&sactp[1]);

	for (; attrp < endattrp; attrp++) {
		switch (attrp->spd_attr_tag) {
		case SPD_ATTR_NOP:
			break;

		case SPD_ATTR_EMPTY:
			spdsock_reset_act(&act);
			break;

		case SPD_ATTR_END:
			attrp = endattrp;
			/* FALLTHRU */
		case SPD_ATTR_NEXT:
			if (actp >= endactp) {
				*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
				goto fail;
			}
			if (!spdsock_check_action(&act, tunnel_polhead,
			    diag, spds))
				goto fail;
			*actp++ = act;
			spdsock_reset_act(&act);
			break;

		case SPD_ATTR_TYPE:
			if (!spd_convert_type(attrp->spd_attr_value, &act)) {
				*diag = SPD_DIAGNOSTIC_ADD_BAD_TYPE;
				goto fail;
			}
			break;

		case SPD_ATTR_FLAGS:
			if (!tunnel_polhead && extv[SPD_EXT_TUN_NAME] != NULL) {
				/*
				 * Set "sa unique" for transport-mode
				 * tunnels whether we want to or not.
				 */
				attrp->spd_attr_value |= SPD_APPLY_UNIQUE;
			}
			if (!spd_convert_flags(attrp->spd_attr_value, &act)) {
				*diag = SPD_DIAGNOSTIC_ADD_BAD_FLAGS;
				goto fail;
			}
			break;

		case SPD_ATTR_AH_AUTH:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_auth_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESP_ENCR:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_encr_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESP_AUTH:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_esp_auth_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ENCR_MINBITS:
			act.ipa_apply.ipp_espe_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ENCR_MAXBITS:
			act.ipa_apply.ipp_espe_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_AH_MINBITS:
			act.ipa_apply.ipp_ah_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_AH_MAXBITS:
			act.ipa_apply.ipp_ah_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESPA_MINBITS:
			act.ipa_apply.ipp_espa_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESPA_MAXBITS:
			act.ipa_apply.ipp_espa_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_LIFE_SOFT_TIME:
		case SPD_ATTR_LIFE_HARD_TIME:
		case SPD_ATTR_LIFE_SOFT_BYTES:
		case SPD_ATTR_LIFE_HARD_BYTES:
			break;

		case SPD_ATTR_KM_PROTO:
			act.ipa_apply.ipp_km_proto = attrp->spd_attr_value;
			break;

		case SPD_ATTR_KM_COOKIE:
			act.ipa_apply.ipp_km_cookie = attrp->spd_attr_value;
			break;

		case SPD_ATTR_REPLAY_DEPTH:
			act.ipa_apply.ipp_replay_depth = attrp->spd_attr_value;
			break;
		}
	}
	if (actp != endactp) {
		*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
		goto fail;
	}

	return (B_TRUE);
fail:
	ipsec_actvec_free(*actpp, nact);
	*actpp = NULL;
	return (B_FALSE);
}

typedef struct
{
	ipsec_policy_t *pol;
	int dir;
} tmprule_t;

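/*
 * mkrule() creates one kernel policy entry (a single address family and
 * direction) and records it in the caller's tmprule_t array; spdsock_addrule()
 * below only commits the recorded entries (under iph_lock) once every one of
 * them has been created and checked, and releases them all on failure.
 * mkrulepair() simply invokes mkrule() once per requested address family.
 */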
static int
mkrule(ipsec_policy_head_t *iph, struct spd_rule *rule,
    ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t af,
    tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
{
	ipsec_policy_t *pol;

	sel->ipsl_valid &= ~(IPSL_IPV6|IPSL_IPV4);
	sel->ipsl_valid |= af;

	pol = ipsec_policy_create(sel, actp, nact, rule->spd_rule_priority,
	    index, spds->spds_netstack);
	if (pol == NULL)
		return (ENOMEM);

	(*rp)->pol = pol;
	(*rp)->dir = dir;
	(*rp)++;

	if (!ipsec_check_policy(iph, pol, dir))
		return (EEXIST);

	rule->spd_rule_index = pol->ipsp_index;
	return (0);
}

static int
mkrulepair(ipsec_policy_head_t *iph, struct spd_rule *rule,
    ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t afs,
    tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
{
	int error;

	if (afs & IPSL_IPV4) {
		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV4, rp,
		    index, spds);
		if (error != 0)
			return (error);
	}
	if (afs & IPSL_IPV6) {
		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV6, rp,
		    index, spds);
		if (error != 0)
			return (error);
	}
	return (0);
}


static void
spdsock_addrule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	ipsec_act_t *actp;
	uint_t nact;
	int diag = 0, error, afs;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	tmprule_t rules[4], *rulep = &rules[0];
	boolean_t tunnel_mode, empty_itp, active;
	uint64_t *index = (itp == NULL) ? NULL : &itp->itp_next_policy_index;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;

	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (audit_active) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_ADDRULE, cr,
			    spds->spds_netstack, ITP_NAME(itp), active,
			    SPD_DIAGNOSTIC_NO_RULE_EXT, cpid);
		}
		return;
	}

	tunnel_mode = (rule->spd_rule_flags & SPD_RULE_FLAG_TUNNEL);

	if (itp != NULL) {
		mutex_enter(&itp->itp_lock);
		ASSERT(itp->itp_policy == iph || itp->itp_inactive == iph);
		active = (itp->itp_policy == iph);
		if (ITP_P_ISACTIVE(itp, iph)) {
			/* Check for mix-and-match of tunnel/transport. */
			if ((tunnel_mode && !ITP_P_ISTUNNEL(itp, iph)) ||
			    (!tunnel_mode && ITP_P_ISTUNNEL(itp, iph))) {
				mutex_exit(&itp->itp_lock);
				spdsock_error(q, mp, EBUSY, 0);
				return;
			}
			empty_itp = B_FALSE;
		} else {
			empty_itp = B_TRUE;
			itp->itp_flags = active ? ITPF_P_ACTIVE : ITPF_I_ACTIVE;
			if (tunnel_mode)
				itp->itp_flags |= active ? ITPF_P_TUNNEL :
				    ITPF_I_TUNNEL;
		}
	} else {
		empty_itp = B_FALSE;
	}

	if (rule->spd_rule_index != 0) {
		diag = SPD_DIAGNOSTIC_INVALID_RULE_INDEX;
		error = EINVAL;
		goto fail2;
	}

	if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
		error = EINVAL;
		goto fail2;
	}

	if (itp != NULL) {
		if (tunnel_mode) {
			if (sel.ipsl_valid &
			    (IPSL_REMOTE_PORT | IPSL_LOCAL_PORT)) {
				itp->itp_flags |= active ?
				    ITPF_P_PER_PORT_SECURITY :
				    ITPF_I_PER_PORT_SECURITY;
			}
		} else {
			/*
			 * For now, we don't allow transport-mode on a tunnel
			 * with ANY specific selectors.  Bail if we have such
			 * a request.
			 */
			if (sel.ipsl_valid & IPSL_WILDCARD) {
				diag = SPD_DIAGNOSTIC_NO_TUNNEL_SELECTORS;
				error = EINVAL;
				goto fail2;
			}
		}
	}

	if (!spdsock_ext_to_actvec(extv, &actp, &nact, &diag, spds)) {
		error = EINVAL;
		goto fail2;
	}
	/*
	 * If no addresses were specified, add both.
	 */
	afs = sel.ipsl_valid & (IPSL_IPV6|IPSL_IPV4);
	if (afs == 0)
		afs = (IPSL_IPV6|IPSL_IPV4);

	rw_enter(&iph->iph_lock, RW_WRITER);

	if (rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_OUTBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	if (rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_INBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	while ((--rulep) >= &rules[0]) {
		ipsec_enter_policy(iph, rulep->pol, rulep->dir,
		    spds->spds_netstack);
	}
	rw_exit(&iph->iph_lock);
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);

	ipsec_actvec_free(actp, nact);
	spd_echo(q, mp);
	if (audit_active) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
		    ITP_NAME(itp), active, 0, cpid);
	}
	return;

fail:
	rw_exit(&iph->iph_lock);
	while ((--rulep) >= &rules[0]) {
		IPPOL_REFRELE(rulep->pol, spds->spds_netstack);
	}
	ipsec_actvec_free(actp, nact);
fail2:
	if (itp != NULL) {
		if (empty_itp)
			itp->itp_flags = 0;
		mutex_exit(&itp->itp_lock);
	}
	spdsock_error(q, mp, error, diag);
	if (audit_active) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
		    ITP_NAME(itp), active, error, cpid);
	}
}

void
spdsock_deleterule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	int err, diag = 0;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_DELETERULE, cr, ns,
			    ITP_NAME(itp), active, SPD_DIAGNOSTIC_NO_RULE_EXT,
			    cpid);
		}
		return;
	}

	/*
	 * Must enter itp_lock first to avoid deadlock.  See tun.c's
	 * set_sec_simple() for the other case of itp_lock and iph_lock.
	 */
	if (itp != NULL)
		mutex_enter(&itp->itp_lock);

	if (rule->spd_rule_index != 0) {
		if (ipsec_policy_delete_index(iph, rule->spd_rule_index, ns) !=
		    0) {
			err = ESRCH;
			goto fail;
		}
	} else {
		if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
			err = EINVAL;	/* diag already set... */
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_INBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_OUTBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}
	}

	if (itp != NULL) {
		ASSERT(iph == itp->itp_policy || iph == itp->itp_inactive);
		rw_enter(&iph->iph_lock, RW_READER);
		if (avl_numnodes(&iph->iph_rulebyid) == 0) {
			if (iph == itp->itp_policy)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
		}
		/* Can exit locks in any order. */
		rw_exit(&iph->iph_lock);
		mutex_exit(&itp->itp_lock);
	}
	spd_echo(q, mp);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, 0, cpid);
	}
	return;
fail:
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);
	spdsock_error(q, mp, err, diag);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, err, cpid);
	}
}

/* Do NOT consume a reference to itp. */
/* ARGSUSED */
static void
spdsock_flip_node(ipsec_tun_pol_t *itp, void *ignoreme, netstack_t *ns)
{
	mutex_enter(&itp->itp_lock);
	ITPF_SWAP(itp->itp_flags);
	ipsec_swap_policy(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
}

void
spdsock_flip(queue_t *q, mblk_t *mp, spd_if_t *tunname)
{
	char *tname;
	ipsec_tun_pol_t *itp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (tunname != NULL) {
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* can't fail */
			ipsec_swap_global_policy(ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    NULL, active, 0, cpid);
			}
			itp_walk(spdsock_flip_node, NULL, ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    "all tunnels", active, 0, cpid);
			}
		} else {
			itp = get_tunnel_policy(tname, ns);
			if (itp == NULL) {
				/* Better idea for "tunnel not found"? */
				spdsock_error(q, mp, ESRCH, 0);
				if (audit_active) {
					boolean_t active;
					spd_msg_t *spmsg =
					    (spd_msg_t *)mp->b_rptr;
					cred_t *cr;
					pid_t cpid;

					cr = msg_getcred(mp, &cpid);
					active = (spmsg->spd_msg_spdid ==
					    SPD_ACTIVE);
					audit_pf_policy(SPD_FLIP, cr, ns,
					    ITP_NAME(itp), active,
					    ESRCH, cpid);
				}
				return;
			}
			spdsock_flip_node(itp, NULL, NULL);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    ITP_NAME(itp), active, 0, cpid);
			}
			ITP_REFRELE(itp, ns);
		}
	} else {
		ipsec_swap_global_policy(ns);	/* can't fail */
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLIP, cr,
			    ns, NULL, active, 0, cpid);
		}
	}
	spd_echo(q, mp);
}

/*
 * Unimplemented feature
 */
/* ARGSUSED */
static void
spdsock_lookup(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	spdsock_error(q, mp, EINVAL, 0);
}


static mblk_t *
spdsock_dump_ruleset(mblk_t *req, ipsec_policy_head_t *iph,
    uint32_t count, uint16_t error)
{
	size_t len = sizeof (spd_ruleset_ext_t) + sizeof (spd_msg_t);
	spd_msg_t *msg;
	spd_ruleset_ext_t *ruleset;
	mblk_t *m = allocb(len, BPRI_HI);

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	if (m == NULL) {
		return (NULL);
	}
	msg = (spd_msg_t *)m->b_rptr;
	ruleset = (spd_ruleset_ext_t *)(&msg[1]);

	m->b_wptr = (uint8_t *)&ruleset[1];

	*msg = *(spd_msg_t *)(req->b_rptr);
	msg->spd_msg_len = SPD_8TO64(len);
	msg->spd_msg_errno = error;

	ruleset->spd_ruleset_len = SPD_8TO64(sizeof (*ruleset));
	ruleset->spd_ruleset_type = SPD_EXT_RULESET;
	ruleset->spd_ruleset_count = count;
	ruleset->spd_ruleset_version = iph->iph_gen;
	return (m);
}

static mblk_t *
spdsock_dump_finish(spdsock_t *ss, int error)
{
	mblk_t *m;
	ipsec_policy_head_t *iph = ss->spdsock_dump_head;
	mblk_t *req = ss->spdsock_dump_req;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	rw_enter(&iph->iph_lock, RW_READER);
	m = spdsock_dump_ruleset(req, iph, ss->spdsock_dump_count, error);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ns);
	if (ss->spdsock_itp != NULL) {
		ITP_REFRELE(ss->spdsock_itp, ns);
		ss->spdsock_itp = NULL;
	}
	ss->spdsock_dump_req = NULL;
	freemsg(req);

	return (m);
}

/*
 * Rule encoding functions.
 * We do a two-pass encode.
 * If base != NULL, fill in encoded rule part starting at base+offset.
 * Always return "offset" plus length of to-be-encoded data.
 */
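
/*
 * Illustrative only -- the two-pass pattern as used by spdsock_encode_rule()
 * further below:
 *
 *	len = spdsock_encode_rule_head(NULL, 0, mreq, rule, ...);	sizing
 *	m = allocb(len, BPRI_HI);
 *	(void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule, ...);	fill
 *
 * The first (sizing) pass writes nothing; the second pass fills in the
 * freshly allocated mblk.
 */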
static uint_t
spdsock_encode_typecode(uint8_t *base, uint_t offset, uint8_t type,
    uint8_t type_end, uint8_t code, uint8_t code_end)
{
	struct spd_typecode *tcp;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		tcp = (struct spd_typecode *)(base + offset);
		tcp->spd_typecode_len = SPD_8TO64(sizeof (*tcp));
		tcp->spd_typecode_exttype = SPD_EXT_ICMP_TYPECODE;
		tcp->spd_typecode_code = code;
		tcp->spd_typecode_type = type;
		tcp->spd_typecode_type_end = type_end;
		tcp->spd_typecode_code_end = code_end;
	}
	offset += sizeof (*tcp);

	ASSERT(ALIGNED64(offset));

	return (offset);
}

static uint_t
spdsock_encode_proto(uint8_t *base, uint_t offset, uint8_t proto)
{
	struct spd_proto *spp;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spp = (struct spd_proto *)(base + offset);
		spp->spd_proto_len = SPD_8TO64(sizeof (*spp));
		spp->spd_proto_exttype = SPD_EXT_PROTO;
		spp->spd_proto_number = proto;
		spp->spd_proto_reserved1 = 0;
		spp->spd_proto_reserved2 = 0;
	}
	offset += sizeof (*spp);

	ASSERT(ALIGNED64(offset));

	return (offset);
}

static uint_t
spdsock_encode_port(uint8_t *base, uint_t offset, uint16_t ext, uint16_t port)
{
	struct spd_portrange *spp;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spp = (struct spd_portrange *)(base + offset);
		spp->spd_ports_len = SPD_8TO64(sizeof (*spp));
		spp->spd_ports_exttype = ext;
		spp->spd_ports_minport = port;
		spp->spd_ports_maxport = port;
	}
	offset += sizeof (*spp);

	ASSERT(ALIGNED64(offset));

	return (offset);
}

static uint_t
spdsock_encode_addr(uint8_t *base, uint_t offset, uint16_t ext,
    const ipsec_selkey_t *sel, const ipsec_addr_t *addr, uint_t pfxlen)
{
	struct spd_address *sae;
	ipsec_addr_t *spdaddr;
	uint_t start = offset;
	uint_t addrlen;
	uint_t af;

	if (sel->ipsl_valid & IPSL_IPV4) {
		af = AF_INET;
		addrlen = IP_ADDR_LEN;
	} else {
		af = AF_INET6;
		addrlen = IPV6_ADDR_LEN;
	}

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		sae = (struct spd_address *)(base + offset);
		sae->spd_address_exttype = ext;
		sae->spd_address_af = af;
		sae->spd_address_prefixlen = pfxlen;
		sae->spd_address_reserved2 = 0;

		spdaddr = (ipsec_addr_t *)(&sae[1]);
		bcopy(addr, spdaddr, addrlen);
	}
	offset += sizeof (*sae);
	addrlen = roundup(addrlen, sizeof (uint64_t));
	offset += addrlen;

	ASSERT(ALIGNED64(offset));

	if (base != NULL)
		sae->spd_address_len = SPD_8TO64(offset - start);
	return (offset);
}

static uint_t
spdsock_encode_sel(uint8_t *base, uint_t offset, const ipsec_sel_t *sel)
{
	const ipsec_selkey_t *selkey = &sel->ipsl_key;

	if (selkey->ipsl_valid & IPSL_PROTOCOL)
		offset = spdsock_encode_proto(base, offset, selkey->ipsl_proto);
	if (selkey->ipsl_valid & IPSL_LOCAL_PORT)
		offset = spdsock_encode_port(base, offset, SPD_EXT_LCLPORT,
		    selkey->ipsl_lport);
	if (selkey->ipsl_valid & IPSL_REMOTE_PORT)
		offset = spdsock_encode_port(base, offset, SPD_EXT_REMPORT,
		    selkey->ipsl_rport);
	if (selkey->ipsl_valid & IPSL_REMOTE_ADDR)
		offset = spdsock_encode_addr(base, offset, SPD_EXT_REMADDR,
		    selkey, &selkey->ipsl_remote, selkey->ipsl_remote_pfxlen);
	if (selkey->ipsl_valid & IPSL_LOCAL_ADDR)
		offset = spdsock_encode_addr(base, offset, SPD_EXT_LCLADDR,
		    selkey, &selkey->ipsl_local, selkey->ipsl_local_pfxlen);
	if (selkey->ipsl_valid & IPSL_ICMP_TYPE) {
		offset = spdsock_encode_typecode(base, offset,
		    selkey->ipsl_icmp_type, selkey->ipsl_icmp_type_end,
		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
		    selkey->ipsl_icmp_code : 255,
		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
		    selkey->ipsl_icmp_code_end : 255);
	}
	return (offset);
}

static uint_t
spdsock_encode_actattr(uint8_t *base, uint_t offset, uint32_t tag,
    uint32_t value)
{
	struct spd_attribute *attr;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		attr = (struct spd_attribute *)(base + offset);
		attr->spd_attr_tag = tag;
		attr->spd_attr_value = value;
	}
	offset += sizeof (struct spd_attribute);

	ASSERT(ALIGNED64(offset));

	return (offset);
}


#define	EMIT(t, v) offset = spdsock_encode_actattr(base, offset, (t), (v))

static uint_t
spdsock_encode_action(uint8_t *base, uint_t offset, const ipsec_action_t *ap)
{
	const struct ipsec_act *act = &(ap->ipa_act);
	uint_t flags;

	EMIT(SPD_ATTR_EMPTY, 0);
	switch (act->ipa_type) {
	case IPSEC_ACT_DISCARD:
	case IPSEC_ACT_REJECT:
		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_DROP);
		break;
	case IPSEC_ACT_BYPASS:
	case IPSEC_ACT_CLEAR:
		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_PASS);
		break;

	case IPSEC_ACT_APPLY:
		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_IPSEC);
		flags = 0;
		if (act->ipa_apply.ipp_use_ah)
			flags |= SPD_APPLY_AH;
		if (act->ipa_apply.ipp_use_esp)
			flags |= SPD_APPLY_ESP;
		if (act->ipa_apply.ipp_use_espa)
			flags |= SPD_APPLY_ESPA;
		if (act->ipa_apply.ipp_use_se)
			flags |= SPD_APPLY_SE;
		if (act->ipa_apply.ipp_use_unique)
			flags |= SPD_APPLY_UNIQUE;
		EMIT(SPD_ATTR_FLAGS, flags);
		if (flags & SPD_APPLY_AH) {
			EMIT(SPD_ATTR_AH_AUTH, act->ipa_apply.ipp_auth_alg);
			EMIT(SPD_ATTR_AH_MINBITS,
			    act->ipa_apply.ipp_ah_minbits);
			EMIT(SPD_ATTR_AH_MAXBITS,
			    act->ipa_apply.ipp_ah_maxbits);
		}
		if (flags & SPD_APPLY_ESP) {
			EMIT(SPD_ATTR_ESP_ENCR, act->ipa_apply.ipp_encr_alg);
			EMIT(SPD_ATTR_ENCR_MINBITS,
			    act->ipa_apply.ipp_espe_minbits);
			EMIT(SPD_ATTR_ENCR_MAXBITS,
			    act->ipa_apply.ipp_espe_maxbits);
			if (flags & SPD_APPLY_ESPA) {
				EMIT(SPD_ATTR_ESP_AUTH,
				    act->ipa_apply.ipp_esp_auth_alg);
				EMIT(SPD_ATTR_ESPA_MINBITS,
				    act->ipa_apply.ipp_espa_minbits);
				EMIT(SPD_ATTR_ESPA_MAXBITS,
				    act->ipa_apply.ipp_espa_maxbits);
			}
		}
		if (act->ipa_apply.ipp_km_proto != 0)
			EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_proto);
		if (act->ipa_apply.ipp_km_cookie != 0)
			EMIT(SPD_ATTR_KM_COOKIE, act->ipa_apply.ipp_km_cookie);
		if (act->ipa_apply.ipp_replay_depth != 0)
			EMIT(SPD_ATTR_REPLAY_DEPTH,
			    act->ipa_apply.ipp_replay_depth);
		/* Add more here */
		break;
	}

	return (offset);
}

static uint_t
spdsock_encode_action_list(uint8_t *base, uint_t offset,
    const ipsec_action_t *ap)
{
	struct spd_ext_actions *act;
	uint_t nact = 0;
	uint_t start = offset;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		act = (struct spd_ext_actions *)(base + offset);
		act->spd_actions_len = 0;
		act->spd_actions_exttype = SPD_EXT_ACTION;
		act->spd_actions_count = 0;
		act->spd_actions_reserved = 0;
	}

	offset += sizeof (*act);

	ASSERT(ALIGNED64(offset));

	while (ap != NULL) {
		offset = spdsock_encode_action(base, offset, ap);
		ap = ap->ipa_next;
		nact++;
		if (ap != NULL) {
			EMIT(SPD_ATTR_NEXT, 0);
		}
	}
	EMIT(SPD_ATTR_END, 0);

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		act->spd_actions_count = nact;
		act->spd_actions_len = SPD_8TO64(offset - start);
	}

	return (offset);
}

#undef EMIT

/* ARGSUSED */
static uint_t
spdsock_rule_flags(uint_t dir, uint_t af)
{
	uint_t flags = 0;

	if (dir == IPSEC_TYPE_INBOUND)
		flags |= SPD_RULE_FLAG_INBOUND;
	if (dir == IPSEC_TYPE_OUTBOUND)
		flags |= SPD_RULE_FLAG_OUTBOUND;

	return (flags);
}


static uint_t
spdsock_encode_rule_head(uint8_t *base, uint_t offset, spd_msg_t *req,
    const ipsec_policy_t *rule, uint_t dir, uint_t af, char *name,
    boolean_t tunnel)
{
	struct spd_msg *spmsg;
	struct spd_rule *spr;
	spd_if_t *sid;

	uint_t start = offset;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spmsg = (struct spd_msg *)(base + offset);
		bzero(spmsg, sizeof (*spmsg));
		spmsg->spd_msg_version = PF_POLICY_V1;
		spmsg->spd_msg_type = SPD_DUMP;
		spmsg->spd_msg_seq = req->spd_msg_seq;
		spmsg->spd_msg_pid = req->spd_msg_pid;
	}
	offset += sizeof (struct spd_msg);

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spr = (struct spd_rule *)(base + offset);
		spr->spd_rule_type = SPD_EXT_RULE;
		spr->spd_rule_priority = rule->ipsp_prio;
		spr->spd_rule_flags = spdsock_rule_flags(dir, af);
		if (tunnel)
			spr->spd_rule_flags |= SPD_RULE_FLAG_TUNNEL;
		spr->spd_rule_unused = 0;
		spr->spd_rule_len = SPD_8TO64(sizeof (*spr));
		spr->spd_rule_index = rule->ipsp_index;
	}
	offset += sizeof (struct spd_rule);

	/*
	 * If we have an interface name (i.e. if this policy head came from
	 * a tunnel), add the SPD_EXT_TUN_NAME extension.
	 */
	if (name != NULL) {

		ASSERT(ALIGNED64(offset));

		if (base != NULL) {
			sid = (spd_if_t *)(base + offset);
			sid->spd_if_exttype = SPD_EXT_TUN_NAME;
			sid->spd_if_len = SPD_8TO64(sizeof (spd_if_t) +
			    roundup((strlen(name) - 4), 8));
			(void) strlcpy((char *)sid->spd_if_name, name,
			    LIFNAMSIZ);
		}

		offset += sizeof (spd_if_t) + roundup((strlen(name) - 4), 8);
	}

	offset = spdsock_encode_sel(base, offset, rule->ipsp_sel);
	offset = spdsock_encode_action_list(base, offset, rule->ipsp_act);

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spmsg->spd_msg_len = SPD_8TO64(offset - start);
	}
	return (offset);
}

/* ARGSUSED */
static mblk_t *
spdsock_encode_rule(mblk_t *req, const ipsec_policy_t *rule,
    uint_t dir, uint_t af, char *name, boolean_t tunnel)
{
	mblk_t *m;
	uint_t len;
	spd_msg_t *mreq = (spd_msg_t *)req->b_rptr;

	/*
	 * Figure out how much space we'll need.
	 */
	len = spdsock_encode_rule_head(NULL, 0, mreq, rule, dir, af, name,
	    tunnel);

	/*
	 * Allocate mblk.
	 */
	m = allocb(len, BPRI_HI);
	if (m == NULL)
		return (NULL);

	/*
	 * Fill it in..
	 */
	m->b_wptr = m->b_rptr + len;
	bzero(m->b_rptr, len);
	(void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule, dir, af,
	    name, tunnel);
	return (m);
}

static ipsec_policy_t *
spdsock_dump_next_in_chain(spdsock_t *ss, ipsec_policy_head_t *iph,
    ipsec_policy_t *cur)
{
	ASSERT(RW_READ_HELD(&iph->iph_lock));

	ss->spdsock_dump_count++;
	ss->spdsock_dump_cur_rule = cur->ipsp_hash.hash_next;
	return (cur);
}

static ipsec_policy_t *
spdsock_dump_next_rule(spdsock_t *ss, ipsec_policy_head_t *iph)
{
	ipsec_policy_t *cur;
	ipsec_policy_root_t *ipr;
	int chain, nchains, type, af;

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	cur = ss->spdsock_dump_cur_rule;

	if (cur != NULL)
		return (spdsock_dump_next_in_chain(ss, iph, cur));

	type = ss->spdsock_dump_cur_type;

next:
	chain = ss->spdsock_dump_cur_chain;
	ipr = &iph->iph_root[type];
	nchains = ipr->ipr_nchains;

	while (chain < nchains) {
		cur = ipr->ipr_hash[chain].hash_head;
		chain++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_chain = chain;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}
	ss->spdsock_dump_cur_chain = nchains;

	af = ss->spdsock_dump_cur_af;
	while (af < IPSEC_NAF) {
		cur = ipr->ipr_nonhash[af];
		af++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_af = af;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}

	type++;
	if (type >= IPSEC_NTYPES)
		return (NULL);

	ss->spdsock_dump_cur_chain = 0;
	ss->spdsock_dump_cur_type = type;
	ss->spdsock_dump_cur_af = IPSEC_AF_V4;
	goto next;

}

/*
 * If we're done with one policy head, but have more to go, we iterate through
 * another IPsec tunnel policy head (itp).  Return NULL if it is an error
 * worthy of returning EAGAIN via PF_POLICY.
 */
static ipsec_tun_pol_t *
spdsock_dump_iterate_next_tunnel(spdsock_t *ss, ipsec_stack_t *ipss)
{
	ipsec_tun_pol_t *itp;

	ASSERT(RW_READ_HELD(&ipss->ipsec_tunnel_policy_lock));
	if (ipss->ipsec_tunnel_policy_gen > ss->spdsock_dump_tun_gen) {
		/* Oops, state of the tunnel polheads changed. */
		itp = NULL;
	} else if (ss->spdsock_itp == NULL) {
		/* Just finished global, find first node. */
		itp = avl_first(&ipss->ipsec_tunnel_policies);
	} else {
		/* We just finished current polhead, find the next one. */
		itp = AVL_NEXT(&ipss->ipsec_tunnel_policies, ss->spdsock_itp);
	}
	if (itp != NULL) {
		ITP_REFHOLD(itp);
	}
	if (ss->spdsock_itp != NULL) {
		ITP_REFRELE(ss->spdsock_itp, ipss->ipsec_netstack);
	}
	ss->spdsock_itp = itp;
	return (itp);
}

static mblk_t *
spdsock_dump_next_record(spdsock_t *ss)
{
	ipsec_policy_head_t *iph;
	ipsec_policy_t *rule;
	mblk_t *m;
	ipsec_tun_pol_t *itp;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	iph = ss->spdsock_dump_head;

	ASSERT(iph != NULL);

	rw_enter(&iph->iph_lock, RW_READER);

	if (iph->iph_gen != ss->spdsock_dump_gen) {
		rw_exit(&iph->iph_lock);
		return (spdsock_dump_finish(ss, EAGAIN));
	}

	while ((rule = spdsock_dump_next_rule(ss, iph)) == NULL) {
		rw_exit(&iph->iph_lock);
		if (--(ss->spdsock_dump_remaining_polheads) == 0)
			return (spdsock_dump_finish(ss, 0));


		/*
		 * If we reach here, we have more policy heads (tunnel
		 * entries) to dump.  Let's reset to a new policy head
		 * and get some more rules.
		 *
		 * An empty policy head will have spdsock_dump_next_rule()
		 * return NULL, and we loop (while dropping the number of
		 * remaining polheads).  If we loop to 0, we finish.  We
		 * keep looping until we hit 0 or until we have a rule to
		 * encode.
		 *
		 * NOTE:  No need for ITP_REF*() macros here as we're only
		 * going after and refholding the policy head itself.
		 */
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		itp = spdsock_dump_iterate_next_tunnel(ss, ipss);
		if (itp == NULL) {
			rw_exit(&ipss->ipsec_tunnel_policy_lock);
			return (spdsock_dump_finish(ss, EAGAIN));
		}

		/* Reset other spdsock_dump thingies. */
		IPPH_REFRELE(ss->spdsock_dump_head, ns);
		if (ss->spdsock_dump_active) {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_P_TUNNEL;
			iph = itp->itp_policy;
		} else {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_I_TUNNEL;
			iph = itp->itp_inactive;
		}
		IPPH_REFHOLD(iph);
		rw_exit(&ipss->ipsec_tunnel_policy_lock);

		rw_enter(&iph->iph_lock, RW_READER);
		RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);
	}

	m = spdsock_encode_rule(ss->spdsock_dump_req, rule,
	    ss->spdsock_dump_cur_type, ss->spdsock_dump_cur_af,
	    (ss->spdsock_itp == NULL) ? NULL : ss->spdsock_itp->itp_name,
	    ss->spdsock_dump_tunnel);
	rw_exit(&iph->iph_lock);

	if (m == NULL)
		return (spdsock_dump_finish(ss, ENOMEM));
	return (m);
}

/*
 * Dump records until we run into flow-control back-pressure.
 */
static void
spdsock_dump_some(queue_t *q, spdsock_t *ss)
{
	mblk_t *m, *dataind;

	while ((ss->spdsock_dump_req != NULL) && canputnext(q)) {
		m = spdsock_dump_next_record(ss);
		if (m == NULL)
			return;
		dataind = allocb(sizeof (struct T_data_req), BPRI_HI);
		if (dataind == NULL) {
			freemsg(m);
			return;
		}
		dataind->b_cont = m;
		dataind->b_wptr += sizeof (struct T_data_req);
		((struct T_data_ind *)dataind->b_rptr)->PRIM_type = T_DATA_IND;
		((struct T_data_ind *)dataind->b_rptr)->MORE_flag = 0;
		dataind->b_datap->db_type = M_PROTO;
		putnext(q, dataind);
	}
}

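/*
 * Each record above is wrapped in a T_DATA_IND and pushed upstream; when
 * canputnext() fails we simply stop, and the read-side service routine
 * (kicked via qenable() in spdsock_dump() below) resumes the dump once the
 * stream drains.
 */
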
2034 * Format a start-of-dump record, set up the stream, and kick the rsrv 2035 * procedure to continue the job. 2036 */ 2037 /* ARGSUSED */ 2038 static void 2039 spdsock_dump(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp) 2040 { 2041 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2042 netstack_t *ns = ss->spdsock_spds->spds_netstack; 2043 ipsec_stack_t *ipss = ns->netstack_ipsec; 2044 mblk_t *mr; 2045 2046 /* spdsock_open() already set spdsock_itp to NULL. */ 2047 if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) { 2048 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER); 2049 ss->spdsock_dump_remaining_polheads = 1 + 2050 avl_numnodes(&ipss->ipsec_tunnel_policies); 2051 ss->spdsock_dump_tun_gen = ipss->ipsec_tunnel_policy_gen; 2052 rw_exit(&ipss->ipsec_tunnel_policy_lock); 2053 if (iph == ALL_ACTIVE_POLHEADS) { 2054 iph = ipsec_system_policy(ns); 2055 ss->spdsock_dump_active = B_TRUE; 2056 } else { 2057 iph = ipsec_inactive_policy(ns); 2058 ss->spdsock_dump_active = B_FALSE; 2059 } 2060 ASSERT(ss->spdsock_itp == NULL); 2061 } else { 2062 ss->spdsock_dump_remaining_polheads = 1; 2063 } 2064 2065 rw_enter(&iph->iph_lock, RW_READER); 2066 2067 mr = spdsock_dump_ruleset(mp, iph, 0, 0); 2068 2069 if (!mr) { 2070 rw_exit(&iph->iph_lock); 2071 spdsock_error(q, mp, ENOMEM, 0); 2072 return; 2073 } 2074 2075 ss->spdsock_dump_req = mp; 2076 RESET_SPDSOCK_DUMP_POLHEAD(ss, iph); 2077 2078 rw_exit(&iph->iph_lock); 2079 2080 qreply(q, mr); 2081 qenable(OTHERQ(q)); 2082 } 2083 2084 /* Do NOT consume a reference to ITP. */ 2085 void 2086 spdsock_clone_node(ipsec_tun_pol_t *itp, void *ep, netstack_t *ns) 2087 { 2088 int *errptr = (int *)ep; 2089 2090 if (*errptr != 0) 2091 return; /* We've failed already for some reason. */ 2092 mutex_enter(&itp->itp_lock); 2093 ITPF_CLONE(itp->itp_flags); 2094 *errptr = ipsec_copy_polhead(itp->itp_policy, itp->itp_inactive, ns); 2095 mutex_exit(&itp->itp_lock); 2096 } 2097 2098 void 2099 spdsock_clone(queue_t *q, mblk_t *mp, spd_if_t *tunname) 2100 { 2101 int error; 2102 char *tname; 2103 ipsec_tun_pol_t *itp; 2104 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2105 netstack_t *ns = ss->spdsock_spds->spds_netstack; 2106 2107 if (tunname != NULL) { 2108 tname = (char *)tunname->spd_if_name; 2109 if (*tname == '\0') { 2110 error = ipsec_clone_system_policy(ns); 2111 if (audit_active) { 2112 boolean_t active; 2113 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 2114 cred_t *cr; 2115 pid_t cpid; 2116 2117 cr = msg_getcred(mp, &cpid); 2118 active = (spmsg->spd_msg_spdid == SPD_ACTIVE); 2119 audit_pf_policy(SPD_CLONE, cr, ns, 2120 NULL, active, error, cpid); 2121 } 2122 if (error == 0) { 2123 itp_walk(spdsock_clone_node, &error, ns); 2124 if (audit_active) { 2125 boolean_t active; 2126 spd_msg_t *spmsg = 2127 (spd_msg_t *)mp->b_rptr; 2128 cred_t *cr; 2129 pid_t cpid; 2130 2131 cr = msg_getcred(mp, &cpid); 2132 active = (spmsg->spd_msg_spdid == 2133 SPD_ACTIVE); 2134 audit_pf_policy(SPD_CLONE, cr, 2135 ns, "all tunnels", active, 0, 2136 cpid); 2137 } 2138 } 2139 } else { 2140 itp = get_tunnel_policy(tname, ns); 2141 if (itp == NULL) { 2142 spdsock_error(q, mp, ENOENT, 0); 2143 if (audit_active) { 2144 boolean_t active; 2145 spd_msg_t *spmsg = 2146 (spd_msg_t *)mp->b_rptr; 2147 cred_t *cr; 2148 pid_t cpid; 2149 2150 cr = msg_getcred(mp, &cpid); 2151 active = (spmsg->spd_msg_spdid == 2152 SPD_ACTIVE); 2153 audit_pf_policy(SPD_CLONE, cr, 2154 ns, NULL, active, ENOENT, cpid); 2155 } 2156 return; 2157 } 2158 spdsock_clone_node(itp, &error, NULL); 2159 if (audit_active) { 2160 
boolean_t active; 2161 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 2162 cred_t *cr; 2163 pid_t cpid; 2164 2165 cr = msg_getcred(mp, &cpid); 2166 active = (spmsg->spd_msg_spdid == SPD_ACTIVE); 2167 audit_pf_policy(SPD_CLONE, cr, ns, 2168 ITP_NAME(itp), active, error, cpid); 2169 } 2170 ITP_REFRELE(itp, ns); 2171 } 2172 } else { 2173 error = ipsec_clone_system_policy(ns); 2174 if (audit_active) { 2175 boolean_t active; 2176 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 2177 cred_t *cr; 2178 pid_t cpid; 2179 2180 cr = msg_getcred(mp, &cpid); 2181 active = (spmsg->spd_msg_spdid == SPD_ACTIVE); 2182 audit_pf_policy(SPD_CLONE, cr, ns, NULL, 2183 active, error, cpid); 2184 } 2185 } 2186 2187 if (error != 0) 2188 spdsock_error(q, mp, error, 0); 2189 else 2190 spd_echo(q, mp); 2191 } 2192 2193 /* 2194 * Process a SPD_ALGLIST request. The caller expects separate alg entries 2195 * for AH authentication, ESP authentication, and ESP encryption. 2196 * The same distinction is then used when setting the min and max key 2197 * sizes when defining policies. 2198 */ 2199 2200 #define SPDSOCK_AH_AUTH 0 2201 #define SPDSOCK_ESP_AUTH 1 2202 #define SPDSOCK_ESP_ENCR 2 2203 #define SPDSOCK_NTYPES 3 2204 2205 static const uint_t algattr[SPDSOCK_NTYPES] = { 2206 SPD_ATTR_AH_AUTH, 2207 SPD_ATTR_ESP_AUTH, 2208 SPD_ATTR_ESP_ENCR 2209 }; 2210 static const uint_t minbitsattr[SPDSOCK_NTYPES] = { 2211 SPD_ATTR_AH_MINBITS, 2212 SPD_ATTR_ESPA_MINBITS, 2213 SPD_ATTR_ENCR_MINBITS 2214 }; 2215 static const uint_t maxbitsattr[SPDSOCK_NTYPES] = { 2216 SPD_ATTR_AH_MAXBITS, 2217 SPD_ATTR_ESPA_MAXBITS, 2218 SPD_ATTR_ENCR_MAXBITS 2219 }; 2220 static const uint_t defbitsattr[SPDSOCK_NTYPES] = { 2221 SPD_ATTR_AH_DEFBITS, 2222 SPD_ATTR_ESPA_DEFBITS, 2223 SPD_ATTR_ENCR_DEFBITS 2224 }; 2225 static const uint_t incrbitsattr[SPDSOCK_NTYPES] = { 2226 SPD_ATTR_AH_INCRBITS, 2227 SPD_ATTR_ESPA_INCRBITS, 2228 SPD_ATTR_ENCR_INCRBITS 2229 }; 2230 2231 #define ATTRPERALG 6 /* fixed attributes per algs */ 2232 2233 void 2234 spdsock_alglist(queue_t *q, mblk_t *mp) 2235 { 2236 uint_t algtype; 2237 uint_t algidx; 2238 uint_t algcount; 2239 uint_t size; 2240 mblk_t *m; 2241 uint8_t *cur; 2242 spd_msg_t *msg; 2243 struct spd_ext_actions *act; 2244 struct spd_attribute *attr; 2245 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2246 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec; 2247 2248 mutex_enter(&ipss->ipsec_alg_lock); 2249 /* 2250 * The SPD client expects to receive separate entries for 2251 * AH authentication and ESP authentication supported algorithms. 2252 * 2253 * Don't return the "any" algorithms, if defined, as no 2254 * kernel policies can be set for these algorithms. 
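 *
 * For example, with two authentication algorithms and three encryption
 * algorithms registered (hypothetical counts, and no SADB_AALG_NONE or
 * SADB_EALG_NONE placeholders present), the reply carries
 * 2 * 2 + 3 = 7 algorithm entries of ATTRPERALG attributes each.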
2255 */ 2256 algcount = 2 * ipss->ipsec_nalgs[IPSEC_ALG_AUTH] + 2257 ipss->ipsec_nalgs[IPSEC_ALG_ENCR]; 2258 2259 if (ipss->ipsec_alglists[IPSEC_ALG_AUTH][SADB_AALG_NONE] != NULL) 2260 algcount--; 2261 if (ipss->ipsec_alglists[IPSEC_ALG_ENCR][SADB_EALG_NONE] != NULL) 2262 algcount--; 2263 2264 /* 2265 * For each algorithm, we encode: 2266 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT} 2267 */ 2268 2269 size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions) + 2270 ATTRPERALG * sizeof (struct spd_attribute) * algcount; 2271 2272 ASSERT(ALIGNED64(size)); 2273 2274 m = allocb(size, BPRI_HI); 2275 if (m == NULL) { 2276 mutex_exit(&ipss->ipsec_alg_lock); 2277 spdsock_error(q, mp, ENOMEM, 0); 2278 return; 2279 } 2280 2281 m->b_wptr = m->b_rptr + size; 2282 cur = m->b_rptr; 2283 2284 msg = (spd_msg_t *)cur; 2285 bcopy(mp->b_rptr, cur, sizeof (*msg)); 2286 2287 msg->spd_msg_len = SPD_8TO64(size); 2288 msg->spd_msg_errno = 0; 2289 msg->spd_msg_diagnostic = 0; 2290 2291 cur += sizeof (*msg); 2292 2293 act = (struct spd_ext_actions *)cur; 2294 cur += sizeof (*act); 2295 2296 act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t)); 2297 act->spd_actions_exttype = SPD_EXT_ACTION; 2298 act->spd_actions_count = algcount; 2299 act->spd_actions_reserved = 0; 2300 2301 attr = (struct spd_attribute *)cur; 2302 2303 #define EMIT(tag, value) { \ 2304 attr->spd_attr_tag = (tag); \ 2305 attr->spd_attr_value = (value); \ 2306 attr++; \ 2307 } 2308 2309 /* 2310 * If you change the number of EMIT's here, change 2311 * ATTRPERALG above to match 2312 */ 2313 #define EMITALGATTRS(_type) { \ 2314 EMIT(algattr[_type], algid); /* 1 */ \ 2315 EMIT(minbitsattr[_type], minbits); /* 2 */ \ 2316 EMIT(maxbitsattr[_type], maxbits); /* 3 */ \ 2317 EMIT(defbitsattr[_type], defbits); /* 4 */ \ 2318 EMIT(incrbitsattr[_type], incr); /* 5 */ \ 2319 EMIT(SPD_ATTR_NEXT, 0); /* 6 */ \ 2320 } 2321 2322 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 2323 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype]; 2324 algidx++) { 2325 int algid = ipss->ipsec_sortlist[algtype][algidx]; 2326 ipsec_alginfo_t *alg = 2327 ipss->ipsec_alglists[algtype][algid]; 2328 uint_t minbits = alg->alg_minbits; 2329 uint_t maxbits = alg->alg_maxbits; 2330 uint_t defbits = alg->alg_default_bits; 2331 uint_t incr = alg->alg_increment; 2332 2333 if (algtype == IPSEC_ALG_AUTH) { 2334 if (algid == SADB_AALG_NONE) 2335 continue; 2336 EMITALGATTRS(SPDSOCK_AH_AUTH); 2337 EMITALGATTRS(SPDSOCK_ESP_AUTH); 2338 } else { 2339 if (algid == SADB_EALG_NONE) 2340 continue; 2341 ASSERT(algtype == IPSEC_ALG_ENCR); 2342 EMITALGATTRS(SPDSOCK_ESP_ENCR); 2343 } 2344 } 2345 } 2346 2347 mutex_exit(&ipss->ipsec_alg_lock); 2348 2349 #undef EMITALGATTRS 2350 #undef EMIT 2351 #undef ATTRPERALG 2352 2353 attr--; 2354 attr->spd_attr_tag = SPD_ATTR_END; 2355 2356 freemsg(mp); 2357 qreply(q, m); 2358 } 2359 2360 /* 2361 * Process a SPD_DUMPALGS request. 
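 *
 * Each returned algorithm is a run of attributes; for an AES-CBC-style
 * cipher the run might look like (illustrative values only):
 *
 *	ALG_ID / ALG_PROTO(PROTO_IPSEC_ESP) / ALG_INCRBITS(64) /
 *	ALG_NKEYSIZES(3) / ALG_KEYSIZE(128) / ALG_KEYSIZE(192) /
 *	ALG_KEYSIZE(256) / ALG_NBLOCKSIZES(1) / ALG_BLOCKSIZE(16) /
 *	ALG_MECHNAME("CKM_AES_CBC") / NEXT
 *
 * with the final NEXT of the whole message overwritten by END.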
2362 */ 2363 2364 #define ATTRPERALG 7 /* fixed attributes per algs */ 2365 2366 void 2367 spdsock_dumpalgs(queue_t *q, mblk_t *mp) 2368 { 2369 uint_t algtype; 2370 uint_t algidx; 2371 uint_t size; 2372 mblk_t *m; 2373 uint8_t *cur; 2374 spd_msg_t *msg; 2375 struct spd_ext_actions *act; 2376 struct spd_attribute *attr; 2377 ipsec_alginfo_t *alg; 2378 uint_t algid; 2379 uint_t i; 2380 uint_t alg_size; 2381 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2382 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec; 2383 2384 mutex_enter(&ipss->ipsec_alg_lock); 2385 2386 /* 2387 * For each algorithm, we encode: 2388 * 2389 * ALG_ID / ALG_PROTO / ALG_INCRBITS / ALG_NKEYSIZES / ALG_KEYSIZE* 2390 * ALG_NBLOCKSIZES / ALG_BLOCKSIZE* / ALG_MECHNAME / {END, NEXT} 2391 * 2392 */ 2393 2394 /* 2395 * Compute the size of the SPD message. 2396 */ 2397 size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions); 2398 2399 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 2400 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype]; 2401 algidx++) { 2402 algid = ipss->ipsec_sortlist[algtype][algidx]; 2403 alg = ipss->ipsec_alglists[algtype][algid]; 2404 alg_size = sizeof (struct spd_attribute) * 2405 (ATTRPERALG + alg->alg_nkey_sizes + 2406 alg->alg_nblock_sizes) + CRYPTO_MAX_MECH_NAME; 2407 size += alg_size; 2408 } 2409 } 2410 2411 ASSERT(ALIGNED64(size)); 2412 2413 m = allocb(size, BPRI_HI); 2414 if (m == NULL) { 2415 mutex_exit(&ipss->ipsec_alg_lock); 2416 spdsock_error(q, mp, ENOMEM, 0); 2417 return; 2418 } 2419 2420 m->b_wptr = m->b_rptr + size; 2421 cur = m->b_rptr; 2422 2423 msg = (spd_msg_t *)cur; 2424 bcopy(mp->b_rptr, cur, sizeof (*msg)); 2425 2426 msg->spd_msg_len = SPD_8TO64(size); 2427 msg->spd_msg_errno = 0; 2428 msg->spd_msg_type = SPD_ALGLIST; 2429 2430 msg->spd_msg_diagnostic = 0; 2431 2432 cur += sizeof (*msg); 2433 2434 act = (struct spd_ext_actions *)cur; 2435 cur += sizeof (*act); 2436 2437 act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t)); 2438 act->spd_actions_exttype = SPD_EXT_ACTION; 2439 act->spd_actions_count = ipss->ipsec_nalgs[IPSEC_ALG_AUTH] + 2440 ipss->ipsec_nalgs[IPSEC_ALG_ENCR]; 2441 act->spd_actions_reserved = 0; 2442 2443 /* 2444 * If there aren't any algorithms registered, return an empty message. 2445 * spdsock_get_ext() knows how to deal with this. 
2446 */ 2447 if (act->spd_actions_count == 0) { 2448 act->spd_actions_len = 0; 2449 mutex_exit(&ipss->ipsec_alg_lock); 2450 goto error; 2451 } 2452 2453 attr = (struct spd_attribute *)cur; 2454 2455 #define EMIT(tag, value) { \ 2456 attr->spd_attr_tag = (tag); \ 2457 attr->spd_attr_value = (value); \ 2458 attr++; \ 2459 } 2460 2461 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 2462 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype]; 2463 algidx++) { 2464 2465 algid = ipss->ipsec_sortlist[algtype][algidx]; 2466 alg = ipss->ipsec_alglists[algtype][algid]; 2467 2468 /* 2469 * If you change the number of EMIT's here, change 2470 * ATTRPERALG above to match 2471 */ 2472 EMIT(SPD_ATTR_ALG_ID, algid); 2473 EMIT(SPD_ATTR_ALG_PROTO, algproto[algtype]); 2474 EMIT(SPD_ATTR_ALG_INCRBITS, alg->alg_increment); 2475 2476 EMIT(SPD_ATTR_ALG_NKEYSIZES, alg->alg_nkey_sizes); 2477 for (i = 0; i < alg->alg_nkey_sizes; i++) 2478 EMIT(SPD_ATTR_ALG_KEYSIZE, 2479 alg->alg_key_sizes[i]); 2480 2481 EMIT(SPD_ATTR_ALG_NBLOCKSIZES, alg->alg_nblock_sizes); 2482 for (i = 0; i < alg->alg_nblock_sizes; i++) 2483 EMIT(SPD_ATTR_ALG_BLOCKSIZE, 2484 alg->alg_block_sizes[i]); 2485 2486 EMIT(SPD_ATTR_ALG_MECHNAME, CRYPTO_MAX_MECH_NAME); 2487 bcopy(alg->alg_mech_name, attr, CRYPTO_MAX_MECH_NAME); 2488 attr = (struct spd_attribute *)((char *)attr + 2489 CRYPTO_MAX_MECH_NAME); 2490 2491 EMIT(SPD_ATTR_NEXT, 0); 2492 } 2493 } 2494 2495 mutex_exit(&ipss->ipsec_alg_lock); 2496 2497 #undef EMITALGATTRS 2498 #undef EMIT 2499 #undef ATTRPERALG 2500 2501 attr--; 2502 attr->spd_attr_tag = SPD_ATTR_END; 2503 2504 error: 2505 freemsg(mp); 2506 qreply(q, m); 2507 } 2508 2509 /* 2510 * Do the actual work of processing an SPD_UPDATEALGS request. Can 2511 * be invoked either once IPsec is loaded on a cached request, or 2512 * when a request is received while IPsec is loaded. 
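 *
 * The parser below walks an SPD_EXT_ACTION attribute stream of
 * essentially the shape spdsock_dumpalgs() emits, i.e. per algorithm
 *
 *	ALG_ID / ALG_PROTO / ALG_INCRBITS / ALG_NKEYSIZES / ALG_KEYSIZE* /
 *	ALG_NBLOCKSIZES / ALG_BLOCKSIZE* / ALG_MECHNAME / NEXT
 *
 * optionally followed by PROTO_ID / PROTO_EXEC_MODE pairs, all
 * terminated by END.  Each NEXT commits the algorithm being built into
 * spds_algs[][] (unless ALG_PROTO named an unknown protocol, in which
 * case the entry is skipped), and END ends the walk.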
2513 */ 2514 static void 2515 spdsock_do_updatealg(spd_ext_t *extv[], int *diag, spd_stack_t *spds) 2516 { 2517 struct spd_ext_actions *actp; 2518 struct spd_attribute *attr, *endattr; 2519 uint64_t *start, *end; 2520 ipsec_alginfo_t *alg = NULL; 2521 ipsec_algtype_t alg_type = 0; 2522 boolean_t skip_alg = B_TRUE, doing_proto = B_FALSE; 2523 uint_t i, cur_key, cur_block, algid; 2524 2525 *diag = -1; 2526 ASSERT(MUTEX_HELD(&spds->spds_alg_lock)); 2527 2528 /* parse the message, building the list of algorithms */ 2529 2530 actp = (struct spd_ext_actions *)extv[SPD_EXT_ACTION]; 2531 if (actp == NULL) { 2532 *diag = SPD_DIAGNOSTIC_NO_ACTION_EXT; 2533 return; 2534 } 2535 2536 start = (uint64_t *)actp; 2537 end = (start + actp->spd_actions_len); 2538 endattr = (struct spd_attribute *)end; 2539 attr = (struct spd_attribute *)&actp[1]; 2540 2541 bzero(spds->spds_algs, IPSEC_NALGTYPES * IPSEC_MAX_ALGS * 2542 sizeof (ipsec_alginfo_t *)); 2543 2544 alg = kmem_zalloc(sizeof (*alg), KM_SLEEP); 2545 2546 #define ALG_KEY_SIZES(a) (((a)->alg_nkey_sizes + 1) * sizeof (uint16_t)) 2547 #define ALG_BLOCK_SIZES(a) (((a)->alg_nblock_sizes + 1) * sizeof (uint16_t)) 2548 2549 while (attr < endattr) { 2550 switch (attr->spd_attr_tag) { 2551 case SPD_ATTR_NOP: 2552 case SPD_ATTR_EMPTY: 2553 break; 2554 case SPD_ATTR_END: 2555 attr = endattr; 2556 /* FALLTHRU */ 2557 case SPD_ATTR_NEXT: 2558 if (doing_proto) { 2559 doing_proto = B_FALSE; 2560 break; 2561 } 2562 if (skip_alg) { 2563 ipsec_alg_free(alg); 2564 } else { 2565 ipsec_alg_free( 2566 spds->spds_algs[alg_type][alg->alg_id]); 2567 spds->spds_algs[alg_type][alg->alg_id] = 2568 alg; 2569 } 2570 alg = kmem_zalloc(sizeof (*alg), KM_SLEEP); 2571 break; 2572 2573 case SPD_ATTR_ALG_ID: 2574 if (attr->spd_attr_value >= IPSEC_MAX_ALGS) { 2575 ss1dbg(spds, ("spdsock_do_updatealg: " 2576 "invalid alg id %d\n", 2577 attr->spd_attr_value)); 2578 *diag = SPD_DIAGNOSTIC_ALG_ID_RANGE; 2579 goto bail; 2580 } 2581 alg->alg_id = attr->spd_attr_value; 2582 break; 2583 2584 case SPD_ATTR_ALG_PROTO: 2585 /* find the alg type */ 2586 for (i = 0; i < NALGPROTOS; i++) 2587 if (algproto[i] == attr->spd_attr_value) 2588 break; 2589 skip_alg = (i == NALGPROTOS); 2590 if (!skip_alg) 2591 alg_type = i; 2592 break; 2593 2594 case SPD_ATTR_ALG_INCRBITS: 2595 alg->alg_increment = attr->spd_attr_value; 2596 break; 2597 2598 case SPD_ATTR_ALG_NKEYSIZES: 2599 if (alg->alg_key_sizes != NULL) { 2600 kmem_free(alg->alg_key_sizes, 2601 ALG_KEY_SIZES(alg)); 2602 } 2603 alg->alg_nkey_sizes = attr->spd_attr_value; 2604 /* 2605 * Allocate room for the trailing zero key size 2606 * value as well. 2607 */ 2608 alg->alg_key_sizes = kmem_zalloc(ALG_KEY_SIZES(alg), 2609 KM_SLEEP); 2610 cur_key = 0; 2611 break; 2612 2613 case SPD_ATTR_ALG_KEYSIZE: 2614 if (alg->alg_key_sizes == NULL || 2615 cur_key >= alg->alg_nkey_sizes) { 2616 ss1dbg(spds, ("spdsock_do_updatealg: " 2617 "too many key sizes\n")); 2618 *diag = SPD_DIAGNOSTIC_ALG_NUM_KEY_SIZES; 2619 goto bail; 2620 } 2621 alg->alg_key_sizes[cur_key++] = attr->spd_attr_value; 2622 break; 2623 2624 case SPD_ATTR_ALG_NBLOCKSIZES: 2625 if (alg->alg_block_sizes != NULL) { 2626 kmem_free(alg->alg_block_sizes, 2627 ALG_BLOCK_SIZES(alg)); 2628 } 2629 alg->alg_nblock_sizes = attr->spd_attr_value; 2630 /* 2631 * Allocate room for the trailing zero block size 2632 * value as well. 
2633 */ 2634 alg->alg_block_sizes = kmem_zalloc(ALG_BLOCK_SIZES(alg), 2635 KM_SLEEP); 2636 cur_block = 0; 2637 break; 2638 2639 case SPD_ATTR_ALG_BLOCKSIZE: 2640 if (alg->alg_block_sizes == NULL || 2641 cur_block >= alg->alg_nblock_sizes) { 2642 ss1dbg(spds, ("spdsock_do_updatealg: " 2643 "too many block sizes\n")); 2644 *diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES; 2645 goto bail; 2646 } 2647 alg->alg_block_sizes[cur_block++] = 2648 attr->spd_attr_value; 2649 break; 2650 2651 case SPD_ATTR_ALG_MECHNAME: { 2652 char *mech_name; 2653 2654 if (attr->spd_attr_value > CRYPTO_MAX_MECH_NAME) { 2655 ss1dbg(spds, ("spdsock_do_updatealg: " 2656 "mech name too long\n")); 2657 *diag = SPD_DIAGNOSTIC_ALG_MECH_NAME_LEN; 2658 goto bail; 2659 } 2660 mech_name = (char *)(attr + 1); 2661 bcopy(mech_name, alg->alg_mech_name, 2662 attr->spd_attr_value); 2663 alg->alg_mech_name[CRYPTO_MAX_MECH_NAME-1] = '\0'; 2664 attr = (struct spd_attribute *)((char *)attr + 2665 attr->spd_attr_value); 2666 break; 2667 } 2668 2669 case SPD_ATTR_PROTO_ID: 2670 doing_proto = B_TRUE; 2671 for (i = 0; i < NALGPROTOS; i++) { 2672 if (algproto[i] == attr->spd_attr_value) { 2673 alg_type = i; 2674 break; 2675 } 2676 } 2677 break; 2678 2679 case SPD_ATTR_PROTO_EXEC_MODE: 2680 if (!doing_proto) 2681 break; 2682 for (i = 0; i < NEXECMODES; i++) { 2683 if (execmodes[i] == attr->spd_attr_value) { 2684 spds->spds_algs_exec_mode[alg_type] = i; 2685 break; 2686 } 2687 } 2688 break; 2689 } 2690 attr++; 2691 } 2692 2693 #undef ALG_KEY_SIZES 2694 #undef ALG_BLOCK_SIZES 2695 2696 /* update the algorithm tables */ 2697 spdsock_merge_algs(spds); 2698 bail: 2699 /* cleanup */ 2700 ipsec_alg_free(alg); 2701 for (alg_type = 0; alg_type < IPSEC_NALGTYPES; alg_type++) 2702 for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) 2703 if (spds->spds_algs[alg_type][algid] != NULL) 2704 ipsec_alg_free(spds->spds_algs[alg_type][algid]); 2705 } 2706 2707 /* 2708 * Process an SPD_UPDATEALGS request. If IPsec is not loaded, queue 2709 * the request until IPsec loads. If IPsec is loaded, act on it 2710 * immediately. 2711 */ 2712 2713 static void 2714 spdsock_updatealg(queue_t *q, mblk_t *mp, spd_ext_t *extv[]) 2715 { 2716 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2717 spd_stack_t *spds = ss->spdsock_spds; 2718 ipsec_stack_t *ipss = spds->spds_netstack->netstack_ipsec; 2719 2720 if (!ipsec_loaded(ipss)) { 2721 /* 2722 * IPsec is not loaded, save request and return nicely, 2723 * the message will be processed once IPsec loads. 2724 */ 2725 mblk_t *new_mp; 2726 2727 /* last update message wins */ 2728 if ((new_mp = copymsg(mp)) == NULL) { 2729 spdsock_error(q, mp, ENOMEM, 0); 2730 return; 2731 } 2732 mutex_enter(&spds->spds_alg_lock); 2733 bcopy(extv, spds->spds_extv_algs, 2734 sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1)); 2735 if (spds->spds_mp_algs != NULL) 2736 freemsg(spds->spds_mp_algs); 2737 spds->spds_mp_algs = mp; 2738 spds->spds_algs_pending = B_TRUE; 2739 mutex_exit(&spds->spds_alg_lock); 2740 if (audit_active) { 2741 cred_t *cr; 2742 pid_t cpid; 2743 2744 cr = msg_getcred(mp, &cpid); 2745 audit_pf_policy(SPD_UPDATEALGS, cr, 2746 spds->spds_netstack, NULL, B_TRUE, EAGAIN, 2747 cpid); 2748 } 2749 spd_echo(q, new_mp); 2750 } else { 2751 /* 2752 * IPsec is loaded, act on the message immediately. 
2753 */ 2754 int diag; 2755 2756 mutex_enter(&spds->spds_alg_lock); 2757 spdsock_do_updatealg(extv, &diag, spds); 2758 mutex_exit(&spds->spds_alg_lock); 2759 if (diag == -1) { 2760 spd_echo(q, mp); 2761 if (audit_active) { 2762 cred_t *cr; 2763 pid_t cpid; 2764 2765 cr = msg_getcred(mp, &cpid); 2766 audit_pf_policy(SPD_UPDATEALGS, cr, 2767 spds->spds_netstack, NULL, B_TRUE, 0, 2768 cpid); 2769 } 2770 } else { 2771 spdsock_diag(q, mp, diag); 2772 if (audit_active) { 2773 cred_t *cr; 2774 pid_t cpid; 2775 2776 cr = msg_getcred(mp, &cpid); 2777 audit_pf_policy(SPD_UPDATEALGS, cr, 2778 spds->spds_netstack, NULL, B_TRUE, diag, 2779 cpid); 2780 } 2781 } 2782 } 2783 } 2784 2785 /* 2786 * Sort through the mess of polhead options to retrieve an appropriate one. 2787 * Returns NULL if we send an spdsock error. Returns a valid pointer if we 2788 * found a valid polhead. Returns ALL_ACTIVE_POLHEADS (aka. -1) or 2789 * ALL_INACTIVE_POLHEADS (aka. -2) if the operation calls for acting on ALL 2790 * policy heads. 2791 */ 2792 static ipsec_policy_head_t * 2793 get_appropriate_polhead(queue_t *q, mblk_t *mp, spd_if_t *tunname, int spdid, 2794 int msgtype, ipsec_tun_pol_t **itpp) 2795 { 2796 ipsec_tun_pol_t *itp; 2797 ipsec_policy_head_t *iph; 2798 int errno; 2799 char *tname; 2800 boolean_t active; 2801 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2802 netstack_t *ns = ss->spdsock_spds->spds_netstack; 2803 uint64_t gen; /* Placeholder */ 2804 datalink_id_t linkid; 2805 2806 active = (spdid == SPD_ACTIVE); 2807 *itpp = NULL; 2808 if (!active && spdid != SPD_STANDBY) { 2809 spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_SPDID); 2810 return (NULL); 2811 } 2812 2813 if (tunname != NULL) { 2814 /* Acting on a tunnel's SPD. */ 2815 tname = (char *)tunname->spd_if_name; 2816 if (*tname == '\0') { 2817 /* Handle all-polhead cases here. */ 2818 if (msgtype != SPD_FLUSH && msgtype != SPD_DUMP) { 2819 spdsock_diag(q, mp, 2820 SPD_DIAGNOSTIC_NOT_GLOBAL_OP); 2821 return (NULL); 2822 } 2823 return (active ? ALL_ACTIVE_POLHEADS : 2824 ALL_INACTIVE_POLHEADS); 2825 } 2826 2827 itp = get_tunnel_policy(tname, ns); 2828 if (itp == NULL) { 2829 if (msgtype != SPD_ADDRULE) { 2830 /* "Tunnel not found" */ 2831 spdsock_error(q, mp, ENOENT, 0); 2832 return (NULL); 2833 } 2834 2835 errno = 0; 2836 itp = create_tunnel_policy(tname, &errno, &gen, ns); 2837 if (itp == NULL) { 2838 /* 2839 * Something very bad happened, most likely 2840 * ENOMEM. Return an indicator. 2841 */ 2842 spdsock_error(q, mp, errno, 0); 2843 return (NULL); 2844 } 2845 } 2846 /* 2847 * Troll the plumbed tunnels and see if we have a match. We 2848 * need to do this always in case we add policy AFTER plumbing 2849 * a tunnel. 2850 */ 2851 if (dls_mgmt_get_linkid(tname, &linkid) == 0) 2852 iptun_set_policy(linkid, itp); 2853 2854 *itpp = itp; 2855 /* For spdsock dump state, set the polhead's name. */ 2856 if (msgtype == SPD_DUMP) { 2857 ITP_REFHOLD(itp); 2858 ss->spdsock_itp = itp; 2859 ss->spdsock_dump_tunnel = itp->itp_flags & 2860 (active ? ITPF_P_TUNNEL : ITPF_I_TUNNEL); 2861 } 2862 } else { 2863 itp = NULL; 2864 /* For spdsock dump state, indicate it's global policy. */ 2865 if (msgtype == SPD_DUMP) 2866 ss->spdsock_itp = NULL; 2867 } 2868 2869 if (active) 2870 iph = (itp == NULL) ? ipsec_system_policy(ns) : itp->itp_policy; 2871 else 2872 iph = (itp == NULL) ? 
ipsec_inactive_policy(ns) : 2873 itp->itp_inactive; 2874 2875 ASSERT(iph != NULL); 2876 if (itp != NULL) { 2877 IPPH_REFHOLD(iph); 2878 } 2879 2880 return (iph); 2881 } 2882 2883 static void 2884 spdsock_parse(queue_t *q, mblk_t *mp) 2885 { 2886 spd_msg_t *spmsg; 2887 spd_ext_t *extv[SPD_EXT_MAX + 1]; 2888 uint_t msgsize; 2889 ipsec_policy_head_t *iph; 2890 ipsec_tun_pol_t *itp; 2891 spd_if_t *tunname; 2892 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2893 spd_stack_t *spds = ss->spdsock_spds; 2894 netstack_t *ns = spds->spds_netstack; 2895 ipsec_stack_t *ipss = ns->netstack_ipsec; 2896 2897 /* Make sure nothing's below me. */ 2898 ASSERT(WR(q)->q_next == NULL); 2899 2900 spmsg = (spd_msg_t *)mp->b_rptr; 2901 2902 msgsize = SPD_64TO8(spmsg->spd_msg_len); 2903 2904 if (msgdsize(mp) != msgsize) { 2905 /* 2906 * Message len incorrect w.r.t. actual size. Send an error 2907 * (EMSGSIZE). It may be necessary to massage things a 2908 * bit. For example, if the spd_msg_type is hosed, 2909 * I need to set it to SPD_RESERVED to get delivery to 2910 * do the right thing. Then again, maybe just letting 2911 * the error delivery do the right thing. 2912 */ 2913 ss2dbg(spds, 2914 ("mblk (%lu) and base (%d) message sizes don't jibe.\n", 2915 msgdsize(mp), msgsize)); 2916 spdsock_error(q, mp, EMSGSIZE, SPD_DIAGNOSTIC_NONE); 2917 return; 2918 } 2919 2920 if (msgsize > (uint_t)(mp->b_wptr - mp->b_rptr)) { 2921 /* Get all message into one mblk. */ 2922 if (pullupmsg(mp, -1) == 0) { 2923 /* 2924 * Something screwy happened. 2925 */ 2926 ss3dbg(spds, ("spdsock_parse: pullupmsg() failed.\n")); 2927 return; 2928 } else { 2929 spmsg = (spd_msg_t *)mp->b_rptr; 2930 } 2931 } 2932 2933 switch (spdsock_get_ext(extv, spmsg, msgsize)) { 2934 case KGE_DUP: 2935 /* Handle duplicate extension. */ 2936 ss1dbg(spds, ("Got duplicate extension of type %d.\n", 2937 extv[0]->spd_ext_type)); 2938 spdsock_diag(q, mp, dup_ext_diag[extv[0]->spd_ext_type]); 2939 return; 2940 case KGE_UNK: 2941 /* Handle unknown extension. */ 2942 ss1dbg(spds, ("Got unknown extension of type %d.\n", 2943 extv[0]->spd_ext_type)); 2944 spdsock_diag(q, mp, SPD_DIAGNOSTIC_UNKNOWN_EXT); 2945 return; 2946 case KGE_LEN: 2947 /* Length error. */ 2948 ss1dbg(spds, ("Length %d on extension type %d overrun or 0.\n", 2949 extv[0]->spd_ext_len, extv[0]->spd_ext_type)); 2950 spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_EXTLEN); 2951 return; 2952 case KGE_CHK: 2953 /* Reality check failed. */ 2954 ss1dbg(spds, ("Reality check failed on extension type %d.\n", 2955 extv[0]->spd_ext_type)); 2956 spdsock_diag(q, mp, bad_ext_diag[extv[0]->spd_ext_type]); 2957 return; 2958 default: 2959 /* Default case is no errors. */ 2960 break; 2961 } 2962 2963 /* 2964 * Special-case SPD_UPDATEALGS so as not to load IPsec. 2965 */ 2966 if (!ipsec_loaded(ipss) && spmsg->spd_msg_type != SPD_UPDATEALGS) { 2967 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2968 2969 ASSERT(ss != NULL); 2970 ipsec_loader_loadnow(ipss); 2971 ss->spdsock_timeout_arg = mp; 2972 ss->spdsock_timeout = qtimeout(q, spdsock_loadcheck, 2973 q, LOADCHECK_INTERVAL); 2974 return; 2975 } 2976 2977 /* First check for messages that need no polheads at all. 
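 * (Dispatch proceeds in three passes: SPD_UPDATEALGS, SPD_ALGLIST and
 * SPD_DUMPALGS need no polhead; SPD_FLIP and SPD_CLONE work on both
 * the primary and secondary polheads; everything else goes through
 * get_appropriate_polhead(), where SPD_FLUSH and SPD_DUMP may act on
 * all polheads of a type while SPD_ADDRULE, SPD_DELETERULE and
 * SPD_LOOKUP require exactly one.)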
*/ 2978 switch (spmsg->spd_msg_type) { 2979 case SPD_UPDATEALGS: 2980 spdsock_updatealg(q, mp, extv); 2981 return; 2982 case SPD_ALGLIST: 2983 spdsock_alglist(q, mp); 2984 return; 2985 case SPD_DUMPALGS: 2986 spdsock_dumpalgs(q, mp); 2987 return; 2988 } 2989 2990 /* 2991 * Then check for ones that need both primary/secondary polheads, 2992 * finding the appropriate tunnel policy if need be. 2993 */ 2994 tunname = (spd_if_t *)extv[SPD_EXT_TUN_NAME]; 2995 switch (spmsg->spd_msg_type) { 2996 case SPD_FLIP: 2997 spdsock_flip(q, mp, tunname); 2998 return; 2999 case SPD_CLONE: 3000 spdsock_clone(q, mp, tunname); 3001 return; 3002 } 3003 3004 /* 3005 * Finally, find ones that operate on exactly one polhead, or 3006 * "all polheads" of a given type (active/inactive). 3007 */ 3008 iph = get_appropriate_polhead(q, mp, tunname, spmsg->spd_msg_spdid, 3009 spmsg->spd_msg_type, &itp); 3010 if (iph == NULL) 3011 return; 3012 3013 /* All-polheads-ready operations. */ 3014 switch (spmsg->spd_msg_type) { 3015 case SPD_FLUSH: 3016 if (itp != NULL) { 3017 mutex_enter(&itp->itp_lock); 3018 if (spmsg->spd_msg_spdid == SPD_ACTIVE) 3019 itp->itp_flags &= ~ITPF_PFLAGS; 3020 else 3021 itp->itp_flags &= ~ITPF_IFLAGS; 3022 mutex_exit(&itp->itp_lock); 3023 ITP_REFRELE(itp, ns); 3024 } 3025 spdsock_flush(q, iph, itp, mp); 3026 return; 3027 case SPD_DUMP: 3028 if (itp != NULL) 3029 ITP_REFRELE(itp, ns); 3030 spdsock_dump(q, iph, mp); 3031 return; 3032 } 3033 3034 if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) { 3035 spdsock_diag(q, mp, SPD_DIAGNOSTIC_NOT_GLOBAL_OP); 3036 return; 3037 } 3038 3039 /* Single-polhead-only operations. */ 3040 switch (spmsg->spd_msg_type) { 3041 case SPD_ADDRULE: 3042 spdsock_addrule(q, iph, mp, extv, itp); 3043 break; 3044 case SPD_DELETERULE: 3045 spdsock_deleterule(q, iph, mp, extv, itp); 3046 break; 3047 case SPD_LOOKUP: 3048 spdsock_lookup(q, iph, mp, extv, itp); 3049 break; 3050 default: 3051 spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_MSG_TYPE); 3052 break; 3053 } 3054 3055 IPPH_REFRELE(iph, ns); 3056 if (itp != NULL) 3057 ITP_REFRELE(itp, ns); 3058 } 3059 3060 /* 3061 * If an algorithm mapping was received before IPsec was loaded, process it. 3062 * Called from the IPsec loader. 3063 */ 3064 void 3065 spdsock_update_pending_algs(netstack_t *ns) 3066 { 3067 spd_stack_t *spds = ns->netstack_spdsock; 3068 3069 mutex_enter(&spds->spds_alg_lock); 3070 if (spds->spds_algs_pending) { 3071 int diag; 3072 3073 spdsock_do_updatealg(spds->spds_extv_algs, &diag, 3074 spds); 3075 spds->spds_algs_pending = B_FALSE; 3076 } 3077 mutex_exit(&spds->spds_alg_lock); 3078 } 3079 3080 static void 3081 spdsock_loadcheck(void *arg) 3082 { 3083 queue_t *q = (queue_t *)arg; 3084 spdsock_t *ss = (spdsock_t *)q->q_ptr; 3085 mblk_t *mp; 3086 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec; 3087 3088 ASSERT(ss != NULL); 3089 3090 ss->spdsock_timeout = 0; 3091 mp = ss->spdsock_timeout_arg; 3092 ASSERT(mp != NULL); 3093 ss->spdsock_timeout_arg = NULL; 3094 if (ipsec_failed(ipss)) 3095 spdsock_error(q, mp, EPROTONOSUPPORT, 0); 3096 else 3097 spdsock_parse(q, mp); 3098 } 3099 3100 /* 3101 * Copy relevant state bits. 3102 */ 3103 static void 3104 spdsock_copy_info(struct T_info_ack *tap, spdsock_t *ss) 3105 { 3106 *tap = spdsock_g_t_info_ack; 3107 tap->CURRENT_state = ss->spdsock_state; 3108 tap->OPT_size = spdsock_max_optsize; 3109 } 3110 3111 /* 3112 * This routine responds to T_CAPABILITY_REQ messages. It is called by 3113 * spdsock_wput. 
Much of the T_CAPABILITY_ACK information is copied from 3114 * spdsock_g_t_info_ack. The current state of the stream is copied from 3115 * spdsock_state. 3116 */ 3117 static void 3118 spdsock_capability_req(queue_t *q, mblk_t *mp) 3119 { 3120 spdsock_t *ss = (spdsock_t *)q->q_ptr; 3121 t_uscalar_t cap_bits1; 3122 struct T_capability_ack *tcap; 3123 3124 cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1; 3125 3126 mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack), 3127 mp->b_datap->db_type, T_CAPABILITY_ACK); 3128 if (mp == NULL) 3129 return; 3130 3131 tcap = (struct T_capability_ack *)mp->b_rptr; 3132 tcap->CAP_bits1 = 0; 3133 3134 if (cap_bits1 & TC1_INFO) { 3135 spdsock_copy_info(&tcap->INFO_ack, ss); 3136 tcap->CAP_bits1 |= TC1_INFO; 3137 } 3138 3139 qreply(q, mp); 3140 } 3141 3142 /* 3143 * This routine responds to T_INFO_REQ messages. It is called by 3144 * spdsock_wput_other. 3145 * Most of the T_INFO_ACK information is copied from spdsock_g_t_info_ack. 3146 * The current state of the stream is copied from spdsock_state. 3147 */ 3148 static void 3149 spdsock_info_req(q, mp) 3150 queue_t *q; 3151 mblk_t *mp; 3152 { 3153 mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO, 3154 T_INFO_ACK); 3155 if (mp == NULL) 3156 return; 3157 spdsock_copy_info((struct T_info_ack *)mp->b_rptr, 3158 (spdsock_t *)q->q_ptr); 3159 qreply(q, mp); 3160 } 3161 3162 /* 3163 * spdsock_err_ack. This routine creates a 3164 * T_ERROR_ACK message and passes it 3165 * upstream. 3166 */ 3167 static void 3168 spdsock_err_ack(q, mp, t_error, sys_error) 3169 queue_t *q; 3170 mblk_t *mp; 3171 int t_error; 3172 int sys_error; 3173 { 3174 if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL) 3175 qreply(q, mp); 3176 } 3177 3178 /* 3179 * This routine retrieves the current status of socket options. 3180 * It returns the size of the option retrieved. 3181 */ 3182 /* ARGSUSED */ 3183 int 3184 spdsock_opt_get(queue_t *q, int level, int name, uchar_t *ptr) 3185 { 3186 int *i1 = (int *)ptr; 3187 3188 switch (level) { 3189 case SOL_SOCKET: 3190 switch (name) { 3191 case SO_TYPE: 3192 *i1 = SOCK_RAW; 3193 break; 3194 /* 3195 * The following two items can be manipulated, 3196 * but changing them should do nothing. 3197 */ 3198 case SO_SNDBUF: 3199 *i1 = (int)q->q_hiwat; 3200 break; 3201 case SO_RCVBUF: 3202 *i1 = (int)(RD(q)->q_hiwat); 3203 break; 3204 } 3205 break; 3206 default: 3207 return (0); 3208 } 3209 return (sizeof (int)); 3210 } 3211 3212 /* 3213 * This routine sets socket options. 3214 */ 3215 /* ARGSUSED */ 3216 int 3217 spdsock_opt_set(queue_t *q, uint_t mgmt_flags, int level, int name, 3218 uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp, 3219 void *thisdg_attrs, cred_t *cr, mblk_t *mblk) 3220 { 3221 int *i1 = (int *)invalp; 3222 spdsock_t *ss = (spdsock_t *)q->q_ptr; 3223 spd_stack_t *spds = ss->spdsock_spds; 3224 3225 switch (level) { 3226 case SOL_SOCKET: 3227 switch (name) { 3228 case SO_SNDBUF: 3229 if (*i1 > spds->spds_max_buf) 3230 return (ENOBUFS); 3231 q->q_hiwat = *i1; 3232 break; 3233 case SO_RCVBUF: 3234 if (*i1 > spds->spds_max_buf) 3235 return (ENOBUFS); 3236 RD(q)->q_hiwat = *i1; 3237 (void) proto_set_rx_hiwat(RD(q), NULL, *i1); 3238 break; 3239 } 3240 break; 3241 } 3242 return (0); 3243 } 3244 3245 3246 /* 3247 * Handle STREAMS messages. 
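 * M_PROTO/M_PCPROTO carry TPI primitives (T_CAPABILITY_REQ,
 * T_INFO_REQ, option management, and the data primitives that are
 * illegal on this stream); M_IOCTL covers the ND_GET/ND_SET
 * named-dispatch ioctls; M_FLUSH is honored; anything else is freed.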
3248 */ 3249 static void 3250 spdsock_wput_other(queue_t *q, mblk_t *mp) 3251 { 3252 struct iocblk *iocp; 3253 int error; 3254 spdsock_t *ss = (spdsock_t *)q->q_ptr; 3255 spd_stack_t *spds = ss->spdsock_spds; 3256 cred_t *cr; 3257 3258 switch (mp->b_datap->db_type) { 3259 case M_PROTO: 3260 case M_PCPROTO: 3261 if ((mp->b_wptr - mp->b_rptr) < sizeof (long)) { 3262 ss3dbg(spds, ( 3263 "spdsock_wput_other: Not big enough M_PROTO\n")); 3264 freemsg(mp); 3265 return; 3266 } 3267 switch (((union T_primitives *)mp->b_rptr)->type) { 3268 case T_CAPABILITY_REQ: 3269 spdsock_capability_req(q, mp); 3270 break; 3271 case T_INFO_REQ: 3272 spdsock_info_req(q, mp); 3273 break; 3274 case T_SVR4_OPTMGMT_REQ: 3275 case T_OPTMGMT_REQ: 3276 /* 3277 * All Solaris components should pass a db_credp 3278 * for this TPI message, hence we ASSERT. 3279 * But in case there is some other M_PROTO that looks 3280 * like a TPI message sent by some other kernel 3281 * component, we check and return an error. 3282 */ 3283 cr = msg_getcred(mp, NULL); 3284 ASSERT(cr != NULL); 3285 if (cr == NULL) { 3286 spdsock_err_ack(q, mp, TSYSERR, EINVAL); 3287 return; 3288 } 3289 if (((union T_primitives *)mp->b_rptr)->type == 3290 T_SVR4_OPTMGMT_REQ) { 3291 (void) svr4_optcom_req(q, mp, cr, 3292 &spdsock_opt_obj, B_FALSE); 3293 } else { 3294 (void) tpi_optcom_req(q, mp, cr, 3295 &spdsock_opt_obj, B_FALSE); 3296 } 3297 break; 3298 case T_DATA_REQ: 3299 case T_EXDATA_REQ: 3300 case T_ORDREL_REQ: 3301 /* Illegal for spdsock. */ 3302 freemsg(mp); 3303 (void) putnextctl1(RD(q), M_ERROR, EPROTO); 3304 break; 3305 default: 3306 /* Not supported by spdsock. */ 3307 spdsock_err_ack(q, mp, TNOTSUPPORT, 0); 3308 break; 3309 } 3310 return; 3311 case M_IOCTL: 3312 iocp = (struct iocblk *)mp->b_rptr; 3313 error = EINVAL; 3314 3315 switch (iocp->ioc_cmd) { 3316 case ND_SET: 3317 case ND_GET: 3318 if (nd_getset(q, spds->spds_g_nd, mp)) { 3319 qreply(q, mp); 3320 return; 3321 } else 3322 error = ENOENT; 3323 /* FALLTHRU */ 3324 default: 3325 miocnak(q, mp, 0, error); 3326 return; 3327 } 3328 case M_FLUSH: 3329 if (*mp->b_rptr & FLUSHW) { 3330 flushq(q, FLUSHALL); 3331 *mp->b_rptr &= ~FLUSHW; 3332 } 3333 if (*mp->b_rptr & FLUSHR) { 3334 qreply(q, mp); 3335 return; 3336 } 3337 /* Else FALLTHRU */ 3338 } 3339 3340 /* If fell through, just black-hole the message. */ 3341 freemsg(mp); 3342 } 3343 3344 static void 3345 spdsock_wput(queue_t *q, mblk_t *mp) 3346 { 3347 uint8_t *rptr = mp->b_rptr; 3348 mblk_t *mp1; 3349 spdsock_t *ss = (spdsock_t *)q->q_ptr; 3350 spd_stack_t *spds = ss->spdsock_spds; 3351 3352 /* 3353 * If we're dumping, defer processing other messages until the 3354 * dump completes. 3355 */ 3356 if (ss->spdsock_dump_req != NULL) { 3357 if (!putq(q, mp)) 3358 freemsg(mp); 3359 return; 3360 } 3361 3362 switch (mp->b_datap->db_type) { 3363 case M_DATA: 3364 /* 3365 * Silently discard. 3366 */ 3367 ss2dbg(spds, ("raw M_DATA in spdsock.\n")); 3368 freemsg(mp); 3369 return; 3370 case M_PROTO: 3371 case M_PCPROTO: 3372 if ((mp->b_wptr - rptr) >= sizeof (struct T_data_req)) { 3373 if (((union T_primitives *)rptr)->type == T_DATA_REQ) { 3374 if ((mp1 = mp->b_cont) == NULL) { 3375 /* No data after T_DATA_REQ. */ 3376 ss2dbg(spds, 3377 ("No data after DATA_REQ.\n")); 3378 freemsg(mp); 3379 return; 3380 } 3381 freeb(mp); 3382 mp = mp1; 3383 ss2dbg(spds, ("T_DATA_REQ\n")); 3384 break; /* Out of switch. 
*/ 3385 } 3386 } 3387 /* FALLTHRU */ 3388 default: 3389 ss3dbg(spds, ("In default wput case (%d %d).\n", 3390 mp->b_datap->db_type, ((union T_primitives *)rptr)->type)); 3391 spdsock_wput_other(q, mp); 3392 return; 3393 } 3394 3395 /* I now have a PF_POLICY message in an M_DATA block. */ 3396 spdsock_parse(q, mp); 3397 } 3398 3399 /* 3400 * Device open procedure, called when new queue pair created. 3401 * We are passed the read-side queue. 3402 */ 3403 /* ARGSUSED */ 3404 static int 3405 spdsock_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp) 3406 { 3407 spdsock_t *ss; 3408 queue_t *oq = OTHERQ(q); 3409 minor_t ssminor; 3410 netstack_t *ns; 3411 spd_stack_t *spds; 3412 3413 if (secpolicy_ip_config(credp, B_FALSE) != 0) 3414 return (EPERM); 3415 3416 if (q->q_ptr != NULL) 3417 return (0); /* Re-open of an already open instance. */ 3418 3419 if (sflag & MODOPEN) 3420 return (EINVAL); 3421 3422 ns = netstack_find_by_cred(credp); 3423 ASSERT(ns != NULL); 3424 spds = ns->netstack_spdsock; 3425 ASSERT(spds != NULL); 3426 3427 ss2dbg(spds, ("Made it into PF_POLICY socket open.\n")); 3428 3429 ssminor = (minor_t)(uintptr_t)vmem_alloc(spdsock_vmem, 1, VM_NOSLEEP); 3430 if (ssminor == 0) { 3431 netstack_rele(spds->spds_netstack); 3432 return (ENOMEM); 3433 } 3434 ss = kmem_zalloc(sizeof (spdsock_t), KM_NOSLEEP); 3435 if (ss == NULL) { 3436 vmem_free(spdsock_vmem, (void *)(uintptr_t)ssminor, 1); 3437 netstack_rele(spds->spds_netstack); 3438 return (ENOMEM); 3439 } 3440 3441 ss->spdsock_minor = ssminor; 3442 ss->spdsock_state = TS_UNBND; 3443 ss->spdsock_dump_req = NULL; 3444 3445 ss->spdsock_spds = spds; 3446 3447 q->q_ptr = ss; 3448 oq->q_ptr = ss; 3449 3450 q->q_hiwat = spds->spds_recv_hiwat; 3451 3452 oq->q_hiwat = spds->spds_xmit_hiwat; 3453 oq->q_lowat = spds->spds_xmit_lowat; 3454 3455 qprocson(q); 3456 (void) proto_set_rx_hiwat(q, NULL, spds->spds_recv_hiwat); 3457 3458 *devp = makedevice(getmajor(*devp), ss->spdsock_minor); 3459 return (0); 3460 } 3461 3462 /* 3463 * Read-side service procedure, invoked when we get back-enabled 3464 * when buffer space becomes available. 3465 * 3466 * Dump another chunk if we were dumping before; when we finish, kick 3467 * the write-side queue in case it's waiting for read queue space. 3468 */ 3469 void 3470 spdsock_rsrv(queue_t *q) 3471 { 3472 spdsock_t *ss = q->q_ptr; 3473 3474 if (ss->spdsock_dump_req != NULL) 3475 spdsock_dump_some(q, ss); 3476 3477 if (ss->spdsock_dump_req == NULL) 3478 qenable(OTHERQ(q)); 3479 } 3480 3481 /* 3482 * Write-side service procedure, invoked when we defer processing 3483 * if another message is received while a dump is in progress. 3484 */ 3485 void 3486 spdsock_wsrv(queue_t *q) 3487 { 3488 spdsock_t *ss = q->q_ptr; 3489 mblk_t *mp; 3490 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec; 3491 3492 if (ss->spdsock_dump_req != NULL) { 3493 qenable(OTHERQ(q)); 3494 return; 3495 } 3496 3497 while ((mp = getq(q)) != NULL) { 3498 if (ipsec_loaded(ipss)) { 3499 spdsock_wput(q, mp); 3500 if (ss->spdsock_dump_req != NULL) 3501 return; 3502 } else if (!ipsec_failed(ipss)) { 3503 (void) putq(q, mp); 3504 } else { 3505 spdsock_error(q, mp, EPFNOSUPPORT, 0); 3506 } 3507 } 3508 } 3509 3510 static int 3511 spdsock_close(queue_t *q) 3512 { 3513 spdsock_t *ss = q->q_ptr; 3514 spd_stack_t *spds = ss->spdsock_spds; 3515 3516 qprocsoff(q); 3517 3518 /* Safe assumption. 
*/ 3519 ASSERT(ss != NULL); 3520 3521 if (ss->spdsock_timeout != 0) 3522 (void) quntimeout(q, ss->spdsock_timeout); 3523 3524 ss3dbg(spds, ("Driver close, PF_POLICY socket is going away.\n")); 3525 3526 vmem_free(spdsock_vmem, (void *)(uintptr_t)ss->spdsock_minor, 1); 3527 netstack_rele(ss->spdsock_spds->spds_netstack); 3528 3529 kmem_free(ss, sizeof (spdsock_t)); 3530 return (0); 3531 } 3532 3533 /* 3534 * Merge the IPsec algorithms tables with the received algorithm information. 3535 */ 3536 void 3537 spdsock_merge_algs(spd_stack_t *spds) 3538 { 3539 ipsec_alginfo_t *alg, *oalg; 3540 ipsec_algtype_t algtype; 3541 uint_t algidx, algid, nalgs; 3542 crypto_mech_name_t *mechs; 3543 uint_t mech_count, mech_idx; 3544 netstack_t *ns = spds->spds_netstack; 3545 ipsec_stack_t *ipss = ns->netstack_ipsec; 3546 3547 ASSERT(MUTEX_HELD(&spds->spds_alg_lock)); 3548 3549 /* 3550 * Get the list of supported mechanisms from the crypto framework. 3551 * If a mechanism is supported by KCF, resolve its mechanism 3552 * id and mark it as being valid. This operation must be done 3553 * without holding alg_lock, since it can cause a provider 3554 * module to be loaded and the provider notification callback to 3555 * be invoked. 3556 */ 3557 mechs = crypto_get_mech_list(&mech_count, KM_SLEEP); 3558 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 3559 for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) { 3560 int algflags = 0; 3561 crypto_mech_type_t mt = CRYPTO_MECHANISM_INVALID; 3562 3563 alg = spds->spds_algs[algtype][algid]; 3564 if (alg == NULL) 3565 continue; 3566 3567 /* 3568 * The NULL encryption algorithm is a special 3569 * case because there are no mechanisms, yet 3570 * the algorithm is still valid. 3571 */ 3572 if (alg->alg_id == SADB_EALG_NULL) { 3573 alg->alg_mech_type = CRYPTO_MECHANISM_INVALID; 3574 alg->alg_flags = ALG_FLAG_VALID; 3575 continue; 3576 } 3577 3578 for (mech_idx = 0; mech_idx < mech_count; mech_idx++) { 3579 if (strncmp(alg->alg_mech_name, mechs[mech_idx], 3580 CRYPTO_MAX_MECH_NAME) == 0) { 3581 mt = crypto_mech2id(alg->alg_mech_name); 3582 ASSERT(mt != CRYPTO_MECHANISM_INVALID); 3583 algflags = ALG_FLAG_VALID; 3584 break; 3585 } 3586 } 3587 alg->alg_mech_type = mt; 3588 alg->alg_flags = algflags; 3589 } 3590 } 3591 3592 mutex_enter(&ipss->ipsec_alg_lock); 3593 3594 /* 3595 * For each algorithm currently defined, check if it is 3596 * present in the new tables created from the SPD_UPDATEALGS 3597 * message received from user-space. 3598 * Delete the algorithm entries that are currently defined 3599 * but not part of the new tables. 3600 */ 3601 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 3602 nalgs = ipss->ipsec_nalgs[algtype]; 3603 for (algidx = 0; algidx < nalgs; algidx++) { 3604 algid = ipss->ipsec_sortlist[algtype][algidx]; 3605 if (spds->spds_algs[algtype][algid] == NULL) 3606 ipsec_alg_unreg(algtype, algid, ns); 3607 } 3608 } 3609 3610 /* 3611 * For each algorithm we just received, check if it is 3612 * present in the currently defined tables. If it is, swap 3613 * the entry with the one we just allocated. 3614 * If the new algorithm is not in the current tables, 3615 * add it. 3616 */ 3617 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 3618 for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) { 3619 alg = spds->spds_algs[algtype][algid]; 3620 if (alg == NULL) 3621 continue; 3622 3623 if ((oalg = ipss->ipsec_alglists[algtype][algid]) == 3624 NULL) { 3625 /* 3626 * New algorithm, add it to the algorithm 3627 * table. 
3628 */ 3629 ipsec_alg_reg(algtype, alg, ns); 3630 } else { 3631 /* 3632 * Algorithm is already in the table. Swap 3633 * the existing entry with the new one. 3634 */ 3635 ipsec_alg_fix_min_max(alg, algtype, ns); 3636 ipss->ipsec_alglists[algtype][algid] = alg; 3637 ipsec_alg_free(oalg); 3638 } 3639 spds->spds_algs[algtype][algid] = NULL; 3640 } 3641 } 3642 3643 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 3644 ipss->ipsec_algs_exec_mode[algtype] = 3645 spds->spds_algs_exec_mode[algtype]; 3646 } 3647 3648 mutex_exit(&ipss->ipsec_alg_lock); 3649 3650 crypto_free_mech_list(mechs, mech_count); 3651 3652 ipsecah_algs_changed(ns); 3653 ipsecesp_algs_changed(ns); 3654 } 3655