/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/strsun.h>
#include <sys/stropts.h>
#include <sys/zone.h>
#include <sys/vnode.h>
#include <sys/sysmacros.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/mkdev.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/suntpi.h>
#include <sys/policy.h>

#include <sys/socket.h>
#include <netinet/in.h>
#include <net/pfkeyv2.h>
#include <net/pfpolicy.h>

#include <inet/common.h>
#include <netinet/ip6.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <inet/mi.h>
#include <inet/proto_set.h>
#include <inet/nd.h>
#include <inet/ip_if.h>
#include <inet/tun.h>
#include <inet/optcom.h>
#include <inet/ipsec_info.h>
#include <inet/ipsec_impl.h>
#include <inet/spdsock.h>
#include <inet/sadb.h>

#include <sys/isa_defs.h>

#include <c2/audit.h>

/*
 * This is a transport provider for the PF_POLICY IPsec policy
 * management socket, which provides a management interface into the
 * SPD, allowing policy rules to be added, deleted, and queried.
 *
 * This effectively replaces the old private SIOC*IPSECONFIG ioctls
 * with an extensible interface which will hopefully be public some
 * day.
 *
 * See <net/pfpolicy.h> for more details on the protocol.
 *
 * We link against drv/ip and call directly into it to manipulate the
 * SPD; see ipsec_impl.h for the policy data structures and spd.c for
 * the code which maintains them.
 *
 * The MT model of this is QPAIR with the addition of some explicit
 * locking to protect system-wide policy data structures.
 */

static vmem_t *spdsock_vmem;		/* for minor numbers. */

/* True when x is aligned on a 64-bit boundary (PF_POLICY wire alignment). */
#define	ALIGNED64(x) IS_P2ALIGNED((x), sizeof (uint64_t))

/* Default structure copied into T_INFO_ACK messages (from rts.c...) */
static struct T_info_ack spdsock_g_t_info_ack = {
	T_INFO_ACK,
	T_INFINITE,	/* TSDU_size. Maximum size messages. */
	T_INVALID,	/* ETSDU_size. No expedited data. */
	T_INVALID,	/* CDATA_size. No connect data. */
	T_INVALID,	/* DDATA_size. No disconnect data. */
	0,		/* ADDR_size. */
	0,		/* OPT_size. No user-settable options */
	64 * 1024,	/* TIDU_size. spdsock allows maximum size messages. */
	T_COTS,		/* SERV_type. spdsock supports connection oriented. */
	TS_UNBND,	/* CURRENT_state. This is set from spdsock_state. */
	(XPG4_1)	/* Provider flags */
};

/* Named Dispatch Parameter Management Structure */
typedef struct spdsockparam_s {
	uint_t	spdsock_param_min;	/* inclusive lower bound */
	uint_t	spdsock_param_max;	/* inclusive upper bound */
	uint_t	spdsock_param_value;	/* current value */
	char	*spdsock_param_name;	/* ndd-visible name */
} spdsockparam_t;

/*
 * Table of NDD variables supported by spdsock. These are loaded into
 * spdsock_g_nd in spdsock_init_nd.
 * All of these are alterable, within the min/max values given, at run time.
 */
static spdsockparam_t lcl_param_arr[] = {
	/* min	max	value	name */
	{ 4096, 65536, 8192, "spdsock_xmit_hiwat"},
	{ 0, 65536, 1024, "spdsock_xmit_lowat"},
	{ 4096, 65536, 8192, "spdsock_recv_hiwat"},
	{ 65536, 1024*1024*1024, 256*1024, "spdsock_max_buf"},
	{ 0, 3, 0, "spdsock_debug"},
};
/* Accessors into the per-stack copy of the table above (spds_params). */
#define	spds_xmit_hiwat	spds_params[0].spdsock_param_value
#define	spds_xmit_lowat	spds_params[1].spdsock_param_value
#define	spds_recv_hiwat	spds_params[2].spdsock_param_value
#define	spds_max_buf	spds_params[3].spdsock_param_value
#define	spds_debug	spds_params[4].spdsock_param_value

/* Debug printing at increasing verbosity levels (spdsock_debug 0..3). */
#define	ss0dbg(a)	printf a
/* NOTE:  != 0 instead of > 0 so lint doesn't complain. */
#define	ss1dbg(spds, a)	if (spds->spds_debug != 0) printf a
#define	ss2dbg(spds, a)	if (spds->spds_debug > 1) printf a
#define	ss3dbg(spds, a)	if (spds->spds_debug > 2) printf a

/*
 * Reset the per-socket SPD dump cursor to the start of polhead iph.
 * Caller must hold iph_lock as reader (asserted).
 */
#define	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph) { \
	ASSERT(RW_READ_HELD(&(iph)->iph_lock)); \
	(ss)->spdsock_dump_head = (iph); \
	(ss)->spdsock_dump_gen = (iph)->iph_gen; \
	(ss)->spdsock_dump_cur_type = 0; \
	(ss)->spdsock_dump_cur_af = IPSEC_AF_V4; \
	(ss)->spdsock_dump_cur_rule = NULL; \
	(ss)->spdsock_dump_count = 0; \
	(ss)->spdsock_dump_cur_chain = 0; \
}

static int spdsock_close(queue_t *);
static int spdsock_open(queue_t *, dev_t *, int, int, cred_t *);
static void spdsock_wput(queue_t *, mblk_t *);
static void spdsock_wsrv(queue_t *);
static void spdsock_rsrv(queue_t *);
static void *spdsock_stack_init(netstackid_t stackid, netstack_t *ns);
static void spdsock_stack_fini(netstackid_t stackid, void *arg);
static void spdsock_loadcheck(void *);
static void spdsock_merge_algs(spd_stack_t *);
static void spdsock_flush_one(ipsec_policy_head_t *, netstack_t *);
static mblk_t
*spdsock_dump_next_record(spdsock_t *); 163 164 static struct module_info info = { 165 5138, "spdsock", 1, INFPSZ, 512, 128 166 }; 167 168 static struct qinit rinit = { 169 NULL, (pfi_t)spdsock_rsrv, spdsock_open, spdsock_close, 170 NULL, &info 171 }; 172 173 static struct qinit winit = { 174 (pfi_t)spdsock_wput, (pfi_t)spdsock_wsrv, NULL, NULL, NULL, &info 175 }; 176 177 struct streamtab spdsockinfo = { 178 &rinit, &winit 179 }; 180 181 /* mapping from alg type to protocol number, as per RFC 2407 */ 182 static const uint_t algproto[] = { 183 PROTO_IPSEC_AH, 184 PROTO_IPSEC_ESP, 185 }; 186 187 #define NALGPROTOS (sizeof (algproto) / sizeof (algproto[0])) 188 189 /* mapping from kernel exec mode to spdsock exec mode */ 190 static const uint_t execmodes[] = { 191 SPD_ALG_EXEC_MODE_SYNC, 192 SPD_ALG_EXEC_MODE_ASYNC 193 }; 194 195 #define NEXECMODES (sizeof (execmodes) / sizeof (execmodes[0])) 196 197 #define ALL_ACTIVE_POLHEADS ((ipsec_policy_head_t *)-1) 198 #define ALL_INACTIVE_POLHEADS ((ipsec_policy_head_t *)-2) 199 200 #define ITP_NAME(itp) (itp != NULL ? itp->itp_name : NULL) 201 202 /* ARGSUSED */ 203 static int 204 spdsock_param_get(q, mp, cp, cr) 205 queue_t *q; 206 mblk_t *mp; 207 caddr_t cp; 208 cred_t *cr; 209 { 210 spdsockparam_t *spdsockpa = (spdsockparam_t *)cp; 211 uint_t value; 212 spdsock_t *ss = (spdsock_t *)q->q_ptr; 213 spd_stack_t *spds = ss->spdsock_spds; 214 215 mutex_enter(&spds->spds_param_lock); 216 value = spdsockpa->spdsock_param_value; 217 mutex_exit(&spds->spds_param_lock); 218 219 (void) mi_mpprintf(mp, "%u", value); 220 return (0); 221 } 222 223 /* This routine sets an NDD variable in a spdsockparam_t structure. 
*/ 224 /* ARGSUSED */ 225 static int 226 spdsock_param_set(q, mp, value, cp, cr) 227 queue_t *q; 228 mblk_t *mp; 229 char *value; 230 caddr_t cp; 231 cred_t *cr; 232 { 233 ulong_t new_value; 234 spdsockparam_t *spdsockpa = (spdsockparam_t *)cp; 235 spdsock_t *ss = (spdsock_t *)q->q_ptr; 236 spd_stack_t *spds = ss->spdsock_spds; 237 238 /* Convert the value from a string into a long integer. */ 239 if (ddi_strtoul(value, NULL, 10, &new_value) != 0) 240 return (EINVAL); 241 242 mutex_enter(&spds->spds_param_lock); 243 /* 244 * Fail the request if the new value does not lie within the 245 * required bounds. 246 */ 247 if (new_value < spdsockpa->spdsock_param_min || 248 new_value > spdsockpa->spdsock_param_max) { 249 mutex_exit(&spds->spds_param_lock); 250 return (EINVAL); 251 } 252 253 /* Set the new value */ 254 spdsockpa->spdsock_param_value = new_value; 255 mutex_exit(&spds->spds_param_lock); 256 257 return (0); 258 } 259 260 /* 261 * Initialize at module load time 262 */ 263 boolean_t 264 spdsock_ddi_init(void) 265 { 266 spdsock_max_optsize = optcom_max_optsize( 267 spdsock_opt_obj.odb_opt_des_arr, spdsock_opt_obj.odb_opt_arr_cnt); 268 269 spdsock_vmem = vmem_create("spdsock", (void *)1, MAXMIN, 1, 270 NULL, NULL, NULL, 1, VM_SLEEP | VMC_IDENTIFIER); 271 272 /* 273 * We want to be informed each time a stack is created or 274 * destroyed in the kernel, so we can maintain the 275 * set of spd_stack_t's. 276 */ 277 netstack_register(NS_SPDSOCK, spdsock_stack_init, NULL, 278 spdsock_stack_fini); 279 280 return (B_TRUE); 281 } 282 283 /* 284 * Walk through the param array specified registering each element with the 285 * named dispatch handler. 
286 */ 287 static boolean_t 288 spdsock_param_register(IDP *ndp, spdsockparam_t *ssp, int cnt) 289 { 290 for (; cnt-- > 0; ssp++) { 291 if (ssp->spdsock_param_name != NULL && 292 ssp->spdsock_param_name[0]) { 293 if (!nd_load(ndp, 294 ssp->spdsock_param_name, 295 spdsock_param_get, spdsock_param_set, 296 (caddr_t)ssp)) { 297 nd_free(ndp); 298 return (B_FALSE); 299 } 300 } 301 } 302 return (B_TRUE); 303 } 304 305 /* 306 * Initialize for each stack instance 307 */ 308 /* ARGSUSED */ 309 static void * 310 spdsock_stack_init(netstackid_t stackid, netstack_t *ns) 311 { 312 spd_stack_t *spds; 313 spdsockparam_t *ssp; 314 315 spds = (spd_stack_t *)kmem_zalloc(sizeof (*spds), KM_SLEEP); 316 spds->spds_netstack = ns; 317 318 ASSERT(spds->spds_g_nd == NULL); 319 320 ssp = (spdsockparam_t *)kmem_alloc(sizeof (lcl_param_arr), KM_SLEEP); 321 spds->spds_params = ssp; 322 bcopy(lcl_param_arr, ssp, sizeof (lcl_param_arr)); 323 324 (void) spdsock_param_register(&spds->spds_g_nd, ssp, 325 A_CNT(lcl_param_arr)); 326 327 mutex_init(&spds->spds_param_lock, NULL, MUTEX_DEFAULT, NULL); 328 mutex_init(&spds->spds_alg_lock, NULL, MUTEX_DEFAULT, NULL); 329 330 return (spds); 331 } 332 333 void 334 spdsock_ddi_destroy(void) 335 { 336 vmem_destroy(spdsock_vmem); 337 338 netstack_unregister(NS_SPDSOCK); 339 } 340 341 /* ARGSUSED */ 342 static void 343 spdsock_stack_fini(netstackid_t stackid, void *arg) 344 { 345 spd_stack_t *spds = (spd_stack_t *)arg; 346 347 freemsg(spds->spds_mp_algs); 348 mutex_destroy(&spds->spds_param_lock); 349 mutex_destroy(&spds->spds_alg_lock); 350 nd_free(&spds->spds_g_nd); 351 kmem_free(spds->spds_params, sizeof (lcl_param_arr)); 352 spds->spds_params = NULL; 353 354 kmem_free(spds, sizeof (*spds)); 355 } 356 357 /* 358 * NOTE: large quantities of this should be shared with keysock. 359 * Would be nice to combine some of this into a common module, but 360 * not possible given time pressures. 361 */ 362 363 /* 364 * High-level reality checking of extensions. 
 */
/* ARGSUSED */ /* XXX */
static boolean_t
ext_check(spd_ext_t *ext)
{
	spd_if_t *tunname = (spd_if_t *)ext;
	int i;
	char *idstr;

	if (ext->spd_ext_type == SPD_EXT_TUN_NAME) {
		/* (NOTE: Modified from SADB_EXT_IDENTITY..) */

		/*
		 * Make sure the strings in these identities are
		 * null-terminated.  Let's "proactively" null-terminate the
		 * string at the last byte if it's not terminated sooner.
		 */
		i = SPD_64TO8(tunname->spd_if_len) - sizeof (spd_if_t);
		idstr = (char *)(tunname + 1);
		while (*idstr != '\0' && i > 0) {
			i--;
			idstr++;
		}
		if (i == 0) {
			/*
			 * I.e., if the bozo user didn't NULL-terminate the
			 * string...
			 */
			idstr--;
			*idstr = '\0';
		}
	}
	return (B_TRUE);	/* For now... */
}



/* Return values for spdsock_get_ext(). */
#define	KGE_OK	0
#define	KGE_DUP	1
#define	KGE_UNK	2
#define	KGE_LEN	3
#define	KGE_CHK	4

/*
 * Parse basic extension headers and return in the passed-in pointer vector.
 * Return values include:
 *
 *	KGE_OK	Everything's nice and parsed out.
 *		If there are no extensions, place NULL in extv[0].
 *	KGE_DUP	There is a duplicate extension.
 *		First instance in appropriate bin.  First duplicate in
 *		extv[0].
 *	KGE_UNK	Unknown extension type encountered.  extv[0] contains
 *		unknown header.
 *	KGE_LEN	Extension length error.
 *	KGE_CHK	High-level reality check failed on specific extension.
 *
 * My apologies for some of the pointer arithmetic in here.  I'm thinking
 * like an assembly programmer, yet trying to make the compiler happy.
 */
static int
spdsock_get_ext(spd_ext_t *extv[], spd_msg_t *basehdr, uint_t msgsize)
{
	/* extv[] is indexed by extension type; slot 0 is scratch space. */
	bzero(extv, sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));

	/* Use extv[0] as the "current working pointer". */

	extv[0] = (spd_ext_t *)(basehdr + 1);

	while (extv[0] < (spd_ext_t *)(((uint8_t *)basehdr) + msgsize)) {
		/* Check for unknown headers. */
		if (extv[0]->spd_ext_type == 0 ||
		    extv[0]->spd_ext_type > SPD_EXT_MAX)
			return (KGE_UNK);

		/*
		 * Check length.  Use uint64_t because extlen is in units
		 * of 64-bit words.  If length goes beyond the msgsize,
		 * return an error.  (Zero length also qualifies here.)
		 */
		if (extv[0]->spd_ext_len == 0 ||
		    (void *)((uint64_t *)extv[0] + extv[0]->spd_ext_len) >
		    (void *)((uint8_t *)basehdr + msgsize))
			return (KGE_LEN);

		/* Check for redundant headers. */
		if (extv[extv[0]->spd_ext_type] != NULL)
			return (KGE_DUP);

		/*
		 * Reality check the extension if possible at the spdsock
		 * level.
		 */
		if (!ext_check(extv[0]))
			return (KGE_CHK);

		/* If I make it here, assign the appropriate bin. */
		extv[extv[0]->spd_ext_type] = extv[0];

		/* Advance pointer (See above for uint64_t ptr reasoning.) */
		extv[0] = (spd_ext_t *)
		    ((uint64_t *)extv[0] + extv[0]->spd_ext_len);
	}

	/* Everything's cool. */

	/*
	 * If extv[0] == NULL, then there are no extension headers in this
	 * message.  Ensure that this is the case.
	 */
	if (extv[0] == (spd_ext_t *)(basehdr + 1))
		extv[0] = NULL;

	return (KGE_OK);
}

/* Per-extension-type diagnostics for malformed extensions. */
static const int bad_ext_diag[] = {
	SPD_DIAGNOSTIC_MALFORMED_LCLPORT,
	SPD_DIAGNOSTIC_MALFORMED_REMPORT,
	SPD_DIAGNOSTIC_MALFORMED_PROTO,
	SPD_DIAGNOSTIC_MALFORMED_LCLADDR,
	SPD_DIAGNOSTIC_MALFORMED_REMADDR,
	SPD_DIAGNOSTIC_MALFORMED_ACTION,
	SPD_DIAGNOSTIC_MALFORMED_RULE,
	SPD_DIAGNOSTIC_MALFORMED_RULESET,
	SPD_DIAGNOSTIC_MALFORMED_ICMP_TYPECODE
};

/* Per-extension-type diagnostics for duplicated extensions. */
static const int dup_ext_diag[] = {
	SPD_DIAGNOSTIC_DUPLICATE_LCLPORT,
	SPD_DIAGNOSTIC_DUPLICATE_REMPORT,
	SPD_DIAGNOSTIC_DUPLICATE_PROTO,
	SPD_DIAGNOSTIC_DUPLICATE_LCLADDR,
	SPD_DIAGNOSTIC_DUPLICATE_REMADDR,
	SPD_DIAGNOSTIC_DUPLICATE_ACTION,
	SPD_DIAGNOSTIC_DUPLICATE_RULE,
	SPD_DIAGNOSTIC_DUPLICATE_RULESET,
	SPD_DIAGNOSTIC_DUPLICATE_ICMP_TYPECODE
};

/*
 * Transmit a PF_POLICY error message to the instance either pointed to
 * by ks, the instance with serial number serial, or more, depending.
 *
 * The faulty message (or a reasonable facsimile thereof) is in mp.
 * This function will free mp or recycle it for delivery, thereby causing
 * the stream head to free it.
 */
static void
spdsock_error(queue_t *q, mblk_t *mp, int error, int diagnostic)
{
	spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

	ASSERT(mp->b_datap->db_type == M_DATA);

	/* Clamp out-of-range message types to SPD_RESERVED. */
	if (spmsg->spd_msg_type < SPD_MIN ||
	    spmsg->spd_msg_type > SPD_MAX)
		spmsg->spd_msg_type = SPD_RESERVED;

	/*
	 * Strip out extension headers.
	 */
	ASSERT(mp->b_rptr + sizeof (*spmsg) <= mp->b_datap->db_lim);
	mp->b_wptr = mp->b_rptr + sizeof (*spmsg);
	spmsg->spd_msg_len = SPD_8TO64(sizeof (spd_msg_t));
	spmsg->spd_msg_errno = (uint8_t)error;
	spmsg->spd_msg_diagnostic = (uint16_t)diagnostic;

	qreply(q, mp);
}

/* Shorthand: report EINVAL with a specific PF_POLICY diagnostic code. */
static void
spdsock_diag(queue_t *q, mblk_t *mp, int diagnostic)
{
	spdsock_error(q, mp, EINVAL, diagnostic);
}

/* Echo the (successful) request message back up to the sender. */
static void
spd_echo(queue_t *q, mblk_t *mp)
{
	qreply(q, mp);
}

/*
 * Do NOT consume a reference to itp.
 */
/*ARGSUSED*/
static void
spdsock_flush_node(ipsec_tun_pol_t *itp, void *cookie, netstack_t *ns)
{
	/* cookie encodes whether the active or inactive polhead is meant. */
	boolean_t active = (boolean_t)cookie;
	ipsec_policy_head_t *iph;

	iph = active ? itp->itp_policy : itp->itp_inactive;
	/* Hold iph across the flush; spdsock_flush_one releases it. */
	IPPH_REFHOLD(iph);
	mutex_enter(&itp->itp_lock);
	spdsock_flush_one(iph, ns);
	if (active)
		itp->itp_flags &= ~ITPF_PFLAGS;
	else
		itp->itp_flags &= ~ITPF_IFLAGS;
	mutex_exit(&itp->itp_lock);
}

/*
 * Clear out one polhead.
 */
static void
spdsock_flush_one(ipsec_policy_head_t *iph, netstack_t *ns)
{
	rw_enter(&iph->iph_lock, RW_WRITER);
	ipsec_polhead_flush(iph, ns);
	rw_exit(&iph->iph_lock);
	/* Consumes the hold the caller placed on iph. */
	IPPH_REFRELE(iph, ns);
}

/*
 * Handle an SPD_FLUSH request: flush either one specific polhead, or
 * (for the ALL_*_POLHEADS sentinels) the global polhead plus every
 * tunnel's corresponding polhead.  Generates audit records when C2
 * auditing is active, then echoes the request back as the ack.
 */
static void
spdsock_flush(queue_t *q, ipsec_policy_head_t *iph, ipsec_tun_pol_t *itp,
    mblk_t *mp)
{
	boolean_t active;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (iph != ALL_ACTIVE_POLHEADS && iph != ALL_INACTIVE_POLHEADS) {
		/* Single polhead case. */
		spdsock_flush_one(iph, ns);
		if (audit_active) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLUSH, cr, ns,
			    ITP_NAME(itp), active, 0, cpid);
		}
	} else {
		active = (iph == ALL_ACTIVE_POLHEADS);

		/* First flush the global policy. */
		spdsock_flush_one(active ? ipsec_system_policy(ns) :
		    ipsec_inactive_policy(ns), ns);
		if (audit_active) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_FLUSH, cr, ns, NULL,
			    active, 0, cpid);
		}
		/* Then flush every tunnel's appropriate one. */
		itp_walk(spdsock_flush_node, (void *)active, ns);
		if (audit_active) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_FLUSH, cr, ns,
			    "all tunnels", active, 0, cpid);
		}
	}

	spd_echo(q, mp);
}

/*
 * Convert the selector-related extensions in extv[] into an
 * ipsec_selkey_t.  Returns B_FALSE (setting *diag) on malformed
 * addresses or a mix of IPv4 and IPv6 selectors.
 */
static boolean_t
spdsock_ext_to_sel(spd_ext_t **extv, ipsec_selkey_t *sel, int *diag)
{
	bzero(sel, sizeof (*sel));

	if (extv[SPD_EXT_PROTO] != NULL) {
		struct spd_proto *pr =
		    (struct spd_proto *)extv[SPD_EXT_PROTO];
		sel->ipsl_proto = pr->spd_proto_number;
		sel->ipsl_valid |= IPSL_PROTOCOL;
	}
	if (extv[SPD_EXT_LCLPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_LCLPORT];
		sel->ipsl_lport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_LOCAL_PORT;
	}
	if (extv[SPD_EXT_REMPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_REMPORT];
		sel->ipsl_rport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_REMOTE_PORT;
	}

	if (extv[SPD_EXT_ICMP_TYPECODE] != NULL) {
		struct spd_typecode *tc =
		    (struct spd_typecode *)extv[SPD_EXT_ICMP_TYPECODE];

		sel->ipsl_valid |= IPSL_ICMP_TYPE;
		sel->ipsl_icmp_type = tc->spd_typecode_type;
		/* Normalize an inverted/absent range end to a single type. */
		if (tc->spd_typecode_type_end < tc->spd_typecode_type)
			sel->ipsl_icmp_type_end = tc->spd_typecode_type;
		else
			sel->ipsl_icmp_type_end = tc->spd_typecode_type_end;

		/* Code 255 means "no code specified". */
		if (tc->spd_typecode_code != 255) {
			sel->ipsl_valid |= IPSL_ICMP_CODE;
			sel->ipsl_icmp_code = tc->spd_typecode_code;
			if (tc->spd_typecode_code_end < tc->spd_typecode_code)
				sel->ipsl_icmp_code_end = tc->spd_typecode_code;
			else
				sel->ipsl_icmp_code_end =
				    tc->spd_typecode_code_end;
		}
	}
/*
 * Copy one address extension (local or remote) into the selector,
 * validating that the extension is long enough for its address family.
 */
#define	ADDR2SEL(sel, extv, field, pfield, extn, bit) \
	if ((extv)[(extn)] != NULL) { \
		uint_t addrlen; \
		struct spd_address *ap = \
			(struct spd_address *)((extv)[(extn)]); \
		addrlen = (ap->spd_address_af == AF_INET6) ? \
			IPV6_ADDR_LEN : IP_ADDR_LEN; \
		if (SPD_64TO8(ap->spd_address_len) < \
			(addrlen + sizeof (*ap))) { \
			*diag = SPD_DIAGNOSTIC_BAD_ADDR_LEN; \
			return (B_FALSE); \
		} \
		bcopy((ap+1), &((sel)->field), addrlen); \
		(sel)->pfield = ap->spd_address_prefixlen; \
		(sel)->ipsl_valid |= (bit); \
		(sel)->ipsl_valid |= (ap->spd_address_af == AF_INET6) ? \
			IPSL_IPV6 : IPSL_IPV4; \
	}

	ADDR2SEL(sel, extv, ipsl_local, ipsl_local_pfxlen,
	    SPD_EXT_LCLADDR, IPSL_LOCAL_ADDR);
	ADDR2SEL(sel, extv, ipsl_remote, ipsl_remote_pfxlen,
	    SPD_EXT_REMADDR, IPSL_REMOTE_ADDR);

	if ((sel->ipsl_valid & (IPSL_IPV6|IPSL_IPV4)) ==
	    (IPSL_IPV6|IPSL_IPV4)) {
		*diag = SPD_DIAGNOSTIC_MIXED_AF;
		return (B_FALSE);
	}

#undef ADDR2SEL

	return (B_TRUE);
}

/*
 * Map a PF_POLICY action type onto the kernel ipsec_act_t type.
 * Returns B_FALSE for unrecognized types.
 */
static boolean_t
spd_convert_type(uint32_t type, ipsec_act_t *act)
{
	switch (type) {
	case SPD_ACTTYPE_DROP:
		act->ipa_type = IPSEC_ACT_DISCARD;
		return (B_TRUE);

	case SPD_ACTTYPE_PASS:
		act->ipa_type = IPSEC_ACT_CLEAR;
		return (B_TRUE);

	case SPD_ACTTYPE_IPSEC:
		act->ipa_type = IPSEC_ACT_APPLY;
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Map the SPD_APPLY_* flag bits onto the corresponding ipp_use_* fields.
 */
static boolean_t
spd_convert_flags(uint32_t flags, ipsec_act_t *act)
{
	/*
	 * Note use of !! for boolean canonicalization.
	 */
	act->ipa_apply.ipp_use_ah = !!(flags & SPD_APPLY_AH);
	act->ipa_apply.ipp_use_esp = !!(flags & SPD_APPLY_ESP);
	act->ipa_apply.ipp_use_espa = !!(flags & SPD_APPLY_ESPA);
	act->ipa_apply.ipp_use_se = !!(flags & SPD_APPLY_SE);
	act->ipa_apply.ipp_use_unique = !!(flags & SPD_APPLY_UNIQUE);
	return (B_TRUE);
}

/*
 * Reset an action to its defaults: all-zero except for the key-size
 * upper bounds, which default to the maximum.
 */
static void
spdsock_reset_act(ipsec_act_t *act)
{
	bzero(act, sizeof (*act));
	act->ipa_apply.ipp_espe_maxbits = IPSEC_MAX_KEYBITS;
	act->ipa_apply.ipp_espa_maxbits = IPSEC_MAX_KEYBITS;
	act->ipa_apply.ipp_ah_maxbits = IPSEC_MAX_KEYBITS;
}

/*
 * Sanity check action against reality, and shrink-wrap key sizes..
 */
static boolean_t
spdsock_check_action(ipsec_act_t *act, boolean_t tunnel_polhead, int *diag,
    spd_stack_t *spds)
{
	/* Unique SAs make no sense on a tunnel polhead. */
	if (tunnel_polhead && act->ipa_apply.ipp_use_unique) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	/* IPsec-specific flags require an APPLY action... */
	if ((act->ipa_type != IPSEC_ACT_APPLY) &&
	    (act->ipa_apply.ipp_use_ah ||
	    act->ipa_apply.ipp_use_esp ||
	    act->ipa_apply.ipp_use_espa ||
	    act->ipa_apply.ipp_use_se ||
	    act->ipa_apply.ipp_use_unique)) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	/* ... and an APPLY action requires at least one of AH or ESP. */
	if ((act->ipa_type == IPSEC_ACT_APPLY) &&
	    !act->ipa_apply.ipp_use_ah &&
	    !act->ipa_apply.ipp_use_esp) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	return (ipsec_check_action(act, diag, spds->spds_netstack));
}

/*
 * We may be short a few error checks here..
784 */ 785 static boolean_t 786 spdsock_ext_to_actvec(spd_ext_t **extv, ipsec_act_t **actpp, uint_t *nactp, 787 int *diag, spd_stack_t *spds) 788 { 789 struct spd_ext_actions *sactp = 790 (struct spd_ext_actions *)extv[SPD_EXT_ACTION]; 791 ipsec_act_t act, *actp, *endactp; 792 struct spd_attribute *attrp, *endattrp; 793 uint64_t *endp; 794 int nact; 795 boolean_t tunnel_polhead; 796 797 tunnel_polhead = (extv[SPD_EXT_TUN_NAME] != NULL && 798 (((struct spd_rule *)extv[SPD_EXT_RULE])->spd_rule_flags & 799 SPD_RULE_FLAG_TUNNEL)); 800 801 *actpp = NULL; 802 *nactp = 0; 803 804 if (sactp == NULL) { 805 *diag = SPD_DIAGNOSTIC_NO_ACTION_EXT; 806 return (B_FALSE); 807 } 808 809 /* 810 * Parse the "action" extension and convert into an action chain. 811 */ 812 813 nact = sactp->spd_actions_count; 814 815 endp = (uint64_t *)sactp; 816 endp += sactp->spd_actions_len; 817 endattrp = (struct spd_attribute *)endp; 818 819 actp = kmem_alloc(sizeof (*actp) * nact, KM_NOSLEEP); 820 if (actp == NULL) { 821 *diag = SPD_DIAGNOSTIC_ADD_NO_MEM; 822 return (B_FALSE); 823 } 824 *actpp = actp; 825 *nactp = nact; 826 endactp = actp + nact; 827 828 spdsock_reset_act(&act); 829 attrp = (struct spd_attribute *)(&sactp[1]); 830 831 for (; attrp < endattrp; attrp++) { 832 switch (attrp->spd_attr_tag) { 833 case SPD_ATTR_NOP: 834 break; 835 836 case SPD_ATTR_EMPTY: 837 spdsock_reset_act(&act); 838 break; 839 840 case SPD_ATTR_END: 841 attrp = endattrp; 842 /* FALLTHRU */ 843 case SPD_ATTR_NEXT: 844 if (actp >= endactp) { 845 *diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT; 846 goto fail; 847 } 848 if (!spdsock_check_action(&act, tunnel_polhead, 849 diag, spds)) 850 goto fail; 851 *actp++ = act; 852 spdsock_reset_act(&act); 853 break; 854 855 case SPD_ATTR_TYPE: 856 if (!spd_convert_type(attrp->spd_attr_value, &act)) { 857 *diag = SPD_DIAGNOSTIC_ADD_BAD_TYPE; 858 goto fail; 859 } 860 break; 861 862 case SPD_ATTR_FLAGS: 863 if (!tunnel_polhead && extv[SPD_EXT_TUN_NAME] != NULL) { 864 /* 865 * Set "sa 
unique" for transport-mode 866 * tunnels whether we want to or not. 867 */ 868 attrp->spd_attr_value |= SPD_APPLY_UNIQUE; 869 } 870 if (!spd_convert_flags(attrp->spd_attr_value, &act)) { 871 *diag = SPD_DIAGNOSTIC_ADD_BAD_FLAGS; 872 goto fail; 873 } 874 break; 875 876 case SPD_ATTR_AH_AUTH: 877 if (attrp->spd_attr_value == 0) { 878 *diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG; 879 goto fail; 880 } 881 act.ipa_apply.ipp_auth_alg = attrp->spd_attr_value; 882 break; 883 884 case SPD_ATTR_ESP_ENCR: 885 if (attrp->spd_attr_value == 0) { 886 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG; 887 goto fail; 888 } 889 act.ipa_apply.ipp_encr_alg = attrp->spd_attr_value; 890 break; 891 892 case SPD_ATTR_ESP_AUTH: 893 if (attrp->spd_attr_value == 0) { 894 *diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG; 895 goto fail; 896 } 897 act.ipa_apply.ipp_esp_auth_alg = attrp->spd_attr_value; 898 break; 899 900 case SPD_ATTR_ENCR_MINBITS: 901 act.ipa_apply.ipp_espe_minbits = attrp->spd_attr_value; 902 break; 903 904 case SPD_ATTR_ENCR_MAXBITS: 905 act.ipa_apply.ipp_espe_maxbits = attrp->spd_attr_value; 906 break; 907 908 case SPD_ATTR_AH_MINBITS: 909 act.ipa_apply.ipp_ah_minbits = attrp->spd_attr_value; 910 break; 911 912 case SPD_ATTR_AH_MAXBITS: 913 act.ipa_apply.ipp_ah_maxbits = attrp->spd_attr_value; 914 break; 915 916 case SPD_ATTR_ESPA_MINBITS: 917 act.ipa_apply.ipp_espa_minbits = attrp->spd_attr_value; 918 break; 919 920 case SPD_ATTR_ESPA_MAXBITS: 921 act.ipa_apply.ipp_espa_maxbits = attrp->spd_attr_value; 922 break; 923 924 case SPD_ATTR_LIFE_SOFT_TIME: 925 case SPD_ATTR_LIFE_HARD_TIME: 926 case SPD_ATTR_LIFE_SOFT_BYTES: 927 case SPD_ATTR_LIFE_HARD_BYTES: 928 break; 929 930 case SPD_ATTR_KM_PROTO: 931 act.ipa_apply.ipp_km_proto = attrp->spd_attr_value; 932 break; 933 934 case SPD_ATTR_KM_COOKIE: 935 act.ipa_apply.ipp_km_cookie = attrp->spd_attr_value; 936 break; 937 938 case SPD_ATTR_REPLAY_DEPTH: 939 act.ipa_apply.ipp_replay_depth = attrp->spd_attr_value; 940 break; 941 } 942 } 943 if (actp != 
endactp) { 944 *diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT; 945 goto fail; 946 } 947 948 return (B_TRUE); 949 fail: 950 ipsec_actvec_free(*actpp, nact); 951 *actpp = NULL; 952 return (B_FALSE); 953 } 954 955 typedef struct 956 { 957 ipsec_policy_t *pol; 958 int dir; 959 } tmprule_t; 960 961 static int 962 mkrule(ipsec_policy_head_t *iph, struct spd_rule *rule, 963 ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t af, 964 tmprule_t **rp, uint64_t *index, spd_stack_t *spds) 965 { 966 ipsec_policy_t *pol; 967 968 sel->ipsl_valid &= ~(IPSL_IPV6|IPSL_IPV4); 969 sel->ipsl_valid |= af; 970 971 pol = ipsec_policy_create(sel, actp, nact, rule->spd_rule_priority, 972 index, spds->spds_netstack); 973 if (pol == NULL) 974 return (ENOMEM); 975 976 (*rp)->pol = pol; 977 (*rp)->dir = dir; 978 (*rp)++; 979 980 if (!ipsec_check_policy(iph, pol, dir)) 981 return (EEXIST); 982 983 rule->spd_rule_index = pol->ipsp_index; 984 return (0); 985 } 986 987 static int 988 mkrulepair(ipsec_policy_head_t *iph, struct spd_rule *rule, 989 ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t afs, 990 tmprule_t **rp, uint64_t *index, spd_stack_t *spds) 991 { 992 int error; 993 994 if (afs & IPSL_IPV4) { 995 error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV4, rp, 996 index, spds); 997 if (error != 0) 998 return (error); 999 } 1000 if (afs & IPSL_IPV6) { 1001 error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV6, rp, 1002 index, spds); 1003 if (error != 0) 1004 return (error); 1005 } 1006 return (0); 1007 } 1008 1009 1010 static void 1011 spdsock_addrule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp, 1012 spd_ext_t **extv, ipsec_tun_pol_t *itp) 1013 { 1014 ipsec_selkey_t sel; 1015 ipsec_act_t *actp; 1016 uint_t nact; 1017 int diag = 0, error, afs; 1018 struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE]; 1019 tmprule_t rules[4], *rulep = &rules[0]; 1020 boolean_t tunnel_mode, empty_itp, active; 1021 uint64_t *index = (itp == NULL) ? 
NULL : &itp->itp_next_policy_index; 1022 spdsock_t *ss = (spdsock_t *)q->q_ptr; 1023 spd_stack_t *spds = ss->spdsock_spds; 1024 1025 if (rule == NULL) { 1026 spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT); 1027 if (audit_active) { 1028 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 1029 cred_t *cr; 1030 pid_t cpid; 1031 1032 cr = msg_getcred(mp, &cpid); 1033 active = (spmsg->spd_msg_spdid == SPD_ACTIVE); 1034 audit_pf_policy(SPD_ADDRULE, cr, 1035 spds->spds_netstack, ITP_NAME(itp), active, 1036 SPD_DIAGNOSTIC_NO_RULE_EXT, cpid); 1037 } 1038 return; 1039 } 1040 1041 tunnel_mode = (rule->spd_rule_flags & SPD_RULE_FLAG_TUNNEL); 1042 1043 if (itp != NULL) { 1044 mutex_enter(&itp->itp_lock); 1045 ASSERT(itp->itp_policy == iph || itp->itp_inactive == iph); 1046 active = (itp->itp_policy == iph); 1047 if (ITP_P_ISACTIVE(itp, iph)) { 1048 /* Check for mix-and-match of tunnel/transport. */ 1049 if ((tunnel_mode && !ITP_P_ISTUNNEL(itp, iph)) || 1050 (!tunnel_mode && ITP_P_ISTUNNEL(itp, iph))) { 1051 mutex_exit(&itp->itp_lock); 1052 spdsock_error(q, mp, EBUSY, 0); 1053 return; 1054 } 1055 empty_itp = B_FALSE; 1056 } else { 1057 empty_itp = B_TRUE; 1058 itp->itp_flags = active ? ITPF_P_ACTIVE : ITPF_I_ACTIVE; 1059 if (tunnel_mode) 1060 itp->itp_flags |= active ? ITPF_P_TUNNEL : 1061 ITPF_I_TUNNEL; 1062 } 1063 } else { 1064 empty_itp = B_FALSE; 1065 } 1066 1067 if (rule->spd_rule_index != 0) { 1068 diag = SPD_DIAGNOSTIC_INVALID_RULE_INDEX; 1069 error = EINVAL; 1070 goto fail2; 1071 } 1072 1073 if (!spdsock_ext_to_sel(extv, &sel, &diag)) { 1074 error = EINVAL; 1075 goto fail2; 1076 } 1077 1078 if (itp != NULL) { 1079 if (tunnel_mode) { 1080 if (sel.ipsl_valid & 1081 (IPSL_REMOTE_PORT | IPSL_LOCAL_PORT)) { 1082 itp->itp_flags |= active ? 1083 ITPF_P_PER_PORT_SECURITY : 1084 ITPF_I_PER_PORT_SECURITY; 1085 } 1086 } else { 1087 /* 1088 * For now, we don't allow transport-mode on a tunnel 1089 * with ANY specific selectors. Bail if we have such 1090 * a request. 
1091 */ 1092 if (sel.ipsl_valid & IPSL_WILDCARD) { 1093 diag = SPD_DIAGNOSTIC_NO_TUNNEL_SELECTORS; 1094 error = EINVAL; 1095 goto fail2; 1096 } 1097 } 1098 } 1099 1100 if (!spdsock_ext_to_actvec(extv, &actp, &nact, &diag, spds)) { 1101 error = EINVAL; 1102 goto fail2; 1103 } 1104 /* 1105 * If no addresses were specified, add both. 1106 */ 1107 afs = sel.ipsl_valid & (IPSL_IPV6|IPSL_IPV4); 1108 if (afs == 0) 1109 afs = (IPSL_IPV6|IPSL_IPV4); 1110 1111 rw_enter(&iph->iph_lock, RW_WRITER); 1112 1113 if (rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) { 1114 error = mkrulepair(iph, rule, &sel, actp, nact, 1115 IPSEC_TYPE_OUTBOUND, afs, &rulep, index, spds); 1116 if (error != 0) 1117 goto fail; 1118 } 1119 1120 if (rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) { 1121 error = mkrulepair(iph, rule, &sel, actp, nact, 1122 IPSEC_TYPE_INBOUND, afs, &rulep, index, spds); 1123 if (error != 0) 1124 goto fail; 1125 } 1126 1127 while ((--rulep) >= &rules[0]) { 1128 ipsec_enter_policy(iph, rulep->pol, rulep->dir, 1129 spds->spds_netstack); 1130 } 1131 rw_exit(&iph->iph_lock); 1132 if (itp != NULL) 1133 mutex_exit(&itp->itp_lock); 1134 1135 ipsec_actvec_free(actp, nact); 1136 spd_echo(q, mp); 1137 if (audit_active) { 1138 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 1139 cred_t *cr; 1140 pid_t cpid; 1141 1142 cr = msg_getcred(mp, &cpid); 1143 active = (spmsg->spd_msg_spdid == SPD_ACTIVE); 1144 audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack, 1145 ITP_NAME(itp), active, 0, cpid); 1146 } 1147 return; 1148 1149 fail: 1150 rw_exit(&iph->iph_lock); 1151 while ((--rulep) >= &rules[0]) { 1152 IPPOL_REFRELE(rulep->pol, spds->spds_netstack); 1153 } 1154 ipsec_actvec_free(actp, nact); 1155 fail2: 1156 if (itp != NULL) { 1157 if (empty_itp) 1158 itp->itp_flags = 0; 1159 mutex_exit(&itp->itp_lock); 1160 } 1161 spdsock_error(q, mp, error, diag); 1162 if (audit_active) { 1163 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 1164 cred_t *cr; 1165 pid_t cpid; 1166 1167 cr = msg_getcred(mp, &cpid); 
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
		    ITP_NAME(itp), active, error, cpid);
	}
}

/*
 * Handle an SPD_DELETERULE request on policy head "iph": remove a rule
 * either by its index (spd_rule_index != 0) or by the selector built from
 * the message extensions.  On success the request is echoed back to the
 * client; on failure an error message is sent.  In both cases an audit
 * record is cut when auditing is enabled.  "itp", if non-NULL, is the
 * tunnel policy this head belongs to; its per-head flags are cleared
 * when the head becomes empty.
 */
void
spdsock_deleterule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	int err, diag = 0;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	/* A rule extension is mandatory for delete. */
	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_DELETERULE, cr, ns,
			    ITP_NAME(itp), active, SPD_DIAGNOSTIC_NO_RULE_EXT,
			    cpid);
		}
		return;
	}

	/*
	 * Must enter itp_lock first to avoid deadlock.  See tun.c's
	 * set_sec_simple() for the other case of itp_lock and iph_lock.
	 */
	if (itp != NULL)
		mutex_enter(&itp->itp_lock);

	if (rule->spd_rule_index != 0) {
		/* Delete by explicit rule index. */
		if (ipsec_policy_delete_index(iph, rule->spd_rule_index, ns) !=
		    0) {
			err = ESRCH;
			goto fail;
		}
	} else {
		/* Delete by selector match, in the requested direction(s). */
		if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
			err = EINVAL;	/* diag already set... */
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_INBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_OUTBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}
	}

	if (itp != NULL) {
		ASSERT(iph == itp->itp_policy || iph == itp->itp_inactive);
		rw_enter(&iph->iph_lock, RW_READER);
		/*
		 * If this delete emptied the policy head, clear the
		 * corresponding active/inactive flags on the tunnel.
		 */
		if (avl_numnodes(&iph->iph_rulebyid) == 0) {
			if (iph == itp->itp_policy)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
		}
		/* Can exit locks in any order. */
		rw_exit(&iph->iph_lock);
		mutex_exit(&itp->itp_lock);
	}
	spd_echo(q, mp);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, 0, cpid);
	}
	return;
fail:
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);
	spdsock_error(q, mp, err, diag);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, err, cpid);
	}
}

/* Do NOT consume a reference to itp.
 */
/* ARGSUSED */
static void
spdsock_flip_node(ipsec_tun_pol_t *itp, void *ignoreme, netstack_t *ns)
{
	/* Swap this tunnel's active and inactive policy under itp_lock. */
	mutex_enter(&itp->itp_lock);
	ITPF_SWAP(itp->itp_flags);
	ipsec_swap_policy(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
}

/*
 * Handle SPD_FLIP: exchange the active and inactive policy heads.
 * With no tunnel name (or an empty one) the global policy is swapped —
 * and for the empty-name case every tunnel polhead is swapped as well.
 * With a specific tunnel name, only that tunnel's polheads are swapped.
 * An audit record is cut for each swap when auditing is enabled.
 */
void
spdsock_flip(queue_t *q, mblk_t *mp, spd_if_t *tunname)
{
	char *tname;
	ipsec_tun_pol_t *itp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (tunname != NULL) {
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* can't fail */
			ipsec_swap_global_policy(ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    NULL, active, 0, cpid);
			}
			/* Empty name means "every tunnel", too. */
			itp_walk(spdsock_flip_node, NULL, ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    "all tunnels", active, 0, cpid);
			}
		} else {
			itp = get_tunnel_policy(tname, ns);
			if (itp == NULL) {
				/* Better idea for "tunnel not found"? */
				spdsock_error(q, mp, ESRCH, 0);
				if (audit_active) {
					boolean_t active;
					spd_msg_t *spmsg =
					    (spd_msg_t *)mp->b_rptr;
					cred_t *cr;
					pid_t cpid;

					cr = msg_getcred(mp, &cpid);
					active = (spmsg->spd_msg_spdid ==
					    SPD_ACTIVE);
					audit_pf_policy(SPD_FLIP, cr, ns,
					    ITP_NAME(itp), active,
					    ESRCH, cpid);
				}
				return;
			}
			spdsock_flip_node(itp, NULL, NULL);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    ITP_NAME(itp), active, 0, cpid);
			}
			ITP_REFRELE(itp, ns);
		}
	} else {
		ipsec_swap_global_policy(ns);	/* can't fail */
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLIP, cr,
			    ns, NULL, active, 0, cpid);
		}
	}
	spd_echo(q, mp);
}

/*
 * Unimplemented feature
 */
/* ARGSUSED */
static void
spdsock_lookup(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	spdsock_error(q, mp, EINVAL, 0);
}


/*
 * Build an spd_msg + spd_ruleset_ext reply describing policy head "iph":
 * rule count, errno, and the head's generation number.  Caller must hold
 * iph_lock as reader.  Returns NULL on allocation failure.
 */
static mblk_t *
spdsock_dump_ruleset(mblk_t *req, ipsec_policy_head_t *iph,
    uint32_t count, uint16_t error)
{
	size_t len = sizeof (spd_ruleset_ext_t) + sizeof (spd_msg_t);
	spd_msg_t *msg;
	spd_ruleset_ext_t *ruleset;
	mblk_t *m = allocb(len, BPRI_HI);

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	if (m == NULL) {
		return (NULL);
	}
	msg = (spd_msg_t *)m->b_rptr;
	ruleset = (spd_ruleset_ext_t *)(&msg[1]);

	m->b_wptr = (uint8_t *)&ruleset[1];

	/* Start from a copy of the request header, then overwrite fields. */
	*msg = *(spd_msg_t *)(req->b_rptr);
	msg->spd_msg_len = SPD_8TO64(len);
	msg->spd_msg_errno = error;

	ruleset->spd_ruleset_len = SPD_8TO64(sizeof (*ruleset));
	ruleset->spd_ruleset_type = SPD_EXT_RULESET;
	ruleset->spd_ruleset_count = count;
	ruleset->spd_ruleset_version = iph->iph_gen;
	return (m);
}

/*
 * Finish a dump: emit the closing ruleset record (with final rule count
 * and error), drop the reference on the dumped policy head, and free the
 * saved dump request.  Returns the closing message, or NULL on ENOMEM.
 */
static mblk_t *
spdsock_dump_finish(spdsock_t *ss, int error)
{
	mblk_t *m;
	ipsec_policy_head_t *iph = ss->spdsock_dump_head;
	mblk_t *req = ss->spdsock_dump_req;

	rw_enter(&iph->iph_lock, RW_READER);
	m = spdsock_dump_ruleset(req, iph, ss->spdsock_dump_count, error);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ss->spdsock_spds->spds_netstack);
	ss->spdsock_dump_req = NULL;
	freemsg(req);

	return (m);
}

/*
 * Rule encoding functions.
 * We do a two-pass encode.
 * If base != NULL, fill in encoded rule part starting at base+offset.
 * Always return "offset" plus length of to-be-encoded data.
 */
static uint_t
spdsock_encode_typecode(uint8_t *base, uint_t offset, uint8_t type,
    uint8_t type_end, uint8_t code, uint8_t code_end)
{
	struct spd_typecode *tcp;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		tcp = (struct spd_typecode *)(base + offset);
		tcp->spd_typecode_len = SPD_8TO64(sizeof (*tcp));
		tcp->spd_typecode_exttype = SPD_EXT_ICMP_TYPECODE;
		tcp->spd_typecode_code = code;
		tcp->spd_typecode_type = type;
		tcp->spd_typecode_type_end = type_end;
		tcp->spd_typecode_code_end = code_end;
	}
	offset += sizeof (*tcp);

	ASSERT(ALIGNED64(offset));

	return (offset);
}

static uint_t
spdsock_encode_proto(uint8_t *base, uint_t offset, uint8_t proto)
{
	struct spd_proto *spp;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spp = (struct spd_proto *)(base + offset);
		spp->spd_proto_len = SPD_8TO64(sizeof (*spp));
		spp->spd_proto_exttype = SPD_EXT_PROTO;
		spp->spd_proto_number = proto;
		spp->spd_proto_reserved1 = 0;
		spp->spd_proto_reserved2 = 0;
	}
	offset += sizeof (*spp);

	ASSERT(ALIGNED64(offset));

	return (offset);
}

static uint_t
spdsock_encode_port(uint8_t *base, uint_t offset, uint16_t ext, uint16_t port)
{
	struct spd_portrange *spp;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spp = (struct spd_portrange *)(base + offset);
		spp->spd_ports_len = SPD_8TO64(sizeof (*spp));
		spp->spd_ports_exttype = ext;
		/* A single selector port encodes as a degenerate range. */
		spp->spd_ports_minport = port;
		spp->spd_ports_maxport = port;
	}
	offset += sizeof (*spp);

	ASSERT(ALIGNED64(offset));

	return (offset);
}

static uint_t
spdsock_encode_addr(uint8_t *base, uint_t offset, uint16_t ext,
    const ipsec_selkey_t *sel, const ipsec_addr_t *addr, uint_t pfxlen)
{
	struct spd_address *sae;
	ipsec_addr_t *spdaddr;
	uint_t start = offset;
	uint_t addrlen;
	uint_t af;

	if (sel->ipsl_valid & IPSL_IPV4) {
		af = AF_INET;
		addrlen = IP_ADDR_LEN;
	} else {
		af = AF_INET6;
		addrlen = IPV6_ADDR_LEN;
	}

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		sae = (struct spd_address *)(base + offset);
		sae->spd_address_exttype = ext;
		sae->spd_address_af = af;
		sae->spd_address_prefixlen = pfxlen;
		sae->spd_address_reserved2 = 0;

		spdaddr = (ipsec_addr_t *)(&sae[1]);
		bcopy(addr, spdaddr, addrlen);
	}
	offset += sizeof (*sae);
	/* Keep the running offset 64-bit aligned past the address bytes. */
	addrlen = roundup(addrlen, sizeof (uint64_t));
	offset += addrlen;

	ASSERT(ALIGNED64(offset));

	/* Length is only known after rounding, so patch it in at the end. */
	if (base != NULL)
		sae->spd_address_len = SPD_8TO64(offset - start);
	return (offset);
}

/*
 * Encode all valid selector fields of "sel" as PF_POLICY extensions,
 * returning the advanced offset (two-pass: sizing when base == NULL).
 */
static uint_t
spdsock_encode_sel(uint8_t *base, uint_t offset, const ipsec_sel_t *sel)
{
	const ipsec_selkey_t *selkey = &sel->ipsl_key;

	if (selkey->ipsl_valid & IPSL_PROTOCOL)
		offset = spdsock_encode_proto(base, offset, selkey->ipsl_proto);
	if (selkey->ipsl_valid & IPSL_LOCAL_PORT)
		offset = spdsock_encode_port(base, offset, SPD_EXT_LCLPORT,
		    selkey->ipsl_lport);
	if (selkey->ipsl_valid & IPSL_REMOTE_PORT)
		offset = spdsock_encode_port(base, offset, SPD_EXT_REMPORT,
		    selkey->ipsl_rport);
	if (selkey->ipsl_valid & IPSL_REMOTE_ADDR)
		offset = spdsock_encode_addr(base, offset, SPD_EXT_REMADDR,
		    selkey, &selkey->ipsl_remote, selkey->ipsl_remote_pfxlen);
	if (selkey->ipsl_valid & IPSL_LOCAL_ADDR)
		offset = spdsock_encode_addr(base, offset, SPD_EXT_LCLADDR,
		    selkey, &selkey->ipsl_local, selkey->ipsl_local_pfxlen);
	if (selkey->ipsl_valid & IPSL_ICMP_TYPE) {
		offset = spdsock_encode_typecode(base, offset,
		    selkey->ipsl_icmp_type, selkey->ipsl_icmp_type_end,
		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
		    selkey->ipsl_icmp_code : 255,
		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1573 selkey->ipsl_icmp_code_end : 255); 1574 } 1575 return (offset); 1576 } 1577 1578 static uint_t 1579 spdsock_encode_actattr(uint8_t *base, uint_t offset, uint32_t tag, 1580 uint32_t value) 1581 { 1582 struct spd_attribute *attr; 1583 1584 ASSERT(ALIGNED64(offset)); 1585 1586 if (base != NULL) { 1587 attr = (struct spd_attribute *)(base + offset); 1588 attr->spd_attr_tag = tag; 1589 attr->spd_attr_value = value; 1590 } 1591 offset += sizeof (struct spd_attribute); 1592 1593 ASSERT(ALIGNED64(offset)); 1594 1595 return (offset); 1596 } 1597 1598 1599 #define EMIT(t, v) offset = spdsock_encode_actattr(base, offset, (t), (v)) 1600 1601 static uint_t 1602 spdsock_encode_action(uint8_t *base, uint_t offset, const ipsec_action_t *ap) 1603 { 1604 const struct ipsec_act *act = &(ap->ipa_act); 1605 uint_t flags; 1606 1607 EMIT(SPD_ATTR_EMPTY, 0); 1608 switch (act->ipa_type) { 1609 case IPSEC_ACT_DISCARD: 1610 case IPSEC_ACT_REJECT: 1611 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_DROP); 1612 break; 1613 case IPSEC_ACT_BYPASS: 1614 case IPSEC_ACT_CLEAR: 1615 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_PASS); 1616 break; 1617 1618 case IPSEC_ACT_APPLY: 1619 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_IPSEC); 1620 flags = 0; 1621 if (act->ipa_apply.ipp_use_ah) 1622 flags |= SPD_APPLY_AH; 1623 if (act->ipa_apply.ipp_use_esp) 1624 flags |= SPD_APPLY_ESP; 1625 if (act->ipa_apply.ipp_use_espa) 1626 flags |= SPD_APPLY_ESPA; 1627 if (act->ipa_apply.ipp_use_se) 1628 flags |= SPD_APPLY_SE; 1629 if (act->ipa_apply.ipp_use_unique) 1630 flags |= SPD_APPLY_UNIQUE; 1631 EMIT(SPD_ATTR_FLAGS, flags); 1632 if (flags & SPD_APPLY_AH) { 1633 EMIT(SPD_ATTR_AH_AUTH, act->ipa_apply.ipp_auth_alg); 1634 EMIT(SPD_ATTR_AH_MINBITS, 1635 act->ipa_apply.ipp_ah_minbits); 1636 EMIT(SPD_ATTR_AH_MAXBITS, 1637 act->ipa_apply.ipp_ah_maxbits); 1638 } 1639 if (flags & SPD_APPLY_ESP) { 1640 EMIT(SPD_ATTR_ESP_ENCR, act->ipa_apply.ipp_encr_alg); 1641 EMIT(SPD_ATTR_ENCR_MINBITS, 1642 act->ipa_apply.ipp_espe_minbits); 1643 EMIT(SPD_ATTR_ENCR_MAXBITS, 
1644 act->ipa_apply.ipp_espe_maxbits); 1645 if (flags & SPD_APPLY_ESPA) { 1646 EMIT(SPD_ATTR_ESP_AUTH, 1647 act->ipa_apply.ipp_esp_auth_alg); 1648 EMIT(SPD_ATTR_ESPA_MINBITS, 1649 act->ipa_apply.ipp_espa_minbits); 1650 EMIT(SPD_ATTR_ESPA_MAXBITS, 1651 act->ipa_apply.ipp_espa_maxbits); 1652 } 1653 } 1654 if (act->ipa_apply.ipp_km_proto != 0) 1655 EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_proto); 1656 if (act->ipa_apply.ipp_km_cookie != 0) 1657 EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_cookie); 1658 if (act->ipa_apply.ipp_replay_depth != 0) 1659 EMIT(SPD_ATTR_REPLAY_DEPTH, 1660 act->ipa_apply.ipp_replay_depth); 1661 /* Add more here */ 1662 break; 1663 } 1664 1665 return (offset); 1666 } 1667 1668 static uint_t 1669 spdsock_encode_action_list(uint8_t *base, uint_t offset, 1670 const ipsec_action_t *ap) 1671 { 1672 struct spd_ext_actions *act; 1673 uint_t nact = 0; 1674 uint_t start = offset; 1675 1676 ASSERT(ALIGNED64(offset)); 1677 1678 if (base != NULL) { 1679 act = (struct spd_ext_actions *)(base + offset); 1680 act->spd_actions_len = 0; 1681 act->spd_actions_exttype = SPD_EXT_ACTION; 1682 act->spd_actions_count = 0; 1683 act->spd_actions_reserved = 0; 1684 } 1685 1686 offset += sizeof (*act); 1687 1688 ASSERT(ALIGNED64(offset)); 1689 1690 while (ap != NULL) { 1691 offset = spdsock_encode_action(base, offset, ap); 1692 ap = ap->ipa_next; 1693 nact++; 1694 if (ap != NULL) { 1695 EMIT(SPD_ATTR_NEXT, 0); 1696 } 1697 } 1698 EMIT(SPD_ATTR_END, 0); 1699 1700 ASSERT(ALIGNED64(offset)); 1701 1702 if (base != NULL) { 1703 act->spd_actions_count = nact; 1704 act->spd_actions_len = SPD_8TO64(offset - start); 1705 } 1706 1707 return (offset); 1708 } 1709 1710 #undef EMIT 1711 1712 /* ARGSUSED */ 1713 static uint_t 1714 spdsock_rule_flags(uint_t dir, uint_t af) 1715 { 1716 uint_t flags = 0; 1717 1718 if (dir == IPSEC_TYPE_INBOUND) 1719 flags |= SPD_RULE_FLAG_INBOUND; 1720 if (dir == IPSEC_TYPE_OUTBOUND) 1721 flags |= SPD_RULE_FLAG_OUTBOUND; 1722 1723 return (flags); 
}


/*
 * Encode one policy rule as a complete SPD_DUMP message: spd_msg header,
 * spd_rule, optional SPD_EXT_TUN_NAME (when "name" is non-NULL), the
 * selector extensions, and the action list.  Two-pass: with base == NULL
 * only the total length is computed.
 */
static uint_t
spdsock_encode_rule_head(uint8_t *base, uint_t offset, spd_msg_t *req,
    const ipsec_policy_t *rule, uint_t dir, uint_t af, char *name,
    boolean_t tunnel)
{
	struct spd_msg *spmsg;
	struct spd_rule *spr;
	spd_if_t *sid;

	uint_t start = offset;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spmsg = (struct spd_msg *)(base + offset);
		bzero(spmsg, sizeof (*spmsg));
		spmsg->spd_msg_version = PF_POLICY_V1;
		spmsg->spd_msg_type = SPD_DUMP;
		/* Echo the requester's sequence number and pid. */
		spmsg->spd_msg_seq = req->spd_msg_seq;
		spmsg->spd_msg_pid = req->spd_msg_pid;
	}
	offset += sizeof (struct spd_msg);

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spr = (struct spd_rule *)(base + offset);
		spr->spd_rule_type = SPD_EXT_RULE;
		spr->spd_rule_priority = rule->ipsp_prio;
		spr->spd_rule_flags = spdsock_rule_flags(dir, af);
		if (tunnel)
			spr->spd_rule_flags |= SPD_RULE_FLAG_TUNNEL;
		spr->spd_rule_unused = 0;
		spr->spd_rule_len = SPD_8TO64(sizeof (*spr));
		spr->spd_rule_index = rule->ipsp_index;
	}
	offset += sizeof (struct spd_rule);

	/*
	 * If we have an interface name (i.e. if this policy head came from
	 * a tunnel), add the SPD_EXT_TUN_NAME extension.
	 */
	if (name != NULL) {

		ASSERT(ALIGNED64(offset));

		if (base != NULL) {
			sid = (spd_if_t *)(base + offset);
			sid->spd_if_exttype = SPD_EXT_TUN_NAME;
			sid->spd_if_len = SPD_8TO64(sizeof (spd_if_t) +
			    roundup((strlen(name) - 4), 8));
			(void) strlcpy((char *)sid->spd_if_name, name,
			    LIFNAMSIZ);
		}

		offset += sizeof (spd_if_t) + roundup((strlen(name) - 4), 8);
	}

	offset = spdsock_encode_sel(base, offset, rule->ipsp_sel);
	offset = spdsock_encode_action_list(base, offset, rule->ipsp_act);

	ASSERT(ALIGNED64(offset));

	/* Total message length is only known now; patch the header. */
	if (base != NULL) {
		spmsg->spd_msg_len = SPD_8TO64(offset - start);
	}
	return (offset);
}

/*
 * Allocate and fill an mblk holding the full encoding of one rule.
 * Returns NULL on allocation failure.
 */
/* ARGSUSED */
static mblk_t *
spdsock_encode_rule(mblk_t *req, const ipsec_policy_t *rule,
    uint_t dir, uint_t af, char *name, boolean_t tunnel)
{
	mblk_t *m;
	uint_t len;
	spd_msg_t *mreq = (spd_msg_t *)req->b_rptr;

	/*
	 * Figure out how much space we'll need.
	 */
	len = spdsock_encode_rule_head(NULL, 0, mreq, rule, dir, af, name,
	    tunnel);

	/*
	 * Allocate mblk.
	 */
	m = allocb(len, BPRI_HI);
	if (m == NULL)
		return (NULL);

	/*
	 * Fill it in..
	 */
	m->b_wptr = m->b_rptr + len;
	bzero(m->b_rptr, len);
	(void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule, dir, af,
	    name, tunnel);
	return (m);
}

/*
 * Account for the current rule and advance the per-socket dump cursor
 * to the next rule in the same hash chain.  Caller holds iph_lock.
 */
static ipsec_policy_t *
spdsock_dump_next_in_chain(spdsock_t *ss, ipsec_policy_head_t *iph,
    ipsec_policy_t *cur)
{
	ASSERT(RW_READ_HELD(&iph->iph_lock));

	ss->spdsock_dump_count++;
	ss->spdsock_dump_cur_rule = cur->ipsp_hash.hash_next;
	return (cur);
}

/*
 * Resumable iterator over a policy head: walks the current chain, then
 * the remaining hash chains, then the non-hashed per-AF lists, for each
 * policy type in turn.  Cursor state lives in the spdsock_t.  Returns
 * NULL when the head is exhausted.  Caller holds iph_lock.
 */
static ipsec_policy_t *
spdsock_dump_next_rule(spdsock_t *ss, ipsec_policy_head_t *iph)
{
	ipsec_policy_t *cur;
	ipsec_policy_root_t *ipr;
	int chain, nchains, type, af;

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	cur = ss->spdsock_dump_cur_rule;

	if (cur != NULL)
		return (spdsock_dump_next_in_chain(ss, iph, cur));

	type = ss->spdsock_dump_cur_type;

next:
	chain = ss->spdsock_dump_cur_chain;
	ipr = &iph->iph_root[type];
	nchains = ipr->ipr_nchains;

	while (chain < nchains) {
		cur = ipr->ipr_hash[chain].hash_head;
		chain++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_chain = chain;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}
	ss->spdsock_dump_cur_chain = nchains;

	af = ss->spdsock_dump_cur_af;
	while (af < IPSEC_NAF) {
		cur = ipr->ipr_nonhash[af];
		af++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_af = af;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}

	type++;
	if (type >= IPSEC_NTYPES)
		return (NULL);

	/* Move on to the next policy type and restart the scan. */
	ss->spdsock_dump_cur_chain = 0;
	ss->spdsock_dump_cur_type = type;
	ss->spdsock_dump_cur_af = IPSEC_AF_V4;
	goto next;

}

/*
 * If we're done with one policy head, but have more to go, we iterate through
 * another IPsec tunnel policy head (itp).  Return NULL if it is an error
 * worthy of returning EAGAIN via PF_POLICY.
 */
static ipsec_tun_pol_t *
spdsock_dump_iterate_next_tunnel(spdsock_t *ss, ipsec_stack_t *ipss)
{
	ipsec_tun_pol_t *itp;

	ASSERT(RW_READ_HELD(&ipss->ipsec_tunnel_policy_lock));
	if (ipss->ipsec_tunnel_policy_gen > ss->spdsock_dump_tun_gen) {
		/* Oops, state of the tunnel polheads changed. */
		itp = NULL;
	} else if (ss->spdsock_itp == NULL) {
		/* Just finished global, find first node. */
		itp = avl_first(&ipss->ipsec_tunnel_policies);
	} else {
		/* We just finished current polhead, find the next one. */
		itp = AVL_NEXT(&ipss->ipsec_tunnel_policies, ss->spdsock_itp);
	}
	if (itp != NULL) {
		ITP_REFHOLD(itp);
	}
	/* Release the hold on the polhead we just finished with. */
	if (ss->spdsock_itp != NULL) {
		ITP_REFRELE(ss->spdsock_itp, ipss->ipsec_netstack);
	}
	ss->spdsock_itp = itp;
	return (itp);
}

/*
 * Produce the next encoded-rule mblk for an in-progress dump, switching
 * to the next tunnel policy head when the current one is exhausted.
 * Returns a dump-finish record on completion, generation mismatch
 * (EAGAIN), or allocation failure (ENOMEM).
 */
static mblk_t *
spdsock_dump_next_record(spdsock_t *ss)
{
	ipsec_policy_head_t *iph;
	ipsec_policy_t *rule;
	mblk_t *m;
	ipsec_tun_pol_t *itp;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	iph = ss->spdsock_dump_head;

	ASSERT(iph != NULL);

	rw_enter(&iph->iph_lock, RW_READER);

	/* Policy changed under us; tell the client to retry. */
	if (iph->iph_gen != ss->spdsock_dump_gen) {
		rw_exit(&iph->iph_lock);
		return (spdsock_dump_finish(ss, EAGAIN));
	}

	while ((rule = spdsock_dump_next_rule(ss, iph)) == NULL) {
		rw_exit(&iph->iph_lock);
		if (--(ss->spdsock_dump_remaining_polheads) == 0)
			return (spdsock_dump_finish(ss, 0));


		/*
		 * If we reach here, we have more policy heads (tunnel
		 * entries) to dump.  Let's reset to a new policy head
		 * and get some more rules.
		 *
		 * An empty policy head will have spdsock_dump_next_rule()
		 * return NULL, and we loop (while dropping the number of
		 * remaining polheads).  If we loop to 0, we finish.  We
		 * keep looping until we hit 0 or until we have a rule to
		 * encode.
		 *
		 * NOTE: No need for ITP_REF*() macros here as we're only
		 * going after and refholding the policy head itself.
		 */
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		itp = spdsock_dump_iterate_next_tunnel(ss, ipss);
		if (itp == NULL) {
			rw_exit(&ipss->ipsec_tunnel_policy_lock);
			return (spdsock_dump_finish(ss, EAGAIN));
		}

		/* Reset other spdsock_dump thingies. */
		IPPH_REFRELE(ss->spdsock_dump_head, ns);
		if (ss->spdsock_dump_active) {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_P_TUNNEL;
			iph = itp->itp_policy;
		} else {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_I_TUNNEL;
			iph = itp->itp_inactive;
		}
		IPPH_REFHOLD(iph);
		rw_exit(&ipss->ipsec_tunnel_policy_lock);

		rw_enter(&iph->iph_lock, RW_READER);
		RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);
	}

	m = spdsock_encode_rule(ss->spdsock_dump_req, rule,
	    ss->spdsock_dump_cur_type, ss->spdsock_dump_cur_af,
	    (ss->spdsock_itp == NULL) ? NULL : ss->spdsock_itp->itp_name,
	    ss->spdsock_dump_tunnel);
	rw_exit(&iph->iph_lock);

	if (m == NULL)
		return (spdsock_dump_finish(ss, ENOMEM));
	return (m);
}

/*
 * Dump records until we run into flow-control back-pressure.
2001 */ 2002 static void 2003 spdsock_dump_some(queue_t *q, spdsock_t *ss) 2004 { 2005 mblk_t *m, *dataind; 2006 2007 while ((ss->spdsock_dump_req != NULL) && canputnext(q)) { 2008 m = spdsock_dump_next_record(ss); 2009 if (m == NULL) 2010 return; 2011 dataind = allocb(sizeof (struct T_data_req), BPRI_HI); 2012 if (dataind == NULL) { 2013 freemsg(m); 2014 return; 2015 } 2016 dataind->b_cont = m; 2017 dataind->b_wptr += sizeof (struct T_data_req); 2018 ((struct T_data_ind *)dataind->b_rptr)->PRIM_type = T_DATA_IND; 2019 ((struct T_data_ind *)dataind->b_rptr)->MORE_flag = 0; 2020 dataind->b_datap->db_type = M_PROTO; 2021 putnext(q, dataind); 2022 } 2023 } 2024 2025 /* 2026 * Start dumping. 2027 * Format a start-of-dump record, and set up the stream and kick the rsrv 2028 * procedure to continue the job.. 2029 */ 2030 /* ARGSUSED */ 2031 static void 2032 spdsock_dump(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp) 2033 { 2034 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2035 netstack_t *ns = ss->spdsock_spds->spds_netstack; 2036 ipsec_stack_t *ipss = ns->netstack_ipsec; 2037 mblk_t *mr; 2038 2039 /* spdsock_open() already set spdsock_itp to NULL. 
*/ 2040 if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) { 2041 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER); 2042 ss->spdsock_dump_remaining_polheads = 1 + 2043 avl_numnodes(&ipss->ipsec_tunnel_policies); 2044 ss->spdsock_dump_tun_gen = ipss->ipsec_tunnel_policy_gen; 2045 rw_exit(&ipss->ipsec_tunnel_policy_lock); 2046 if (iph == ALL_ACTIVE_POLHEADS) { 2047 iph = ipsec_system_policy(ns); 2048 ss->spdsock_dump_active = B_TRUE; 2049 } else { 2050 iph = ipsec_inactive_policy(ns); 2051 ss->spdsock_dump_active = B_FALSE; 2052 } 2053 ASSERT(ss->spdsock_itp == NULL); 2054 } else { 2055 ss->spdsock_dump_remaining_polheads = 1; 2056 } 2057 2058 rw_enter(&iph->iph_lock, RW_READER); 2059 2060 mr = spdsock_dump_ruleset(mp, iph, 0, 0); 2061 2062 if (!mr) { 2063 rw_exit(&iph->iph_lock); 2064 spdsock_error(q, mp, ENOMEM, 0); 2065 return; 2066 } 2067 2068 ss->spdsock_dump_req = mp; 2069 RESET_SPDSOCK_DUMP_POLHEAD(ss, iph); 2070 2071 rw_exit(&iph->iph_lock); 2072 2073 qreply(q, mr); 2074 qenable(OTHERQ(q)); 2075 } 2076 2077 /* Do NOT consume a reference to ITP. */ 2078 void 2079 spdsock_clone_node(ipsec_tun_pol_t *itp, void *ep, netstack_t *ns) 2080 { 2081 int *errptr = (int *)ep; 2082 2083 if (*errptr != 0) 2084 return; /* We've failed already for some reason. 
*/ 2085 mutex_enter(&itp->itp_lock); 2086 ITPF_CLONE(itp->itp_flags); 2087 *errptr = ipsec_copy_polhead(itp->itp_policy, itp->itp_inactive, ns); 2088 mutex_exit(&itp->itp_lock); 2089 } 2090 2091 void 2092 spdsock_clone(queue_t *q, mblk_t *mp, spd_if_t *tunname) 2093 { 2094 int error; 2095 char *tname; 2096 ipsec_tun_pol_t *itp; 2097 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2098 netstack_t *ns = ss->spdsock_spds->spds_netstack; 2099 2100 if (tunname != NULL) { 2101 tname = (char *)tunname->spd_if_name; 2102 if (*tname == '\0') { 2103 error = ipsec_clone_system_policy(ns); 2104 if (audit_active) { 2105 boolean_t active; 2106 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 2107 cred_t *cr; 2108 pid_t cpid; 2109 2110 cr = msg_getcred(mp, &cpid); 2111 active = (spmsg->spd_msg_spdid == SPD_ACTIVE); 2112 audit_pf_policy(SPD_CLONE, cr, ns, 2113 NULL, active, error, cpid); 2114 } 2115 if (error == 0) { 2116 itp_walk(spdsock_clone_node, &error, ns); 2117 if (audit_active) { 2118 boolean_t active; 2119 spd_msg_t *spmsg = 2120 (spd_msg_t *)mp->b_rptr; 2121 cred_t *cr; 2122 pid_t cpid; 2123 2124 cr = msg_getcred(mp, &cpid); 2125 active = (spmsg->spd_msg_spdid == 2126 SPD_ACTIVE); 2127 audit_pf_policy(SPD_CLONE, cr, 2128 ns, "all tunnels", active, 0, 2129 cpid); 2130 } 2131 } 2132 } else { 2133 itp = get_tunnel_policy(tname, ns); 2134 if (itp == NULL) { 2135 spdsock_error(q, mp, ENOENT, 0); 2136 if (audit_active) { 2137 boolean_t active; 2138 spd_msg_t *spmsg = 2139 (spd_msg_t *)mp->b_rptr; 2140 cred_t *cr; 2141 pid_t cpid; 2142 2143 cr = msg_getcred(mp, &cpid); 2144 active = (spmsg->spd_msg_spdid == 2145 SPD_ACTIVE); 2146 audit_pf_policy(SPD_CLONE, cr, 2147 ns, ITP_NAME(itp), active, ENOENT, 2148 cpid); 2149 } 2150 return; 2151 } 2152 spdsock_clone_node(itp, &error, NULL); 2153 ITP_REFRELE(itp, ns); 2154 if (audit_active) { 2155 boolean_t active; 2156 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 2157 cred_t *cr; 2158 pid_t cpid; 2159 2160 cr = msg_getcred(mp, &cpid); 2161 active = 
(spmsg->spd_msg_spdid == SPD_ACTIVE); 2162 audit_pf_policy(SPD_CLONE, cr, ns, 2163 ITP_NAME(itp), active, error, cpid); 2164 } 2165 } 2166 } else { 2167 error = ipsec_clone_system_policy(ns); 2168 if (audit_active) { 2169 boolean_t active; 2170 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 2171 cred_t *cr; 2172 pid_t cpid; 2173 2174 cr = msg_getcred(mp, &cpid); 2175 active = (spmsg->spd_msg_spdid == SPD_ACTIVE); 2176 audit_pf_policy(SPD_CLONE, cr, ns, NULL, 2177 active, error, cpid); 2178 } 2179 } 2180 2181 if (error != 0) 2182 spdsock_error(q, mp, error, 0); 2183 else 2184 spd_echo(q, mp); 2185 } 2186 2187 /* 2188 * Process a SPD_ALGLIST request. The caller expects separate alg entries 2189 * for AH authentication, ESP authentication, and ESP encryption. 2190 * The same distinction is then used when setting the min and max key 2191 * sizes when defining policies. 2192 */ 2193 2194 #define SPDSOCK_AH_AUTH 0 2195 #define SPDSOCK_ESP_AUTH 1 2196 #define SPDSOCK_ESP_ENCR 2 2197 #define SPDSOCK_NTYPES 3 2198 2199 static const uint_t algattr[SPDSOCK_NTYPES] = { 2200 SPD_ATTR_AH_AUTH, 2201 SPD_ATTR_ESP_AUTH, 2202 SPD_ATTR_ESP_ENCR 2203 }; 2204 static const uint_t minbitsattr[SPDSOCK_NTYPES] = { 2205 SPD_ATTR_AH_MINBITS, 2206 SPD_ATTR_ESPA_MINBITS, 2207 SPD_ATTR_ENCR_MINBITS 2208 }; 2209 static const uint_t maxbitsattr[SPDSOCK_NTYPES] = { 2210 SPD_ATTR_AH_MAXBITS, 2211 SPD_ATTR_ESPA_MAXBITS, 2212 SPD_ATTR_ENCR_MAXBITS 2213 }; 2214 static const uint_t defbitsattr[SPDSOCK_NTYPES] = { 2215 SPD_ATTR_AH_DEFBITS, 2216 SPD_ATTR_ESPA_DEFBITS, 2217 SPD_ATTR_ENCR_DEFBITS 2218 }; 2219 static const uint_t incrbitsattr[SPDSOCK_NTYPES] = { 2220 SPD_ATTR_AH_INCRBITS, 2221 SPD_ATTR_ESPA_INCRBITS, 2222 SPD_ATTR_ENCR_INCRBITS 2223 }; 2224 2225 #define ATTRPERALG 6 /* fixed attributes per algs */ 2226 2227 void 2228 spdsock_alglist(queue_t *q, mblk_t *mp) 2229 { 2230 uint_t algtype; 2231 uint_t algidx; 2232 uint_t algcount; 2233 uint_t size; 2234 mblk_t *m; 2235 uint8_t *cur; 2236 spd_msg_t 
*msg;
	struct spd_ext_actions *act;
	struct spd_attribute *attr;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	mutex_enter(&ipss->ipsec_alg_lock);
	/*
	 * The SPD client expects to receive separate entries for
	 * AH authentication and ESP authentication supported algorithms.
	 *
	 * Don't return the "any" algorithms, if defined, as no
	 * kernel policies can be set for these algorithms.
	 */
	algcount = 2 * ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];

	if (ipss->ipsec_alglists[IPSEC_ALG_AUTH][SADB_AALG_NONE] != NULL)
		algcount--;
	if (ipss->ipsec_alglists[IPSEC_ALG_ENCR][SADB_EALG_NONE] != NULL)
		algcount--;

	/*
	 * For each algorithm, we encode:
	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
	 */

	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions) +
	    ATTRPERALG * sizeof (struct spd_attribute) * algcount;

	ASSERT(ALIGNED64(size));

	m = allocb(size, BPRI_HI);
	if (m == NULL) {
		mutex_exit(&ipss->ipsec_alg_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	m->b_wptr = m->b_rptr + size;
	cur = m->b_rptr;

	/* Reply header starts as a copy of the request header. */
	msg = (spd_msg_t *)cur;
	bcopy(mp->b_rptr, cur, sizeof (*msg));

	msg->spd_msg_len = SPD_8TO64(size);
	msg->spd_msg_errno = 0;
	msg->spd_msg_diagnostic = 0;

	cur += sizeof (*msg);

	act = (struct spd_ext_actions *)cur;
	cur += sizeof (*act);

	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
	act->spd_actions_exttype = SPD_EXT_ACTION;
	act->spd_actions_count = algcount;
	act->spd_actions_reserved = 0;

	attr = (struct spd_attribute *)cur;

#define	EMIT(tag, value) { \
	attr->spd_attr_tag = (tag); \
	attr->spd_attr_value = (value); \
	attr++; \
}

	/*
	 * If you change the number of EMIT's here, change
	 * ATTRPERALG above to match
	 */
#define	EMITALGATTRS(_type) { \
	EMIT(algattr[_type], algid);		/* 1 */ \
	EMIT(minbitsattr[_type], minbits);	/* 2 */ \
	EMIT(maxbitsattr[_type], maxbits);	/* 3 */ \
	EMIT(defbitsattr[_type], defbits);	/* 4 */ \
	EMIT(incrbitsattr[_type], incr);	/* 5 */ \
	EMIT(SPD_ATTR_NEXT, 0);			/* 6 */ \
}

	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {
			int algid = ipss->ipsec_sortlist[algtype][algidx];
			ipsec_alginfo_t *alg =
			    ipss->ipsec_alglists[algtype][algid];
			uint_t minbits = alg->alg_minbits;
			uint_t maxbits = alg->alg_maxbits;
			uint_t defbits = alg->alg_default_bits;
			uint_t incr = alg->alg_increment;

			if (algtype == IPSEC_ALG_AUTH) {
				if (algid == SADB_AALG_NONE)
					continue;
				/* Auth algs listed once for AH, once for ESP */
				EMITALGATTRS(SPDSOCK_AH_AUTH);
				EMITALGATTRS(SPDSOCK_ESP_AUTH);
			} else {
				if (algid == SADB_EALG_NONE)
					continue;
				ASSERT(algtype == IPSEC_ALG_ENCR);
				EMITALGATTRS(SPDSOCK_ESP_ENCR);
			}
		}
	}

	mutex_exit(&ipss->ipsec_alg_lock);

#undef EMITALGATTRS
#undef EMIT
#undef ATTRPERALG

	/* Rewrite the final SPD_ATTR_NEXT as the terminating SPD_ATTR_END. */
	attr--;
	attr->spd_attr_tag = SPD_ATTR_END;

	freemsg(mp);
	qreply(q, m);
}

/*
 * Process a SPD_DUMPALGS request.
 * Builds and replies with an SPD_ALGLIST message enumerating every
 * algorithm currently registered with the IPsec framework, including
 * key sizes, block sizes and the KCF mechanism name for each.
 */

#define	ATTRPERALG	7	/* fixed attributes per algs */

void
spdsock_dumpalgs(queue_t *q, mblk_t *mp)
{
	uint_t algtype;
	uint_t algidx;
	uint_t size;
	mblk_t *m;
	uint8_t *cur;
	spd_msg_t *msg;
	struct spd_ext_actions *act;
	struct spd_attribute *attr;
	ipsec_alginfo_t *alg;
	uint_t algid;
	uint_t i;
	uint_t alg_size;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	/* Hold the alg tables steady while we size and encode the reply. */
	mutex_enter(&ipss->ipsec_alg_lock);

	/*
	 * For each algorithm, we encode:
	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
	 *
	 * ALG_ID / ALG_PROTO / ALG_INCRBITS / ALG_NKEYSIZES / ALG_KEYSIZE*
	 * ALG_NBLOCKSIZES / ALG_BLOCKSIZE* / ALG_MECHNAME / {END, NEXT}
	 */

	/*
	 * Compute the size of the SPD message.
	 */
	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions);

	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {
			algid = ipss->ipsec_sortlist[algtype][algidx];
			alg = ipss->ipsec_alglists[algtype][algid];
			/*
			 * Per-algorithm payload: the ATTRPERALG fixed
			 * attributes, one attribute per key size, one per
			 * block size, plus the inline mechanism-name bytes.
			 */
			alg_size = sizeof (struct spd_attribute) *
			    (ATTRPERALG + alg->alg_nkey_sizes +
			    alg->alg_nblock_sizes) + CRYPTO_MAX_MECH_NAME;
			size += alg_size;
		}
	}

	ASSERT(ALIGNED64(size));

	m = allocb(size, BPRI_HI);
	if (m == NULL) {
		mutex_exit(&ipss->ipsec_alg_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	m->b_wptr = m->b_rptr + size;
	cur = m->b_rptr;

	/* Echo the request header, then overwrite the reply-specific fields. */
	msg = (spd_msg_t *)cur;
	bcopy(mp->b_rptr, cur, sizeof (*msg));

	msg->spd_msg_len = SPD_8TO64(size);
	msg->spd_msg_errno = 0;
	msg->spd_msg_type = SPD_ALGLIST;

	msg->spd_msg_diagnostic = 0;

	cur += sizeof (*msg);

	act = (struct spd_ext_actions *)cur;
	cur += sizeof (*act);

	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
	act->spd_actions_exttype = SPD_EXT_ACTION;
	act->spd_actions_count = ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];
	act->spd_actions_reserved = 0;

	/*
	 * If there aren't any algorithms registered, return an empty message.
	 * spdsock_get_ext() knows how to deal with this.
	 */
	if (act->spd_actions_count == 0) {
		act->spd_actions_len = 0;
		mutex_exit(&ipss->ipsec_alg_lock);
		goto error;
	}

	attr = (struct spd_attribute *)cur;

	/* Append one attribute and advance the cursor. */
#define	EMIT(tag, value) { \
	attr->spd_attr_tag = (tag); \
	attr->spd_attr_value = (value); \
	attr++; \
}

	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {

			algid = ipss->ipsec_sortlist[algtype][algidx];
			alg = ipss->ipsec_alglists[algtype][algid];

			/*
			 * If you change the number of EMIT's here, change
			 * ATTRPERALG above to match
			 */
			EMIT(SPD_ATTR_ALG_ID, algid);
			EMIT(SPD_ATTR_ALG_PROTO, algproto[algtype]);
			EMIT(SPD_ATTR_ALG_INCRBITS, alg->alg_increment);

			EMIT(SPD_ATTR_ALG_NKEYSIZES, alg->alg_nkey_sizes);
			for (i = 0; i < alg->alg_nkey_sizes; i++)
				EMIT(SPD_ATTR_ALG_KEYSIZE,
				    alg->alg_key_sizes[i]);

			EMIT(SPD_ATTR_ALG_NBLOCKSIZES, alg->alg_nblock_sizes);
			for (i = 0; i < alg->alg_nblock_sizes; i++)
				EMIT(SPD_ATTR_ALG_BLOCKSIZE,
				    alg->alg_block_sizes[i]);

			/*
			 * The mechanism name is carried inline after the
			 * MECHNAME attribute; its length attribute is always
			 * CRYPTO_MAX_MECH_NAME, matching the decoder in
			 * spdsock_do_updatealg().
			 */
			EMIT(SPD_ATTR_ALG_MECHNAME, CRYPTO_MAX_MECH_NAME);
			bcopy(alg->alg_mech_name, attr, CRYPTO_MAX_MECH_NAME);
			attr = (struct spd_attribute *)((char *)attr +
			    CRYPTO_MAX_MECH_NAME);

			EMIT(SPD_ATTR_NEXT, 0);
		}
	}

	mutex_exit(&ipss->ipsec_alg_lock);

/* EMITALGATTRS is not defined in this function; this #undef is harmless. */
#undef EMITALGATTRS
#undef EMIT
#undef ATTRPERALG

	/* Rewrite the final NEXT marker as the END sentinel. */
	attr--;
	attr->spd_attr_tag = SPD_ATTR_END;

error:
	freemsg(mp);
	qreply(q, m);
}

/*
 * Do the actual work of processing an SPD_UPDATEALGS request. Can
 * be invoked either once IPsec is loaded on a cached request, or
 * when a request is received while IPsec is loaded.
 *
 * Parses the SPD_EXT_ACTION attribute stream in extv[] into a staging
 * table (spds->spds_algs) and then merges it into the live algorithm
 * tables via spdsock_merge_algs().  On success *diag is left at -1;
 * on a parse error *diag is set to an SPD_DIAGNOSTIC_* code.
 * Caller must hold spds->spds_alg_lock.
 */
static void
spdsock_do_updatealg(spd_ext_t *extv[], int *diag, spd_stack_t *spds)
{
	struct spd_ext_actions *actp;
	struct spd_attribute *attr, *endattr;
	uint64_t *start, *end;
	ipsec_alginfo_t *alg = NULL;
	ipsec_algtype_t alg_type = 0;
	boolean_t skip_alg = B_TRUE, doing_proto = B_FALSE;
	uint_t i, cur_key, cur_block, algid;

	*diag = -1;
	ASSERT(MUTEX_HELD(&spds->spds_alg_lock));

	/* parse the message, building the list of algorithms */

	actp = (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
	if (actp == NULL) {
		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
		return;
	}

	/* spd_actions_len is in 64-bit words; endattr bounds the walk. */
	start = (uint64_t *)actp;
	end = (start + actp->spd_actions_len);
	endattr = (struct spd_attribute *)end;
	attr = (struct spd_attribute *)&actp[1];

	bzero(spds->spds_algs, IPSEC_NALGTYPES * IPSEC_MAX_ALGS *
	    sizeof (ipsec_alginfo_t *));

	alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);

	/* Sizes include room for the trailing zero terminator entry. */
#define	ALG_KEY_SIZES(a)   (((a)->alg_nkey_sizes + 1) * sizeof (uint16_t))
#define	ALG_BLOCK_SIZES(a) (((a)->alg_nblock_sizes + 1) * sizeof (uint16_t))

	while (attr < endattr) {
		switch (attr->spd_attr_tag) {
		case SPD_ATTR_NOP:
		case SPD_ATTR_EMPTY:
			break;
		case SPD_ATTR_END:
			/* Force loop exit after handling the final entry. */
			attr = endattr;
			/* FALLTHRU */
		case SPD_ATTR_NEXT:
			if (doing_proto) {
				doing_proto = B_FALSE;
				break;
			}
			if (skip_alg) {
				/* Unknown protocol: discard this entry. */
				ipsec_alg_free(alg);
			} else {
				/*
				 * Commit the completed entry to the staging
				 * table, replacing any duplicate alg id.
				 */
				ipsec_alg_free(
				    spds->spds_algs[alg_type][alg->alg_id]);
				spds->spds_algs[alg_type][alg->alg_id] =
				    alg;
			}
			alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
			break;

		case SPD_ATTR_ALG_ID:
			if (attr->spd_attr_value >= IPSEC_MAX_ALGS) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "invalid alg id %d\n",
				    attr->spd_attr_value));
				*diag = SPD_DIAGNOSTIC_ALG_ID_RANGE;
				goto bail;
			}
			alg->alg_id = attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_PROTO:
			/* find the alg type */
			for (i = 0; i < NALGPROTOS; i++)
				if (algproto[i] == attr->spd_attr_value)
					break;
			skip_alg = (i == NALGPROTOS);
			if (!skip_alg)
				alg_type = i;
			break;

		case SPD_ATTR_ALG_INCRBITS:
			alg->alg_increment = attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_NKEYSIZES:
			if (alg->alg_key_sizes != NULL) {
				kmem_free(alg->alg_key_sizes,
				    ALG_KEY_SIZES(alg));
			}
			alg->alg_nkey_sizes = attr->spd_attr_value;
			/*
			 * Allocate room for the trailing zero key size
			 * value as well.
			 */
			alg->alg_key_sizes = kmem_zalloc(ALG_KEY_SIZES(alg),
			    KM_SLEEP);
			cur_key = 0;
			break;

		case SPD_ATTR_ALG_KEYSIZE:
			if (alg->alg_key_sizes == NULL ||
			    cur_key >= alg->alg_nkey_sizes) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "too many key sizes\n"));
				*diag = SPD_DIAGNOSTIC_ALG_NUM_KEY_SIZES;
				goto bail;
			}
			alg->alg_key_sizes[cur_key++] = attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_NBLOCKSIZES:
			if (alg->alg_block_sizes != NULL) {
				kmem_free(alg->alg_block_sizes,
				    ALG_BLOCK_SIZES(alg));
			}
			alg->alg_nblock_sizes = attr->spd_attr_value;
			/*
			 * Allocate room for the trailing zero block size
			 * value as well.
			 */
			alg->alg_block_sizes = kmem_zalloc(ALG_BLOCK_SIZES(alg),
			    KM_SLEEP);
			cur_block = 0;
			break;

		case SPD_ATTR_ALG_BLOCKSIZE:
			if (alg->alg_block_sizes == NULL ||
			    cur_block >= alg->alg_nblock_sizes) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "too many block sizes\n"));
				*diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
				goto bail;
			}
			alg->alg_block_sizes[cur_block++] =
			    attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_MECHNAME: {
			char *mech_name;

			if (attr->spd_attr_value > CRYPTO_MAX_MECH_NAME) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "mech name too long\n"));
				*diag = SPD_DIAGNOSTIC_ALG_MECH_NAME_LEN;
				goto bail;
			}
			/*
			 * The name bytes follow the attribute inline (see
			 * the encoder in spdsock_dumpalgs()); skip attr past
			 * them so the attr++ below lands on the next entry.
			 */
			mech_name = (char *)(attr + 1);
			bcopy(mech_name, alg->alg_mech_name,
			    attr->spd_attr_value);
			alg->alg_mech_name[CRYPTO_MAX_MECH_NAME-1] = '\0';
			attr = (struct spd_attribute *)((char *)attr +
			    attr->spd_attr_value);
			break;
		}

		case SPD_ATTR_PROTO_ID:
			doing_proto = B_TRUE;
			for (i = 0; i < NALGPROTOS; i++) {
				if (algproto[i] == attr->spd_attr_value) {
					alg_type = i;
					break;
				}
			}
			break;

		case SPD_ATTR_PROTO_EXEC_MODE:
			if (!doing_proto)
				break;
			for (i = 0; i < NEXECMODES; i++) {
				if (execmodes[i] == attr->spd_attr_value) {
					spds->spds_algs_exec_mode[alg_type] = i;
					break;
				}
			}
			break;
		}
		attr++;
	}

#undef ALG_KEY_SIZES
#undef ALG_BLOCK_SIZES

	/* update the algorithm tables */
	spdsock_merge_algs(spds);
bail:
	/* cleanup */
	/* Free the in-progress (uncommitted) entry ... */
	ipsec_alg_free(alg);
	/*
	 * ... and any staged entries still owned by us.
	 * NOTE(review): on the success path this relies on
	 * spdsock_merge_algs() taking ownership of (or copying) the staged
	 * entries it registers; confirm it does not leave registered
	 * pointers in spds_algs, which this loop would then free.
	 */
	for (alg_type = 0; alg_type < IPSEC_NALGTYPES; alg_type++)
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++)
			if (spds->spds_algs[alg_type][algid] != NULL)
				ipsec_alg_free(spds->spds_algs[alg_type][algid]);
}

/*
 * Process an SPD_UPDATEALGS request.
If IPsec is not loaded, queue
 * the request until IPsec loads. If IPsec is loaded, act on it
 * immediately.
 */

static void
spdsock_updatealg(queue_t *q, mblk_t *mp, spd_ext_t *extv[])
{
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	ipsec_stack_t *ipss = spds->spds_netstack->netstack_ipsec;

	if (!ipsec_loaded(ipss)) {
		/*
		 * IPsec is not loaded, save request and return nicely,
		 * the message will be processed once IPsec loads.
		 */
		mblk_t *new_mp;

		/* last update message wins */
		if ((new_mp = copymsg(mp)) == NULL) {
			spdsock_error(q, mp, ENOMEM, 0);
			return;
		}
		/*
		 * Stash the original mp (and the parsed extension pointers,
		 * which reference its data) for spdsock_update_pending_algs();
		 * echo the copy back to the client as acknowledgment.
		 */
		mutex_enter(&spds->spds_alg_lock);
		bcopy(extv, spds->spds_extv_algs,
		    sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));
		if (spds->spds_mp_algs != NULL)
			freemsg(spds->spds_mp_algs);
		spds->spds_mp_algs = mp;
		spds->spds_algs_pending = B_TRUE;
		mutex_exit(&spds->spds_alg_lock);
		if (audit_active) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_UPDATEALGS, cr,
			    spds->spds_netstack, NULL, B_TRUE, EAGAIN,
			    cpid);
		}
		spd_echo(q, new_mp);
	} else {
		/*
		 * IPsec is loaded, act on the message immediately.
		 */
		int diag;

		mutex_enter(&spds->spds_alg_lock);
		spdsock_do_updatealg(extv, &diag, spds);
		mutex_exit(&spds->spds_alg_lock);
		/* diag == -1 means spdsock_do_updatealg() succeeded. */
		if (diag == -1) {
			spd_echo(q, mp);
			if (audit_active) {
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				audit_pf_policy(SPD_UPDATEALGS, cr,
				    spds->spds_netstack, NULL, B_TRUE, 0,
				    cpid);
			}
		} else {
			spdsock_diag(q, mp, diag);
			if (audit_active) {
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				audit_pf_policy(SPD_UPDATEALGS, cr,
				    spds->spds_netstack, NULL, B_TRUE, diag,
				    cpid);
			}
		}
	}
}

/*
 * With a reference-held ill, dig down and find an instance of "tun", and
 * assign its tunnel policy pointer, while reference-holding it.  Also,
 * release ill's refrence when finished.
 *
 * We'll be messing with q_next, so be VERY careful.
 */
static void
find_tun_and_set_itp(ill_t *ill, ipsec_tun_pol_t *itp)
{
	queue_t *q;
	tun_t *tun;

	/* Don't bother if this ill is going away. */
	if (ill->ill_flags & ILL_CONDEMNED) {
		ill_refrele(ill);
		return;
	}


	q = ill->ill_wq;
	claimstr(q);	/* Lighter-weight than freezestr(). */

	do {
		/* Use strcmp() because "tun" is bounded. */
		if (strcmp(q->q_qinfo->qi_minfo->mi_idname, "tun") == 0) {
			/* Aha! Got it. */
			tun = (tun_t *)q->q_ptr;
			if (tun != NULL) {
				mutex_enter(&tun->tun_lock);
				if (tun->tun_itp != itp) {
					ASSERT(tun->tun_itp == NULL);
					ITP_REFHOLD(itp);
					tun->tun_itp = itp;
				}
				mutex_exit(&tun->tun_lock);
				goto release_and_return;
			}
			/*
			 * Else assume this is some other module named "tun"
			 * and move on, hoping we find one that actually has
			 * something in q_ptr.
			 */
		}
		q = q->q_next;
	} while (q != NULL);

release_and_return:
	releasestr(ill->ill_wq);
	ill_refrele(ill);
}

/*
 * Sort through the mess of polhead options to retrieve an appropriate one.
 * Returns NULL if we send an spdsock error.  Returns a valid pointer if we
 * found a valid polhead.  Returns ALL_ACTIVE_POLHEADS (aka. -1) or
 * ALL_INACTIVE_POLHEADS (aka. -2) if the operation calls for the operation to
 * act on ALL policy heads.
 */
static ipsec_policy_head_t *
get_appropriate_polhead(queue_t *q, mblk_t *mp, spd_if_t *tunname, int spdid,
    int msgtype, ipsec_tun_pol_t **itpp)
{
	ipsec_tun_pol_t *itp;
	ipsec_policy_head_t *iph;
	int errno;
	char *tname;
	boolean_t active;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	uint64_t gen;	/* Placeholder */
	ill_t *v4, *v6;

	active = (spdid == SPD_ACTIVE);
	*itpp = NULL;
	if (!active && spdid != SPD_STANDBY) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_SPDID);
		return (NULL);
	}

	if (tunname != NULL) {
		/* Acting on a tunnel's SPD. */
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* Handle all-polhead cases here. */
			if (msgtype != SPD_FLUSH && msgtype != SPD_DUMP) {
				spdsock_diag(q, mp,
				    SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
				return (NULL);
			}
			return (active ? ALL_ACTIVE_POLHEADS :
			    ALL_INACTIVE_POLHEADS);
		}

		itp = get_tunnel_policy(tname, ns);
		if (itp == NULL) {
			if (msgtype != SPD_ADDRULE) {
				/* "Tunnel not found" */
				spdsock_error(q, mp, ENOENT, 0);
				return (NULL);
			}

			/* Adding a rule implicitly creates the polhead. */
			errno = 0;
			itp = create_tunnel_policy(tname, &errno, &gen, ns);
			if (itp == NULL) {
				/*
				 * Something very bad happened, most likely
				 * ENOMEM.  Return an indicator.
				 */
				spdsock_error(q, mp, errno, 0);
				return (NULL);
			}
		}
		/*
		 * Troll the plumbed tunnels and see if we have a
		 * match.  We need to do this always in case we add
		 * policy AFTER plumbing a tunnel.
		 */
		v4 = ill_lookup_on_name(tname, B_FALSE, B_FALSE, NULL,
		    NULL, NULL, &errno, NULL, ns->netstack_ip);
		if (v4 != NULL)
			find_tun_and_set_itp(v4, itp);
		v6 = ill_lookup_on_name(tname, B_FALSE, B_TRUE, NULL,
		    NULL, NULL, &errno, NULL, ns->netstack_ip);
		if (v6 != NULL)
			find_tun_and_set_itp(v6, itp);
		ASSERT(itp != NULL);
		*itpp = itp;
		/* For spdsock dump state, set the polhead's name. */
		if (msgtype == SPD_DUMP) {
			ITP_REFHOLD(itp);
			ss->spdsock_itp = itp;
			ss->spdsock_dump_tunnel = itp->itp_flags &
			    (active ? ITPF_P_TUNNEL : ITPF_I_TUNNEL);
		}
	} else {
		itp = NULL;
		/* For spdsock dump state, indicate it's global policy. */
		if (msgtype == SPD_DUMP)
			ss->spdsock_itp = NULL;
	}

	if (active)
		iph = (itp == NULL) ? ipsec_system_policy(ns) : itp->itp_policy;
	else
		iph = (itp == NULL) ? ipsec_inactive_policy(ns) :
		    itp->itp_inactive;

	ASSERT(iph != NULL);
	if (itp != NULL) {
		IPPH_REFHOLD(iph);
	}

	return (iph);
}

/*
 * Demultiplex an inbound PF_POLICY message: validate its framing,
 * split out its extensions, then dispatch to the per-message-type
 * handler.  Consumes mp (directly or via the handler it calls).
 */
static void
spdsock_parse(queue_t *q, mblk_t *mp)
{
	spd_msg_t *spmsg;
	spd_ext_t *extv[SPD_EXT_MAX + 1];
	uint_t msgsize;
	ipsec_policy_head_t *iph;
	ipsec_tun_pol_t *itp;
	spd_if_t *tunname;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	netstack_t *ns = spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	/* Make sure nothing's below me. */
	ASSERT(WR(q)->q_next == NULL);

	spmsg = (spd_msg_t *)mp->b_rptr;

	msgsize = SPD_64TO8(spmsg->spd_msg_len);

	if (msgdsize(mp) != msgsize) {
		/*
		 * Message len incorrect w.r.t. actual size.  Send an error
		 * (EMSGSIZE).	It may be necessary to massage things a
		 * bit.	 For example, if the spd_msg_type is hosed,
		 * I need to set it to SPD_RESERVED to get delivery to
		 * do the right thing.	Then again, maybe just letting
		 * the error delivery do the right thing.
		 */
		ss2dbg(spds,
		    ("mblk (%lu) and base (%d) message sizes don't jibe.\n",
		    msgdsize(mp), msgsize));
		spdsock_error(q, mp, EMSGSIZE, SPD_DIAGNOSTIC_NONE);
		return;
	}

	if (msgsize > (uint_t)(mp->b_wptr - mp->b_rptr)) {
		/* Get all message into one mblk. */
		if (pullupmsg(mp, -1) == 0) {
			/*
			 * Something screwy happened.
			 * NOTE(review): mp is neither freed nor answered
			 * here — looks like a potential mblk leak; confirm.
			 */
			ss3dbg(spds, ("spdsock_parse: pullupmsg() failed.\n"));
			return;
		} else {
			spmsg = (spd_msg_t *)mp->b_rptr;
		}
	}

	switch (spdsock_get_ext(extv, spmsg, msgsize)) {
	case KGE_DUP:
		/* Handle duplicate extension. */
		ss1dbg(spds, ("Got duplicate extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, dup_ext_diag[extv[0]->spd_ext_type]);
		return;
	case KGE_UNK:
		/* Handle unknown extension. */
		ss1dbg(spds, ("Got unknown extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_UNKNOWN_EXT);
		return;
	case KGE_LEN:
		/* Length error. */
		ss1dbg(spds, ("Length %d on extension type %d overrun or 0.\n",
		    extv[0]->spd_ext_len, extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_EXTLEN);
		return;
	case KGE_CHK:
		/* Reality check failed. */
		ss1dbg(spds, ("Reality check failed on extension type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, bad_ext_diag[extv[0]->spd_ext_type]);
		return;
	default:
		/* Default case is no errors. */
		break;
	}

	/*
	 * Special-case SPD_UPDATEALGS so as not to load IPsec.
	 * For anything else, kick off the load and re-try this message
	 * from spdsock_loadcheck() once loading resolves.
	 */
	if (!ipsec_loaded(ipss) && spmsg->spd_msg_type != SPD_UPDATEALGS) {
		spdsock_t *ss = (spdsock_t *)q->q_ptr;

		ASSERT(ss != NULL);
		ipsec_loader_loadnow(ipss);
		ss->spdsock_timeout_arg = mp;
		ss->spdsock_timeout = qtimeout(q, spdsock_loadcheck,
		    q, LOADCHECK_INTERVAL);
		return;
	}

	/* First check for messages that need no polheads at all. */
	switch (spmsg->spd_msg_type) {
	case SPD_UPDATEALGS:
		spdsock_updatealg(q, mp, extv);
		return;
	case SPD_ALGLIST:
		spdsock_alglist(q, mp);
		return;
	case SPD_DUMPALGS:
		spdsock_dumpalgs(q, mp);
		return;
	}

	/*
	 * Then check for ones that need both primary/secondary polheads,
	 * finding the appropriate tunnel policy if need be.
	 */
	tunname = (spd_if_t *)extv[SPD_EXT_TUN_NAME];
	switch (spmsg->spd_msg_type) {
	case SPD_FLIP:
		spdsock_flip(q, mp, tunname);
		return;
	case SPD_CLONE:
		spdsock_clone(q, mp, tunname);
		return;
	}

	/*
	 * Finally, find ones that operate on exactly one polhead, or
	 * "all polheads" of a given type (active/inactive).
	 */
	iph = get_appropriate_polhead(q, mp, tunname, spmsg->spd_msg_spdid,
	    spmsg->spd_msg_type, &itp);
	if (iph == NULL)
		return;

	/* All-polheads-ready operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_FLUSH:
		if (itp != NULL) {
			mutex_enter(&itp->itp_lock);
			if (spmsg->spd_msg_spdid == SPD_ACTIVE)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
			mutex_exit(&itp->itp_lock);
			ITP_REFRELE(itp, ns);
		}
		spdsock_flush(q, iph, itp, mp);
		return;
	case SPD_DUMP:
		if (itp != NULL)
			ITP_REFRELE(itp, ns);
		spdsock_dump(q, iph, mp);
		return;
	}

	/* The sentinel "all polheads" values support only FLUSH/DUMP. */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
		return;
	}

	/* Single-polhead-only operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_ADDRULE:
		spdsock_addrule(q, iph, mp, extv, itp);
		break;
	case SPD_DELETERULE:
		spdsock_deleterule(q, iph, mp, extv, itp);
		break;
	case SPD_LOOKUP:
		spdsock_lookup(q, iph, mp, extv, itp);
		break;
	default:
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_MSG_TYPE);
		break;
	}

	IPPH_REFRELE(iph, ns);
	if (itp != NULL)
		ITP_REFRELE(itp, ns);
}

/*
 * If an algorithm mapping was received before IPsec was loaded, process it.
 * Called from the IPsec loader.
 */
void
spdsock_update_pending_algs(netstack_t *ns)
{
	spd_stack_t *spds = ns->netstack_spdsock;

	mutex_enter(&spds->spds_alg_lock);
	if (spds->spds_algs_pending) {
		int diag;

		/*
		 * Replay the cached SPD_UPDATEALGS extensions saved by
		 * spdsock_updatealg() while IPsec was still loading.
		 */
		spdsock_do_updatealg(spds->spds_extv_algs, &diag,
		    spds);
		spds->spds_algs_pending = B_FALSE;
	}
	mutex_exit(&spds->spds_alg_lock);
}

/*
 * qtimeout() callback armed by spdsock_parse() when a message arrived
 * before IPsec finished loading: re-parse the deferred message, or fail
 * it with EPROTONOSUPPORT if the IPsec load itself failed.
 */
static void
spdsock_loadcheck(void *arg)
{
	queue_t *q = (queue_t *)arg;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	mblk_t *mp;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	ASSERT(ss != NULL);

	/* Take ownership of the deferred message back from the timeout. */
	ss->spdsock_timeout = 0;
	mp = ss->spdsock_timeout_arg;
	ASSERT(mp != NULL);
	ss->spdsock_timeout_arg = NULL;
	if (ipsec_failed(ipss))
		spdsock_error(q, mp, EPROTONOSUPPORT, 0);
	else
		spdsock_parse(q, mp);
}

/*
 * Copy relevant state bits.
 */
static void
spdsock_copy_info(struct T_info_ack *tap, spdsock_t *ss)
{
	*tap = spdsock_g_t_info_ack;
	tap->CURRENT_state = ss->spdsock_state;
	tap->OPT_size = spdsock_max_optsize;
}

/*
 * This routine responds to T_CAPABILITY_REQ messages.  It is called by
 * spdsock_wput.  Much of the T_CAPABILITY_ACK information is copied from
 * spdsock_g_t_info_ack.  The current state of the stream is copied from
 * spdsock_state.
 */
static void
spdsock_capability_req(queue_t *q, mblk_t *mp)
{
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	t_uscalar_t cap_bits1;
	struct T_capability_ack *tcap;

	cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;

	/* Reuses mp for the ack; NULL means allocation failed — drop. */
	mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
	    mp->b_datap->db_type, T_CAPABILITY_ACK);
	if (mp == NULL)
		return;

	tcap = (struct T_capability_ack *)mp->b_rptr;
	tcap->CAP_bits1 = 0;

	if (cap_bits1 & TC1_INFO) {
		spdsock_copy_info(&tcap->INFO_ack, ss);
		tcap->CAP_bits1 |= TC1_INFO;
	}

	qreply(q, mp);
}

/*
 * This routine responds to T_INFO_REQ messages.  It is called by
 * spdsock_wput_other.
 * Most of the T_INFO_ACK information is copied from spdsock_g_t_info_ack.
 * The current state of the stream is copied from spdsock_state.
 */
static void
spdsock_info_req(q, mp)
	queue_t *q;
	mblk_t *mp;
{
	mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
	    T_INFO_ACK);
	if (mp == NULL)
		return;
	spdsock_copy_info((struct T_info_ack *)mp->b_rptr,
	    (spdsock_t *)q->q_ptr);
	qreply(q, mp);
}

/*
 * spdsock_err_ack.  This routine creates a
 * T_ERROR_ACK message and passes it
 * upstream.
 */
static void
spdsock_err_ack(q, mp, t_error, sys_error)
	queue_t *q;
	mblk_t *mp;
	int t_error;
	int sys_error;
{
	if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
		qreply(q, mp);
}

/*
 * This routine retrieves the current status of socket options.
 * It returns the size of the option retrieved (0 for unknown levels).
 */
/* ARGSUSED */
int
spdsock_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
{
	int *i1 = (int *)ptr;

	switch (level) {
	case SOL_SOCKET:
		switch (name) {
		case SO_TYPE:
			*i1 = SOCK_RAW;
			break;
		/*
		 * The following two items can be manipulated,
		 * but changing them should do nothing.
		 */
		case SO_SNDBUF:
			*i1 = (int)q->q_hiwat;
			break;
		case SO_RCVBUF:
			*i1 = (int)(RD(q)->q_hiwat);
			break;
		}
		break;
	default:
		return (0);
	}
	return (sizeof (int));
}

/*
 * This routine sets socket options.
 * Only SO_SNDBUF/SO_RCVBUF are honored, capped at spds_max_buf.
 */
/* ARGSUSED */
int
spdsock_opt_set(queue_t *q, uint_t mgmt_flags, int level, int name,
    uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
    void *thisdg_attrs, cred_t *cr, mblk_t *mblk)
{
	int *i1 = (int *)invalp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;

	switch (level) {
	case SOL_SOCKET:
		switch (name) {
		case SO_SNDBUF:
			if (*i1 > spds->spds_max_buf)
				return (ENOBUFS);
			q->q_hiwat = *i1;
			break;
		case SO_RCVBUF:
			if (*i1 > spds->spds_max_buf)
				return (ENOBUFS);
			RD(q)->q_hiwat = *i1;
			(void) proto_set_rx_hiwat(RD(q), NULL, *i1);
			break;
		}
		break;
	}
	return (0);
}


/*
 * Handle STREAMS messages.
 */
static void
spdsock_wput_other(queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	int error;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	cred_t *cr;

	switch (mp->b_datap->db_type) {
	case M_PROTO:
	case M_PCPROTO:
		/* Need at least the TPI primitive type to dispatch on. */
		if ((mp->b_wptr - mp->b_rptr) < sizeof (long)) {
			ss3dbg(spds, (
			    "spdsock_wput_other: Not big enough M_PROTO\n"));
			freemsg(mp);
			return;
		}
		switch (((union T_primitives *)mp->b_rptr)->type) {
		case T_CAPABILITY_REQ:
			spdsock_capability_req(q, mp);
			break;
		case T_INFO_REQ:
			spdsock_info_req(q, mp);
			break;
		case T_SVR4_OPTMGMT_REQ:
		case T_OPTMGMT_REQ:
			/*
			 * All Solaris components should pass a db_credp
			 * for this TPI message, hence we ASSERT.
			 * But in case there is some other M_PROTO that looks
			 * like a TPI message sent by some other kernel
			 * component, we check and return an error.
			 */
			cr = msg_getcred(mp, NULL);
			ASSERT(cr != NULL);
			if (cr == NULL) {
				spdsock_err_ack(q, mp, TSYSERR, EINVAL);
				return;
			}
			if (((union T_primitives *)mp->b_rptr)->type ==
			    T_SVR4_OPTMGMT_REQ) {
				(void) svr4_optcom_req(q, mp, cr,
				    &spdsock_opt_obj, B_FALSE);
			} else {
				(void) tpi_optcom_req(q, mp, cr,
				    &spdsock_opt_obj, B_FALSE);
			}
			break;
		case T_DATA_REQ:
		case T_EXDATA_REQ:
		case T_ORDREL_REQ:
			/* Illegal for spdsock. */
			freemsg(mp);
			(void) putnextctl1(RD(q), M_ERROR, EPROTO);
			break;
		default:
			/* Not supported by spdsock. */
			spdsock_err_ack(q, mp, TNOTSUPPORT, 0);
			break;
		}
		return;
	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;
		error = EINVAL;

		switch (iocp->ioc_cmd) {
		case ND_SET:
		case ND_GET:
			/* nd_getset() replies via mp on success. */
			if (nd_getset(q, spds->spds_g_nd, mp)) {
				qreply(q, mp);
				return;
			} else
				error = ENOENT;
			/* FALLTHRU */
		default:
			miocnak(q, mp, 0, error);
			return;
		}
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			qreply(q, mp);
			return;
		}
		/* Else FALLTHRU */
	}

	/* If fell through, just black-hole the message. */
	freemsg(mp);
}

/*
 * Write-side put procedure: route PF_POLICY payloads (M_DATA, or
 * T_DATA_REQ-wrapped data) to spdsock_parse() and everything else to
 * spdsock_wput_other(); defer all work while a dump is in progress.
 */
static void
spdsock_wput(queue_t *q, mblk_t *mp)
{
	uint8_t *rptr = mp->b_rptr;
	mblk_t *mp1;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;

	/*
	 * If we're dumping, defer processing other messages until the
	 * dump completes.
	 */
	if (ss->spdsock_dump_req != NULL) {
		if (!putq(q, mp))
			freemsg(mp);
		return;
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:
		/*
		 * Silently discard.
		 */
		ss2dbg(spds, ("raw M_DATA in spdsock.\n"));
		freemsg(mp);
		return;
	case M_PROTO:
	case M_PCPROTO:
		if ((mp->b_wptr - rptr) >= sizeof (struct T_data_req)) {
			if (((union T_primitives *)rptr)->type == T_DATA_REQ) {
				if ((mp1 = mp->b_cont) == NULL) {
					/* No data after T_DATA_REQ. */
					ss2dbg(spds,
					    ("No data after DATA_REQ.\n"));
					freemsg(mp);
					return;
				}
				/* Strip the T_DATA_REQ header block. */
				freeb(mp);
				mp = mp1;
				ss2dbg(spds, ("T_DATA_REQ\n"));
				break;	/* Out of switch. */
			}
		}
		/* FALLTHRU */
	default:
		ss3dbg(spds, ("In default wput case (%d %d).\n",
		    mp->b_datap->db_type, ((union T_primitives *)rptr)->type));
		spdsock_wput_other(q, mp);
		return;
	}

	/* I now have a PF_POLICY message in an M_DATA block. */
	spdsock_parse(q, mp);
}

/*
 * Device open procedure, called when new queue pair created.
 * We are passed the read-side queue.
 * Requires PRIV_SYS_IP_CONFIG; allocates a minor number and a per-open
 * spdsock_t, and holds the netstack until spdsock_close().
 */
/* ARGSUSED */
static int
spdsock_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	spdsock_t *ss;
	queue_t *oq = OTHERQ(q);
	minor_t ssminor;
	netstack_t *ns;
	spd_stack_t *spds;

	if (secpolicy_ip_config(credp, B_FALSE) != 0)
		return (EPERM);

	if (q->q_ptr != NULL)
		return (0);  /* Re-open of an already open instance. */

	if (sflag & MODOPEN)
		return (EINVAL);

	/* Hold on the netstack is released in spdsock_close(). */
	ns = netstack_find_by_cred(credp);
	ASSERT(ns != NULL);
	spds = ns->netstack_spdsock;
	ASSERT(spds != NULL);

	ss2dbg(spds, ("Made it into PF_POLICY socket open.\n"));

	ssminor = (minor_t)(uintptr_t)vmem_alloc(spdsock_vmem, 1, VM_NOSLEEP);
	if (ssminor == 0) {
		netstack_rele(spds->spds_netstack);
		return (ENOMEM);
	}
	ss = kmem_zalloc(sizeof (spdsock_t), KM_NOSLEEP);
	if (ss == NULL) {
		vmem_free(spdsock_vmem, (void *)(uintptr_t)ssminor, 1);
		netstack_rele(spds->spds_netstack);
		return (ENOMEM);
	}

	ss->spdsock_minor = ssminor;
	ss->spdsock_state = TS_UNBND;
	ss->spdsock_dump_req = NULL;

	ss->spdsock_spds = spds;

	q->q_ptr = ss;
	oq->q_ptr = ss;

	q->q_hiwat = spds->spds_recv_hiwat;

	oq->q_hiwat = spds->spds_xmit_hiwat;
	oq->q_lowat = spds->spds_xmit_lowat;

	qprocson(q);
	(void) proto_set_rx_hiwat(q, NULL, spds->spds_recv_hiwat);

	*devp = makedevice(getmajor(*devp), ss->spdsock_minor);
	return (0);
}

/*
 * Read-side service procedure, invoked when we get back-enabled
 * when buffer space becomes available.
 *
 * Dump another chunk if we were dumping before; when we finish, kick
 * the write-side queue in case it's waiting for read queue space.
 */
void
spdsock_rsrv(queue_t *q)
{
	spdsock_t *ss = q->q_ptr;

	if (ss->spdsock_dump_req != NULL)
		spdsock_dump_some(q, ss);

	/* Dump finished (or none was running): let the writer proceed. */
	if (ss->spdsock_dump_req == NULL)
		qenable(OTHERQ(q));
}

/*
 * Write-side service procedure, invoked when we defer processing
 * if another message is received while a dump is in progress.
 */
void
spdsock_wsrv(queue_t *q)
{
	spdsock_t *ss = q->q_ptr;
	mblk_t *mp;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	if (ss->spdsock_dump_req != NULL) {
		/* Still dumping; nudge the reader and try again later. */
		qenable(OTHERQ(q));
		return;
	}

	while ((mp = getq(q)) != NULL) {
		if (ipsec_loaded(ipss)) {
			spdsock_wput(q, mp);
			/* wput may have started a new dump; stop draining. */
			if (ss->spdsock_dump_req != NULL)
				return;
		} else if (!ipsec_failed(ipss)) {
			/* IPsec still loading: requeue and wait. */
			(void) putq(q, mp);
		} else {
			spdsock_error(q, mp, EPFNOSUPPORT, 0);
		}
	}
}

/*
 * Device close procedure: tear down the per-open state created by
 * spdsock_open(), cancelling any pending load-check timeout and
 * releasing the minor number and netstack hold.
 */
static int
spdsock_close(queue_t *q)
{
	spdsock_t *ss = q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;

	qprocsoff(q);

	/* Safe assumption. */
	ASSERT(ss != NULL);

	if (ss->spdsock_timeout != 0)
		(void) quntimeout(q, ss->spdsock_timeout);

	ss3dbg(spds, ("Driver close, PF_POLICY socket is going away.\n"));

	vmem_free(spdsock_vmem, (void *)(uintptr_t)ss->spdsock_minor, 1);
	netstack_rele(ss->spdsock_spds->spds_netstack);

	kmem_free(ss, sizeof (spdsock_t));
	return (0);
}

/*
 * Merge the IPsec algorithms tables with the received algorithm information.
 */
void
spdsock_merge_algs(spd_stack_t *spds)
{
	ipsec_alginfo_t *alg, *oalg;
	ipsec_algtype_t algtype;
	uint_t algidx, algid, nalgs;
	crypto_mech_name_t *mechs;
	uint_t mech_count, mech_idx;
	netstack_t *ns = spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	/* Caller serializes access to the pending spds_algs[][] tables. */
	ASSERT(MUTEX_HELD(&spds->spds_alg_lock));

	/*
	 * Get the list of supported mechanisms from the crypto framework.
	 * If a mechanism is supported by KCF, resolve its mechanism
	 * id and mark it as being valid. This operation must be done
	 * without holding alg_lock, since it can cause a provider
	 * module to be loaded and the provider notification callback to
	 * be invoked.
	 */
	mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
			/* Default: algorithm invalid until a mech matches. */
			int algflags = 0;
			crypto_mech_type_t mt = CRYPTO_MECHANISM_INVALID;

			alg = spds->spds_algs[algtype][algid];
			if (alg == NULL)
				continue;

			/*
			 * The NULL encryption algorithm is a special
			 * case because there are no mechanisms, yet
			 * the algorithm is still valid.
			 */
			if (alg->alg_id == SADB_EALG_NULL) {
				alg->alg_mech_type = CRYPTO_MECHANISM_INVALID;
				alg->alg_flags = ALG_FLAG_VALID;
				continue;
			}

			/* Linear scan of KCF's mechanism list for a match. */
			for (mech_idx = 0; mech_idx < mech_count; mech_idx++) {
				if (strncmp(alg->alg_mech_name, mechs[mech_idx],
				    CRYPTO_MAX_MECH_NAME) == 0) {
					mt = crypto_mech2id(alg->alg_mech_name);
					ASSERT(mt != CRYPTO_MECHANISM_INVALID);
					algflags = ALG_FLAG_VALID;
					break;
				}
			}
			alg->alg_mech_type = mt;
			alg->alg_flags = algflags;
		}
	}

	/* The live tables are protected by the per-stack IPsec alg lock. */
	mutex_enter(&ipss->ipsec_alg_lock);

	/*
	 * For each algorithm currently defined, check if it is
	 * present in the new tables created from the SPD_UPDATEALGS
	 * message received from user-space.
	 * Delete the algorithm entries that are currently defined
	 * but not part of the new tables.
	 */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		nalgs = ipss->ipsec_nalgs[algtype];
		for (algidx = 0; algidx < nalgs; algidx++) {
			algid = ipss->ipsec_sortlist[algtype][algidx];
			if (spds->spds_algs[algtype][algid] == NULL)
				ipsec_alg_unreg(algtype, algid, ns);
		}
	}

	/*
	 * For each algorithm we just received, check if it is
	 * present in the currently defined tables. If it is, swap
	 * the entry with the one we just allocated.
	 * If the new algorithm is not in the current tables,
	 * add it.
	 */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
			alg = spds->spds_algs[algtype][algid];
			if (alg == NULL)
				continue;

			if ((oalg = ipss->ipsec_alglists[algtype][algid]) ==
			    NULL) {
				/*
				 * New algorithm, add it to the algorithm
				 * table.
				 */
				ipsec_alg_reg(algtype, alg, ns);
			} else {
				/*
				 * Algorithm is already in the table. Swap
				 * the existing entry with the new one.
				 */
				ipsec_alg_fix_min_max(alg, algtype, ns);
				ipss->ipsec_alglists[algtype][algid] = alg;
				ipsec_alg_free(oalg);
			}
			/* Ownership transferred to the live table. */
			spds->spds_algs[algtype][algid] = NULL;
		}
	}

	/* Propagate the per-type execution mode from the update message. */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		ipss->ipsec_algs_exec_mode[algtype] =
		    spds->spds_algs_exec_mode[algtype];
	}

	mutex_exit(&ipss->ipsec_alg_lock);

	crypto_free_mech_list(mechs, mech_count);

	/* Let AH and ESP react to the new algorithm set. */
	ipsecah_algs_changed(ns);
	ipsecesp_algs_changed(ns);
}