/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/strsun.h>
#include <sys/stropts.h>
#include <sys/zone.h>
#include <sys/vnode.h>
#include <sys/sysmacros.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/mkdev.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/suntpi.h>
#include <sys/policy.h>
#include <sys/dls.h>

#include <sys/socket.h>
#include <netinet/in.h>
#include <net/pfkeyv2.h>
#include <net/pfpolicy.h>

#include <inet/common.h>
#include <netinet/ip6.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <inet/mi.h>
#include <inet/proto_set.h>
#include <inet/nd.h>
#include <inet/ip_if.h>
#include <inet/optcom.h>
#include <inet/ipsec_info.h>
#include <inet/ipsec_impl.h>
#include <inet/spdsock.h>
#include <inet/sadb.h>
#include <inet/iptun.h>
#include <inet/iptun/iptun_impl.h>

#include <sys/isa_defs.h>

#include <c2/audit.h>

/*
 * This is a transport provider for the PF_POLICY IPsec policy
 * management socket, which provides a management interface into the
 * SPD, allowing policy rules to be added, deleted, and queried.
 *
 * This effectively replaces the old private SIOC*IPSECONFIG ioctls
 * with an extensible interface which will hopefully be public some
 * day.
 *
 * See <net/pfpolicy.h> for more details on the protocol.
 *
 * We link against drv/ip and call directly into it to manipulate the
 * SPD; see ipsec_impl.h for the policy data structures and spd.c for
 * the code which maintains them.
 *
 * The MT model of this is QPAIR with the addition of some explicit
 * locking to protect system-wide policy data structures.
 */

static vmem_t *spdsock_vmem;		/* for minor numbers. */

#define	ALIGNED64(x) IS_P2ALIGNED((x), sizeof (uint64_t))

/* Default structure copied into T_INFO_ACK messages (from rts.c...) */
static struct T_info_ack spdsock_g_t_info_ack = {
	T_INFO_ACK,
	T_INFINITE,	/* TSDU_size. Maximum size messages. */
	T_INVALID,	/* ETSDU_size. No expedited data. */
	T_INVALID,	/* CDATA_size. No connect data. */
	T_INVALID,	/* DDATA_size. No disconnect data. */
	0,		/* ADDR_size. */
	0,		/* OPT_size. No user-settable options */
	64 * 1024,	/* TIDU_size. spdsock allows maximum size messages. */
	T_COTS,		/* SERV_type. spdsock supports connection oriented. */
	TS_UNBND,	/* CURRENT_state. This is set from spdsock_state. */
	(XPG4_1)	/* Provider flags */
};

/* Named Dispatch Parameter Management Structure */
typedef struct spdsockparam_s {
	uint_t	spdsock_param_min;
	uint_t	spdsock_param_max;
	uint_t	spdsock_param_value;
	char	*spdsock_param_name;
} spdsockparam_t;

/*
 * Table of NDD variables supported by spdsock. These are loaded into
 * spdsock_g_nd in spdsock_init_nd.
 * All of these are alterable, within the min/max values given, at run time.
 */
static	spdsockparam_t	lcl_param_arr[] = {
	/* min	max	value	name */
	{ 4096, 65536, 8192, "spdsock_xmit_hiwat"},
	{ 0, 65536, 1024, "spdsock_xmit_lowat"},
	{ 4096, 65536, 8192, "spdsock_recv_hiwat"},
	{ 65536, 1024*1024*1024, 256*1024, "spdsock_max_buf"},
	{ 0, 3, 0, "spdsock_debug"},
};
#define	spds_xmit_hiwat	spds_params[0].spdsock_param_value
#define	spds_xmit_lowat	spds_params[1].spdsock_param_value
#define	spds_recv_hiwat	spds_params[2].spdsock_param_value
#define	spds_max_buf	spds_params[3].spdsock_param_value
#define	spds_debug	spds_params[4].spdsock_param_value

#define	ss0dbg(a)	printf a
/* NOTE: != 0 instead of > 0 so lint doesn't complain. */
#define	ss1dbg(spds, a)	if (spds->spds_debug != 0) printf a
#define	ss2dbg(spds, a)	if (spds->spds_debug > 1) printf a
#define	ss3dbg(spds, a)	if (spds->spds_debug > 2) printf a

#define	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph) { \
	ASSERT(RW_READ_HELD(&(iph)->iph_lock)); \
	(ss)->spdsock_dump_head = (iph); \
	(ss)->spdsock_dump_gen = (iph)->iph_gen; \
	(ss)->spdsock_dump_cur_type = 0; \
	(ss)->spdsock_dump_cur_af = IPSEC_AF_V4; \
	(ss)->spdsock_dump_cur_rule = NULL; \
	(ss)->spdsock_dump_count = 0; \
	(ss)->spdsock_dump_cur_chain = 0; \
}

static int spdsock_close(queue_t *);
static int spdsock_open(queue_t *, dev_t *, int, int, cred_t *);
static void spdsock_wput(queue_t *, mblk_t *);
static void spdsock_wsrv(queue_t *);
static void spdsock_rsrv(queue_t *);
static void *spdsock_stack_init(netstackid_t stackid, netstack_t *ns);
static void spdsock_stack_fini(netstackid_t stackid, void *arg);
static void spdsock_loadcheck(void *);
static void spdsock_merge_algs(spd_stack_t *);
static void spdsock_flush_one(ipsec_policy_head_t *, netstack_t *);
static mblk_t *spdsock_dump_next_record(spdsock_t *);

static struct module_info info = {
	5138, "spdsock", 1, INFPSZ, 512, 128
};

static struct qinit rinit = {
	NULL, (pfi_t)spdsock_rsrv, spdsock_open, spdsock_close,
	NULL, &info
};

static struct qinit winit = {
	(pfi_t)spdsock_wput, (pfi_t)spdsock_wsrv, NULL, NULL, NULL, &info
};

struct streamtab spdsockinfo = {
	&rinit, &winit
};

/* mapping from alg type to protocol number, as per RFC 2407 */
static const uint_t algproto[] = {
	PROTO_IPSEC_AH,
	PROTO_IPSEC_ESP,
};

#define	NALGPROTOS	(sizeof (algproto) / sizeof (algproto[0]))

/* mapping from kernel exec mode to spdsock exec mode */
static const uint_t execmodes[] = {
	SPD_ALG_EXEC_MODE_SYNC,
	SPD_ALG_EXEC_MODE_ASYNC
};

#define	NEXECMODES	(sizeof (execmodes) / sizeof (execmodes[0]))

#define	ALL_ACTIVE_POLHEADS ((ipsec_policy_head_t *)-1)
#define	ALL_INACTIVE_POLHEADS ((ipsec_policy_head_t *)-2)

#define	ITP_NAME(itp) (itp != NULL ? itp->itp_name : NULL)
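
/*
 * ALL_ACTIVE_POLHEADS and ALL_INACTIVE_POLHEADS (above) are sentinel
 * pseudo-pointers meaning "the global policy head plus every tunnel's
 * policy head"; spdsock_flush() and spdsock_dump() below check for them
 * and then cover the global polhead as well as each tunnel's.
 */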

/* ARGSUSED */
static int
spdsock_param_get(q, mp, cp, cr)
	queue_t	*q;
	mblk_t	*mp;
	caddr_t	cp;
	cred_t *cr;
{
	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
	uint_t value;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;

	mutex_enter(&spds->spds_param_lock);
	value = spdsockpa->spdsock_param_value;
	mutex_exit(&spds->spds_param_lock);

	(void) mi_mpprintf(mp, "%u", value);
	return (0);
}

/* This routine sets an NDD variable in a spdsockparam_t structure. */
/* ARGSUSED */
static int
spdsock_param_set(q, mp, value, cp, cr)
	queue_t	*q;
	mblk_t	*mp;
	char	*value;
	caddr_t	cp;
	cred_t *cr;
{
	ulong_t	new_value;
	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;

	/* Convert the value from a string into a long integer. */
	if (ddi_strtoul(value, NULL, 10, &new_value) != 0)
		return (EINVAL);

	mutex_enter(&spds->spds_param_lock);
	/*
	 * Fail the request if the new value does not lie within the
	 * required bounds.
	 */
	if (new_value < spdsockpa->spdsock_param_min ||
	    new_value > spdsockpa->spdsock_param_max) {
		mutex_exit(&spds->spds_param_lock);
		return (EINVAL);
	}

	/* Set the new value */
	spdsockpa->spdsock_param_value = new_value;
	mutex_exit(&spds->spds_param_lock);

	return (0);
}

/*
 * Initialize at module load time
 */
boolean_t
spdsock_ddi_init(void)
{
	spdsock_max_optsize = optcom_max_optsize(
	    spdsock_opt_obj.odb_opt_des_arr, spdsock_opt_obj.odb_opt_arr_cnt);

	spdsock_vmem = vmem_create("spdsock", (void *)1, MAXMIN, 1,
	    NULL, NULL, NULL, 1, VM_SLEEP | VMC_IDENTIFIER);

	/*
	 * We want to be informed each time a stack is created or
	 * destroyed in the kernel, so we can maintain the
	 * set of spd_stack_t's.
	 */
	netstack_register(NS_SPDSOCK, spdsock_stack_init, NULL,
	    spdsock_stack_fini);

	return (B_TRUE);
}

/*
 * Walk through the param array specified registering each element with the
 * named dispatch handler.
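 * (The registered parameters are then readable and settable as ndd(1M)
 * tunables; presumably something like "ndd -get /dev/spdsock spdsock_debug"
 * reads one back.)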
 */
static boolean_t
spdsock_param_register(IDP *ndp, spdsockparam_t *ssp, int cnt)
{
	for (; cnt-- > 0; ssp++) {
		if (ssp->spdsock_param_name != NULL &&
		    ssp->spdsock_param_name[0]) {
			if (!nd_load(ndp,
			    ssp->spdsock_param_name,
			    spdsock_param_get, spdsock_param_set,
			    (caddr_t)ssp)) {
				nd_free(ndp);
				return (B_FALSE);
			}
		}
	}
	return (B_TRUE);
}

/*
 * Initialize for each stack instance
 */
/* ARGSUSED */
static void *
spdsock_stack_init(netstackid_t stackid, netstack_t *ns)
{
	spd_stack_t	*spds;
	spdsockparam_t	*ssp;

	spds = (spd_stack_t *)kmem_zalloc(sizeof (*spds), KM_SLEEP);
	spds->spds_netstack = ns;

	ASSERT(spds->spds_g_nd == NULL);

	ssp = (spdsockparam_t *)kmem_alloc(sizeof (lcl_param_arr), KM_SLEEP);
	spds->spds_params = ssp;
	bcopy(lcl_param_arr, ssp, sizeof (lcl_param_arr));

	(void) spdsock_param_register(&spds->spds_g_nd, ssp,
	    A_CNT(lcl_param_arr));

	mutex_init(&spds->spds_param_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spds->spds_alg_lock, NULL, MUTEX_DEFAULT, NULL);

	return (spds);
}

void
spdsock_ddi_destroy(void)
{
	vmem_destroy(spdsock_vmem);

	netstack_unregister(NS_SPDSOCK);
}

/* ARGSUSED */
static void
spdsock_stack_fini(netstackid_t stackid, void *arg)
{
	spd_stack_t *spds = (spd_stack_t *)arg;

	freemsg(spds->spds_mp_algs);
	mutex_destroy(&spds->spds_param_lock);
	mutex_destroy(&spds->spds_alg_lock);
	nd_free(&spds->spds_g_nd);
	kmem_free(spds->spds_params, sizeof (lcl_param_arr));
	spds->spds_params = NULL;

	kmem_free(spds, sizeof (*spds));
}

/*
 * NOTE: large quantities of this should be shared with keysock.
 * Would be nice to combine some of this into a common module, but
 * not possible given time pressures.
 */

/*
 * High-level reality checking of extensions.
 */
/* ARGSUSED */ /* XXX */
static boolean_t
ext_check(spd_ext_t *ext)
{
	spd_if_t *tunname = (spd_if_t *)ext;
	int i;
	char *idstr;

	if (ext->spd_ext_type == SPD_EXT_TUN_NAME) {
		/* (NOTE: Modified from SADB_EXT_IDENTITY..) */

		/*
		 * Make sure the strings in these identities are
		 * null-terminated.  Let's "proactively" null-terminate the
		 * string at the last byte if it's not terminated sooner.
		 */
		i = SPD_64TO8(tunname->spd_if_len) - sizeof (spd_if_t);
		idstr = (char *)(tunname + 1);
		while (*idstr != '\0' && i > 0) {
			i--;
			idstr++;
		}
		if (i == 0) {
			/*
			 * I.e., if the bozo user didn't NULL-terminate the
			 * string...
			 */
			idstr--;
			*idstr = '\0';
		}
	}
	return (B_TRUE);	/* For now... */
}


/* Return values for spdsock_get_ext(). */
#define	KGE_OK	0
#define	KGE_DUP	1
#define	KGE_UNK	2
#define	KGE_LEN	3
#define	KGE_CHK	4

/*
 * Parse basic extension headers and return in the passed-in pointer vector.
 * Return values include:
 *
 *	KGE_OK	Everything's nice and parsed out.
 *		If there are no extensions, place NULL in extv[0].
 *	KGE_DUP	There is a duplicate extension.
 *		First instance in appropriate bin.  First duplicate in
 *		extv[0].
 *	KGE_UNK	Unknown extension type encountered.  extv[0] contains
 *		unknown header.
 *	KGE_LEN	Extension length error.
 *	KGE_CHK	High-level reality check failed on specific extension.
 *
 * My apologies for some of the pointer arithmetic in here.  I'm thinking
 * like an assembly programmer, yet trying to make the compiler happy.
 */
static int
spdsock_get_ext(spd_ext_t *extv[], spd_msg_t *basehdr, uint_t msgsize)
{
	bzero(extv, sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));

	/* Use extv[0] as the "current working pointer". */

	extv[0] = (spd_ext_t *)(basehdr + 1);

	while (extv[0] < (spd_ext_t *)(((uint8_t *)basehdr) + msgsize)) {
		/* Check for unknown headers. */
		if (extv[0]->spd_ext_type == 0 ||
		    extv[0]->spd_ext_type > SPD_EXT_MAX)
			return (KGE_UNK);

		/*
		 * Check length.  Use uint64_t because extlen is in units
		 * of 64-bit words.  If length goes beyond the msgsize,
		 * return an error.  (Zero length also qualifies here.)
		 */
		if (extv[0]->spd_ext_len == 0 ||
		    (void *)((uint64_t *)extv[0] + extv[0]->spd_ext_len) >
		    (void *)((uint8_t *)basehdr + msgsize))
			return (KGE_LEN);

		/* Check for redundant headers. */
		if (extv[extv[0]->spd_ext_type] != NULL)
			return (KGE_DUP);

		/*
		 * Reality check the extension if possible at the spdsock
		 * level.
		 */
		if (!ext_check(extv[0]))
			return (KGE_CHK);

		/* If I make it here, assign the appropriate bin. */
		extv[extv[0]->spd_ext_type] = extv[0];

		/* Advance pointer (See above for uint64_t ptr reasoning.) */
		extv[0] = (spd_ext_t *)
		    ((uint64_t *)extv[0] + extv[0]->spd_ext_len);
	}

	/* Everything's cool. */

	/*
	 * If extv[0] == NULL, then there are no extension headers in this
	 * message.  Ensure that this is the case.
	 */
	if (extv[0] == (spd_ext_t *)(basehdr + 1))
		extv[0] = NULL;

	return (KGE_OK);
}

static const int bad_ext_diag[] = {
	SPD_DIAGNOSTIC_MALFORMED_LCLPORT,
	SPD_DIAGNOSTIC_MALFORMED_REMPORT,
	SPD_DIAGNOSTIC_MALFORMED_PROTO,
	SPD_DIAGNOSTIC_MALFORMED_LCLADDR,
	SPD_DIAGNOSTIC_MALFORMED_REMADDR,
	SPD_DIAGNOSTIC_MALFORMED_ACTION,
	SPD_DIAGNOSTIC_MALFORMED_RULE,
	SPD_DIAGNOSTIC_MALFORMED_RULESET,
	SPD_DIAGNOSTIC_MALFORMED_ICMP_TYPECODE
};

static const int dup_ext_diag[] = {
	SPD_DIAGNOSTIC_DUPLICATE_LCLPORT,
	SPD_DIAGNOSTIC_DUPLICATE_REMPORT,
	SPD_DIAGNOSTIC_DUPLICATE_PROTO,
	SPD_DIAGNOSTIC_DUPLICATE_LCLADDR,
	SPD_DIAGNOSTIC_DUPLICATE_REMADDR,
	SPD_DIAGNOSTIC_DUPLICATE_ACTION,
	SPD_DIAGNOSTIC_DUPLICATE_RULE,
	SPD_DIAGNOSTIC_DUPLICATE_RULESET,
	SPD_DIAGNOSTIC_DUPLICATE_ICMP_TYPECODE
};

/*
 * Transmit a PF_POLICY error message to the instance either pointed to
 * by ks, the instance with serial number serial, or more, depending.
 *
 * The faulty message (or a reasonable facsimile thereof) is in mp.
 * This function will free mp or recycle it for delivery, thereby causing
 * the stream head to free it.
 */
static void
spdsock_error(queue_t *q, mblk_t *mp, int error, int diagnostic)
{
	spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

	ASSERT(mp->b_datap->db_type == M_DATA);

	if (spmsg->spd_msg_type < SPD_MIN ||
	    spmsg->spd_msg_type > SPD_MAX)
		spmsg->spd_msg_type = SPD_RESERVED;

	/*
	 * Strip out extension headers.
	 */
	ASSERT(mp->b_rptr + sizeof (*spmsg) <= mp->b_datap->db_lim);
	mp->b_wptr = mp->b_rptr + sizeof (*spmsg);
	spmsg->spd_msg_len = SPD_8TO64(sizeof (spd_msg_t));
	spmsg->spd_msg_errno = (uint8_t)error;
	spmsg->spd_msg_diagnostic = (uint16_t)diagnostic;

	qreply(q, mp);
}

static void
spdsock_diag(queue_t *q, mblk_t *mp, int diagnostic)
{
	spdsock_error(q, mp, EINVAL, diagnostic);
}

static void
spd_echo(queue_t *q, mblk_t *mp)
{
	qreply(q, mp);
}

/*
 * Do NOT consume a reference to itp.
 */
/*ARGSUSED*/
static void
spdsock_flush_node(ipsec_tun_pol_t *itp, void *cookie, netstack_t *ns)
{
	boolean_t active = (boolean_t)cookie;
	ipsec_policy_head_t *iph;

	iph = active ? itp->itp_policy : itp->itp_inactive;
	IPPH_REFHOLD(iph);
	mutex_enter(&itp->itp_lock);
	spdsock_flush_one(iph, ns);
	if (active)
		itp->itp_flags &= ~ITPF_PFLAGS;
	else
		itp->itp_flags &= ~ITPF_IFLAGS;
	mutex_exit(&itp->itp_lock);
}

/*
 * Clear out one polhead.
 */
static void
spdsock_flush_one(ipsec_policy_head_t *iph, netstack_t *ns)
{
	rw_enter(&iph->iph_lock, RW_WRITER);
	ipsec_polhead_flush(iph, ns);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ns);
}

static void
spdsock_flush(queue_t *q, ipsec_policy_head_t *iph, ipsec_tun_pol_t *itp,
    mblk_t *mp)
{
	boolean_t active;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (iph != ALL_ACTIVE_POLHEADS && iph != ALL_INACTIVE_POLHEADS) {
		spdsock_flush_one(iph, ns);
		if (audit_active) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLUSH, cr, ns,
			    ITP_NAME(itp), active, 0, cpid);
		}
	} else {
		active = (iph == ALL_ACTIVE_POLHEADS);

		/* First flush the global policy. */
		spdsock_flush_one(active ? ipsec_system_policy(ns) :
		    ipsec_inactive_policy(ns), ns);
		if (audit_active) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_FLUSH, cr, ns, NULL,
			    active, 0, cpid);
		}
		/* Then flush every tunnel's appropriate one. */
		itp_walk(spdsock_flush_node, (void *)active, ns);
		if (audit_active) {
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_FLUSH, cr, ns,
			    "all tunnels", active, 0, cpid);
		}
	}

	spd_echo(q, mp);
}

static boolean_t
spdsock_ext_to_sel(spd_ext_t **extv, ipsec_selkey_t *sel, int *diag)
{
	bzero(sel, sizeof (*sel));

	if (extv[SPD_EXT_PROTO] != NULL) {
		struct spd_proto *pr =
		    (struct spd_proto *)extv[SPD_EXT_PROTO];
		sel->ipsl_proto = pr->spd_proto_number;
		sel->ipsl_valid |= IPSL_PROTOCOL;
	}
	if (extv[SPD_EXT_LCLPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_LCLPORT];
		sel->ipsl_lport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_LOCAL_PORT;
	}
	if (extv[SPD_EXT_REMPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_REMPORT];
		sel->ipsl_rport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_REMOTE_PORT;
	}

	if (extv[SPD_EXT_ICMP_TYPECODE] != NULL) {
		struct spd_typecode *tc =
		    (struct spd_typecode *)extv[SPD_EXT_ICMP_TYPECODE];

		sel->ipsl_valid |= IPSL_ICMP_TYPE;
		sel->ipsl_icmp_type = tc->spd_typecode_type;
		if (tc->spd_typecode_type_end < tc->spd_typecode_type)
			sel->ipsl_icmp_type_end = tc->spd_typecode_type;
		else
			sel->ipsl_icmp_type_end = tc->spd_typecode_type_end;

		if (tc->spd_typecode_code != 255) {
			sel->ipsl_valid |= IPSL_ICMP_CODE;
			sel->ipsl_icmp_code = tc->spd_typecode_code;
			if (tc->spd_typecode_code_end < tc->spd_typecode_code)
				sel->ipsl_icmp_code_end = tc->spd_typecode_code;
			else
				sel->ipsl_icmp_code_end =
				    tc->spd_typecode_code_end;
		}
	}
#define	ADDR2SEL(sel, extv, field, pfield, extn, bit) \
	if ((extv)[(extn)] != NULL) { \
		uint_t addrlen; \
		struct spd_address *ap = \
			(struct spd_address *)((extv)[(extn)]); \
		addrlen = (ap->spd_address_af == AF_INET6) ? \
			IPV6_ADDR_LEN : IP_ADDR_LEN; \
		if (SPD_64TO8(ap->spd_address_len) < \
			(addrlen + sizeof (*ap))) { \
			*diag = SPD_DIAGNOSTIC_BAD_ADDR_LEN; \
			return (B_FALSE); \
		} \
		bcopy((ap+1), &((sel)->field), addrlen); \
		(sel)->pfield = ap->spd_address_prefixlen; \
		(sel)->ipsl_valid |= (bit); \
		(sel)->ipsl_valid |= (ap->spd_address_af == AF_INET6) ? \
			IPSL_IPV6 : IPSL_IPV4; \
	}

	ADDR2SEL(sel, extv, ipsl_local, ipsl_local_pfxlen,
	    SPD_EXT_LCLADDR, IPSL_LOCAL_ADDR);
	ADDR2SEL(sel, extv, ipsl_remote, ipsl_remote_pfxlen,
	    SPD_EXT_REMADDR, IPSL_REMOTE_ADDR);

	if ((sel->ipsl_valid & (IPSL_IPV6|IPSL_IPV4)) ==
	    (IPSL_IPV6|IPSL_IPV4)) {
		*diag = SPD_DIAGNOSTIC_MIXED_AF;
		return (B_FALSE);
	}

#undef ADDR2SEL

	return (B_TRUE);
}

static boolean_t
spd_convert_type(uint32_t type, ipsec_act_t *act)
{
	switch (type) {
	case SPD_ACTTYPE_DROP:
		act->ipa_type = IPSEC_ACT_DISCARD;
		return (B_TRUE);

	case SPD_ACTTYPE_PASS:
		act->ipa_type = IPSEC_ACT_CLEAR;
		return (B_TRUE);

	case SPD_ACTTYPE_IPSEC:
		act->ipa_type = IPSEC_ACT_APPLY;
		return (B_TRUE);
	}
	return (B_FALSE);
}

static boolean_t
spd_convert_flags(uint32_t flags, ipsec_act_t *act)
{
	/*
	 * Note use of !! for boolean canonicalization.
	 */
	act->ipa_apply.ipp_use_ah = !!(flags & SPD_APPLY_AH);
	act->ipa_apply.ipp_use_esp = !!(flags & SPD_APPLY_ESP);
	act->ipa_apply.ipp_use_espa = !!(flags & SPD_APPLY_ESPA);
	act->ipa_apply.ipp_use_se = !!(flags & SPD_APPLY_SE);
	act->ipa_apply.ipp_use_unique = !!(flags & SPD_APPLY_UNIQUE);
	return (B_TRUE);
}

static void
spdsock_reset_act(ipsec_act_t *act)
{
	bzero(act, sizeof (*act));
	act->ipa_apply.ipp_espe_maxbits = IPSEC_MAX_KEYBITS;
	act->ipa_apply.ipp_espa_maxbits = IPSEC_MAX_KEYBITS;
	act->ipa_apply.ipp_ah_maxbits = IPSEC_MAX_KEYBITS;
}

/*
 * Sanity check action against reality, and shrink-wrap key sizes..
 */
static boolean_t
spdsock_check_action(ipsec_act_t *act, boolean_t tunnel_polhead, int *diag,
    spd_stack_t *spds)
{
	if (tunnel_polhead && act->ipa_apply.ipp_use_unique) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	if ((act->ipa_type != IPSEC_ACT_APPLY) &&
	    (act->ipa_apply.ipp_use_ah ||
	    act->ipa_apply.ipp_use_esp ||
	    act->ipa_apply.ipp_use_espa ||
	    act->ipa_apply.ipp_use_se ||
	    act->ipa_apply.ipp_use_unique)) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	if ((act->ipa_type == IPSEC_ACT_APPLY) &&
	    !act->ipa_apply.ipp_use_ah &&
	    !act->ipa_apply.ipp_use_esp) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	return (ipsec_check_action(act, diag, spds->spds_netstack));
}

/*
 * We may be short a few error checks here..
 */
static boolean_t
spdsock_ext_to_actvec(spd_ext_t **extv, ipsec_act_t **actpp, uint_t *nactp,
    int *diag, spd_stack_t *spds)
{
	struct spd_ext_actions *sactp =
	    (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
	ipsec_act_t act, *actp, *endactp;
	struct spd_attribute *attrp, *endattrp;
	uint64_t *endp;
	int nact;
	boolean_t tunnel_polhead;

	tunnel_polhead = (extv[SPD_EXT_TUN_NAME] != NULL &&
	    (((struct spd_rule *)extv[SPD_EXT_RULE])->spd_rule_flags &
	    SPD_RULE_FLAG_TUNNEL));

	*actpp = NULL;
	*nactp = 0;

	if (sactp == NULL) {
		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
		return (B_FALSE);
	}

	/*
	 * Parse the "action" extension and convert into an action chain.
	 */

	nact = sactp->spd_actions_count;

	endp = (uint64_t *)sactp;
	endp += sactp->spd_actions_len;
	endattrp = (struct spd_attribute *)endp;

	actp = kmem_alloc(sizeof (*actp) * nact, KM_NOSLEEP);
	if (actp == NULL) {
		*diag = SPD_DIAGNOSTIC_ADD_NO_MEM;
		return (B_FALSE);
	}
	*actpp = actp;
	*nactp = nact;
	endactp = actp + nact;

	spdsock_reset_act(&act);
	attrp = (struct spd_attribute *)(&sactp[1]);

	for (; attrp < endattrp; attrp++) {
		switch (attrp->spd_attr_tag) {
		case SPD_ATTR_NOP:
			break;

		case SPD_ATTR_EMPTY:
			spdsock_reset_act(&act);
			break;

		case SPD_ATTR_END:
			attrp = endattrp;
			/* FALLTHRU */
		case SPD_ATTR_NEXT:
			if (actp >= endactp) {
				*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
				goto fail;
			}
			if (!spdsock_check_action(&act, tunnel_polhead,
			    diag, spds))
				goto fail;
			*actp++ = act;
			spdsock_reset_act(&act);
			break;

		case SPD_ATTR_TYPE:
			if (!spd_convert_type(attrp->spd_attr_value, &act)) {
				*diag = SPD_DIAGNOSTIC_ADD_BAD_TYPE;
				goto fail;
			}
			break;

		case SPD_ATTR_FLAGS:
			if (!tunnel_polhead && extv[SPD_EXT_TUN_NAME] != NULL) {
				/*
				 * Set "sa unique" for transport-mode
				 * tunnels whether we want to or not.
				 */
				attrp->spd_attr_value |= SPD_APPLY_UNIQUE;
			}
			if (!spd_convert_flags(attrp->spd_attr_value, &act)) {
				*diag = SPD_DIAGNOSTIC_ADD_BAD_FLAGS;
				goto fail;
			}
			break;

		case SPD_ATTR_AH_AUTH:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_auth_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESP_ENCR:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_encr_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESP_AUTH:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_esp_auth_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ENCR_MINBITS:
			act.ipa_apply.ipp_espe_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ENCR_MAXBITS:
			act.ipa_apply.ipp_espe_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_AH_MINBITS:
			act.ipa_apply.ipp_ah_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_AH_MAXBITS:
			act.ipa_apply.ipp_ah_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESPA_MINBITS:
			act.ipa_apply.ipp_espa_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESPA_MAXBITS:
			act.ipa_apply.ipp_espa_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_LIFE_SOFT_TIME:
		case SPD_ATTR_LIFE_HARD_TIME:
		case SPD_ATTR_LIFE_SOFT_BYTES:
		case SPD_ATTR_LIFE_HARD_BYTES:
			break;

		case SPD_ATTR_KM_PROTO:
			act.ipa_apply.ipp_km_proto = attrp->spd_attr_value;
			break;

		case SPD_ATTR_KM_COOKIE:
			act.ipa_apply.ipp_km_cookie = attrp->spd_attr_value;
			break;

		case SPD_ATTR_REPLAY_DEPTH:
			act.ipa_apply.ipp_replay_depth = attrp->spd_attr_value;
			break;
		}
	}
	if (actp != endactp) {
		*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
		goto fail;
	}

	return (B_TRUE);
fail:
	ipsec_actvec_free(*actpp, nact);
	*actpp = NULL;
	return (B_FALSE);
}

typedef struct
{
	ipsec_policy_t *pol;
	int dir;
} tmprule_t;

static int
mkrule(ipsec_policy_head_t *iph, struct spd_rule *rule,
    ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t af,
    tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
{
	ipsec_policy_t *pol;

	sel->ipsl_valid &= ~(IPSL_IPV6|IPSL_IPV4);
	sel->ipsl_valid |= af;

	pol = ipsec_policy_create(sel, actp, nact, rule->spd_rule_priority,
	    index, spds->spds_netstack);
	if (pol == NULL)
		return (ENOMEM);

	(*rp)->pol = pol;
	(*rp)->dir = dir;
	(*rp)++;

	if (!ipsec_check_policy(iph, pol, dir))
		return (EEXIST);

	rule->spd_rule_index = pol->ipsp_index;
	return (0);
}

static int
mkrulepair(ipsec_policy_head_t *iph, struct spd_rule *rule,
    ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t afs,
    tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
{
	int error;

	if (afs & IPSL_IPV4) {
		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV4, rp,
		    index, spds);
		if (error != 0)
			return (error);
	}
	if (afs & IPSL_IPV6) {
		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV6, rp,
		    index, spds);
		if (error != 0)
			return (error);
	}
	return (0);
}


static void
spdsock_addrule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	ipsec_act_t *actp;
	uint_t nact;
	int diag = 0, error, afs;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	tmprule_t rules[4], *rulep = &rules[0];
	boolean_t tunnel_mode, empty_itp, active;
	uint64_t *index = (itp == NULL) ? NULL : &itp->itp_next_policy_index;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;

	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (audit_active) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_ADDRULE, cr,
			    spds->spds_netstack, ITP_NAME(itp), active,
			    SPD_DIAGNOSTIC_NO_RULE_EXT, cpid);
		}
		return;
	}

	tunnel_mode = (rule->spd_rule_flags & SPD_RULE_FLAG_TUNNEL);

	if (itp != NULL) {
		mutex_enter(&itp->itp_lock);
		ASSERT(itp->itp_policy == iph || itp->itp_inactive == iph);
		active = (itp->itp_policy == iph);
		if (ITP_P_ISACTIVE(itp, iph)) {
			/* Check for mix-and-match of tunnel/transport. */
			if ((tunnel_mode && !ITP_P_ISTUNNEL(itp, iph)) ||
			    (!tunnel_mode && ITP_P_ISTUNNEL(itp, iph))) {
				mutex_exit(&itp->itp_lock);
				spdsock_error(q, mp, EBUSY, 0);
				return;
			}
			empty_itp = B_FALSE;
		} else {
			empty_itp = B_TRUE;
			itp->itp_flags = active ? ITPF_P_ACTIVE : ITPF_I_ACTIVE;
			if (tunnel_mode)
				itp->itp_flags |= active ? ITPF_P_TUNNEL :
				    ITPF_I_TUNNEL;
		}
	} else {
		empty_itp = B_FALSE;
	}

	if (rule->spd_rule_index != 0) {
		diag = SPD_DIAGNOSTIC_INVALID_RULE_INDEX;
		error = EINVAL;
		goto fail2;
	}

	if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
		error = EINVAL;
		goto fail2;
	}

	if (itp != NULL) {
		if (tunnel_mode) {
			if (sel.ipsl_valid &
			    (IPSL_REMOTE_PORT | IPSL_LOCAL_PORT)) {
				itp->itp_flags |= active ?
				    ITPF_P_PER_PORT_SECURITY :
				    ITPF_I_PER_PORT_SECURITY;
			}
		} else {
			/*
			 * For now, we don't allow transport-mode on a tunnel
			 * with ANY specific selectors.  Bail if we have such
			 * a request.
			 */
			if (sel.ipsl_valid & IPSL_WILDCARD) {
				diag = SPD_DIAGNOSTIC_NO_TUNNEL_SELECTORS;
				error = EINVAL;
				goto fail2;
			}
		}
	}

	if (!spdsock_ext_to_actvec(extv, &actp, &nact, &diag, spds)) {
		error = EINVAL;
		goto fail2;
	}
	/*
	 * If no addresses were specified, add both.
	 */
	afs = sel.ipsl_valid & (IPSL_IPV6|IPSL_IPV4);
	if (afs == 0)
		afs = (IPSL_IPV6|IPSL_IPV4);

	rw_enter(&iph->iph_lock, RW_WRITER);

	if (rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_OUTBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	if (rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_INBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	while ((--rulep) >= &rules[0]) {
		ipsec_enter_policy(iph, rulep->pol, rulep->dir,
		    spds->spds_netstack);
	}
	rw_exit(&iph->iph_lock);
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);

	ipsec_actvec_free(actp, nact);
	spd_echo(q, mp);
	if (audit_active) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
		    ITP_NAME(itp), active, 0, cpid);
	}
	return;

fail:
	rw_exit(&iph->iph_lock);
	while ((--rulep) >= &rules[0]) {
		IPPOL_REFRELE(rulep->pol, spds->spds_netstack);
	}
	ipsec_actvec_free(actp, nact);
fail2:
	if (itp != NULL) {
		if (empty_itp)
			itp->itp_flags = 0;
		mutex_exit(&itp->itp_lock);
	}
	spdsock_error(q, mp, error, diag);
	if (audit_active) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
		    ITP_NAME(itp), active, error, cpid);
	}
}

void
spdsock_deleterule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	int err, diag = 0;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_DELETERULE, cr, ns,
			    ITP_NAME(itp), active, SPD_DIAGNOSTIC_NO_RULE_EXT,
			    cpid);
		}
		return;
	}

	/*
	 * Must enter itp_lock first to avoid deadlock.  See tun.c's
	 * set_sec_simple() for the other case of itp_lock and iph_lock.
	 */
	if (itp != NULL)
		mutex_enter(&itp->itp_lock);

	if (rule->spd_rule_index != 0) {
		if (ipsec_policy_delete_index(iph, rule->spd_rule_index, ns) !=
		    0) {
			err = ESRCH;
			goto fail;
		}
	} else {
		if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
			err = EINVAL;	/* diag already set... */
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_INBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_OUTBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}
	}

	if (itp != NULL) {
		ASSERT(iph == itp->itp_policy || iph == itp->itp_inactive);
		rw_enter(&iph->iph_lock, RW_READER);
		if (avl_numnodes(&iph->iph_rulebyid) == 0) {
			if (iph == itp->itp_policy)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
		}
		/* Can exit locks in any order. */
		rw_exit(&iph->iph_lock);
		mutex_exit(&itp->itp_lock);
	}
	spd_echo(q, mp);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, 0, cpid);
	}
	return;
fail:
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);
	spdsock_error(q, mp, err, diag);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, err, cpid);
	}
}

/* Do NOT consume a reference to itp. */
/* ARGSUSED */
static void
spdsock_flip_node(ipsec_tun_pol_t *itp, void *ignoreme, netstack_t *ns)
{
	mutex_enter(&itp->itp_lock);
	ITPF_SWAP(itp->itp_flags);
	ipsec_swap_policy(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
}

void
spdsock_flip(queue_t *q, mblk_t *mp, spd_if_t *tunname)
{
	char *tname;
	ipsec_tun_pol_t *itp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (tunname != NULL) {
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* can't fail */
			ipsec_swap_global_policy(ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    NULL, active, 0, cpid);
			}
			itp_walk(spdsock_flip_node, NULL, ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    "all tunnels", active, 0, cpid);
			}
		} else {
			itp = get_tunnel_policy(tname, ns);
			if (itp == NULL) {
				/* Better idea for "tunnel not found"? */
				spdsock_error(q, mp, ESRCH, 0);
				if (audit_active) {
					boolean_t active;
					spd_msg_t *spmsg =
					    (spd_msg_t *)mp->b_rptr;
					cred_t *cr;
					pid_t cpid;

					cr = msg_getcred(mp, &cpid);
					active = (spmsg->spd_msg_spdid ==
					    SPD_ACTIVE);
					audit_pf_policy(SPD_FLIP, cr, ns,
					    ITP_NAME(itp), active,
					    ESRCH, cpid);
				}
				return;
			}
			spdsock_flip_node(itp, NULL, NULL);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    ITP_NAME(itp), active, 0, cpid);
			}
			ITP_REFRELE(itp, ns);
		}
	} else {
		ipsec_swap_global_policy(ns);	/* can't fail */
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLIP, cr,
			    ns, NULL, active, 0, cpid);
		}
	}
	spd_echo(q, mp);
}

/*
 * Unimplemented feature
 */
/* ARGSUSED */
static void
spdsock_lookup(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	spdsock_error(q, mp, EINVAL, 0);
}


static mblk_t *
spdsock_dump_ruleset(mblk_t *req, ipsec_policy_head_t *iph,
    uint32_t count, uint16_t error)
{
	size_t len = sizeof (spd_ruleset_ext_t) + sizeof (spd_msg_t);
	spd_msg_t *msg;
	spd_ruleset_ext_t *ruleset;
	mblk_t *m = allocb(len, BPRI_HI);

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	if (m == NULL) {
		return (NULL);
	}
	msg = (spd_msg_t *)m->b_rptr;
	ruleset = (spd_ruleset_ext_t *)(&msg[1]);

	m->b_wptr = (uint8_t *)&ruleset[1];

	*msg = *(spd_msg_t *)(req->b_rptr);
	msg->spd_msg_len = SPD_8TO64(len);
	msg->spd_msg_errno = error;

	ruleset->spd_ruleset_len = SPD_8TO64(sizeof (*ruleset));
	ruleset->spd_ruleset_type = SPD_EXT_RULESET;
	ruleset->spd_ruleset_count = count;
	ruleset->spd_ruleset_version = iph->iph_gen;
	return (m);
}

static mblk_t *
spdsock_dump_finish(spdsock_t *ss, int error)
{
	mblk_t *m;
	ipsec_policy_head_t *iph = ss->spdsock_dump_head;
	mblk_t *req = ss->spdsock_dump_req;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	rw_enter(&iph->iph_lock, RW_READER);
	m = spdsock_dump_ruleset(req, iph, ss->spdsock_dump_count, error);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ns);
	if (ss->spdsock_itp != NULL) {
		ITP_REFRELE(ss->spdsock_itp, ns);
		ss->spdsock_itp = NULL;
	}
	ss->spdsock_dump_req = NULL;
	freemsg(req);

	return (m);
}

/*
 * Rule encoding functions.
 * We do a two-pass encode.
 * If base != NULL, fill in encoded rule part starting at base+offset.
 * Always return "offset" plus length of to-be-encoded data.
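 *
 * The typical caller therefore makes two passes, as spdsock_encode_rule()
 * below does:
 *
 *	len = spdsock_encode_rule_head(NULL, 0, ...);	(sizing pass)
 *	m = allocb(len, BPRI_HI);
 *	(void) spdsock_encode_rule_head(m->b_rptr, 0, ...);	(fill pass)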
 */
static uint_t
spdsock_encode_typecode(uint8_t *base, uint_t offset, uint8_t type,
    uint8_t type_end, uint8_t code, uint8_t code_end)
{
	struct spd_typecode *tcp;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		tcp = (struct spd_typecode *)(base + offset);
		tcp->spd_typecode_len = SPD_8TO64(sizeof (*tcp));
		tcp->spd_typecode_exttype = SPD_EXT_ICMP_TYPECODE;
		tcp->spd_typecode_code = code;
		tcp->spd_typecode_type = type;
		tcp->spd_typecode_type_end = type_end;
		tcp->spd_typecode_code_end = code_end;
	}
	offset += sizeof (*tcp);

	ASSERT(ALIGNED64(offset));

	return (offset);
}

static uint_t
spdsock_encode_proto(uint8_t *base, uint_t offset, uint8_t proto)
{
	struct spd_proto *spp;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spp = (struct spd_proto *)(base + offset);
		spp->spd_proto_len = SPD_8TO64(sizeof (*spp));
		spp->spd_proto_exttype = SPD_EXT_PROTO;
		spp->spd_proto_number = proto;
		spp->spd_proto_reserved1 = 0;
		spp->spd_proto_reserved2 = 0;
	}
	offset += sizeof (*spp);

	ASSERT(ALIGNED64(offset));

	return (offset);
}

static uint_t
spdsock_encode_port(uint8_t *base, uint_t offset, uint16_t ext, uint16_t port)
{
	struct spd_portrange *spp;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spp = (struct spd_portrange *)(base + offset);
		spp->spd_ports_len = SPD_8TO64(sizeof (*spp));
		spp->spd_ports_exttype = ext;
		spp->spd_ports_minport = port;
		spp->spd_ports_maxport = port;
	}
	offset += sizeof (*spp);

	ASSERT(ALIGNED64(offset));

	return (offset);
}

static uint_t
spdsock_encode_addr(uint8_t *base, uint_t offset, uint16_t ext,
    const ipsec_selkey_t *sel, const ipsec_addr_t *addr, uint_t pfxlen)
{
	struct spd_address *sae;
	ipsec_addr_t *spdaddr;
	uint_t start = offset;
	uint_t addrlen;
	uint_t af;

	if (sel->ipsl_valid & IPSL_IPV4) {
		af = AF_INET;
		addrlen = IP_ADDR_LEN;
	} else {
		af = AF_INET6;
		addrlen = IPV6_ADDR_LEN;
	}

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		sae = (struct spd_address *)(base + offset);
		sae->spd_address_exttype = ext;
		sae->spd_address_af = af;
		sae->spd_address_prefixlen = pfxlen;
		sae->spd_address_reserved2 = 0;

		spdaddr = (ipsec_addr_t *)(&sae[1]);
		bcopy(addr, spdaddr, addrlen);
	}
	offset += sizeof (*sae);
	addrlen = roundup(addrlen, sizeof (uint64_t));
	offset += addrlen;

	ASSERT(ALIGNED64(offset));

	if (base != NULL)
		sae->spd_address_len = SPD_8TO64(offset - start);
	return (offset);
}

static uint_t
spdsock_encode_sel(uint8_t *base, uint_t offset, const ipsec_sel_t *sel)
{
	const ipsec_selkey_t *selkey = &sel->ipsl_key;

	if (selkey->ipsl_valid & IPSL_PROTOCOL)
		offset = spdsock_encode_proto(base, offset, selkey->ipsl_proto);
	if (selkey->ipsl_valid & IPSL_LOCAL_PORT)
		offset = spdsock_encode_port(base, offset, SPD_EXT_LCLPORT,
		    selkey->ipsl_lport);
	if (selkey->ipsl_valid & IPSL_REMOTE_PORT)
		offset = spdsock_encode_port(base, offset, SPD_EXT_REMPORT,
		    selkey->ipsl_rport);
	if (selkey->ipsl_valid & IPSL_REMOTE_ADDR)
		offset = spdsock_encode_addr(base, offset, SPD_EXT_REMADDR,
		    selkey, &selkey->ipsl_remote, selkey->ipsl_remote_pfxlen);
	if (selkey->ipsl_valid & IPSL_LOCAL_ADDR)
		offset = spdsock_encode_addr(base, offset, SPD_EXT_LCLADDR,
		    selkey, &selkey->ipsl_local, selkey->ipsl_local_pfxlen);
	if (selkey->ipsl_valid & IPSL_ICMP_TYPE) {
		offset = spdsock_encode_typecode(base, offset,
		    selkey->ipsl_icmp_type, selkey->ipsl_icmp_type_end,
		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
		    selkey->ipsl_icmp_code : 255,
		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
		    selkey->ipsl_icmp_code_end : 255);
	}
	return (offset);
}

static uint_t
spdsock_encode_actattr(uint8_t *base, uint_t offset, uint32_t tag,
    uint32_t value)
{
	struct spd_attribute *attr;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		attr = (struct spd_attribute *)(base + offset);
		attr->spd_attr_tag = tag;
		attr->spd_attr_value = value;
	}
	offset += sizeof (struct spd_attribute);

	ASSERT(ALIGNED64(offset));

	return (offset);
}


#define	EMIT(t, v) offset = spdsock_encode_actattr(base, offset, (t), (v))

static uint_t
spdsock_encode_action(uint8_t *base, uint_t offset, const ipsec_action_t *ap)
{
	const struct ipsec_act *act = &(ap->ipa_act);
	uint_t flags;

	EMIT(SPD_ATTR_EMPTY, 0);
	switch (act->ipa_type) {
	case IPSEC_ACT_DISCARD:
	case IPSEC_ACT_REJECT:
		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_DROP);
		break;
	case IPSEC_ACT_BYPASS:
	case IPSEC_ACT_CLEAR:
		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_PASS);
		break;

	case IPSEC_ACT_APPLY:
		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_IPSEC);
		flags = 0;
		if (act->ipa_apply.ipp_use_ah)
			flags |= SPD_APPLY_AH;
		if (act->ipa_apply.ipp_use_esp)
			flags |= SPD_APPLY_ESP;
		if (act->ipa_apply.ipp_use_espa)
			flags |= SPD_APPLY_ESPA;
		if (act->ipa_apply.ipp_use_se)
			flags |= SPD_APPLY_SE;
		if (act->ipa_apply.ipp_use_unique)
			flags |= SPD_APPLY_UNIQUE;
		EMIT(SPD_ATTR_FLAGS, flags);
		if (flags & SPD_APPLY_AH) {
			EMIT(SPD_ATTR_AH_AUTH, act->ipa_apply.ipp_auth_alg);
			EMIT(SPD_ATTR_AH_MINBITS,
			    act->ipa_apply.ipp_ah_minbits);
			EMIT(SPD_ATTR_AH_MAXBITS,
			    act->ipa_apply.ipp_ah_maxbits);
		}
		if (flags & SPD_APPLY_ESP) {
			EMIT(SPD_ATTR_ESP_ENCR, act->ipa_apply.ipp_encr_alg);
			EMIT(SPD_ATTR_ENCR_MINBITS,
			    act->ipa_apply.ipp_espe_minbits);
			EMIT(SPD_ATTR_ENCR_MAXBITS,
			    act->ipa_apply.ipp_espe_maxbits);
			if (flags & SPD_APPLY_ESPA) {
				EMIT(SPD_ATTR_ESP_AUTH,
				    act->ipa_apply.ipp_esp_auth_alg);
				EMIT(SPD_ATTR_ESPA_MINBITS,
				    act->ipa_apply.ipp_espa_minbits);
				EMIT(SPD_ATTR_ESPA_MAXBITS,
				    act->ipa_apply.ipp_espa_maxbits);
			}
		}
		if (act->ipa_apply.ipp_km_proto != 0)
			EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_proto);
		if (act->ipa_apply.ipp_km_cookie != 0)
			EMIT(SPD_ATTR_KM_COOKIE, act->ipa_apply.ipp_km_cookie);
		if (act->ipa_apply.ipp_replay_depth != 0)
			EMIT(SPD_ATTR_REPLAY_DEPTH,
			    act->ipa_apply.ipp_replay_depth);
		/* Add more here */
		break;
	}

	return (offset);
}

static uint_t
spdsock_encode_action_list(uint8_t *base, uint_t offset,
    const ipsec_action_t *ap)
{
	struct spd_ext_actions *act;
	uint_t nact = 0;
	uint_t start = offset;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		act = (struct spd_ext_actions *)(base + offset);
		act->spd_actions_len = 0;
act->spd_actions_exttype = SPD_EXT_ACTION; 1689 act->spd_actions_count = 0; 1690 act->spd_actions_reserved = 0; 1691 } 1692 1693 offset += sizeof (*act); 1694 1695 ASSERT(ALIGNED64(offset)); 1696 1697 while (ap != NULL) { 1698 offset = spdsock_encode_action(base, offset, ap); 1699 ap = ap->ipa_next; 1700 nact++; 1701 if (ap != NULL) { 1702 EMIT(SPD_ATTR_NEXT, 0); 1703 } 1704 } 1705 EMIT(SPD_ATTR_END, 0); 1706 1707 ASSERT(ALIGNED64(offset)); 1708 1709 if (base != NULL) { 1710 act->spd_actions_count = nact; 1711 act->spd_actions_len = SPD_8TO64(offset - start); 1712 } 1713 1714 return (offset); 1715 } 1716 1717 #undef EMIT 1718 1719 /* ARGSUSED */ 1720 static uint_t 1721 spdsock_rule_flags(uint_t dir, uint_t af) 1722 { 1723 uint_t flags = 0; 1724 1725 if (dir == IPSEC_TYPE_INBOUND) 1726 flags |= SPD_RULE_FLAG_INBOUND; 1727 if (dir == IPSEC_TYPE_OUTBOUND) 1728 flags |= SPD_RULE_FLAG_OUTBOUND; 1729 1730 return (flags); 1731 } 1732 1733 1734 static uint_t 1735 spdsock_encode_rule_head(uint8_t *base, uint_t offset, spd_msg_t *req, 1736 const ipsec_policy_t *rule, uint_t dir, uint_t af, char *name, 1737 boolean_t tunnel) 1738 { 1739 struct spd_msg *spmsg; 1740 struct spd_rule *spr; 1741 spd_if_t *sid; 1742 1743 uint_t start = offset; 1744 1745 ASSERT(ALIGNED64(offset)); 1746 1747 if (base != NULL) { 1748 spmsg = (struct spd_msg *)(base + offset); 1749 bzero(spmsg, sizeof (*spmsg)); 1750 spmsg->spd_msg_version = PF_POLICY_V1; 1751 spmsg->spd_msg_type = SPD_DUMP; 1752 spmsg->spd_msg_seq = req->spd_msg_seq; 1753 spmsg->spd_msg_pid = req->spd_msg_pid; 1754 } 1755 offset += sizeof (struct spd_msg); 1756 1757 ASSERT(ALIGNED64(offset)); 1758 1759 if (base != NULL) { 1760 spr = (struct spd_rule *)(base + offset); 1761 spr->spd_rule_type = SPD_EXT_RULE; 1762 spr->spd_rule_priority = rule->ipsp_prio; 1763 spr->spd_rule_flags = spdsock_rule_flags(dir, af); 1764 if (tunnel) 1765 spr->spd_rule_flags |= SPD_RULE_FLAG_TUNNEL; 1766 spr->spd_rule_unused = 0; 1767 spr->spd_rule_len = SPD_8TO64(sizeof (*spr)); 1768 spr->spd_rule_index = rule->ipsp_index; 1769 } 1770 offset += sizeof (struct spd_rule); 1771 1772 /* 1773 * If we have an interface name (i.e. if this policy head came from 1774 * a tunnel), add the SPD_EXT_TUN_NAME extension. 1775 */ 1776 if (name != NULL) { 1777 1778 ASSERT(ALIGNED64(offset)); 1779 1780 if (base != NULL) { 1781 sid = (spd_if_t *)(base + offset); 1782 sid->spd_if_exttype = SPD_EXT_TUN_NAME; 1783 sid->spd_if_len = SPD_8TO64(sizeof (spd_if_t) + 1784 roundup((strlen(name) - 4), 8)); 1785 (void) strlcpy((char *)sid->spd_if_name, name, 1786 LIFNAMSIZ); 1787 } 1788 1789 offset += sizeof (spd_if_t) + roundup((strlen(name) - 4), 8); 1790 } 1791 1792 offset = spdsock_encode_sel(base, offset, rule->ipsp_sel); 1793 offset = spdsock_encode_action_list(base, offset, rule->ipsp_act); 1794 1795 ASSERT(ALIGNED64(offset)); 1796 1797 if (base != NULL) { 1798 spmsg->spd_msg_len = SPD_8TO64(offset - start); 1799 } 1800 return (offset); 1801 } 1802 1803 /* ARGSUSED */ 1804 static mblk_t * 1805 spdsock_encode_rule(mblk_t *req, const ipsec_policy_t *rule, 1806 uint_t dir, uint_t af, char *name, boolean_t tunnel) 1807 { 1808 mblk_t *m; 1809 uint_t len; 1810 spd_msg_t *mreq = (spd_msg_t *)req->b_rptr; 1811 1812 /* 1813 * Figure out how much space we'll need. 1814 */ 1815 len = spdsock_encode_rule_head(NULL, 0, mreq, rule, dir, af, name, 1816 tunnel); 1817 1818 /* 1819 * Allocate mblk. 1820 */ 1821 m = allocb(len, BPRI_HI); 1822 if (m == NULL) 1823 return (NULL); 1824 1825 /* 1826 * Fill it in.. 
1827 */ 1828 m->b_wptr = m->b_rptr + len; 1829 bzero(m->b_rptr, len); 1830 (void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule, dir, af, 1831 name, tunnel); 1832 return (m); 1833 } 1834 1835 static ipsec_policy_t * 1836 spdsock_dump_next_in_chain(spdsock_t *ss, ipsec_policy_head_t *iph, 1837 ipsec_policy_t *cur) 1838 { 1839 ASSERT(RW_READ_HELD(&iph->iph_lock)); 1840 1841 ss->spdsock_dump_count++; 1842 ss->spdsock_dump_cur_rule = cur->ipsp_hash.hash_next; 1843 return (cur); 1844 } 1845 1846 static ipsec_policy_t * 1847 spdsock_dump_next_rule(spdsock_t *ss, ipsec_policy_head_t *iph) 1848 { 1849 ipsec_policy_t *cur; 1850 ipsec_policy_root_t *ipr; 1851 int chain, nchains, type, af; 1852 1853 ASSERT(RW_READ_HELD(&iph->iph_lock)); 1854 1855 cur = ss->spdsock_dump_cur_rule; 1856 1857 if (cur != NULL) 1858 return (spdsock_dump_next_in_chain(ss, iph, cur)); 1859 1860 type = ss->spdsock_dump_cur_type; 1861 1862 next: 1863 chain = ss->spdsock_dump_cur_chain; 1864 ipr = &iph->iph_root[type]; 1865 nchains = ipr->ipr_nchains; 1866 1867 while (chain < nchains) { 1868 cur = ipr->ipr_hash[chain].hash_head; 1869 chain++; 1870 if (cur != NULL) { 1871 ss->spdsock_dump_cur_chain = chain; 1872 return (spdsock_dump_next_in_chain(ss, iph, cur)); 1873 } 1874 } 1875 ss->spdsock_dump_cur_chain = nchains; 1876 1877 af = ss->spdsock_dump_cur_af; 1878 while (af < IPSEC_NAF) { 1879 cur = ipr->ipr_nonhash[af]; 1880 af++; 1881 if (cur != NULL) { 1882 ss->spdsock_dump_cur_af = af; 1883 return (spdsock_dump_next_in_chain(ss, iph, cur)); 1884 } 1885 } 1886 1887 type++; 1888 if (type >= IPSEC_NTYPES) 1889 return (NULL); 1890 1891 ss->spdsock_dump_cur_chain = 0; 1892 ss->spdsock_dump_cur_type = type; 1893 ss->spdsock_dump_cur_af = IPSEC_AF_V4; 1894 goto next; 1895 1896 } 1897 1898 /* 1899 * If we're done with one policy head, but have more to go, we iterate through 1900 * another IPsec tunnel policy head (itp). Return NULL if it is an error 1901 * worthy of returning EAGAIN via PF_POLICY. 1902 */ 1903 static ipsec_tun_pol_t * 1904 spdsock_dump_iterate_next_tunnel(spdsock_t *ss, ipsec_stack_t *ipss) 1905 { 1906 ipsec_tun_pol_t *itp; 1907 1908 ASSERT(RW_READ_HELD(&ipss->ipsec_tunnel_policy_lock)); 1909 if (ipss->ipsec_tunnel_policy_gen > ss->spdsock_dump_tun_gen) { 1910 /* Oops, state of the tunnel polheads changed. */ 1911 itp = NULL; 1912 } else if (ss->spdsock_itp == NULL) { 1913 /* Just finished global, find first node. */ 1914 itp = avl_first(&ipss->ipsec_tunnel_policies); 1915 } else { 1916 /* We just finished current polhead, find the next one. 
*/ 1917 itp = AVL_NEXT(&ipss->ipsec_tunnel_policies, ss->spdsock_itp); 1918 } 1919 if (itp != NULL) { 1920 ITP_REFHOLD(itp); 1921 } 1922 if (ss->spdsock_itp != NULL) { 1923 ITP_REFRELE(ss->spdsock_itp, ipss->ipsec_netstack); 1924 } 1925 ss->spdsock_itp = itp; 1926 return (itp); 1927 } 1928 1929 static mblk_t * 1930 spdsock_dump_next_record(spdsock_t *ss) 1931 { 1932 ipsec_policy_head_t *iph; 1933 ipsec_policy_t *rule; 1934 mblk_t *m; 1935 ipsec_tun_pol_t *itp; 1936 netstack_t *ns = ss->spdsock_spds->spds_netstack; 1937 ipsec_stack_t *ipss = ns->netstack_ipsec; 1938 1939 iph = ss->spdsock_dump_head; 1940 1941 ASSERT(iph != NULL); 1942 1943 rw_enter(&iph->iph_lock, RW_READER); 1944 1945 if (iph->iph_gen != ss->spdsock_dump_gen) { 1946 rw_exit(&iph->iph_lock); 1947 return (spdsock_dump_finish(ss, EAGAIN)); 1948 } 1949 1950 while ((rule = spdsock_dump_next_rule(ss, iph)) == NULL) { 1951 rw_exit(&iph->iph_lock); 1952 if (--(ss->spdsock_dump_remaining_polheads) == 0) 1953 return (spdsock_dump_finish(ss, 0)); 1954 1955 1956 /* 1957 * If we reach here, we have more policy heads (tunnel 1958 * entries) to dump. Let's reset to a new policy head 1959 * and get some more rules. 1960 * 1961 * An empty policy head will have spdsock_dump_next_rule() 1962 * return NULL, and we loop (while dropping the number of 1963 * remaining polheads). If we loop to 0, we finish. We 1964 * keep looping until we hit 0 or until we have a rule to 1965 * encode. 1966 * 1967 * NOTE: No need for ITP_REF*() macros here as we're only 1968 * going after and refholding the policy head itself. 1969 */ 1970 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER); 1971 itp = spdsock_dump_iterate_next_tunnel(ss, ipss); 1972 if (itp == NULL) { 1973 rw_exit(&ipss->ipsec_tunnel_policy_lock); 1974 return (spdsock_dump_finish(ss, EAGAIN)); 1975 } 1976 1977 /* Reset other spdsock_dump thingies. */ 1978 IPPH_REFRELE(ss->spdsock_dump_head, ns); 1979 if (ss->spdsock_dump_active) { 1980 ss->spdsock_dump_tunnel = 1981 itp->itp_flags & ITPF_P_TUNNEL; 1982 iph = itp->itp_policy; 1983 } else { 1984 ss->spdsock_dump_tunnel = 1985 itp->itp_flags & ITPF_I_TUNNEL; 1986 iph = itp->itp_inactive; 1987 } 1988 IPPH_REFHOLD(iph); 1989 rw_exit(&ipss->ipsec_tunnel_policy_lock); 1990 1991 rw_enter(&iph->iph_lock, RW_READER); 1992 RESET_SPDSOCK_DUMP_POLHEAD(ss, iph); 1993 } 1994 1995 m = spdsock_encode_rule(ss->spdsock_dump_req, rule, 1996 ss->spdsock_dump_cur_type, ss->spdsock_dump_cur_af, 1997 (ss->spdsock_itp == NULL) ? NULL : ss->spdsock_itp->itp_name, 1998 ss->spdsock_dump_tunnel); 1999 rw_exit(&iph->iph_lock); 2000 2001 if (m == NULL) 2002 return (spdsock_dump_finish(ss, ENOMEM)); 2003 return (m); 2004 } 2005 2006 /* 2007 * Dump records until we run into flow-control back-pressure. 2008 */ 2009 static void 2010 spdsock_dump_some(queue_t *q, spdsock_t *ss) 2011 { 2012 mblk_t *m, *dataind; 2013 2014 while ((ss->spdsock_dump_req != NULL) && canputnext(q)) { 2015 m = spdsock_dump_next_record(ss); 2016 if (m == NULL) 2017 return; 2018 dataind = allocb(sizeof (struct T_data_req), BPRI_HI); 2019 if (dataind == NULL) { 2020 freemsg(m); 2021 return; 2022 } 2023 dataind->b_cont = m; 2024 dataind->b_wptr += sizeof (struct T_data_req); 2025 ((struct T_data_ind *)dataind->b_rptr)->PRIM_type = T_DATA_IND; 2026 ((struct T_data_ind *)dataind->b_rptr)->MORE_flag = 0; 2027 dataind->b_datap->db_type = M_PROTO; 2028 putnext(q, dataind); 2029 } 2030 } 2031 2032 /* 2033 * Start dumping. 
2034 * Format a start-of-dump record, set up the stream, and kick the rsrv
2035 * procedure to continue the job.
2036 */
2037 /* ARGSUSED */
2038 static void
2039 spdsock_dump(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp)
2040 {
2041 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2042 netstack_t *ns = ss->spdsock_spds->spds_netstack;
2043 ipsec_stack_t *ipss = ns->netstack_ipsec;
2044 mblk_t *mr;
2045
2046 /* spdsock_open() already set spdsock_itp to NULL. */
2047 if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
2048 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
2049 ss->spdsock_dump_remaining_polheads = 1 +
2050 avl_numnodes(&ipss->ipsec_tunnel_policies);
2051 ss->spdsock_dump_tun_gen = ipss->ipsec_tunnel_policy_gen;
2052 rw_exit(&ipss->ipsec_tunnel_policy_lock);
2053 if (iph == ALL_ACTIVE_POLHEADS) {
2054 iph = ipsec_system_policy(ns);
2055 ss->spdsock_dump_active = B_TRUE;
2056 } else {
2057 iph = ipsec_inactive_policy(ns);
2058 ss->spdsock_dump_active = B_FALSE;
2059 }
2060 ASSERT(ss->spdsock_itp == NULL);
2061 } else {
2062 ss->spdsock_dump_remaining_polheads = 1;
2063 }
2064
2065 rw_enter(&iph->iph_lock, RW_READER);
2066
2067 mr = spdsock_dump_ruleset(mp, iph, 0, 0);
2068
2069 if (!mr) {
2070 rw_exit(&iph->iph_lock);
2071 spdsock_error(q, mp, ENOMEM, 0);
2072 return;
2073 }
2074
2075 ss->spdsock_dump_req = mp;
2076 RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);
2077
2078 rw_exit(&iph->iph_lock);
2079
2080 qreply(q, mr);
2081 qenable(OTHERQ(q));
2082 }
2083
2084 /* Do NOT consume a reference to ITP. */
2085 void
2086 spdsock_clone_node(ipsec_tun_pol_t *itp, void *ep, netstack_t *ns)
2087 {
2088 int *errptr = (int *)ep;
2089
2090 if (*errptr != 0)
2091 return; /* We've failed already for some reason. */
2092 mutex_enter(&itp->itp_lock);
2093 ITPF_CLONE(itp->itp_flags);
2094 *errptr = ipsec_copy_polhead(itp->itp_policy, itp->itp_inactive, ns);
2095 mutex_exit(&itp->itp_lock);
2096 }
2097
2098 void
2099 spdsock_clone(queue_t *q, mblk_t *mp, spd_if_t *tunname)
2100 {
2101 int error = 0; /* Not every path below sets this before it is read. */
2102 char *tname;
2103 ipsec_tun_pol_t *itp;
2104 spdsock_t *ss = (spdsock_t *)q->q_ptr;
2105 netstack_t *ns = ss->spdsock_spds->spds_netstack;
2106
2107 if (tunname != NULL) {
2108 tname = (char *)tunname->spd_if_name;
2109 if (*tname == '\0') {
2110 error = ipsec_clone_system_policy(ns);
2111 if (audit_active) {
2112 boolean_t active;
2113 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2114 cred_t *cr;
2115 pid_t cpid;
2116
2117 cr = msg_getcred(mp, &cpid);
2118 active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2119 audit_pf_policy(SPD_CLONE, cr, ns,
2120 NULL, active, error, cpid);
2121 }
2122 if (error == 0) {
2123 itp_walk(spdsock_clone_node, &error, ns);
2124 if (audit_active) {
2125 boolean_t active;
2126 spd_msg_t *spmsg =
2127 (spd_msg_t *)mp->b_rptr;
2128 cred_t *cr;
2129 pid_t cpid;
2130
2131 cr = msg_getcred(mp, &cpid);
2132 active = (spmsg->spd_msg_spdid ==
2133 SPD_ACTIVE);
2134 audit_pf_policy(SPD_CLONE, cr,
2135 ns, "all tunnels", active, 0,
2136 cpid);
2137 }
2138 }
2139 } else {
2140 itp = get_tunnel_policy(tname, ns);
2141 if (itp == NULL) {
2142 spdsock_error(q, mp, ENOENT, 0);
2143 if (audit_active) {
2144 boolean_t active;
2145 spd_msg_t *spmsg =
2146 (spd_msg_t *)mp->b_rptr;
2147 cred_t *cr;
2148 pid_t cpid;
2149
2150 cr = msg_getcred(mp, &cpid);
2151 active = (spmsg->spd_msg_spdid ==
2152 SPD_ACTIVE);
2153 audit_pf_policy(SPD_CLONE, cr,
2154 ns, ITP_NAME(itp), active, ENOENT,
2155 cpid);
2156 }
2157 return;
2158 }
2159 spdsock_clone_node(itp, &error, NULL);
2160
ITP_REFRELE(itp, ns); 2161 if (audit_active) { 2162 boolean_t active; 2163 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 2164 cred_t *cr; 2165 pid_t cpid; 2166 2167 cr = msg_getcred(mp, &cpid); 2168 active = (spmsg->spd_msg_spdid == SPD_ACTIVE); 2169 audit_pf_policy(SPD_CLONE, cr, ns, 2170 ITP_NAME(itp), active, error, cpid); 2171 } 2172 } 2173 } else { 2174 error = ipsec_clone_system_policy(ns); 2175 if (audit_active) { 2176 boolean_t active; 2177 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 2178 cred_t *cr; 2179 pid_t cpid; 2180 2181 cr = msg_getcred(mp, &cpid); 2182 active = (spmsg->spd_msg_spdid == SPD_ACTIVE); 2183 audit_pf_policy(SPD_CLONE, cr, ns, NULL, 2184 active, error, cpid); 2185 } 2186 } 2187 2188 if (error != 0) 2189 spdsock_error(q, mp, error, 0); 2190 else 2191 spd_echo(q, mp); 2192 } 2193 2194 /* 2195 * Process a SPD_ALGLIST request. The caller expects separate alg entries 2196 * for AH authentication, ESP authentication, and ESP encryption. 2197 * The same distinction is then used when setting the min and max key 2198 * sizes when defining policies. 2199 */ 2200 2201 #define SPDSOCK_AH_AUTH 0 2202 #define SPDSOCK_ESP_AUTH 1 2203 #define SPDSOCK_ESP_ENCR 2 2204 #define SPDSOCK_NTYPES 3 2205 2206 static const uint_t algattr[SPDSOCK_NTYPES] = { 2207 SPD_ATTR_AH_AUTH, 2208 SPD_ATTR_ESP_AUTH, 2209 SPD_ATTR_ESP_ENCR 2210 }; 2211 static const uint_t minbitsattr[SPDSOCK_NTYPES] = { 2212 SPD_ATTR_AH_MINBITS, 2213 SPD_ATTR_ESPA_MINBITS, 2214 SPD_ATTR_ENCR_MINBITS 2215 }; 2216 static const uint_t maxbitsattr[SPDSOCK_NTYPES] = { 2217 SPD_ATTR_AH_MAXBITS, 2218 SPD_ATTR_ESPA_MAXBITS, 2219 SPD_ATTR_ENCR_MAXBITS 2220 }; 2221 static const uint_t defbitsattr[SPDSOCK_NTYPES] = { 2222 SPD_ATTR_AH_DEFBITS, 2223 SPD_ATTR_ESPA_DEFBITS, 2224 SPD_ATTR_ENCR_DEFBITS 2225 }; 2226 static const uint_t incrbitsattr[SPDSOCK_NTYPES] = { 2227 SPD_ATTR_AH_INCRBITS, 2228 SPD_ATTR_ESPA_INCRBITS, 2229 SPD_ATTR_ENCR_INCRBITS 2230 }; 2231 2232 #define ATTRPERALG 6 /* fixed attributes per algs */ 2233 2234 void 2235 spdsock_alglist(queue_t *q, mblk_t *mp) 2236 { 2237 uint_t algtype; 2238 uint_t algidx; 2239 uint_t algcount; 2240 uint_t size; 2241 mblk_t *m; 2242 uint8_t *cur; 2243 spd_msg_t *msg; 2244 struct spd_ext_actions *act; 2245 struct spd_attribute *attr; 2246 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2247 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec; 2248 2249 mutex_enter(&ipss->ipsec_alg_lock); 2250 /* 2251 * The SPD client expects to receive separate entries for 2252 * AH authentication and ESP authentication supported algorithms. 2253 * 2254 * Don't return the "any" algorithms, if defined, as no 2255 * kernel policies can be set for these algorithms. 
2256 */ 2257 algcount = 2 * ipss->ipsec_nalgs[IPSEC_ALG_AUTH] + 2258 ipss->ipsec_nalgs[IPSEC_ALG_ENCR]; 2259 2260 if (ipss->ipsec_alglists[IPSEC_ALG_AUTH][SADB_AALG_NONE] != NULL) 2261 algcount--; 2262 if (ipss->ipsec_alglists[IPSEC_ALG_ENCR][SADB_EALG_NONE] != NULL) 2263 algcount--; 2264 2265 /* 2266 * For each algorithm, we encode: 2267 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT} 2268 */ 2269 2270 size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions) + 2271 ATTRPERALG * sizeof (struct spd_attribute) * algcount; 2272 2273 ASSERT(ALIGNED64(size)); 2274 2275 m = allocb(size, BPRI_HI); 2276 if (m == NULL) { 2277 mutex_exit(&ipss->ipsec_alg_lock); 2278 spdsock_error(q, mp, ENOMEM, 0); 2279 return; 2280 } 2281 2282 m->b_wptr = m->b_rptr + size; 2283 cur = m->b_rptr; 2284 2285 msg = (spd_msg_t *)cur; 2286 bcopy(mp->b_rptr, cur, sizeof (*msg)); 2287 2288 msg->spd_msg_len = SPD_8TO64(size); 2289 msg->spd_msg_errno = 0; 2290 msg->spd_msg_diagnostic = 0; 2291 2292 cur += sizeof (*msg); 2293 2294 act = (struct spd_ext_actions *)cur; 2295 cur += sizeof (*act); 2296 2297 act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t)); 2298 act->spd_actions_exttype = SPD_EXT_ACTION; 2299 act->spd_actions_count = algcount; 2300 act->spd_actions_reserved = 0; 2301 2302 attr = (struct spd_attribute *)cur; 2303 2304 #define EMIT(tag, value) { \ 2305 attr->spd_attr_tag = (tag); \ 2306 attr->spd_attr_value = (value); \ 2307 attr++; \ 2308 } 2309 2310 /* 2311 * If you change the number of EMIT's here, change 2312 * ATTRPERALG above to match 2313 */ 2314 #define EMITALGATTRS(_type) { \ 2315 EMIT(algattr[_type], algid); /* 1 */ \ 2316 EMIT(minbitsattr[_type], minbits); /* 2 */ \ 2317 EMIT(maxbitsattr[_type], maxbits); /* 3 */ \ 2318 EMIT(defbitsattr[_type], defbits); /* 4 */ \ 2319 EMIT(incrbitsattr[_type], incr); /* 5 */ \ 2320 EMIT(SPD_ATTR_NEXT, 0); /* 6 */ \ 2321 } 2322 2323 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 2324 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype]; 2325 algidx++) { 2326 int algid = ipss->ipsec_sortlist[algtype][algidx]; 2327 ipsec_alginfo_t *alg = 2328 ipss->ipsec_alglists[algtype][algid]; 2329 uint_t minbits = alg->alg_minbits; 2330 uint_t maxbits = alg->alg_maxbits; 2331 uint_t defbits = alg->alg_default_bits; 2332 uint_t incr = alg->alg_increment; 2333 2334 if (algtype == IPSEC_ALG_AUTH) { 2335 if (algid == SADB_AALG_NONE) 2336 continue; 2337 EMITALGATTRS(SPDSOCK_AH_AUTH); 2338 EMITALGATTRS(SPDSOCK_ESP_AUTH); 2339 } else { 2340 if (algid == SADB_EALG_NONE) 2341 continue; 2342 ASSERT(algtype == IPSEC_ALG_ENCR); 2343 EMITALGATTRS(SPDSOCK_ESP_ENCR); 2344 } 2345 } 2346 } 2347 2348 mutex_exit(&ipss->ipsec_alg_lock); 2349 2350 #undef EMITALGATTRS 2351 #undef EMIT 2352 #undef ATTRPERALG 2353 2354 attr--; 2355 attr->spd_attr_tag = SPD_ATTR_END; 2356 2357 freemsg(mp); 2358 qreply(q, m); 2359 } 2360 2361 /* 2362 * Process a SPD_DUMPALGS request. 
2363 */ 2364 2365 #define ATTRPERALG 7 /* fixed attributes per algs */ 2366 2367 void 2368 spdsock_dumpalgs(queue_t *q, mblk_t *mp) 2369 { 2370 uint_t algtype; 2371 uint_t algidx; 2372 uint_t size; 2373 mblk_t *m; 2374 uint8_t *cur; 2375 spd_msg_t *msg; 2376 struct spd_ext_actions *act; 2377 struct spd_attribute *attr; 2378 ipsec_alginfo_t *alg; 2379 uint_t algid; 2380 uint_t i; 2381 uint_t alg_size; 2382 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2383 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec; 2384 2385 mutex_enter(&ipss->ipsec_alg_lock); 2386 2387 /* 2388 * For each algorithm, we encode: 2389 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT} 2390 * 2391 * ALG_ID / ALG_PROTO / ALG_INCRBITS / ALG_NKEYSIZES / ALG_KEYSIZE* 2392 * ALG_NBLOCKSIZES / ALG_BLOCKSIZE* / ALG_MECHNAME / {END, NEXT} 2393 */ 2394 2395 /* 2396 * Compute the size of the SPD message. 2397 */ 2398 size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions); 2399 2400 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 2401 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype]; 2402 algidx++) { 2403 algid = ipss->ipsec_sortlist[algtype][algidx]; 2404 alg = ipss->ipsec_alglists[algtype][algid]; 2405 alg_size = sizeof (struct spd_attribute) * 2406 (ATTRPERALG + alg->alg_nkey_sizes + 2407 alg->alg_nblock_sizes) + CRYPTO_MAX_MECH_NAME; 2408 size += alg_size; 2409 } 2410 } 2411 2412 ASSERT(ALIGNED64(size)); 2413 2414 m = allocb(size, BPRI_HI); 2415 if (m == NULL) { 2416 mutex_exit(&ipss->ipsec_alg_lock); 2417 spdsock_error(q, mp, ENOMEM, 0); 2418 return; 2419 } 2420 2421 m->b_wptr = m->b_rptr + size; 2422 cur = m->b_rptr; 2423 2424 msg = (spd_msg_t *)cur; 2425 bcopy(mp->b_rptr, cur, sizeof (*msg)); 2426 2427 msg->spd_msg_len = SPD_8TO64(size); 2428 msg->spd_msg_errno = 0; 2429 msg->spd_msg_type = SPD_ALGLIST; 2430 2431 msg->spd_msg_diagnostic = 0; 2432 2433 cur += sizeof (*msg); 2434 2435 act = (struct spd_ext_actions *)cur; 2436 cur += sizeof (*act); 2437 2438 act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t)); 2439 act->spd_actions_exttype = SPD_EXT_ACTION; 2440 act->spd_actions_count = ipss->ipsec_nalgs[IPSEC_ALG_AUTH] + 2441 ipss->ipsec_nalgs[IPSEC_ALG_ENCR]; 2442 act->spd_actions_reserved = 0; 2443 2444 /* 2445 * If there aren't any algorithms registered, return an empty message. 2446 * spdsock_get_ext() knows how to deal with this. 
2447 */ 2448 if (act->spd_actions_count == 0) { 2449 act->spd_actions_len = 0; 2450 mutex_exit(&ipss->ipsec_alg_lock); 2451 goto error; 2452 } 2453 2454 attr = (struct spd_attribute *)cur; 2455 2456 #define EMIT(tag, value) { \ 2457 attr->spd_attr_tag = (tag); \ 2458 attr->spd_attr_value = (value); \ 2459 attr++; \ 2460 } 2461 2462 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 2463 for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype]; 2464 algidx++) { 2465 2466 algid = ipss->ipsec_sortlist[algtype][algidx]; 2467 alg = ipss->ipsec_alglists[algtype][algid]; 2468 2469 /* 2470 * If you change the number of EMIT's here, change 2471 * ATTRPERALG above to match 2472 */ 2473 EMIT(SPD_ATTR_ALG_ID, algid); 2474 EMIT(SPD_ATTR_ALG_PROTO, algproto[algtype]); 2475 EMIT(SPD_ATTR_ALG_INCRBITS, alg->alg_increment); 2476 2477 EMIT(SPD_ATTR_ALG_NKEYSIZES, alg->alg_nkey_sizes); 2478 for (i = 0; i < alg->alg_nkey_sizes; i++) 2479 EMIT(SPD_ATTR_ALG_KEYSIZE, 2480 alg->alg_key_sizes[i]); 2481 2482 EMIT(SPD_ATTR_ALG_NBLOCKSIZES, alg->alg_nblock_sizes); 2483 for (i = 0; i < alg->alg_nblock_sizes; i++) 2484 EMIT(SPD_ATTR_ALG_BLOCKSIZE, 2485 alg->alg_block_sizes[i]); 2486 2487 EMIT(SPD_ATTR_ALG_MECHNAME, CRYPTO_MAX_MECH_NAME); 2488 bcopy(alg->alg_mech_name, attr, CRYPTO_MAX_MECH_NAME); 2489 attr = (struct spd_attribute *)((char *)attr + 2490 CRYPTO_MAX_MECH_NAME); 2491 2492 EMIT(SPD_ATTR_NEXT, 0); 2493 } 2494 } 2495 2496 mutex_exit(&ipss->ipsec_alg_lock); 2497 2498 #undef EMITALGATTRS 2499 #undef EMIT 2500 #undef ATTRPERALG 2501 2502 attr--; 2503 attr->spd_attr_tag = SPD_ATTR_END; 2504 2505 error: 2506 freemsg(mp); 2507 qreply(q, m); 2508 } 2509 2510 /* 2511 * Do the actual work of processing an SPD_UPDATEALGS request. Can 2512 * be invoked either once IPsec is loaded on a cached request, or 2513 * when a request is received while IPsec is loaded. 
2514 */ 2515 static void 2516 spdsock_do_updatealg(spd_ext_t *extv[], int *diag, spd_stack_t *spds) 2517 { 2518 struct spd_ext_actions *actp; 2519 struct spd_attribute *attr, *endattr; 2520 uint64_t *start, *end; 2521 ipsec_alginfo_t *alg = NULL; 2522 ipsec_algtype_t alg_type = 0; 2523 boolean_t skip_alg = B_TRUE, doing_proto = B_FALSE; 2524 uint_t i, cur_key, cur_block, algid; 2525 2526 *diag = -1; 2527 ASSERT(MUTEX_HELD(&spds->spds_alg_lock)); 2528 2529 /* parse the message, building the list of algorithms */ 2530 2531 actp = (struct spd_ext_actions *)extv[SPD_EXT_ACTION]; 2532 if (actp == NULL) { 2533 *diag = SPD_DIAGNOSTIC_NO_ACTION_EXT; 2534 return; 2535 } 2536 2537 start = (uint64_t *)actp; 2538 end = (start + actp->spd_actions_len); 2539 endattr = (struct spd_attribute *)end; 2540 attr = (struct spd_attribute *)&actp[1]; 2541 2542 bzero(spds->spds_algs, IPSEC_NALGTYPES * IPSEC_MAX_ALGS * 2543 sizeof (ipsec_alginfo_t *)); 2544 2545 alg = kmem_zalloc(sizeof (*alg), KM_SLEEP); 2546 2547 #define ALG_KEY_SIZES(a) (((a)->alg_nkey_sizes + 1) * sizeof (uint16_t)) 2548 #define ALG_BLOCK_SIZES(a) (((a)->alg_nblock_sizes + 1) * sizeof (uint16_t)) 2549 2550 while (attr < endattr) { 2551 switch (attr->spd_attr_tag) { 2552 case SPD_ATTR_NOP: 2553 case SPD_ATTR_EMPTY: 2554 break; 2555 case SPD_ATTR_END: 2556 attr = endattr; 2557 /* FALLTHRU */ 2558 case SPD_ATTR_NEXT: 2559 if (doing_proto) { 2560 doing_proto = B_FALSE; 2561 break; 2562 } 2563 if (skip_alg) { 2564 ipsec_alg_free(alg); 2565 } else { 2566 ipsec_alg_free( 2567 spds->spds_algs[alg_type][alg->alg_id]); 2568 spds->spds_algs[alg_type][alg->alg_id] = 2569 alg; 2570 } 2571 alg = kmem_zalloc(sizeof (*alg), KM_SLEEP); 2572 break; 2573 2574 case SPD_ATTR_ALG_ID: 2575 if (attr->spd_attr_value >= IPSEC_MAX_ALGS) { 2576 ss1dbg(spds, ("spdsock_do_updatealg: " 2577 "invalid alg id %d\n", 2578 attr->spd_attr_value)); 2579 *diag = SPD_DIAGNOSTIC_ALG_ID_RANGE; 2580 goto bail; 2581 } 2582 alg->alg_id = attr->spd_attr_value; 2583 break; 2584 2585 case SPD_ATTR_ALG_PROTO: 2586 /* find the alg type */ 2587 for (i = 0; i < NALGPROTOS; i++) 2588 if (algproto[i] == attr->spd_attr_value) 2589 break; 2590 skip_alg = (i == NALGPROTOS); 2591 if (!skip_alg) 2592 alg_type = i; 2593 break; 2594 2595 case SPD_ATTR_ALG_INCRBITS: 2596 alg->alg_increment = attr->spd_attr_value; 2597 break; 2598 2599 case SPD_ATTR_ALG_NKEYSIZES: 2600 if (alg->alg_key_sizes != NULL) { 2601 kmem_free(alg->alg_key_sizes, 2602 ALG_KEY_SIZES(alg)); 2603 } 2604 alg->alg_nkey_sizes = attr->spd_attr_value; 2605 /* 2606 * Allocate room for the trailing zero key size 2607 * value as well. 2608 */ 2609 alg->alg_key_sizes = kmem_zalloc(ALG_KEY_SIZES(alg), 2610 KM_SLEEP); 2611 cur_key = 0; 2612 break; 2613 2614 case SPD_ATTR_ALG_KEYSIZE: 2615 if (alg->alg_key_sizes == NULL || 2616 cur_key >= alg->alg_nkey_sizes) { 2617 ss1dbg(spds, ("spdsock_do_updatealg: " 2618 "too many key sizes\n")); 2619 *diag = SPD_DIAGNOSTIC_ALG_NUM_KEY_SIZES; 2620 goto bail; 2621 } 2622 alg->alg_key_sizes[cur_key++] = attr->spd_attr_value; 2623 break; 2624 2625 case SPD_ATTR_ALG_NBLOCKSIZES: 2626 if (alg->alg_block_sizes != NULL) { 2627 kmem_free(alg->alg_block_sizes, 2628 ALG_BLOCK_SIZES(alg)); 2629 } 2630 alg->alg_nblock_sizes = attr->spd_attr_value; 2631 /* 2632 * Allocate room for the trailing zero block size 2633 * value as well. 
2634 */ 2635 alg->alg_block_sizes = kmem_zalloc(ALG_BLOCK_SIZES(alg), 2636 KM_SLEEP); 2637 cur_block = 0; 2638 break; 2639 2640 case SPD_ATTR_ALG_BLOCKSIZE: 2641 if (alg->alg_block_sizes == NULL || 2642 cur_block >= alg->alg_nblock_sizes) { 2643 ss1dbg(spds, ("spdsock_do_updatealg: " 2644 "too many block sizes\n")); 2645 *diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES; 2646 goto bail; 2647 } 2648 alg->alg_block_sizes[cur_block++] = 2649 attr->spd_attr_value; 2650 break; 2651 2652 case SPD_ATTR_ALG_MECHNAME: { 2653 char *mech_name; 2654 2655 if (attr->spd_attr_value > CRYPTO_MAX_MECH_NAME) { 2656 ss1dbg(spds, ("spdsock_do_updatealg: " 2657 "mech name too long\n")); 2658 *diag = SPD_DIAGNOSTIC_ALG_MECH_NAME_LEN; 2659 goto bail; 2660 } 2661 mech_name = (char *)(attr + 1); 2662 bcopy(mech_name, alg->alg_mech_name, 2663 attr->spd_attr_value); 2664 alg->alg_mech_name[CRYPTO_MAX_MECH_NAME-1] = '\0'; 2665 attr = (struct spd_attribute *)((char *)attr + 2666 attr->spd_attr_value); 2667 break; 2668 } 2669 2670 case SPD_ATTR_PROTO_ID: 2671 doing_proto = B_TRUE; 2672 for (i = 0; i < NALGPROTOS; i++) { 2673 if (algproto[i] == attr->spd_attr_value) { 2674 alg_type = i; 2675 break; 2676 } 2677 } 2678 break; 2679 2680 case SPD_ATTR_PROTO_EXEC_MODE: 2681 if (!doing_proto) 2682 break; 2683 for (i = 0; i < NEXECMODES; i++) { 2684 if (execmodes[i] == attr->spd_attr_value) { 2685 spds->spds_algs_exec_mode[alg_type] = i; 2686 break; 2687 } 2688 } 2689 break; 2690 } 2691 attr++; 2692 } 2693 2694 #undef ALG_KEY_SIZES 2695 #undef ALG_BLOCK_SIZES 2696 2697 /* update the algorithm tables */ 2698 spdsock_merge_algs(spds); 2699 bail: 2700 /* cleanup */ 2701 ipsec_alg_free(alg); 2702 for (alg_type = 0; alg_type < IPSEC_NALGTYPES; alg_type++) 2703 for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) 2704 if (spds->spds_algs[alg_type][algid] != NULL) 2705 ipsec_alg_free(spds->spds_algs[alg_type][algid]); 2706 } 2707 2708 /* 2709 * Process an SPD_UPDATEALGS request. If IPsec is not loaded, queue 2710 * the request until IPsec loads. If IPsec is loaded, act on it 2711 * immediately. 2712 */ 2713 2714 static void 2715 spdsock_updatealg(queue_t *q, mblk_t *mp, spd_ext_t *extv[]) 2716 { 2717 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2718 spd_stack_t *spds = ss->spdsock_spds; 2719 ipsec_stack_t *ipss = spds->spds_netstack->netstack_ipsec; 2720 2721 if (!ipsec_loaded(ipss)) { 2722 /* 2723 * IPsec is not loaded, save request and return nicely, 2724 * the message will be processed once IPsec loads. 2725 */ 2726 mblk_t *new_mp; 2727 2728 /* last update message wins */ 2729 if ((new_mp = copymsg(mp)) == NULL) { 2730 spdsock_error(q, mp, ENOMEM, 0); 2731 return; 2732 } 2733 mutex_enter(&spds->spds_alg_lock); 2734 bcopy(extv, spds->spds_extv_algs, 2735 sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1)); 2736 if (spds->spds_mp_algs != NULL) 2737 freemsg(spds->spds_mp_algs); 2738 spds->spds_mp_algs = mp; 2739 spds->spds_algs_pending = B_TRUE; 2740 mutex_exit(&spds->spds_alg_lock); 2741 if (audit_active) { 2742 cred_t *cr; 2743 pid_t cpid; 2744 2745 cr = msg_getcred(mp, &cpid); 2746 audit_pf_policy(SPD_UPDATEALGS, cr, 2747 spds->spds_netstack, NULL, B_TRUE, EAGAIN, 2748 cpid); 2749 } 2750 spd_echo(q, new_mp); 2751 } else { 2752 /* 2753 * IPsec is loaded, act on the message immediately. 
2754 */ 2755 int diag; 2756 2757 mutex_enter(&spds->spds_alg_lock); 2758 spdsock_do_updatealg(extv, &diag, spds); 2759 mutex_exit(&spds->spds_alg_lock); 2760 if (diag == -1) { 2761 spd_echo(q, mp); 2762 if (audit_active) { 2763 cred_t *cr; 2764 pid_t cpid; 2765 2766 cr = msg_getcred(mp, &cpid); 2767 audit_pf_policy(SPD_UPDATEALGS, cr, 2768 spds->spds_netstack, NULL, B_TRUE, 0, 2769 cpid); 2770 } 2771 } else { 2772 spdsock_diag(q, mp, diag); 2773 if (audit_active) { 2774 cred_t *cr; 2775 pid_t cpid; 2776 2777 cr = msg_getcred(mp, &cpid); 2778 audit_pf_policy(SPD_UPDATEALGS, cr, 2779 spds->spds_netstack, NULL, B_TRUE, diag, 2780 cpid); 2781 } 2782 } 2783 } 2784 } 2785 2786 /* 2787 * Sort through the mess of polhead options to retrieve an appropriate one. 2788 * Returns NULL if we send an spdsock error. Returns a valid pointer if we 2789 * found a valid polhead. Returns ALL_ACTIVE_POLHEADS (aka. -1) or 2790 * ALL_INACTIVE_POLHEADS (aka. -2) if the operation calls for the operation to 2791 * act on ALL policy heads. 2792 */ 2793 static ipsec_policy_head_t * 2794 get_appropriate_polhead(queue_t *q, mblk_t *mp, spd_if_t *tunname, int spdid, 2795 int msgtype, ipsec_tun_pol_t **itpp) 2796 { 2797 ipsec_tun_pol_t *itp; 2798 ipsec_policy_head_t *iph; 2799 int errno; 2800 char *tname; 2801 boolean_t active; 2802 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2803 netstack_t *ns = ss->spdsock_spds->spds_netstack; 2804 uint64_t gen; /* Placeholder */ 2805 datalink_id_t linkid; 2806 2807 active = (spdid == SPD_ACTIVE); 2808 *itpp = NULL; 2809 if (!active && spdid != SPD_STANDBY) { 2810 spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_SPDID); 2811 return (NULL); 2812 } 2813 2814 if (tunname != NULL) { 2815 /* Acting on a tunnel's SPD. */ 2816 tname = (char *)tunname->spd_if_name; 2817 if (*tname == '\0') { 2818 /* Handle all-polhead cases here. */ 2819 if (msgtype != SPD_FLUSH && msgtype != SPD_DUMP) { 2820 spdsock_diag(q, mp, 2821 SPD_DIAGNOSTIC_NOT_GLOBAL_OP); 2822 return (NULL); 2823 } 2824 return (active ? ALL_ACTIVE_POLHEADS : 2825 ALL_INACTIVE_POLHEADS); 2826 } 2827 2828 itp = get_tunnel_policy(tname, ns); 2829 if (itp == NULL) { 2830 if (msgtype != SPD_ADDRULE) { 2831 /* "Tunnel not found" */ 2832 spdsock_error(q, mp, ENOENT, 0); 2833 return (NULL); 2834 } 2835 2836 errno = 0; 2837 itp = create_tunnel_policy(tname, &errno, &gen, ns); 2838 if (itp == NULL) { 2839 /* 2840 * Something very bad happened, most likely 2841 * ENOMEM. Return an indicator. 2842 */ 2843 spdsock_error(q, mp, errno, 0); 2844 return (NULL); 2845 } 2846 } 2847 /* 2848 * Troll the plumbed tunnels and see if we have a match. We 2849 * need to do this always in case we add policy AFTER plumbing 2850 * a tunnel. 2851 */ 2852 if (dls_mgmt_get_linkid(tname, &linkid) == 0) 2853 iptun_set_policy(linkid, itp); 2854 2855 *itpp = itp; 2856 /* For spdsock dump state, set the polhead's name. */ 2857 if (msgtype == SPD_DUMP) { 2858 ITP_REFHOLD(itp); 2859 ss->spdsock_itp = itp; 2860 ss->spdsock_dump_tunnel = itp->itp_flags & 2861 (active ? ITPF_P_TUNNEL : ITPF_I_TUNNEL); 2862 } 2863 } else { 2864 itp = NULL; 2865 /* For spdsock dump state, indicate it's global policy. */ 2866 if (msgtype == SPD_DUMP) 2867 ss->spdsock_itp = NULL; 2868 } 2869 2870 if (active) 2871 iph = (itp == NULL) ? ipsec_system_policy(ns) : itp->itp_policy; 2872 else 2873 iph = (itp == NULL) ? 
ipsec_inactive_policy(ns) : 2874 itp->itp_inactive; 2875 2876 ASSERT(iph != NULL); 2877 if (itp != NULL) { 2878 IPPH_REFHOLD(iph); 2879 } 2880 2881 return (iph); 2882 } 2883 2884 static void 2885 spdsock_parse(queue_t *q, mblk_t *mp) 2886 { 2887 spd_msg_t *spmsg; 2888 spd_ext_t *extv[SPD_EXT_MAX + 1]; 2889 uint_t msgsize; 2890 ipsec_policy_head_t *iph; 2891 ipsec_tun_pol_t *itp; 2892 spd_if_t *tunname; 2893 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2894 spd_stack_t *spds = ss->spdsock_spds; 2895 netstack_t *ns = spds->spds_netstack; 2896 ipsec_stack_t *ipss = ns->netstack_ipsec; 2897 2898 /* Make sure nothing's below me. */ 2899 ASSERT(WR(q)->q_next == NULL); 2900 2901 spmsg = (spd_msg_t *)mp->b_rptr; 2902 2903 msgsize = SPD_64TO8(spmsg->spd_msg_len); 2904 2905 if (msgdsize(mp) != msgsize) { 2906 /* 2907 * Message len incorrect w.r.t. actual size. Send an error 2908 * (EMSGSIZE). It may be necessary to massage things a 2909 * bit. For example, if the spd_msg_type is hosed, 2910 * I need to set it to SPD_RESERVED to get delivery to 2911 * do the right thing. Then again, maybe just letting 2912 * the error delivery do the right thing. 2913 */ 2914 ss2dbg(spds, 2915 ("mblk (%lu) and base (%d) message sizes don't jibe.\n", 2916 msgdsize(mp), msgsize)); 2917 spdsock_error(q, mp, EMSGSIZE, SPD_DIAGNOSTIC_NONE); 2918 return; 2919 } 2920 2921 if (msgsize > (uint_t)(mp->b_wptr - mp->b_rptr)) { 2922 /* Get all message into one mblk. */ 2923 if (pullupmsg(mp, -1) == 0) { 2924 /* 2925 * Something screwy happened. 2926 */ 2927 ss3dbg(spds, ("spdsock_parse: pullupmsg() failed.\n")); 2928 return; 2929 } else { 2930 spmsg = (spd_msg_t *)mp->b_rptr; 2931 } 2932 } 2933 2934 switch (spdsock_get_ext(extv, spmsg, msgsize)) { 2935 case KGE_DUP: 2936 /* Handle duplicate extension. */ 2937 ss1dbg(spds, ("Got duplicate extension of type %d.\n", 2938 extv[0]->spd_ext_type)); 2939 spdsock_diag(q, mp, dup_ext_diag[extv[0]->spd_ext_type]); 2940 return; 2941 case KGE_UNK: 2942 /* Handle unknown extension. */ 2943 ss1dbg(spds, ("Got unknown extension of type %d.\n", 2944 extv[0]->spd_ext_type)); 2945 spdsock_diag(q, mp, SPD_DIAGNOSTIC_UNKNOWN_EXT); 2946 return; 2947 case KGE_LEN: 2948 /* Length error. */ 2949 ss1dbg(spds, ("Length %d on extension type %d overrun or 0.\n", 2950 extv[0]->spd_ext_len, extv[0]->spd_ext_type)); 2951 spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_EXTLEN); 2952 return; 2953 case KGE_CHK: 2954 /* Reality check failed. */ 2955 ss1dbg(spds, ("Reality check failed on extension type %d.\n", 2956 extv[0]->spd_ext_type)); 2957 spdsock_diag(q, mp, bad_ext_diag[extv[0]->spd_ext_type]); 2958 return; 2959 default: 2960 /* Default case is no errors. */ 2961 break; 2962 } 2963 2964 /* 2965 * Special-case SPD_UPDATEALGS so as not to load IPsec. 2966 */ 2967 if (!ipsec_loaded(ipss) && spmsg->spd_msg_type != SPD_UPDATEALGS) { 2968 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2969 2970 ASSERT(ss != NULL); 2971 ipsec_loader_loadnow(ipss); 2972 ss->spdsock_timeout_arg = mp; 2973 ss->spdsock_timeout = qtimeout(q, spdsock_loadcheck, 2974 q, LOADCHECK_INTERVAL); 2975 return; 2976 } 2977 2978 /* First check for messages that need no polheads at all. 
*/ 2979 switch (spmsg->spd_msg_type) { 2980 case SPD_UPDATEALGS: 2981 spdsock_updatealg(q, mp, extv); 2982 return; 2983 case SPD_ALGLIST: 2984 spdsock_alglist(q, mp); 2985 return; 2986 case SPD_DUMPALGS: 2987 spdsock_dumpalgs(q, mp); 2988 return; 2989 } 2990 2991 /* 2992 * Then check for ones that need both primary/secondary polheads, 2993 * finding the appropriate tunnel policy if need be. 2994 */ 2995 tunname = (spd_if_t *)extv[SPD_EXT_TUN_NAME]; 2996 switch (spmsg->spd_msg_type) { 2997 case SPD_FLIP: 2998 spdsock_flip(q, mp, tunname); 2999 return; 3000 case SPD_CLONE: 3001 spdsock_clone(q, mp, tunname); 3002 return; 3003 } 3004 3005 /* 3006 * Finally, find ones that operate on exactly one polhead, or 3007 * "all polheads" of a given type (active/inactive). 3008 */ 3009 iph = get_appropriate_polhead(q, mp, tunname, spmsg->spd_msg_spdid, 3010 spmsg->spd_msg_type, &itp); 3011 if (iph == NULL) 3012 return; 3013 3014 /* All-polheads-ready operations. */ 3015 switch (spmsg->spd_msg_type) { 3016 case SPD_FLUSH: 3017 if (itp != NULL) { 3018 mutex_enter(&itp->itp_lock); 3019 if (spmsg->spd_msg_spdid == SPD_ACTIVE) 3020 itp->itp_flags &= ~ITPF_PFLAGS; 3021 else 3022 itp->itp_flags &= ~ITPF_IFLAGS; 3023 mutex_exit(&itp->itp_lock); 3024 ITP_REFRELE(itp, ns); 3025 } 3026 spdsock_flush(q, iph, itp, mp); 3027 return; 3028 case SPD_DUMP: 3029 if (itp != NULL) 3030 ITP_REFRELE(itp, ns); 3031 spdsock_dump(q, iph, mp); 3032 return; 3033 } 3034 3035 if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) { 3036 spdsock_diag(q, mp, SPD_DIAGNOSTIC_NOT_GLOBAL_OP); 3037 return; 3038 } 3039 3040 /* Single-polhead-only operations. */ 3041 switch (spmsg->spd_msg_type) { 3042 case SPD_ADDRULE: 3043 spdsock_addrule(q, iph, mp, extv, itp); 3044 break; 3045 case SPD_DELETERULE: 3046 spdsock_deleterule(q, iph, mp, extv, itp); 3047 break; 3048 case SPD_LOOKUP: 3049 spdsock_lookup(q, iph, mp, extv, itp); 3050 break; 3051 default: 3052 spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_MSG_TYPE); 3053 break; 3054 } 3055 3056 IPPH_REFRELE(iph, ns); 3057 if (itp != NULL) 3058 ITP_REFRELE(itp, ns); 3059 } 3060 3061 /* 3062 * If an algorithm mapping was received before IPsec was loaded, process it. 3063 * Called from the IPsec loader. 3064 */ 3065 void 3066 spdsock_update_pending_algs(netstack_t *ns) 3067 { 3068 spd_stack_t *spds = ns->netstack_spdsock; 3069 3070 mutex_enter(&spds->spds_alg_lock); 3071 if (spds->spds_algs_pending) { 3072 int diag; 3073 3074 spdsock_do_updatealg(spds->spds_extv_algs, &diag, 3075 spds); 3076 spds->spds_algs_pending = B_FALSE; 3077 } 3078 mutex_exit(&spds->spds_alg_lock); 3079 } 3080 3081 static void 3082 spdsock_loadcheck(void *arg) 3083 { 3084 queue_t *q = (queue_t *)arg; 3085 spdsock_t *ss = (spdsock_t *)q->q_ptr; 3086 mblk_t *mp; 3087 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec; 3088 3089 ASSERT(ss != NULL); 3090 3091 ss->spdsock_timeout = 0; 3092 mp = ss->spdsock_timeout_arg; 3093 ASSERT(mp != NULL); 3094 ss->spdsock_timeout_arg = NULL; 3095 if (ipsec_failed(ipss)) 3096 spdsock_error(q, mp, EPROTONOSUPPORT, 0); 3097 else 3098 spdsock_parse(q, mp); 3099 } 3100 3101 /* 3102 * Copy relevant state bits. 3103 */ 3104 static void 3105 spdsock_copy_info(struct T_info_ack *tap, spdsock_t *ss) 3106 { 3107 *tap = spdsock_g_t_info_ack; 3108 tap->CURRENT_state = ss->spdsock_state; 3109 tap->OPT_size = spdsock_max_optsize; 3110 } 3111 3112 /* 3113 * This routine responds to T_CAPABILITY_REQ messages. It is called by 3114 * spdsock_wput. 
Much of the T_CAPABILITY_ACK information is copied from
3115 * spdsock_g_t_info_ack. The current state of the stream is copied from
3116 * spdsock_state.
3117 */
3118 static void
3119 spdsock_capability_req(queue_t *q, mblk_t *mp)
3120 {
3121 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3122 t_uscalar_t cap_bits1;
3123 struct T_capability_ack *tcap;
3124
3125 cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
3126
3127 mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
3128 mp->b_datap->db_type, T_CAPABILITY_ACK);
3129 if (mp == NULL)
3130 return;
3131
3132 tcap = (struct T_capability_ack *)mp->b_rptr;
3133 tcap->CAP_bits1 = 0;
3134
3135 if (cap_bits1 & TC1_INFO) {
3136 spdsock_copy_info(&tcap->INFO_ack, ss);
3137 tcap->CAP_bits1 |= TC1_INFO;
3138 }
3139
3140 qreply(q, mp);
3141 }
3142
3143 /*
3144 * This routine responds to T_INFO_REQ messages. It is called by
3145 * spdsock_wput_other.
3146 * Most of the T_INFO_ACK information is copied from spdsock_g_t_info_ack.
3147 * The current state of the stream is copied from spdsock_state.
3148 */
3149 static void
3150 spdsock_info_req(queue_t *q, mblk_t *mp)
3151 {
3154 mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
3155 T_INFO_ACK);
3156 if (mp == NULL)
3157 return;
3158 spdsock_copy_info((struct T_info_ack *)mp->b_rptr,
3159 (spdsock_t *)q->q_ptr);
3160 qreply(q, mp);
3161 }
3162
3163 /*
3164 * spdsock_err_ack. This routine creates a T_ERROR_ACK message and
3165 * passes it upstream.
3166 */
3168 static void
3169 spdsock_err_ack(queue_t *q, mblk_t *mp, int t_error, int sys_error)
3170 {
3175 if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
3176 qreply(q, mp);
3177 }
3178
3179 /*
3180 * This routine retrieves the current status of socket options.
3181 * It returns the size of the option retrieved.
3182 */
3183 /* ARGSUSED */
3184 int
3185 spdsock_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
3186 {
3187 int *i1 = (int *)ptr;
3188
3189 switch (level) {
3190 case SOL_SOCKET:
3191 switch (name) {
3192 case SO_TYPE:
3193 *i1 = SOCK_RAW;
3194 break;
3195 /*
3196 * The following two items can be manipulated,
3197 * but changing them should do nothing.
3198 */
3199 case SO_SNDBUF:
3200 *i1 = (int)q->q_hiwat;
3201 break;
3202 case SO_RCVBUF:
3203 *i1 = (int)(RD(q)->q_hiwat);
3204 break;
3205 }
3206 break;
3207 default:
3208 return (0);
3209 }
3210 return (sizeof (int));
3211 }
3212
3213 /*
3214 * This routine sets socket options.
3215 */
3216 /* ARGSUSED */
3217 int
3218 spdsock_opt_set(queue_t *q, uint_t mgmt_flags, int level, int name,
3219 uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
3220 void *thisdg_attrs, cred_t *cr, mblk_t *mblk)
3221 {
3222 int *i1 = (int *)invalp;
3223 spdsock_t *ss = (spdsock_t *)q->q_ptr;
3224 spd_stack_t *spds = ss->spdsock_spds;
3225
3226 switch (level) {
3227 case SOL_SOCKET:
3228 switch (name) {
3229 case SO_SNDBUF:
3230 if (*i1 > spds->spds_max_buf)
3231 return (ENOBUFS);
3232 q->q_hiwat = *i1;
3233 break;
3234 case SO_RCVBUF:
3235 if (*i1 > spds->spds_max_buf)
3236 return (ENOBUFS);
3237 RD(q)->q_hiwat = *i1;
3238 (void) proto_set_rx_hiwat(RD(q), NULL, *i1);
3239 break;
3240 }
3241 break;
3242 }
3243 return (0);
3244 }
3245
3246
3247 /*
3248 * Handle STREAMS messages.
3249 */ 3250 static void 3251 spdsock_wput_other(queue_t *q, mblk_t *mp) 3252 { 3253 struct iocblk *iocp; 3254 int error; 3255 spdsock_t *ss = (spdsock_t *)q->q_ptr; 3256 spd_stack_t *spds = ss->spdsock_spds; 3257 cred_t *cr; 3258 3259 switch (mp->b_datap->db_type) { 3260 case M_PROTO: 3261 case M_PCPROTO: 3262 if ((mp->b_wptr - mp->b_rptr) < sizeof (long)) { 3263 ss3dbg(spds, ( 3264 "spdsock_wput_other: Not big enough M_PROTO\n")); 3265 freemsg(mp); 3266 return; 3267 } 3268 switch (((union T_primitives *)mp->b_rptr)->type) { 3269 case T_CAPABILITY_REQ: 3270 spdsock_capability_req(q, mp); 3271 break; 3272 case T_INFO_REQ: 3273 spdsock_info_req(q, mp); 3274 break; 3275 case T_SVR4_OPTMGMT_REQ: 3276 case T_OPTMGMT_REQ: 3277 /* 3278 * All Solaris components should pass a db_credp 3279 * for this TPI message, hence we ASSERT. 3280 * But in case there is some other M_PROTO that looks 3281 * like a TPI message sent by some other kernel 3282 * component, we check and return an error. 3283 */ 3284 cr = msg_getcred(mp, NULL); 3285 ASSERT(cr != NULL); 3286 if (cr == NULL) { 3287 spdsock_err_ack(q, mp, TSYSERR, EINVAL); 3288 return; 3289 } 3290 if (((union T_primitives *)mp->b_rptr)->type == 3291 T_SVR4_OPTMGMT_REQ) { 3292 (void) svr4_optcom_req(q, mp, cr, 3293 &spdsock_opt_obj, B_FALSE); 3294 } else { 3295 (void) tpi_optcom_req(q, mp, cr, 3296 &spdsock_opt_obj, B_FALSE); 3297 } 3298 break; 3299 case T_DATA_REQ: 3300 case T_EXDATA_REQ: 3301 case T_ORDREL_REQ: 3302 /* Illegal for spdsock. */ 3303 freemsg(mp); 3304 (void) putnextctl1(RD(q), M_ERROR, EPROTO); 3305 break; 3306 default: 3307 /* Not supported by spdsock. */ 3308 spdsock_err_ack(q, mp, TNOTSUPPORT, 0); 3309 break; 3310 } 3311 return; 3312 case M_IOCTL: 3313 iocp = (struct iocblk *)mp->b_rptr; 3314 error = EINVAL; 3315 3316 switch (iocp->ioc_cmd) { 3317 case ND_SET: 3318 case ND_GET: 3319 if (nd_getset(q, spds->spds_g_nd, mp)) { 3320 qreply(q, mp); 3321 return; 3322 } else 3323 error = ENOENT; 3324 /* FALLTHRU */ 3325 default: 3326 miocnak(q, mp, 0, error); 3327 return; 3328 } 3329 case M_FLUSH: 3330 if (*mp->b_rptr & FLUSHW) { 3331 flushq(q, FLUSHALL); 3332 *mp->b_rptr &= ~FLUSHW; 3333 } 3334 if (*mp->b_rptr & FLUSHR) { 3335 qreply(q, mp); 3336 return; 3337 } 3338 /* Else FALLTHRU */ 3339 } 3340 3341 /* If fell through, just black-hole the message. */ 3342 freemsg(mp); 3343 } 3344 3345 static void 3346 spdsock_wput(queue_t *q, mblk_t *mp) 3347 { 3348 uint8_t *rptr = mp->b_rptr; 3349 mblk_t *mp1; 3350 spdsock_t *ss = (spdsock_t *)q->q_ptr; 3351 spd_stack_t *spds = ss->spdsock_spds; 3352 3353 /* 3354 * If we're dumping, defer processing other messages until the 3355 * dump completes. 3356 */ 3357 if (ss->spdsock_dump_req != NULL) { 3358 if (!putq(q, mp)) 3359 freemsg(mp); 3360 return; 3361 } 3362 3363 switch (mp->b_datap->db_type) { 3364 case M_DATA: 3365 /* 3366 * Silently discard. 3367 */ 3368 ss2dbg(spds, ("raw M_DATA in spdsock.\n")); 3369 freemsg(mp); 3370 return; 3371 case M_PROTO: 3372 case M_PCPROTO: 3373 if ((mp->b_wptr - rptr) >= sizeof (struct T_data_req)) { 3374 if (((union T_primitives *)rptr)->type == T_DATA_REQ) { 3375 if ((mp1 = mp->b_cont) == NULL) { 3376 /* No data after T_DATA_REQ. */ 3377 ss2dbg(spds, 3378 ("No data after DATA_REQ.\n")); 3379 freemsg(mp); 3380 return; 3381 } 3382 freeb(mp); 3383 mp = mp1; 3384 ss2dbg(spds, ("T_DATA_REQ\n")); 3385 break; /* Out of switch. 
*/ 3386 } 3387 } 3388 /* FALLTHRU */ 3389 default: 3390 ss3dbg(spds, ("In default wput case (%d %d).\n", 3391 mp->b_datap->db_type, ((union T_primitives *)rptr)->type)); 3392 spdsock_wput_other(q, mp); 3393 return; 3394 } 3395 3396 /* I now have a PF_POLICY message in an M_DATA block. */ 3397 spdsock_parse(q, mp); 3398 } 3399 3400 /* 3401 * Device open procedure, called when new queue pair created. 3402 * We are passed the read-side queue. 3403 */ 3404 /* ARGSUSED */ 3405 static int 3406 spdsock_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp) 3407 { 3408 spdsock_t *ss; 3409 queue_t *oq = OTHERQ(q); 3410 minor_t ssminor; 3411 netstack_t *ns; 3412 spd_stack_t *spds; 3413 3414 if (secpolicy_ip_config(credp, B_FALSE) != 0) 3415 return (EPERM); 3416 3417 if (q->q_ptr != NULL) 3418 return (0); /* Re-open of an already open instance. */ 3419 3420 if (sflag & MODOPEN) 3421 return (EINVAL); 3422 3423 ns = netstack_find_by_cred(credp); 3424 ASSERT(ns != NULL); 3425 spds = ns->netstack_spdsock; 3426 ASSERT(spds != NULL); 3427 3428 ss2dbg(spds, ("Made it into PF_POLICY socket open.\n")); 3429 3430 ssminor = (minor_t)(uintptr_t)vmem_alloc(spdsock_vmem, 1, VM_NOSLEEP); 3431 if (ssminor == 0) { 3432 netstack_rele(spds->spds_netstack); 3433 return (ENOMEM); 3434 } 3435 ss = kmem_zalloc(sizeof (spdsock_t), KM_NOSLEEP); 3436 if (ss == NULL) { 3437 vmem_free(spdsock_vmem, (void *)(uintptr_t)ssminor, 1); 3438 netstack_rele(spds->spds_netstack); 3439 return (ENOMEM); 3440 } 3441 3442 ss->spdsock_minor = ssminor; 3443 ss->spdsock_state = TS_UNBND; 3444 ss->spdsock_dump_req = NULL; 3445 3446 ss->spdsock_spds = spds; 3447 3448 q->q_ptr = ss; 3449 oq->q_ptr = ss; 3450 3451 q->q_hiwat = spds->spds_recv_hiwat; 3452 3453 oq->q_hiwat = spds->spds_xmit_hiwat; 3454 oq->q_lowat = spds->spds_xmit_lowat; 3455 3456 qprocson(q); 3457 (void) proto_set_rx_hiwat(q, NULL, spds->spds_recv_hiwat); 3458 3459 *devp = makedevice(getmajor(*devp), ss->spdsock_minor); 3460 return (0); 3461 } 3462 3463 /* 3464 * Read-side service procedure, invoked when we get back-enabled 3465 * when buffer space becomes available. 3466 * 3467 * Dump another chunk if we were dumping before; when we finish, kick 3468 * the write-side queue in case it's waiting for read queue space. 3469 */ 3470 void 3471 spdsock_rsrv(queue_t *q) 3472 { 3473 spdsock_t *ss = q->q_ptr; 3474 3475 if (ss->spdsock_dump_req != NULL) 3476 spdsock_dump_some(q, ss); 3477 3478 if (ss->spdsock_dump_req == NULL) 3479 qenable(OTHERQ(q)); 3480 } 3481 3482 /* 3483 * Write-side service procedure, invoked when we defer processing 3484 * if another message is received while a dump is in progress. 3485 */ 3486 void 3487 spdsock_wsrv(queue_t *q) 3488 { 3489 spdsock_t *ss = q->q_ptr; 3490 mblk_t *mp; 3491 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec; 3492 3493 if (ss->spdsock_dump_req != NULL) { 3494 qenable(OTHERQ(q)); 3495 return; 3496 } 3497 3498 while ((mp = getq(q)) != NULL) { 3499 if (ipsec_loaded(ipss)) { 3500 spdsock_wput(q, mp); 3501 if (ss->spdsock_dump_req != NULL) 3502 return; 3503 } else if (!ipsec_failed(ipss)) { 3504 (void) putq(q, mp); 3505 } else { 3506 spdsock_error(q, mp, EPFNOSUPPORT, 0); 3507 } 3508 } 3509 } 3510 3511 static int 3512 spdsock_close(queue_t *q) 3513 { 3514 spdsock_t *ss = q->q_ptr; 3515 spd_stack_t *spds = ss->spdsock_spds; 3516 3517 qprocsoff(q); 3518 3519 /* Safe assumption. 
*/ 3520 ASSERT(ss != NULL); 3521 3522 if (ss->spdsock_timeout != 0) 3523 (void) quntimeout(q, ss->spdsock_timeout); 3524 3525 ss3dbg(spds, ("Driver close, PF_POLICY socket is going away.\n")); 3526 3527 vmem_free(spdsock_vmem, (void *)(uintptr_t)ss->spdsock_minor, 1); 3528 netstack_rele(ss->spdsock_spds->spds_netstack); 3529 3530 kmem_free(ss, sizeof (spdsock_t)); 3531 return (0); 3532 } 3533 3534 /* 3535 * Merge the IPsec algorithms tables with the received algorithm information. 3536 */ 3537 void 3538 spdsock_merge_algs(spd_stack_t *spds) 3539 { 3540 ipsec_alginfo_t *alg, *oalg; 3541 ipsec_algtype_t algtype; 3542 uint_t algidx, algid, nalgs; 3543 crypto_mech_name_t *mechs; 3544 uint_t mech_count, mech_idx; 3545 netstack_t *ns = spds->spds_netstack; 3546 ipsec_stack_t *ipss = ns->netstack_ipsec; 3547 3548 ASSERT(MUTEX_HELD(&spds->spds_alg_lock)); 3549 3550 /* 3551 * Get the list of supported mechanisms from the crypto framework. 3552 * If a mechanism is supported by KCF, resolve its mechanism 3553 * id and mark it as being valid. This operation must be done 3554 * without holding alg_lock, since it can cause a provider 3555 * module to be loaded and the provider notification callback to 3556 * be invoked. 3557 */ 3558 mechs = crypto_get_mech_list(&mech_count, KM_SLEEP); 3559 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 3560 for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) { 3561 int algflags = 0; 3562 crypto_mech_type_t mt = CRYPTO_MECHANISM_INVALID; 3563 3564 alg = spds->spds_algs[algtype][algid]; 3565 if (alg == NULL) 3566 continue; 3567 3568 /* 3569 * The NULL encryption algorithm is a special 3570 * case because there are no mechanisms, yet 3571 * the algorithm is still valid. 3572 */ 3573 if (alg->alg_id == SADB_EALG_NULL) { 3574 alg->alg_mech_type = CRYPTO_MECHANISM_INVALID; 3575 alg->alg_flags = ALG_FLAG_VALID; 3576 continue; 3577 } 3578 3579 for (mech_idx = 0; mech_idx < mech_count; mech_idx++) { 3580 if (strncmp(alg->alg_mech_name, mechs[mech_idx], 3581 CRYPTO_MAX_MECH_NAME) == 0) { 3582 mt = crypto_mech2id(alg->alg_mech_name); 3583 ASSERT(mt != CRYPTO_MECHANISM_INVALID); 3584 algflags = ALG_FLAG_VALID; 3585 break; 3586 } 3587 } 3588 alg->alg_mech_type = mt; 3589 alg->alg_flags = algflags; 3590 } 3591 } 3592 3593 mutex_enter(&ipss->ipsec_alg_lock); 3594 3595 /* 3596 * For each algorithm currently defined, check if it is 3597 * present in the new tables created from the SPD_UPDATEALGS 3598 * message received from user-space. 3599 * Delete the algorithm entries that are currently defined 3600 * but not part of the new tables. 3601 */ 3602 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 3603 nalgs = ipss->ipsec_nalgs[algtype]; 3604 for (algidx = 0; algidx < nalgs; algidx++) { 3605 algid = ipss->ipsec_sortlist[algtype][algidx]; 3606 if (spds->spds_algs[algtype][algid] == NULL) 3607 ipsec_alg_unreg(algtype, algid, ns); 3608 } 3609 } 3610 3611 /* 3612 * For each algorithm we just received, check if it is 3613 * present in the currently defined tables. If it is, swap 3614 * the entry with the one we just allocated. 3615 * If the new algorithm is not in the current tables, 3616 * add it. 3617 */ 3618 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 3619 for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) { 3620 alg = spds->spds_algs[algtype][algid]; 3621 if (alg == NULL) 3622 continue; 3623 3624 if ((oalg = ipss->ipsec_alglists[algtype][algid]) == 3625 NULL) { 3626 /* 3627 * New algorithm, add it to the algorithm 3628 * table. 
3629 */ 3630 ipsec_alg_reg(algtype, alg, ns); 3631 } else { 3632 /* 3633 * Algorithm is already in the table. Swap 3634 * the existing entry with the new one. 3635 */ 3636 ipsec_alg_fix_min_max(alg, algtype, ns); 3637 ipss->ipsec_alglists[algtype][algid] = alg; 3638 ipsec_alg_free(oalg); 3639 } 3640 spds->spds_algs[algtype][algid] = NULL; 3641 } 3642 } 3643 3644 for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) { 3645 ipss->ipsec_algs_exec_mode[algtype] = 3646 spds->spds_algs_exec_mode[algtype]; 3647 } 3648 3649 mutex_exit(&ipss->ipsec_alg_lock); 3650 3651 crypto_free_mech_list(mechs, mech_count); 3652 3653 ipsecah_algs_changed(ns); 3654 ipsecesp_algs_changed(ns); 3655 } 3656