1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/param.h> 29 #include <sys/types.h> 30 #include <sys/stream.h> 31 #include <sys/strsubr.h> 32 #include <sys/strsun.h> 33 #include <sys/stropts.h> 34 #include <sys/zone.h> 35 #include <sys/vnode.h> 36 #include <sys/sysmacros.h> 37 #define _SUN_TPI_VERSION 2 38 #include <sys/tihdr.h> 39 #include <sys/ddi.h> 40 #include <sys/sunddi.h> 41 #include <sys/mkdev.h> 42 #include <sys/debug.h> 43 #include <sys/kmem.h> 44 #include <sys/cmn_err.h> 45 #include <sys/suntpi.h> 46 #include <sys/policy.h> 47 48 #include <sys/socket.h> 49 #include <netinet/in.h> 50 #include <net/pfkeyv2.h> 51 #include <net/pfpolicy.h> 52 53 #include <inet/common.h> 54 #include <netinet/ip6.h> 55 #include <inet/ip.h> 56 #include <inet/ip6.h> 57 #include <inet/mi.h> 58 #include <inet/nd.h> 59 #include <inet/ip_if.h> 60 #include <inet/tun.h> 61 #include <inet/optcom.h> 62 #include <inet/ipsec_info.h> 63 #include <inet/ipsec_impl.h> 64 #include <inet/spdsock.h> 65 #include <inet/sadb.h> 66 67 
#include <sys/isa_defs.h>

#include <c2/audit.h>

/*
 * This is a transport provider for the PF_POLICY IPsec policy
 * management socket, which provides a management interface into the
 * SPD, allowing policy rules to be added, deleted, and queried.
 *
 * This effectively replaces the old private SIOC*IPSECONFIG ioctls
 * with an extensible interface which will hopefully be public some
 * day.
 *
 * See <net/pfpolicy.h> for more details on the protocol.
 *
 * We link against drv/ip and call directly into it to manipulate the
 * SPD; see ipsec_impl.h for the policy data structures and spd.c for
 * the code which maintains them.
 *
 * The MT model of this is QPAIR with the addition of some explicit
 * locking to protect system-wide policy data structures.
 */

static vmem_t *spdsock_vmem;		/* for minor numbers. */

#define	ALIGNED64(x) IS_P2ALIGNED((x), sizeof (uint64_t))

/* Default structure copied into T_INFO_ACK messages (from rts.c...) */
static struct T_info_ack spdsock_g_t_info_ack = {
	T_INFO_ACK,
	T_INFINITE,	/* TSDU_size. Maximum size messages. */
	T_INVALID,	/* ETSDU_size. No expedited data. */
	T_INVALID,	/* CDATA_size. No connect data. */
	T_INVALID,	/* DDATA_size. No disconnect data. */
	0,		/* ADDR_size. */
	0,		/* OPT_size. No user-settable options */
	64 * 1024,	/* TIDU_size. spdsock allows maximum size messages. */
	T_COTS,		/* SERV_type. spdsock supports connection oriented. */
	TS_UNBND,	/* CURRENT_state. This is set from spdsock_state. */
	(XPG4_1)	/* Provider flags */
};

/* Named Dispatch Parameter Management Structure */
typedef struct spdsockparam_s {
	uint_t	spdsock_param_min;	/* smallest value ndd set accepts */
	uint_t	spdsock_param_max;	/* largest value ndd set accepts */
	uint_t	spdsock_param_value;	/* current value */
	char	*spdsock_param_name;	/* ndd variable name */
} spdsockparam_t;

/*
 * Table of NDD variables supported by spdsock. These are loaded into
 * spdsock_g_nd in spdsock_init_nd.
 * All of these are alterable, within the min/max values given, at run time.
 */
static	spdsockparam_t	lcl_param_arr[] = {
	/* min	max	value	name */
	{ 4096, 65536,	8192,	"spdsock_xmit_hiwat"},
	{ 0, 65536,	1024,	"spdsock_xmit_lowat"},
	{ 4096, 65536,	8192,	"spdsock_recv_hiwat"},
	{ 65536, 1024*1024*1024, 256*1024,	"spdsock_max_buf"},
	{ 0, 3, 0, "spdsock_debug"},
};

/* Shorthand for a spd_stack_t's private copy of the table above. */
#define	spds_xmit_hiwat	spds_params[0].spdsock_param_value
#define	spds_xmit_lowat	spds_params[1].spdsock_param_value
#define	spds_recv_hiwat	spds_params[2].spdsock_param_value
#define	spds_max_buf	spds_params[3].spdsock_param_value
#define	spds_debug	spds_params[4].spdsock_param_value

/*
 * Debug output at increasing verbosity, gated on spds_debug.
 * NOTE: != 0 instead of > 0 so lint doesn't complain.
 * (These expand to a bare "if" — not do { } while (0) wrapped — so
 * beware using ssNdbg() as the body of an unbraced if/else.)
 */
#define	ss0dbg(a)	printf a
#define	ss1dbg(spds, a)	if (spds->spds_debug != 0) printf a
#define	ss2dbg(spds, a)	if (spds->spds_debug > 1) printf a
#define	ss3dbg(spds, a)	if (spds->spds_debug > 2) printf a

/*
 * Re-arm a SPD_DUMP walk at the start of polhead iph.  Caller must
 * hold iph's lock as reader (asserted).
 */
#define	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph) { \
	ASSERT(RW_READ_HELD(&(iph)->iph_lock)); \
	(ss)->spdsock_dump_head = (iph); \
	(ss)->spdsock_dump_gen = (iph)->iph_gen; \
	(ss)->spdsock_dump_cur_type = 0; \
	(ss)->spdsock_dump_cur_af = IPSEC_AF_V4; \
	(ss)->spdsock_dump_cur_rule = NULL; \
	(ss)->spdsock_dump_count = 0; \
	(ss)->spdsock_dump_cur_chain = 0; \
}

static int spdsock_close(queue_t *);
static int spdsock_open(queue_t *, dev_t *, int, int, cred_t *);
static void spdsock_wput(queue_t *, mblk_t *);
static void spdsock_wsrv(queue_t *);
static void spdsock_rsrv(queue_t *);
static void *spdsock_stack_init(netstackid_t stackid, netstack_t *ns);
static void spdsock_stack_fini(netstackid_t stackid, void *arg);
static void spdsock_loadcheck(void *);
static void spdsock_merge_algs(spd_stack_t *);
static void spdsock_flush_one(ipsec_policy_head_t *, netstack_t *);
static mblk_t
*spdsock_dump_next_record(spdsock_t *);

/* STREAMS module identity: module id 5138, name "spdsock". */
static struct module_info info = {
	5138, "spdsock", 1, INFPSZ, 512, 128
};

/* Read side: no put procedure; spdsock_rsrv is the service routine. */
static struct qinit rinit = {
	NULL, (pfi_t)spdsock_rsrv, spdsock_open, spdsock_close,
	NULL, &info
};

static struct qinit winit = {
	(pfi_t)spdsock_wput, (pfi_t)spdsock_wsrv, NULL, NULL, NULL, &info
};

struct streamtab spdsockinfo = {
	&rinit, &winit
};

/* mapping from alg type to protocol number, as per RFC 2407 */
static const uint_t algproto[] = {
	PROTO_IPSEC_AH,
	PROTO_IPSEC_ESP,
};

#define	NALGPROTOS	(sizeof (algproto) / sizeof (algproto[0]))

/* mapping from kernel exec mode to spdsock exec mode */
static const uint_t execmodes[] = {
	SPD_ALG_EXEC_MODE_SYNC,
	SPD_ALG_EXEC_MODE_ASYNC
};

#define	NEXECMODES	(sizeof (execmodes) / sizeof (execmodes[0]))

/* Sentinel polhead pointers meaning "all active"/"all inactive" polheads. */
#define	ALL_ACTIVE_POLHEADS ((ipsec_policy_head_t *)-1)
#define	ALL_INACTIVE_POLHEADS ((ipsec_policy_head_t *)-2)

/* Tunnel name for audit records, or NULL for global policy. */
#define	ITP_NAME(itp) (itp != NULL ? itp->itp_name : NULL)

/*
 * Named-dispatch "get" routine: print the current value of one NDD
 * parameter into mp.  The per-stack param lock makes the read atomic
 * with respect to spdsock_param_set().  Always returns 0.
 */
/* ARGSUSED */
static int
spdsock_param_get(q, mp, cp, cr)
	queue_t	*q;
	mblk_t	*mp;
	caddr_t	cp;
	cred_t *cr;
{
	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
	uint_t value;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;

	mutex_enter(&spds->spds_param_lock);
	value = spdsockpa->spdsock_param_value;
	mutex_exit(&spds->spds_param_lock);

	(void) mi_mpprintf(mp, "%u", value);
	return (0);
}

/* This routine sets an NDD variable in a spdsockparam_t structure. */
/* ARGSUSED */
static int
spdsock_param_set(q, mp, value, cp, cr)
	queue_t	*q;
	mblk_t	*mp;
	char	*value;
	caddr_t	cp;
	cred_t *cr;
{
	ulong_t	new_value;
	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;

	/* Convert the value from a string into a long integer. */
	if (ddi_strtoul(value, NULL, 10, &new_value) != 0)
		return (EINVAL);

	mutex_enter(&spds->spds_param_lock);
	/*
	 * Fail the request if the new value does not lie within the
	 * required bounds.
	 */
	if (new_value < spdsockpa->spdsock_param_min ||
	    new_value > spdsockpa->spdsock_param_max) {
		mutex_exit(&spds->spds_param_lock);
		return (EINVAL);
	}

	/* Set the new value */
	spdsockpa->spdsock_param_value = new_value;
	mutex_exit(&spds->spds_param_lock);

	return (0);
}

/*
 * Initialize at module load time
 */
boolean_t
spdsock_ddi_init(void)
{
	spdsock_max_optsize = optcom_max_optsize(
	    spdsock_opt_obj.odb_opt_des_arr, spdsock_opt_obj.odb_opt_arr_cnt);

	/* Minor-number arena; base 1 so minor 0 is never handed out. */
	spdsock_vmem = vmem_create("spdsock", (void *)1, MAXMIN, 1,
	    NULL, NULL, NULL, 1, VM_SLEEP | VMC_IDENTIFIER);

	/*
	 * We want to be informed each time a stack is created or
	 * destroyed in the kernel, so we can maintain the
	 * set of spd_stack_t's.
	 */
	netstack_register(NS_SPDSOCK, spdsock_stack_init, NULL,
	    spdsock_stack_fini);

	return (B_TRUE);
}

/*
 * Walk through the param array specified registering each element with the
 * named dispatch handler.
 */
static boolean_t
spdsock_param_register(IDP *ndp, spdsockparam_t *ssp, int cnt)
{
	for (; cnt-- > 0; ssp++) {
		/* Skip table slots with no (or an empty) variable name. */
		if (ssp->spdsock_param_name != NULL &&
		    ssp->spdsock_param_name[0]) {
			if (!nd_load(ndp,
			    ssp->spdsock_param_name,
			    spdsock_param_get, spdsock_param_set,
			    (caddr_t)ssp)) {
				/* On failure, undo all prior registrations. */
				nd_free(ndp);
				return (B_FALSE);
			}
		}
	}
	return (B_TRUE);
}

/*
 * Initialize for each stack instance
 */
/* ARGSUSED */
static void *
spdsock_stack_init(netstackid_t stackid, netstack_t *ns)
{
	spd_stack_t	*spds;
	spdsockparam_t	*ssp;

	spds = (spd_stack_t *)kmem_zalloc(sizeof (*spds), KM_SLEEP);
	spds->spds_netstack = ns;

	ASSERT(spds->spds_g_nd == NULL);

	/* Give each stack a private, writable copy of the NDD table. */
	ssp = (spdsockparam_t *)kmem_alloc(sizeof (lcl_param_arr), KM_SLEEP);
	spds->spds_params = ssp;
	bcopy(lcl_param_arr, ssp, sizeof (lcl_param_arr));

	(void) spdsock_param_register(&spds->spds_g_nd, ssp,
	    A_CNT(lcl_param_arr));

	mutex_init(&spds->spds_param_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spds->spds_alg_lock, NULL, MUTEX_DEFAULT, NULL);

	return (spds);
}

/* Undo spdsock_ddi_init() at module unload time. */
void
spdsock_ddi_destroy(void)
{
	vmem_destroy(spdsock_vmem);

	netstack_unregister(NS_SPDSOCK);
}

/*
 * Tear down one stack instance: free the cached algorithm message (if
 * any; freemsg(NULL) is a no-op), the NDD machinery, the private
 * parameter table, and the spd_stack_t itself.
 */
/* ARGSUSED */
static void
spdsock_stack_fini(netstackid_t stackid, void *arg)
{
	spd_stack_t *spds = (spd_stack_t *)arg;

	freemsg(spds->spds_mp_algs);
	mutex_destroy(&spds->spds_param_lock);
	mutex_destroy(&spds->spds_alg_lock);
	nd_free(&spds->spds_g_nd);
	kmem_free(spds->spds_params, sizeof (lcl_param_arr));
	spds->spds_params = NULL;

	kmem_free(spds, sizeof (*spds));
}

/*
 * NOTE: large quantities of this should be shared with keysock.
 * Would be nice to combine some of this into a common module, but
 * not possible given time pressures.
 */

/*
 * High-level reality checking of extensions.
 */
/* ARGSUSED */	/* XXX */
static boolean_t
ext_check(spd_ext_t *ext)
{
	spd_if_t *tunname = (spd_if_t *)ext;
	int i;
	char *idstr;

	if (ext->spd_ext_type == SPD_EXT_TUN_NAME) {
		/* (NOTE:  Modified from SADB_EXT_IDENTITY..) */

		/*
		 * Make sure the strings in these identities are
		 * null-terminated.  Let's "proactively" null-terminate the
		 * string at the last byte if it's not terminated sooner.
		 */
		i = SPD_64TO8(tunname->spd_if_len) - sizeof (spd_if_t);
		idstr = (char *)(tunname + 1);
		while (*idstr != '\0' && i > 0) {
			i--;
			idstr++;
		}
		if (i == 0) {
			/*
			 * I.e., if the bozo user didn't NULL-terminate the
			 * string...
			 */
			idstr--;
			*idstr = '\0';
		}
	}
	return (B_TRUE);	/* For now... */
}

/* Return values for spdsock_get_ext(). */
#define	KGE_OK	0
#define	KGE_DUP	1
#define	KGE_UNK	2
#define	KGE_LEN	3
#define	KGE_CHK	4

/*
 * Parse basic extension headers and return in the passed-in pointer vector.
 * Return values include:
 *
 *	KGE_OK	Everything's nice and parsed out.
 *		If there are no extensions, place NULL in extv[0].
 *	KGE_DUP	There is a duplicate extension.
 *		First instance in appropriate bin.  First duplicate in
 *		extv[0].
 *	KGE_UNK	Unknown extension type encountered.  extv[0] contains
 *		unknown header.
 *	KGE_LEN	Extension length error.
 *	KGE_CHK	High-level reality check failed on specific extension.
 *
 * My apologies for some of the pointer arithmetic in here.  I'm thinking
 * like an assembly programmer, yet trying to make the compiler happy.
 */
static int
spdsock_get_ext(spd_ext_t *extv[], spd_msg_t *basehdr, uint_t msgsize)
{
	bzero(extv, sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));

	/* Use extv[0] as the "current working pointer". */

	extv[0] = (spd_ext_t *)(basehdr + 1);

	while (extv[0] < (spd_ext_t *)(((uint8_t *)basehdr) + msgsize)) {
		/* Check for unknown headers. */
		if (extv[0]->spd_ext_type == 0 ||
		    extv[0]->spd_ext_type > SPD_EXT_MAX)
			return (KGE_UNK);

		/*
		 * Check length.  Use uint64_t because extlen is in units
		 * of 64-bit words.  If length goes beyond the msgsize,
		 * return an error.  (Zero length also qualifies here.)
		 */
		if (extv[0]->spd_ext_len == 0 ||
		    (void *)((uint64_t *)extv[0] + extv[0]->spd_ext_len) >
		    (void *)((uint8_t *)basehdr + msgsize))
			return (KGE_LEN);

		/* Check for redundant headers. */
		if (extv[extv[0]->spd_ext_type] != NULL)
			return (KGE_DUP);

		/*
		 * Reality check the extension if possible at the spdsock
		 * level.
		 */
		if (!ext_check(extv[0]))
			return (KGE_CHK);

		/* If I make it here, assign the appropriate bin. */
		extv[extv[0]->spd_ext_type] = extv[0];

		/* Advance pointer (See above for uint64_t ptr reasoning.) */
		extv[0] = (spd_ext_t *)
		    ((uint64_t *)extv[0] + extv[0]->spd_ext_len);
	}

	/* Everything's cool. */

	/*
	 * If extv[0] == NULL, then there are no extension headers in this
	 * message.  Ensure that this is the case.
	 */
	if (extv[0] == (spd_ext_t *)(basehdr + 1))
		extv[0] = NULL;

	return (KGE_OK);
}

/*
 * Diagnostics for malformed/duplicate extensions.
 * NOTE(review): these appear to be indexed by extension type; confirm
 * the indexing convention against the callers (outside this chunk).
 */
static const int bad_ext_diag[] = {
	SPD_DIAGNOSTIC_MALFORMED_LCLPORT,
	SPD_DIAGNOSTIC_MALFORMED_REMPORT,
	SPD_DIAGNOSTIC_MALFORMED_PROTO,
	SPD_DIAGNOSTIC_MALFORMED_LCLADDR,
	SPD_DIAGNOSTIC_MALFORMED_REMADDR,
	SPD_DIAGNOSTIC_MALFORMED_ACTION,
	SPD_DIAGNOSTIC_MALFORMED_RULE,
	SPD_DIAGNOSTIC_MALFORMED_RULESET,
	SPD_DIAGNOSTIC_MALFORMED_ICMP_TYPECODE
};

static const int dup_ext_diag[] = {
	SPD_DIAGNOSTIC_DUPLICATE_LCLPORT,
	SPD_DIAGNOSTIC_DUPLICATE_REMPORT,
	SPD_DIAGNOSTIC_DUPLICATE_PROTO,
	SPD_DIAGNOSTIC_DUPLICATE_LCLADDR,
	SPD_DIAGNOSTIC_DUPLICATE_REMADDR,
	SPD_DIAGNOSTIC_DUPLICATE_ACTION,
	SPD_DIAGNOSTIC_DUPLICATE_RULE,
	SPD_DIAGNOSTIC_DUPLICATE_RULESET,
	SPD_DIAGNOSTIC_DUPLICATE_ICMP_TYPECODE
};

/*
 * Transmit a PF_POLICY error message to the instance either pointed to
 * by ks, the instance with serial number serial, or more, depending.
 *
 * The faulty message (or a reasonable facsimile thereof) is in mp.
 * This function will free mp or recycle it for delivery, thereby causing
 * the stream head to free it.
 */
static void
spdsock_error(queue_t *q, mblk_t *mp, int error, int diagnostic)
{
	spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

	ASSERT(mp->b_datap->db_type == M_DATA);

	/* Clamp out-of-range message types to the reserved type. */
	if (spmsg->spd_msg_type < SPD_MIN ||
	    spmsg->spd_msg_type > SPD_MAX)
		spmsg->spd_msg_type = SPD_RESERVED;

	/*
	 * Strip out extension headers.
	 */
	ASSERT(mp->b_rptr + sizeof (*spmsg) <= mp->b_datap->db_lim);
	mp->b_wptr = mp->b_rptr + sizeof (*spmsg);
	spmsg->spd_msg_len = SPD_8TO64(sizeof (spd_msg_t));
	spmsg->spd_msg_errno = (uint8_t)error;
	spmsg->spd_msg_diagnostic = (uint16_t)diagnostic;

	qreply(q, mp);
}

/* Report an EINVAL with the given PF_POLICY diagnostic code. */
static void
spdsock_diag(queue_t *q, mblk_t *mp, int diagnostic)
{
	spdsock_error(q, mp, EINVAL, diagnostic);
}

/* Loop a successful request back up to the sender unchanged. */
static void
spd_echo(queue_t *q, mblk_t *mp)
{
	qreply(q, mp);
}

/*
 * Do NOT consume a reference to itp.
 */
/*ARGSUSED*/
static void
spdsock_flush_node(ipsec_tun_pol_t *itp, void *cookie, netstack_t *ns)
{
	/* cookie is a boolean_t smuggled through the walker's void *. */
	boolean_t active = (boolean_t)cookie;
	ipsec_policy_head_t *iph;

	iph = active ? itp->itp_policy : itp->itp_inactive;
	IPPH_REFHOLD(iph);
	mutex_enter(&itp->itp_lock);
	spdsock_flush_one(iph, ns);	/* consumes the reference just taken */
	if (active)
		itp->itp_flags &= ~ITPF_PFLAGS;
	else
		itp->itp_flags &= ~ITPF_IFLAGS;
	mutex_exit(&itp->itp_lock);
}

/*
 * Clear out one polhead.  Consumes the caller's polhead reference.
 */
static void
spdsock_flush_one(ipsec_policy_head_t *iph, netstack_t *ns)
{
	rw_enter(&iph->iph_lock, RW_WRITER);
	ipsec_polhead_flush(iph, ns);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ns);
}

/*
 * Handle SPD_FLUSH: clear either one specific polhead, or (for the
 * ALL_*_POLHEADS sentinels) the global policy plus every tunnel's
 * matching polhead.  Echoes the request back on success; emits audit
 * records when auditing is active.
 */
static void
spdsock_flush(queue_t *q, ipsec_policy_head_t *iph, ipsec_tun_pol_t *itp,
    mblk_t *mp)
{
	boolean_t active;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (iph != ALL_ACTIVE_POLHEADS && iph != ALL_INACTIVE_POLHEADS) {
		spdsock_flush_one(iph, ns);
		if (audit_active) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLUSH, DB_CRED(mp), ns,
			    ITP_NAME(itp), active, 0, DB_CPID(mp));
		}
	} else {
		active = (iph == ALL_ACTIVE_POLHEADS);

		/* First flush the global policy. */
		spdsock_flush_one(active ? ipsec_system_policy(ns) :
		    ipsec_inactive_policy(ns), ns);
		if (audit_active) {
			audit_pf_policy(SPD_FLUSH, DB_CRED(mp), ns, NULL,
			    active, 0, DB_CPID(mp));
		}
		/* Then flush every tunnel's appropriate one. */
		itp_walk(spdsock_flush_node, (void *)active, ns);
		if (audit_active)
			audit_pf_policy(SPD_FLUSH, DB_CRED(mp), ns,
			    "all tunnels", active, 0, DB_CPID(mp));
	}

	spd_echo(q, mp);
}

/*
 * Convert the selector extensions in extv into an ipsec_selkey_t.
 * On success the ipsl_valid bits record which fields were supplied.
 * Returns B_FALSE and sets *diag for bad address lengths or a mixed
 * v4/v6 local/remote pair.
 */
static boolean_t
spdsock_ext_to_sel(spd_ext_t **extv, ipsec_selkey_t *sel, int *diag)
{
	bzero(sel, sizeof (*sel));

	if (extv[SPD_EXT_PROTO] != NULL) {
		struct spd_proto *pr =
		    (struct spd_proto *)extv[SPD_EXT_PROTO];
		sel->ipsl_proto = pr->spd_proto_number;
		sel->ipsl_valid |= IPSL_PROTOCOL;
	}
	if (extv[SPD_EXT_LCLPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_LCLPORT];
		sel->ipsl_lport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_LOCAL_PORT;
	}
	if (extv[SPD_EXT_REMPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_REMPORT];
		sel->ipsl_rport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_REMOTE_PORT;
	}

	if (extv[SPD_EXT_ICMP_TYPECODE] != NULL) {
		struct spd_typecode *tc =
		    (struct spd_typecode *)extv[SPD_EXT_ICMP_TYPECODE];

		sel->ipsl_valid |= IPSL_ICMP_TYPE;
		sel->ipsl_icmp_type = tc->spd_typecode_type;
		/* Normalize an inverted (or unset) range end to the start. */
		if (tc->spd_typecode_type_end < tc->spd_typecode_type)
			sel->ipsl_icmp_type_end = tc->spd_typecode_type;
		else
			sel->ipsl_icmp_type_end = tc->spd_typecode_type_end;

		/*
		 * A code of 255 is apparently "no code specified" —
		 * only then is no ICMP code recorded; confirm against
		 * <net/pfpolicy.h>.
		 */
		if (tc->spd_typecode_code != 255) {
			sel->ipsl_valid |= IPSL_ICMP_CODE;
			sel->ipsl_icmp_code = tc->spd_typecode_code;
			if (tc->spd_typecode_code_end < tc->spd_typecode_code)
				sel->ipsl_icmp_code_end = tc->spd_typecode_code;
			else
				sel->ipsl_icmp_code_end =
				    tc->spd_typecode_code_end;
		}
	}

/*
 * Copy one address extension into the selector: validate that the
 * extension is long enough for its address family's address, then fill
 * in the address, prefix length, and valid bits (including the
 * per-family IPSL_IPV4/IPSL_IPV6 bit).  Returns B_FALSE via *diag on a
 * short extension.
 */
#define	ADDR2SEL(sel, extv, field, pfield, extn, bit) \
	if ((extv)[(extn)] != NULL) { \
		uint_t addrlen; \
		struct spd_address *ap = \
			(struct spd_address *)((extv)[(extn)]); \
		addrlen = (ap->spd_address_af == AF_INET6) ? \
			IPV6_ADDR_LEN : IP_ADDR_LEN; \
		if (SPD_64TO8(ap->spd_address_len) < \
			(addrlen + sizeof (*ap))) { \
			*diag = SPD_DIAGNOSTIC_BAD_ADDR_LEN; \
			return (B_FALSE); \
		} \
		bcopy((ap+1), &((sel)->field), addrlen); \
		(sel)->pfield = ap->spd_address_prefixlen; \
		(sel)->ipsl_valid |= (bit); \
		(sel)->ipsl_valid |= (ap->spd_address_af == AF_INET6) ? \
			IPSL_IPV6 : IPSL_IPV4; \
	}

	ADDR2SEL(sel, extv, ipsl_local, ipsl_local_pfxlen,
	    SPD_EXT_LCLADDR, IPSL_LOCAL_ADDR);
	ADDR2SEL(sel, extv, ipsl_remote, ipsl_remote_pfxlen,
	    SPD_EXT_REMADDR, IPSL_REMOTE_ADDR);

	/* Local and remote addresses must agree on address family. */
	if ((sel->ipsl_valid & (IPSL_IPV6|IPSL_IPV4)) ==
	    (IPSL_IPV6|IPSL_IPV4)) {
		*diag = SPD_DIAGNOSTIC_MIXED_AF;
		return (B_FALSE);
	}

#undef ADDR2SEL

	return (B_TRUE);
}

/*
 * Map a PF_POLICY action type onto the kernel ipsec_act_t type.
 * Returns B_FALSE for unknown types.
 */
static boolean_t
spd_convert_type(uint32_t type, ipsec_act_t *act)
{
	switch (type) {
	case SPD_ACTTYPE_DROP:
		act->ipa_type = IPSEC_ACT_DISCARD;
		return (B_TRUE);

	case SPD_ACTTYPE_PASS:
		act->ipa_type = IPSEC_ACT_CLEAR;
		return (B_TRUE);

	case SPD_ACTTYPE_IPSEC:
		act->ipa_type = IPSEC_ACT_APPLY;
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Decompose SPD_APPLY_* flag bits into the individual ipp_use_*
 * booleans.  Always succeeds.
 */
static boolean_t
spd_convert_flags(uint32_t flags, ipsec_act_t *act)
{
	/*
	 * Note use of !! for boolean canonicalization.
	 */
	act->ipa_apply.ipp_use_ah = !!(flags & SPD_APPLY_AH);
	act->ipa_apply.ipp_use_esp = !!(flags & SPD_APPLY_ESP);
	act->ipa_apply.ipp_use_espa = !!(flags & SPD_APPLY_ESPA);
	act->ipa_apply.ipp_use_se = !!(flags & SPD_APPLY_SE);
	act->ipa_apply.ipp_use_unique = !!(flags & SPD_APPLY_UNIQUE);
	return (B_TRUE);
}

/*
 * Reset an action to its defaults: all-zero except the key-size
 * maxima, which start at the largest permitted value (and may be
 * shrunk by later SPD_ATTR_*_MAXBITS attributes).
 */
static void
spdsock_reset_act(ipsec_act_t *act)
{
	bzero(act, sizeof (*act));
	act->ipa_apply.ipp_espe_maxbits = IPSEC_MAX_KEYBITS;
	act->ipa_apply.ipp_espa_maxbits = IPSEC_MAX_KEYBITS;
	act->ipa_apply.ipp_ah_maxbits = IPSEC_MAX_KEYBITS;
}

/*
 * Sanity check action against reality, and shrink-wrap key sizes..
 */
static boolean_t
spdsock_check_action(ipsec_act_t *act, boolean_t tunnel_polhead, int *diag,
    spd_stack_t *spds)
{
	/* Unique SAs make no sense on a tunnel polhead. */
	if (tunnel_polhead && act->ipa_apply.ipp_use_unique) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	/* IPsec-application flags require an "apply" action type... */
	if ((act->ipa_type != IPSEC_ACT_APPLY) &&
	    (act->ipa_apply.ipp_use_ah ||
	    act->ipa_apply.ipp_use_esp ||
	    act->ipa_apply.ipp_use_espa ||
	    act->ipa_apply.ipp_use_se ||
	    act->ipa_apply.ipp_use_unique)) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	/* ... and an "apply" action requires at least one of AH or ESP. */
	if ((act->ipa_type == IPSEC_ACT_APPLY) &&
	    !act->ipa_apply.ipp_use_ah &&
	    !act->ipa_apply.ipp_use_esp) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	/* Defer algorithm/keysize validation to the SPD proper. */
	return (ipsec_check_action(act, diag, spds->spds_netstack));
}

/*
 * We may be short a few error checks here..
 */
static boolean_t
spdsock_ext_to_actvec(spd_ext_t **extv, ipsec_act_t **actpp, uint_t *nactp,
    int *diag, spd_stack_t *spds)
{
	struct spd_ext_actions *sactp =
	    (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
	ipsec_act_t act, *actp, *endactp;
	struct spd_attribute *attrp, *endattrp;
	uint64_t *endp;
	int nact;
	boolean_t tunnel_polhead;

	/*
	 * NOTE(review): extv[SPD_EXT_RULE] is dereferenced here without a
	 * NULL check; callers apparently guarantee a rule extension is
	 * present before calling — confirm.
	 */
	tunnel_polhead = (extv[SPD_EXT_TUN_NAME] != NULL &&
	    (((struct spd_rule *)extv[SPD_EXT_RULE])->spd_rule_flags &
	    SPD_RULE_FLAG_TUNNEL));

	*actpp = NULL;
	*nactp = 0;

	if (sactp == NULL) {
		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
		return (B_FALSE);
	}

	/*
	 * Parse the "action" extension and convert into an action chain.
	 */

	nact = sactp->spd_actions_count;

	endp = (uint64_t *)sactp;
	endp += sactp->spd_actions_len;
	endattrp = (struct spd_attribute *)endp;

	/*
	 * NOTE(review): nact comes from userland and the multiplication
	 * below is not checked for overflow here — presumably bounded by
	 * the message-size validation upstream; confirm.
	 */
	actp = kmem_alloc(sizeof (*actp) * nact, KM_NOSLEEP);
	if (actp == NULL) {
		*diag = SPD_DIAGNOSTIC_ADD_NO_MEM;
		return (B_FALSE);
	}
	*actpp = actp;
	*nactp = nact;
	endactp = actp + nact;

	spdsock_reset_act(&act);
	attrp = (struct spd_attribute *)(&sactp[1]);

	/* Unknown attribute tags fall out of the switch and are ignored. */
	for (; attrp < endattrp; attrp++) {
		switch (attrp->spd_attr_tag) {
		case SPD_ATTR_NOP:
			break;

		case SPD_ATTR_EMPTY:
			spdsock_reset_act(&act);
			break;

		case SPD_ATTR_END:
			attrp = endattrp;
			/* FALLTHRU */
		case SPD_ATTR_NEXT:
			/* Commit the accumulated action into the vector. */
			if (actp >= endactp) {
				*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
				goto fail;
			}
			if (!spdsock_check_action(&act, tunnel_polhead,
			    diag, spds))
				goto fail;
			*actp++ = act;
			spdsock_reset_act(&act);
			break;

		case SPD_ATTR_TYPE:
			if (!spd_convert_type(attrp->spd_attr_value, &act)) {
				*diag = SPD_DIAGNOSTIC_ADD_BAD_TYPE;
				goto fail;
			}
			break;

		case SPD_ATTR_FLAGS:
			if (!tunnel_polhead && extv[SPD_EXT_TUN_NAME] != NULL) {
				/*
				 * Set "sa unique" for transport-mode
				 * tunnels whether we want to or not.
				 */
				attrp->spd_attr_value |= SPD_APPLY_UNIQUE;
			}
			if (!spd_convert_flags(attrp->spd_attr_value, &act)) {
				*diag = SPD_DIAGNOSTIC_ADD_BAD_FLAGS;
				goto fail;
			}
			break;

		case SPD_ATTR_AH_AUTH:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_auth_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESP_ENCR:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_encr_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESP_AUTH:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_esp_auth_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ENCR_MINBITS:
			act.ipa_apply.ipp_espe_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ENCR_MAXBITS:
			act.ipa_apply.ipp_espe_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_AH_MINBITS:
			act.ipa_apply.ipp_ah_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_AH_MAXBITS:
			act.ipa_apply.ipp_ah_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESPA_MINBITS:
			act.ipa_apply.ipp_espa_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESPA_MAXBITS:
			act.ipa_apply.ipp_espa_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_LIFE_SOFT_TIME:
		case SPD_ATTR_LIFE_HARD_TIME:
		case SPD_ATTR_LIFE_SOFT_BYTES:
		case SPD_ATTR_LIFE_HARD_BYTES:
			/* Lifetime attributes are accepted but not used. */
			break;

		case SPD_ATTR_KM_PROTO:
			act.ipa_apply.ipp_km_proto = attrp->spd_attr_value;
			break;

		case SPD_ATTR_KM_COOKIE:
			act.ipa_apply.ipp_km_cookie = attrp->spd_attr_value;
			break;

		case SPD_ATTR_REPLAY_DEPTH:
			act.ipa_apply.ipp_replay_depth = attrp->spd_attr_value;
			break;
		}
	}
	/* The attribute stream must have filled the vector exactly. */
	if (actp !=
	    endactp) {
		*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
		goto fail;
	}

	return (B_TRUE);
fail:
	/* Free the entire (partially filled) vector of nact entries. */
	ipsec_actvec_free(*actpp, nact);
	*actpp = NULL;
	return (B_FALSE);
}

/* A policy rule built but not yet committed, plus its direction. */
typedef struct
{
	ipsec_policy_t *pol;
	int dir;
} tmprule_t;

/*
 * Create one policy rule for a single address family and record it in
 * the caller's tmprule array (advancing *rp).  The rule is validity-
 * checked against iph but not entered yet; entry happens in the caller
 * once every rule has been built.  Returns 0, ENOMEM, or EEXIST.
 */
static int
mkrule(ipsec_policy_head_t *iph, struct spd_rule *rule,
    ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t af,
    tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
{
	ipsec_policy_t *pol;

	/* Force the selector to exactly the requested address family. */
	sel->ipsl_valid &= ~(IPSL_IPV6|IPSL_IPV4);
	sel->ipsl_valid |= af;

	pol = ipsec_policy_create(sel, actp, nact, rule->spd_rule_priority,
	    index, spds->spds_netstack);
	if (pol == NULL)
		return (ENOMEM);

	/*
	 * Record the rule before the validity check so the caller's
	 * failure path can release it along with the others.
	 */
	(*rp)->pol = pol;
	(*rp)->dir = dir;
	(*rp)++;

	if (!ipsec_check_policy(iph, pol, dir))
		return (EEXIST);

	/* Report the assigned index back to the requester's message. */
	rule->spd_rule_index = pol->ipsp_index;
	return (0);
}

/*
 * Create a rule for each address family requested in afs (IPSL_IPV4
 * and/or IPSL_IPV6) in one direction.  Returns the first error hit.
 */
static int
mkrulepair(ipsec_policy_head_t *iph, struct spd_rule *rule,
    ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t afs,
    tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
{
	int error;

	if (afs & IPSL_IPV4) {
		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV4, rp,
		    index, spds);
		if (error != 0)
			return (error);
	}
	if (afs & IPSL_IPV6) {
		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV6, rp,
		    index, spds);
		if (error != 0)
			return (error);
	}
	return (0);
}


/*
 * Handle SPD_ADDRULE: parse selector and action extensions, build up
 * to four rules (in/out x v4/v6) under iph's writer lock, and commit
 * them all only if every one passes its checks.  On success the
 * original message (now carrying the assigned rule index) is echoed
 * back; on failure an error message is sent instead and any flags set
 * on a previously-empty tunnel policy are rolled back.
 */
static void
spdsock_addrule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	ipsec_act_t *actp;
	uint_t nact;
	int diag = 0, error, afs;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	tmprule_t rules[4], *rulep = &rules[0];
	boolean_t tunnel_mode, empty_itp, active;
	uint64_t *index = (itp == NULL) ?
	    NULL : &itp->itp_next_policy_index;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;

	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (audit_active) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_ADDRULE, DB_CRED(mp),
			    spds->spds_netstack, ITP_NAME(itp), active,
			    SPD_DIAGNOSTIC_NO_RULE_EXT, DB_CPID(mp));
		}
		return;
	}

	tunnel_mode = (rule->spd_rule_flags & SPD_RULE_FLAG_TUNNEL);

	if (itp != NULL) {
		mutex_enter(&itp->itp_lock);
		ASSERT(itp->itp_policy == iph || itp->itp_inactive == iph);
		active = (itp->itp_policy == iph);
		if (ITP_P_ISACTIVE(itp, iph)) {
			/* Check for mix-and-match of tunnel/transport. */
			if ((tunnel_mode && !ITP_P_ISTUNNEL(itp, iph)) ||
			    (!tunnel_mode && ITP_P_ISTUNNEL(itp, iph))) {
				mutex_exit(&itp->itp_lock);
				spdsock_error(q, mp, EBUSY, 0);
				return;
			}
			empty_itp = B_FALSE;
		} else {
			/* First rule on this polhead; set its mode flags. */
			empty_itp = B_TRUE;
			itp->itp_flags = active ? ITPF_P_ACTIVE : ITPF_I_ACTIVE;
			if (tunnel_mode)
				itp->itp_flags |= active ? ITPF_P_TUNNEL :
				    ITPF_I_TUNNEL;
		}
	} else {
		empty_itp = B_FALSE;
	}

	/* The kernel assigns rule indices; the caller must pass 0. */
	if (rule->spd_rule_index != 0) {
		diag = SPD_DIAGNOSTIC_INVALID_RULE_INDEX;
		error = EINVAL;
		goto fail2;
	}

	if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
		error = EINVAL;
		goto fail2;
	}

	if (itp != NULL) {
		if (tunnel_mode) {
			if (sel.ipsl_valid &
			    (IPSL_REMOTE_PORT | IPSL_LOCAL_PORT)) {
				itp->itp_flags |= active ?
				    ITPF_P_PER_PORT_SECURITY :
				    ITPF_I_PER_PORT_SECURITY;
			}
		} else {
			/*
			 * For now, we don't allow transport-mode on a tunnel
			 * with ANY specific selectors.  Bail if we have such
			 * a request.
			 */
			if (sel.ipsl_valid & IPSL_WILDCARD) {
				diag = SPD_DIAGNOSTIC_NO_TUNNEL_SELECTORS;
				error = EINVAL;
				goto fail2;
			}
		}
	}

	if (!spdsock_ext_to_actvec(extv, &actp, &nact, &diag, spds)) {
		error = EINVAL;
		goto fail2;
	}
	/*
	 * If no addresses were specified, add both.
	 */
	afs = sel.ipsl_valid & (IPSL_IPV6|IPSL_IPV4);
	if (afs == 0)
		afs = (IPSL_IPV6|IPSL_IPV4);

	rw_enter(&iph->iph_lock, RW_WRITER);

	if (rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_OUTBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	if (rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_INBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	/* All rules built and checked; commit them while locked. */
	while ((--rulep) >= &rules[0]) {
		ipsec_enter_policy(iph, rulep->pol, rulep->dir,
		    spds->spds_netstack);
	}
	rw_exit(&iph->iph_lock);
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);

	ipsec_actvec_free(actp, nact);
	spd_echo(q, mp);
	if (audit_active) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, DB_CRED(mp), spds->spds_netstack,
		    ITP_NAME(itp), active, 0, DB_CPID(mp));
	}
	return;

fail:
	rw_exit(&iph->iph_lock);
	/* Release every rule built before the failure. */
	while ((--rulep) >= &rules[0]) {
		IPPOL_REFRELE(rulep->pol, spds->spds_netstack);
	}
	ipsec_actvec_free(actp, nact);
fail2:
	if (itp != NULL) {
		/* Roll back the flags set above on a previously-empty itp. */
		if (empty_itp)
			itp->itp_flags = 0;
		mutex_exit(&itp->itp_lock);
	}
	spdsock_error(q, mp, error, diag);
	if (audit_active) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, DB_CRED(mp), spds->spds_netstack,
		    ITP_NAME(itp), active, error, DB_CPID(mp));
	}
}

/*
 * Process an SPD_DELETERULE request: remove a rule from policy head "iph",
 * either by its rule index or by the selector(s) carried in the extension
 * vector "extv".  Echoes the request back on success, otherwise replies
 * with an error.  "itp" (if non-NULL) is the tunnel policy this head
 * belongs to; its reference is NOT consumed.
 */
void
spdsock_deleterule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	int err, diag = 0;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	/* A rule extension is mandatory for a delete request. */
	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_DELETERULE, DB_CRED(mp), ns,
			    ITP_NAME(itp), active, SPD_DIAGNOSTIC_NO_RULE_EXT,
			    DB_CPID(mp));
		}
		return;
	}

	/*
	 * Must enter itp_lock first to avoid deadlock.  See tun.c's
	 * set_sec_simple() for the other case of itp_lock and iph_lock.
	 */
	if (itp != NULL)
		mutex_enter(&itp->itp_lock);

	if (rule->spd_rule_index != 0) {
		/* Non-zero index ==> delete that specific rule. */
		if (ipsec_policy_delete_index(iph, rule->spd_rule_index, ns) !=
		    0) {
			err = ESRCH;
			goto fail;
		}
	} else {
		/* Zero index ==> delete by selector, per-direction. */
		if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
			err = EINVAL;	/* diag already set... */
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_INBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_OUTBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}
	}

	if (itp != NULL) {
		ASSERT(iph == itp->itp_policy || iph == itp->itp_inactive);
		rw_enter(&iph->iph_lock, RW_READER);
		/*
		 * If the head is now empty, clear the corresponding
		 * active/inactive flag set on the tunnel policy.
		 */
		if (avl_numnodes(&iph->iph_rulebyid) == 0) {
			if (iph == itp->itp_policy)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
		}
		/* Can exit locks in any order. */
		rw_exit(&iph->iph_lock);
		mutex_exit(&itp->itp_lock);
	}
	spd_echo(q, mp);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, DB_CRED(mp), ns, ITP_NAME(itp),
		    active, 0, DB_CPID(mp));
	}
	return;
fail:
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);
	spdsock_error(q, mp, err, diag);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, DB_CRED(mp), ns, ITP_NAME(itp),
		    active, err, DB_CPID(mp));
	}
}

/* Do NOT consume a reference to itp.
 */
/* ARGSUSED */
static void
spdsock_flip_node(ipsec_tun_pol_t *itp, void *ignoreme, netstack_t *ns)
{
	/* Swap active/inactive flags and policy heads for one tunnel. */
	mutex_enter(&itp->itp_lock);
	ITPF_SWAP(itp->itp_flags);
	ipsec_swap_policy(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
}

/*
 * Process an SPD_FLIP request: swap active and inactive policy.  With no
 * tunnel name (or an empty one) this flips the global policy (and, for the
 * empty-name case, every tunnel's policy as well); with a name it flips
 * just that tunnel's policy.
 */
void
spdsock_flip(queue_t *q, mblk_t *mp, spd_if_t *tunname)
{
	char *tname;
	ipsec_tun_pol_t *itp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (tunname != NULL) {
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* Empty name: global policy plus all tunnels. */
			/* can't fail */
			ipsec_swap_global_policy(ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, DB_CRED(mp),
				    ns, NULL, active, 0, DB_CPID(mp));
			}
			itp_walk(spdsock_flip_node, NULL, ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, DB_CRED(mp), ns,
				    "all tunnels", active, 0, DB_CPID(mp));
			}
		} else {
			itp = get_tunnel_policy(tname, ns);
			if (itp == NULL) {
				/* Better idea for "tunnel not found"? */
				spdsock_error(q, mp, ESRCH, 0);
				if (audit_active) {
					boolean_t active;
					spd_msg_t *spmsg =
					    (spd_msg_t *)mp->b_rptr;

					active = (spmsg->spd_msg_spdid ==
					    SPD_ACTIVE);
					audit_pf_policy(SPD_FLIP, DB_CRED(mp),
					    ns, ITP_NAME(itp), active,
					    ESRCH, DB_CPID(mp));
				}
				return;
			}
			/*
			 * NOTE(review): ns is passed as NULL here, so
			 * spdsock_flip_node() forwards NULL to
			 * ipsec_swap_policy() — presumably tolerated on the
			 * single-tunnel path; confirm against spd.c.
			 */
			spdsock_flip_node(itp, NULL, NULL);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, DB_CRED(mp), ns,
				    ITP_NAME(itp), active, 0, DB_CPID(mp));
			}
			ITP_REFRELE(itp, ns);
		}
	} else {
		/* No name extension at all: global policy only. */
		ipsec_swap_global_policy(ns);	/* can't fail */
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLIP, DB_CRED(mp),
			    ns, NULL, active, 0, DB_CPID(mp));
		}
	}
	spd_echo(q, mp);
}

/*
 * Unimplemented feature
 */
/* ARGSUSED */
static void
spdsock_lookup(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	spdsock_error(q, mp, EINVAL, 0);
}


/*
 * Build an SPD message carrying a ruleset extension (rule count, head
 * generation number, and "error") echoing the header of request "req".
 * Caller must hold iph_lock as reader.  Returns NULL on allocation failure.
 */
static mblk_t *
spdsock_dump_ruleset(mblk_t *req, ipsec_policy_head_t *iph,
    uint32_t count, uint16_t error)
{
	size_t len = sizeof (spd_ruleset_ext_t) + sizeof (spd_msg_t);
	spd_msg_t *msg;
	spd_ruleset_ext_t *ruleset;
	mblk_t *m = allocb(len, BPRI_HI);

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	if (m == NULL) {
		return (NULL);
	}
	msg = (spd_msg_t *)m->b_rptr;
	ruleset = (spd_ruleset_ext_t *)(&msg[1]);

	m->b_wptr = (uint8_t *)&ruleset[1];

	/* Echo the request header, then overwrite length/errno. */
	*msg = *(spd_msg_t *)(req->b_rptr);
	msg->spd_msg_len = SPD_8TO64(len);
	msg->spd_msg_errno = error;

	ruleset->spd_ruleset_len = SPD_8TO64(sizeof (*ruleset));
	ruleset->spd_ruleset_type = SPD_EXT_RULESET;
	ruleset->spd_ruleset_count = count;
	ruleset->spd_ruleset_version = iph->iph_gen;
	return (m);
}

/*
 * Finish a dump in progress: emit the closing ruleset record (with
 * "error"), drop the reference on the dump's policy head, and release
 * the saved request mblk.
 */
static mblk_t *
spdsock_dump_finish(spdsock_t *ss, int error)
{
	mblk_t *m;
	ipsec_policy_head_t *iph = ss->spdsock_dump_head;
	mblk_t *req = ss->spdsock_dump_req;

	rw_enter(&iph->iph_lock, RW_READER);
	m = spdsock_dump_ruleset(req, iph, ss->spdsock_dump_count, error);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ss->spdsock_spds->spds_netstack);
	ss->spdsock_dump_req = NULL;
	freemsg(req);

	return (m);
}

/*
 * Rule encoding functions.
 * We do a two-pass encode.
 * If base != NULL, fill in encoded rule part starting at base+offset.
 * Always return "offset" plus length of to-be-encoded data.
 */
static uint_t
spdsock_encode_typecode(uint8_t *base, uint_t offset, uint8_t type,
    uint8_t type_end, uint8_t code, uint8_t code_end)
{
	struct spd_typecode *tcp;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		tcp = (struct spd_typecode *)(base + offset);
		tcp->spd_typecode_len = SPD_8TO64(sizeof (*tcp));
		tcp->spd_typecode_exttype = SPD_EXT_ICMP_TYPECODE;
		tcp->spd_typecode_code = code;
		tcp->spd_typecode_type = type;
		tcp->spd_typecode_type_end = type_end;
		tcp->spd_typecode_code_end = code_end;
	}
	offset += sizeof (*tcp);

	ASSERT(ALIGNED64(offset));

	return (offset);
}

/* Encode an SPD_EXT_PROTO extension for IP protocol "proto". */
static uint_t
spdsock_encode_proto(uint8_t *base, uint_t offset, uint8_t proto)
{
	struct spd_proto *spp;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spp = (struct spd_proto *)(base + offset);
		spp->spd_proto_len = SPD_8TO64(sizeof (*spp));
		spp->spd_proto_exttype = SPD_EXT_PROTO;
		spp->spd_proto_number = proto;
		spp->spd_proto_reserved1 = 0;
		spp->spd_proto_reserved2 = 0;
	}
	offset += sizeof (*spp);

	ASSERT(ALIGNED64(offset));

	return (offset);
}

/*
 * Encode a single-port range extension ("ext" is SPD_EXT_LCLPORT or
 * SPD_EXT_REMPORT); min and max are both set to "port".
 */
static uint_t
spdsock_encode_port(uint8_t *base, uint_t offset, uint16_t ext, uint16_t port)
{
	struct spd_portrange *spp;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spp = (struct spd_portrange *)(base + offset);
		spp->spd_ports_len = SPD_8TO64(sizeof (*spp));
		spp->spd_ports_exttype = ext;
		spp->spd_ports_minport = port;
		spp->spd_ports_maxport = port;
	}
	offset += sizeof (*spp);

	ASSERT(ALIGNED64(offset));

	return (offset);
}

/*
 * Encode an address extension ("ext" is SPD_EXT_LCLADDR or SPD_EXT_REMADDR);
 * the address family comes from sel->ipsl_valid.  The variable-length
 * address is rounded up to 64-bit alignment, and the extension length is
 * back-patched once the total size is known.
 */
static uint_t
spdsock_encode_addr(uint8_t *base, uint_t offset, uint16_t ext,
    const ipsec_selkey_t *sel, const ipsec_addr_t *addr, uint_t pfxlen)
{
	struct spd_address *sae;
	ipsec_addr_t *spdaddr;
	uint_t start = offset;
	uint_t addrlen;
	uint_t af;

	if (sel->ipsl_valid & IPSL_IPV4) {
		af = AF_INET;
		addrlen = IP_ADDR_LEN;
	} else {
		af = AF_INET6;
		addrlen = IPV6_ADDR_LEN;
	}

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		sae = (struct spd_address *)(base + offset);
		sae->spd_address_exttype = ext;
		sae->spd_address_af = af;
		sae->spd_address_prefixlen = pfxlen;
		sae->spd_address_reserved2 = 0;

		spdaddr = (ipsec_addr_t *)(&sae[1]);
		bcopy(addr, spdaddr, addrlen);
	}
	offset += sizeof (*sae);
	addrlen = roundup(addrlen, sizeof (uint64_t));
	offset += addrlen;

	ASSERT(ALIGNED64(offset));

	/* Back-patch the total (header + padded address) length. */
	if (base != NULL)
		sae->spd_address_len = SPD_8TO64(offset - start);
	return (offset);
}

/*
 * Encode every extension implied by the selector key's valid bits:
 * protocol, ports, addresses, and ICMP type/code.
 */
static uint_t
spdsock_encode_sel(uint8_t *base, uint_t offset, const ipsec_sel_t *sel)
{
	const ipsec_selkey_t *selkey = &sel->ipsl_key;

	if (selkey->ipsl_valid & IPSL_PROTOCOL)
		offset = spdsock_encode_proto(base, offset, selkey->ipsl_proto);
	if
(selkey->ipsl_valid & IPSL_LOCAL_PORT) 1512 offset = spdsock_encode_port(base, offset, SPD_EXT_LCLPORT, 1513 selkey->ipsl_lport); 1514 if (selkey->ipsl_valid & IPSL_REMOTE_PORT) 1515 offset = spdsock_encode_port(base, offset, SPD_EXT_REMPORT, 1516 selkey->ipsl_rport); 1517 if (selkey->ipsl_valid & IPSL_REMOTE_ADDR) 1518 offset = spdsock_encode_addr(base, offset, SPD_EXT_REMADDR, 1519 selkey, &selkey->ipsl_remote, selkey->ipsl_remote_pfxlen); 1520 if (selkey->ipsl_valid & IPSL_LOCAL_ADDR) 1521 offset = spdsock_encode_addr(base, offset, SPD_EXT_LCLADDR, 1522 selkey, &selkey->ipsl_local, selkey->ipsl_local_pfxlen); 1523 if (selkey->ipsl_valid & IPSL_ICMP_TYPE) { 1524 offset = spdsock_encode_typecode(base, offset, 1525 selkey->ipsl_icmp_type, selkey->ipsl_icmp_type_end, 1526 (selkey->ipsl_valid & IPSL_ICMP_CODE) ? 1527 selkey->ipsl_icmp_code : 255, 1528 (selkey->ipsl_valid & IPSL_ICMP_CODE) ? 1529 selkey->ipsl_icmp_code_end : 255); 1530 } 1531 return (offset); 1532 } 1533 1534 static uint_t 1535 spdsock_encode_actattr(uint8_t *base, uint_t offset, uint32_t tag, 1536 uint32_t value) 1537 { 1538 struct spd_attribute *attr; 1539 1540 ASSERT(ALIGNED64(offset)); 1541 1542 if (base != NULL) { 1543 attr = (struct spd_attribute *)(base + offset); 1544 attr->spd_attr_tag = tag; 1545 attr->spd_attr_value = value; 1546 } 1547 offset += sizeof (struct spd_attribute); 1548 1549 ASSERT(ALIGNED64(offset)); 1550 1551 return (offset); 1552 } 1553 1554 1555 #define EMIT(t, v) offset = spdsock_encode_actattr(base, offset, (t), (v)) 1556 1557 static uint_t 1558 spdsock_encode_action(uint8_t *base, uint_t offset, const ipsec_action_t *ap) 1559 { 1560 const struct ipsec_act *act = &(ap->ipa_act); 1561 uint_t flags; 1562 1563 EMIT(SPD_ATTR_EMPTY, 0); 1564 switch (act->ipa_type) { 1565 case IPSEC_ACT_DISCARD: 1566 case IPSEC_ACT_REJECT: 1567 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_DROP); 1568 break; 1569 case IPSEC_ACT_BYPASS: 1570 case IPSEC_ACT_CLEAR: 1571 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_PASS); 
1572 break; 1573 1574 case IPSEC_ACT_APPLY: 1575 EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_IPSEC); 1576 flags = 0; 1577 if (act->ipa_apply.ipp_use_ah) 1578 flags |= SPD_APPLY_AH; 1579 if (act->ipa_apply.ipp_use_esp) 1580 flags |= SPD_APPLY_ESP; 1581 if (act->ipa_apply.ipp_use_espa) 1582 flags |= SPD_APPLY_ESPA; 1583 if (act->ipa_apply.ipp_use_se) 1584 flags |= SPD_APPLY_SE; 1585 if (act->ipa_apply.ipp_use_unique) 1586 flags |= SPD_APPLY_UNIQUE; 1587 EMIT(SPD_ATTR_FLAGS, flags); 1588 if (flags & SPD_APPLY_AH) { 1589 EMIT(SPD_ATTR_AH_AUTH, act->ipa_apply.ipp_auth_alg); 1590 EMIT(SPD_ATTR_AH_MINBITS, 1591 act->ipa_apply.ipp_ah_minbits); 1592 EMIT(SPD_ATTR_AH_MAXBITS, 1593 act->ipa_apply.ipp_ah_maxbits); 1594 } 1595 if (flags & SPD_APPLY_ESP) { 1596 EMIT(SPD_ATTR_ESP_ENCR, act->ipa_apply.ipp_encr_alg); 1597 EMIT(SPD_ATTR_ENCR_MINBITS, 1598 act->ipa_apply.ipp_espe_minbits); 1599 EMIT(SPD_ATTR_ENCR_MAXBITS, 1600 act->ipa_apply.ipp_espe_maxbits); 1601 if (flags & SPD_APPLY_ESPA) { 1602 EMIT(SPD_ATTR_ESP_AUTH, 1603 act->ipa_apply.ipp_esp_auth_alg); 1604 EMIT(SPD_ATTR_ESPA_MINBITS, 1605 act->ipa_apply.ipp_espa_minbits); 1606 EMIT(SPD_ATTR_ESPA_MAXBITS, 1607 act->ipa_apply.ipp_espa_maxbits); 1608 } 1609 } 1610 if (act->ipa_apply.ipp_km_proto != 0) 1611 EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_proto); 1612 if (act->ipa_apply.ipp_km_cookie != 0) 1613 EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_cookie); 1614 if (act->ipa_apply.ipp_replay_depth != 0) 1615 EMIT(SPD_ATTR_REPLAY_DEPTH, 1616 act->ipa_apply.ipp_replay_depth); 1617 /* Add more here */ 1618 break; 1619 } 1620 1621 return (offset); 1622 } 1623 1624 static uint_t 1625 spdsock_encode_action_list(uint8_t *base, uint_t offset, 1626 const ipsec_action_t *ap) 1627 { 1628 struct spd_ext_actions *act; 1629 uint_t nact = 0; 1630 uint_t start = offset; 1631 1632 ASSERT(ALIGNED64(offset)); 1633 1634 if (base != NULL) { 1635 act = (struct spd_ext_actions *)(base + offset); 1636 act->spd_actions_len = 0; 1637 act->spd_actions_exttype = 
SPD_EXT_ACTION;
		act->spd_actions_count = 0;
		act->spd_actions_reserved = 0;
	}

	offset += sizeof (*act);

	ASSERT(ALIGNED64(offset));

	/* Chain alternatives with SPD_ATTR_NEXT, terminate with END. */
	while (ap != NULL) {
		offset = spdsock_encode_action(base, offset, ap);
		ap = ap->ipa_next;
		nact++;
		if (ap != NULL) {
			EMIT(SPD_ATTR_NEXT, 0);
		}
	}
	EMIT(SPD_ATTR_END, 0);

	ASSERT(ALIGNED64(offset));

	/* Back-patch count and total length on the fill-in pass. */
	if (base != NULL) {
		act->spd_actions_count = nact;
		act->spd_actions_len = SPD_8TO64(offset - start);
	}

	return (offset);
}

#undef EMIT

/*
 * Map an internal direction (IPSEC_TYPE_*) to SPD rule flag bits.
 * "af" is currently unused (ARGSUSED).
 */
/* ARGSUSED */
static uint_t
spdsock_rule_flags(uint_t dir, uint_t af)
{
	uint_t flags = 0;

	if (dir == IPSEC_TYPE_INBOUND)
		flags |= SPD_RULE_FLAG_INBOUND;
	if (dir == IPSEC_TYPE_OUTBOUND)
		flags |= SPD_RULE_FLAG_OUTBOUND;

	return (flags);
}


/*
 * Encode one complete rule record: SPD message header, rule extension,
 * optional tunnel-name extension, selectors, and action list.  Two-pass:
 * sizing pass with base == NULL, fill-in pass with a real buffer.
 */
static uint_t
spdsock_encode_rule_head(uint8_t *base, uint_t offset, spd_msg_t *req,
    const ipsec_policy_t *rule, uint_t dir, uint_t af, char *name,
    boolean_t tunnel)
{
	struct spd_msg *spmsg;
	struct spd_rule *spr;
	spd_if_t *sid;

	uint_t start = offset;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spmsg = (struct spd_msg *)(base + offset);
		bzero(spmsg, sizeof (*spmsg));
		spmsg->spd_msg_version = PF_POLICY_V1;
		spmsg->spd_msg_type = SPD_DUMP;
		/* Echo the requester's sequence number and pid. */
		spmsg->spd_msg_seq = req->spd_msg_seq;
		spmsg->spd_msg_pid = req->spd_msg_pid;
	}
	offset += sizeof (struct spd_msg);

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spr = (struct spd_rule *)(base + offset);
		spr->spd_rule_type = SPD_EXT_RULE;
		spr->spd_rule_priority = rule->ipsp_prio;
		spr->spd_rule_flags = spdsock_rule_flags(dir, af);
		if (tunnel)
			spr->spd_rule_flags |= SPD_RULE_FLAG_TUNNEL;
		spr->spd_rule_unused = 0;
		spr->spd_rule_len = SPD_8TO64(sizeof (*spr));
		spr->spd_rule_index = rule->ipsp_index;
	}
	offset += sizeof (struct spd_rule);

	/*
	 * If we have an interface name (i.e. if this policy head came from
	 * a tunnel), add the SPD_EXT_TUN_NAME extension.
	 */
	if (name != NULL) {

		ASSERT(ALIGNED64(offset));

		if (base != NULL) {
			sid = (spd_if_t *)(base + offset);
			sid->spd_if_exttype = SPD_EXT_TUN_NAME;
			/*
			 * spd_if_t already holds the first 4 name bytes;
			 * the remainder is rounded up to 64-bit alignment.
			 */
			sid->spd_if_len = SPD_8TO64(sizeof (spd_if_t) +
			    roundup((strlen(name) - 4), 8));
			(void) strlcpy((char *)sid->spd_if_name, name,
			    LIFNAMSIZ);
		}

		offset += sizeof (spd_if_t) + roundup((strlen(name) - 4), 8);
	}

	offset = spdsock_encode_sel(base, offset, rule->ipsp_sel);
	offset = spdsock_encode_action_list(base, offset, rule->ipsp_act);

	ASSERT(ALIGNED64(offset));

	/* Back-patch the total message length. */
	if (base != NULL) {
		spmsg->spd_msg_len = SPD_8TO64(offset - start);
	}
	return (offset);
}

/*
 * Encode one rule into a freshly-allocated mblk, using the two-pass
 * scheme: size first, then allocate and fill.  Returns NULL on
 * allocation failure.
 */
/* ARGSUSED */
static mblk_t *
spdsock_encode_rule(mblk_t *req, const ipsec_policy_t *rule,
    uint_t dir, uint_t af, char *name, boolean_t tunnel)
{
	mblk_t *m;
	uint_t len;
	spd_msg_t *mreq = (spd_msg_t *)req->b_rptr;

	/*
	 * Figure out how much space we'll need.
	 */
	len = spdsock_encode_rule_head(NULL, 0, mreq, rule, dir, af, name,
	    tunnel);

	/*
	 * Allocate mblk.
	 */
	m = allocb(len, BPRI_HI);
	if (m == NULL)
		return (NULL);

	/*
	 * Fill it in..
1776 */ 1777 m->b_wptr = m->b_rptr + len; 1778 bzero(m->b_rptr, len); 1779 (void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule, dir, af, 1780 name, tunnel); 1781 return (m); 1782 } 1783 1784 static ipsec_policy_t * 1785 spdsock_dump_next_in_chain(spdsock_t *ss, ipsec_policy_head_t *iph, 1786 ipsec_policy_t *cur) 1787 { 1788 ASSERT(RW_READ_HELD(&iph->iph_lock)); 1789 1790 ss->spdsock_dump_count++; 1791 ss->spdsock_dump_cur_rule = cur->ipsp_hash.hash_next; 1792 return (cur); 1793 } 1794 1795 static ipsec_policy_t * 1796 spdsock_dump_next_rule(spdsock_t *ss, ipsec_policy_head_t *iph) 1797 { 1798 ipsec_policy_t *cur; 1799 ipsec_policy_root_t *ipr; 1800 int chain, nchains, type, af; 1801 1802 ASSERT(RW_READ_HELD(&iph->iph_lock)); 1803 1804 cur = ss->spdsock_dump_cur_rule; 1805 1806 if (cur != NULL) 1807 return (spdsock_dump_next_in_chain(ss, iph, cur)); 1808 1809 type = ss->spdsock_dump_cur_type; 1810 1811 next: 1812 chain = ss->spdsock_dump_cur_chain; 1813 ipr = &iph->iph_root[type]; 1814 nchains = ipr->ipr_nchains; 1815 1816 while (chain < nchains) { 1817 cur = ipr->ipr_hash[chain].hash_head; 1818 chain++; 1819 if (cur != NULL) { 1820 ss->spdsock_dump_cur_chain = chain; 1821 return (spdsock_dump_next_in_chain(ss, iph, cur)); 1822 } 1823 } 1824 ss->spdsock_dump_cur_chain = nchains; 1825 1826 af = ss->spdsock_dump_cur_af; 1827 while (af < IPSEC_NAF) { 1828 cur = ipr->ipr_nonhash[af]; 1829 af++; 1830 if (cur != NULL) { 1831 ss->spdsock_dump_cur_af = af; 1832 return (spdsock_dump_next_in_chain(ss, iph, cur)); 1833 } 1834 } 1835 1836 type++; 1837 if (type >= IPSEC_NTYPES) 1838 return (NULL); 1839 1840 ss->spdsock_dump_cur_chain = 0; 1841 ss->spdsock_dump_cur_type = type; 1842 ss->spdsock_dump_cur_af = IPSEC_AF_V4; 1843 goto next; 1844 1845 } 1846 1847 /* 1848 * If we're done with one policy head, but have more to go, we iterate through 1849 * another IPsec tunnel policy head (itp). Return NULL if it is an error 1850 * worthy of returning EAGAIN via PF_POLICY. 
1851 */ 1852 static ipsec_tun_pol_t * 1853 spdsock_dump_iterate_next_tunnel(spdsock_t *ss, ipsec_stack_t *ipss) 1854 { 1855 ipsec_tun_pol_t *itp; 1856 1857 ASSERT(RW_READ_HELD(&ipss->ipsec_tunnel_policy_lock)); 1858 if (ipss->ipsec_tunnel_policy_gen > ss->spdsock_dump_tun_gen) { 1859 /* Oops, state of the tunnel polheads changed. */ 1860 itp = NULL; 1861 } else if (ss->spdsock_itp == NULL) { 1862 /* Just finished global, find first node. */ 1863 itp = avl_first(&ipss->ipsec_tunnel_policies); 1864 } else { 1865 /* We just finished current polhead, find the next one. */ 1866 itp = AVL_NEXT(&ipss->ipsec_tunnel_policies, ss->spdsock_itp); 1867 } 1868 if (itp != NULL) { 1869 ITP_REFHOLD(itp); 1870 } 1871 if (ss->spdsock_itp != NULL) { 1872 ITP_REFRELE(ss->spdsock_itp, ipss->ipsec_netstack); 1873 } 1874 ss->spdsock_itp = itp; 1875 return (itp); 1876 } 1877 1878 static mblk_t * 1879 spdsock_dump_next_record(spdsock_t *ss) 1880 { 1881 ipsec_policy_head_t *iph; 1882 ipsec_policy_t *rule; 1883 mblk_t *m; 1884 ipsec_tun_pol_t *itp; 1885 netstack_t *ns = ss->spdsock_spds->spds_netstack; 1886 ipsec_stack_t *ipss = ns->netstack_ipsec; 1887 1888 iph = ss->spdsock_dump_head; 1889 1890 ASSERT(iph != NULL); 1891 1892 rw_enter(&iph->iph_lock, RW_READER); 1893 1894 if (iph->iph_gen != ss->spdsock_dump_gen) { 1895 rw_exit(&iph->iph_lock); 1896 return (spdsock_dump_finish(ss, EAGAIN)); 1897 } 1898 1899 while ((rule = spdsock_dump_next_rule(ss, iph)) == NULL) { 1900 rw_exit(&iph->iph_lock); 1901 if (--(ss->spdsock_dump_remaining_polheads) == 0) 1902 return (spdsock_dump_finish(ss, 0)); 1903 1904 1905 /* 1906 * If we reach here, we have more policy heads (tunnel 1907 * entries) to dump. Let's reset to a new policy head 1908 * and get some more rules. 1909 * 1910 * An empty policy head will have spdsock_dump_next_rule() 1911 * return NULL, and we loop (while dropping the number of 1912 * remaining polheads). If we loop to 0, we finish. 
We 1913 * keep looping until we hit 0 or until we have a rule to 1914 * encode. 1915 * 1916 * NOTE: No need for ITP_REF*() macros here as we're only 1917 * going after and refholding the policy head itself. 1918 */ 1919 rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER); 1920 itp = spdsock_dump_iterate_next_tunnel(ss, ipss); 1921 if (itp == NULL) { 1922 rw_exit(&ipss->ipsec_tunnel_policy_lock); 1923 return (spdsock_dump_finish(ss, EAGAIN)); 1924 } 1925 1926 /* Reset other spdsock_dump thingies. */ 1927 IPPH_REFRELE(ss->spdsock_dump_head, ns); 1928 if (ss->spdsock_dump_active) { 1929 ss->spdsock_dump_tunnel = 1930 itp->itp_flags & ITPF_P_TUNNEL; 1931 iph = itp->itp_policy; 1932 } else { 1933 ss->spdsock_dump_tunnel = 1934 itp->itp_flags & ITPF_I_TUNNEL; 1935 iph = itp->itp_inactive; 1936 } 1937 IPPH_REFHOLD(iph); 1938 rw_exit(&ipss->ipsec_tunnel_policy_lock); 1939 1940 rw_enter(&iph->iph_lock, RW_READER); 1941 RESET_SPDSOCK_DUMP_POLHEAD(ss, iph); 1942 } 1943 1944 m = spdsock_encode_rule(ss->spdsock_dump_req, rule, 1945 ss->spdsock_dump_cur_type, ss->spdsock_dump_cur_af, 1946 (ss->spdsock_itp == NULL) ? NULL : ss->spdsock_itp->itp_name, 1947 ss->spdsock_dump_tunnel); 1948 rw_exit(&iph->iph_lock); 1949 1950 if (m == NULL) 1951 return (spdsock_dump_finish(ss, ENOMEM)); 1952 return (m); 1953 } 1954 1955 /* 1956 * Dump records until we run into flow-control back-pressure. 
1957 */ 1958 static void 1959 spdsock_dump_some(queue_t *q, spdsock_t *ss) 1960 { 1961 mblk_t *m, *dataind; 1962 1963 while ((ss->spdsock_dump_req != NULL) && canputnext(q)) { 1964 m = spdsock_dump_next_record(ss); 1965 if (m == NULL) 1966 return; 1967 dataind = allocb(sizeof (struct T_data_req), BPRI_HI); 1968 if (dataind == NULL) { 1969 freemsg(m); 1970 return; 1971 } 1972 dataind->b_cont = m; 1973 dataind->b_wptr += sizeof (struct T_data_req); 1974 ((struct T_data_ind *)dataind->b_rptr)->PRIM_type = T_DATA_IND; 1975 ((struct T_data_ind *)dataind->b_rptr)->MORE_flag = 0; 1976 dataind->b_datap->db_type = M_PROTO; 1977 putnext(q, dataind); 1978 } 1979 } 1980 1981 /* 1982 * Start dumping. 1983 * Format a start-of-dump record, and set up the stream and kick the rsrv 1984 * procedure to continue the job.. 1985 */ 1986 /* ARGSUSED */ 1987 static void 1988 spdsock_dump(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp) 1989 { 1990 spdsock_t *ss = (spdsock_t *)q->q_ptr; 1991 netstack_t *ns = ss->spdsock_spds->spds_netstack; 1992 ipsec_stack_t *ipss = ns->netstack_ipsec; 1993 mblk_t *mr; 1994 1995 /* spdsock_open() already set spdsock_itp to NULL. 
 */
	/*
	 * The "all" pseudo-heads cover the global policy plus every
	 * tunnel's head; record how many heads remain to be dumped and
	 * the tunnel-table generation so concurrent changes are detected.
	 */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		ss->spdsock_dump_remaining_polheads = 1 +
		    avl_numnodes(&ipss->ipsec_tunnel_policies);
		ss->spdsock_dump_tun_gen = ipss->ipsec_tunnel_policy_gen;
		rw_exit(&ipss->ipsec_tunnel_policy_lock);
		if (iph == ALL_ACTIVE_POLHEADS) {
			iph = ipsec_system_policy(ns);
			ss->spdsock_dump_active = B_TRUE;
		} else {
			iph = ipsec_inactive_policy(ns);
			ss->spdsock_dump_active = B_FALSE;
		}
		ASSERT(ss->spdsock_itp == NULL);
	} else {
		ss->spdsock_dump_remaining_polheads = 1;
	}

	rw_enter(&iph->iph_lock, RW_READER);

	/* Start-of-dump record (count 0, errno 0). */
	mr = spdsock_dump_ruleset(mp, iph, 0, 0);

	if (!mr) {
		rw_exit(&iph->iph_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	/* Stash the request; the rsrv procedure continues the dump. */
	ss->spdsock_dump_req = mp;
	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);

	rw_exit(&iph->iph_lock);

	qreply(q, mr);
	qenable(OTHERQ(q));
}

/* Do NOT consume a reference to ITP. */
void
spdsock_clone_node(ipsec_tun_pol_t *itp, void *ep, netstack_t *ns)
{
	int *errptr = (int *)ep;

	if (*errptr != 0)
		return;	/* We've failed already for some reason.
*/ 2041 mutex_enter(&itp->itp_lock); 2042 ITPF_CLONE(itp->itp_flags); 2043 *errptr = ipsec_copy_polhead(itp->itp_policy, itp->itp_inactive, ns); 2044 mutex_exit(&itp->itp_lock); 2045 } 2046 2047 void 2048 spdsock_clone(queue_t *q, mblk_t *mp, spd_if_t *tunname) 2049 { 2050 int error; 2051 char *tname; 2052 ipsec_tun_pol_t *itp; 2053 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2054 netstack_t *ns = ss->spdsock_spds->spds_netstack; 2055 2056 if (tunname != NULL) { 2057 tname = (char *)tunname->spd_if_name; 2058 if (*tname == '\0') { 2059 error = ipsec_clone_system_policy(ns); 2060 if (audit_active) { 2061 boolean_t active; 2062 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 2063 2064 active = (spmsg->spd_msg_spdid == SPD_ACTIVE); 2065 audit_pf_policy(SPD_CLONE, DB_CRED(mp), ns, 2066 NULL, active, error, DB_CPID(mp)); 2067 } 2068 if (error == 0) { 2069 itp_walk(spdsock_clone_node, &error, ns); 2070 if (audit_active) { 2071 boolean_t active; 2072 spd_msg_t *spmsg = 2073 (spd_msg_t *)mp->b_rptr; 2074 2075 active = (spmsg->spd_msg_spdid == 2076 SPD_ACTIVE); 2077 audit_pf_policy(SPD_CLONE, DB_CRED(mp), 2078 ns, "all tunnels", active, 0, 2079 DB_CPID(mp)); 2080 } 2081 } 2082 } else { 2083 itp = get_tunnel_policy(tname, ns); 2084 if (itp == NULL) { 2085 spdsock_error(q, mp, ENOENT, 0); 2086 if (audit_active) { 2087 boolean_t active; 2088 spd_msg_t *spmsg = 2089 (spd_msg_t *)mp->b_rptr; 2090 2091 active = (spmsg->spd_msg_spdid == 2092 SPD_ACTIVE); 2093 audit_pf_policy(SPD_CLONE, DB_CRED(mp), 2094 ns, ITP_NAME(itp), active, ENOENT, 2095 DB_CPID(mp)); 2096 } 2097 return; 2098 } 2099 spdsock_clone_node(itp, &error, NULL); 2100 ITP_REFRELE(itp, ns); 2101 if (audit_active) { 2102 boolean_t active; 2103 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 2104 2105 active = (spmsg->spd_msg_spdid == SPD_ACTIVE); 2106 audit_pf_policy(SPD_CLONE, DB_CRED(mp), ns, 2107 ITP_NAME(itp), active, error, DB_CPID(mp)); 2108 } 2109 } 2110 } else { 2111 error = ipsec_clone_system_policy(ns); 2112 if 
(audit_active) { 2113 boolean_t active; 2114 spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr; 2115 2116 active = (spmsg->spd_msg_spdid == SPD_ACTIVE); 2117 audit_pf_policy(SPD_CLONE, DB_CRED(mp), ns, NULL, 2118 active, error, DB_CPID(mp)); 2119 } 2120 } 2121 2122 if (error != 0) 2123 spdsock_error(q, mp, error, 0); 2124 else 2125 spd_echo(q, mp); 2126 } 2127 2128 /* 2129 * Process a SPD_ALGLIST request. The caller expects separate alg entries 2130 * for AH authentication, ESP authentication, and ESP encryption. 2131 * The same distinction is then used when setting the min and max key 2132 * sizes when defining policies. 2133 */ 2134 2135 #define SPDSOCK_AH_AUTH 0 2136 #define SPDSOCK_ESP_AUTH 1 2137 #define SPDSOCK_ESP_ENCR 2 2138 #define SPDSOCK_NTYPES 3 2139 2140 static const uint_t algattr[SPDSOCK_NTYPES] = { 2141 SPD_ATTR_AH_AUTH, 2142 SPD_ATTR_ESP_AUTH, 2143 SPD_ATTR_ESP_ENCR 2144 }; 2145 static const uint_t minbitsattr[SPDSOCK_NTYPES] = { 2146 SPD_ATTR_AH_MINBITS, 2147 SPD_ATTR_ESPA_MINBITS, 2148 SPD_ATTR_ENCR_MINBITS 2149 }; 2150 static const uint_t maxbitsattr[SPDSOCK_NTYPES] = { 2151 SPD_ATTR_AH_MAXBITS, 2152 SPD_ATTR_ESPA_MAXBITS, 2153 SPD_ATTR_ENCR_MAXBITS 2154 }; 2155 static const uint_t defbitsattr[SPDSOCK_NTYPES] = { 2156 SPD_ATTR_AH_DEFBITS, 2157 SPD_ATTR_ESPA_DEFBITS, 2158 SPD_ATTR_ENCR_DEFBITS 2159 }; 2160 static const uint_t incrbitsattr[SPDSOCK_NTYPES] = { 2161 SPD_ATTR_AH_INCRBITS, 2162 SPD_ATTR_ESPA_INCRBITS, 2163 SPD_ATTR_ENCR_INCRBITS 2164 }; 2165 2166 #define ATTRPERALG 6 /* fixed attributes per algs */ 2167 2168 void 2169 spdsock_alglist(queue_t *q, mblk_t *mp) 2170 { 2171 uint_t algtype; 2172 uint_t algidx; 2173 uint_t algcount; 2174 uint_t size; 2175 mblk_t *m; 2176 uint8_t *cur; 2177 spd_msg_t *msg; 2178 struct spd_ext_actions *act; 2179 struct spd_attribute *attr; 2180 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2181 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec; 2182 2183 mutex_enter(&ipss->ipsec_alg_lock); 2184 /* 2185 * 
The SPD client expects to receive separate entries for
	 * AH authentication and ESP authentication supported algorithms.
	 *
	 * Don't return the "any" algorithms, if defined, as no
	 * kernel policies can be set for these algorithms.
	 */
	algcount = 2 * ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];

	if (ipss->ipsec_alglists[IPSEC_ALG_AUTH][SADB_AALG_NONE] != NULL)
		algcount--;
	if (ipss->ipsec_alglists[IPSEC_ALG_ENCR][SADB_EALG_NONE] != NULL)
		algcount--;

	/*
	 * For each algorithm, we encode:
	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
	 */

	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions) +
	    ATTRPERALG * sizeof (struct spd_attribute) * algcount;

	ASSERT(ALIGNED64(size));

	m = allocb(size, BPRI_HI);
	if (m == NULL) {
		mutex_exit(&ipss->ipsec_alg_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	m->b_wptr = m->b_rptr + size;
	cur = m->b_rptr;

	/* Echo the request header, then set length/errno/diagnostic. */
	msg = (spd_msg_t *)cur;
	bcopy(mp->b_rptr, cur, sizeof (*msg));

	msg->spd_msg_len = SPD_8TO64(size);
	msg->spd_msg_errno = 0;
	msg->spd_msg_diagnostic = 0;

	cur += sizeof (*msg);

	act = (struct spd_ext_actions *)cur;
	cur += sizeof (*act);

	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
	act->spd_actions_exttype = SPD_EXT_ACTION;
	act->spd_actions_count = algcount;
	act->spd_actions_reserved = 0;

	attr = (struct spd_attribute *)cur;

#define	EMIT(tag, value) { \
	attr->spd_attr_tag = (tag); \
	attr->spd_attr_value = (value); \
	attr++; \
}

	/*
	 * If you change the number of EMIT's here, change
	 * ATTRPERALG above to match
	 */
#define	EMITALGATTRS(_type) { \
	EMIT(algattr[_type], algid);		/* 1 */ \
	EMIT(minbitsattr[_type], minbits);	/* 2 */ \
	EMIT(maxbitsattr[_type], maxbits);	/* 3 */ \
	EMIT(defbitsattr[_type], defbits);	/* 4 */ \
	EMIT(incrbitsattr[_type], incr);	/* 5 */ \
	EMIT(SPD_ATTR_NEXT, 0);			/* 6 */ \
}

	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {
			int algid = ipss->ipsec_sortlist[algtype][algidx];
			ipsec_alginfo_t *alg =
			    ipss->ipsec_alglists[algtype][algid];
			uint_t minbits = alg->alg_minbits;
			uint_t maxbits = alg->alg_maxbits;
			uint_t defbits = alg->alg_default_bits;
			uint_t incr = alg->alg_increment;

			/* Skip the "any" placeholders (see above). */
			if (algtype == IPSEC_ALG_AUTH) {
				if (algid == SADB_AALG_NONE)
					continue;
				EMITALGATTRS(SPDSOCK_AH_AUTH);
				EMITALGATTRS(SPDSOCK_ESP_AUTH);
			} else {
				if (algid == SADB_EALG_NONE)
					continue;
				ASSERT(algtype == IPSEC_ALG_ENCR);
				EMITALGATTRS(SPDSOCK_ESP_ENCR);
			}
		}
	}

	mutex_exit(&ipss->ipsec_alg_lock);

#undef EMITALGATTRS
#undef EMIT
#undef ATTRPERALG

	/* Rewrite the final NEXT attribute as the END marker. */
	attr--;
	attr->spd_attr_tag = SPD_ATTR_END;

	freemsg(mp);
	qreply(q, m);
}

/*
 * Process a SPD_DUMPALGS request.
 */

#define	ATTRPERALG	7	/* fixed attributes per algs */

/*
 * Process an SPD_DUMPALGS request: reply with one message describing
 * every currently registered IPsec algorithm (id, protocol, key/block
 * sizes, increment and mechanism name) as a stream of spd_attribute
 * entries inside a single SPD_EXT_ACTION extension.
 */
void
spdsock_dumpalgs(queue_t *q, mblk_t *mp)
{
	uint_t algtype;
	uint_t algidx;
	uint_t size;
	mblk_t *m;
	uint8_t *cur;
	spd_msg_t *msg;
	struct spd_ext_actions *act;
	struct spd_attribute *attr;
	ipsec_alginfo_t *alg;
	uint_t algid;
	uint_t i;
	uint_t alg_size;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	/* Hold the alg lock so the tables cannot change under us. */
	mutex_enter(&ipss->ipsec_alg_lock);

	/*
	 * For each algorithm, we encode:
	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
	 *
	 * ALG_ID / ALG_PROTO / ALG_INCRBITS / ALG_NKEYSIZES / ALG_KEYSIZE*
	 * ALG_NBLOCKSIZES / ALG_BLOCKSIZE* / ALG_MECHNAME / {END, NEXT}
	 */

	/*
	 * Compute the size of the SPD message.
	 */
	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions);

	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {
			algid = ipss->ipsec_sortlist[algtype][algidx];
			alg = ipss->ipsec_alglists[algtype][algid];
			/*
			 * ATTRPERALG fixed attributes, plus one attribute
			 * per key size and per block size, plus
			 * CRYPTO_MAX_MECH_NAME bytes of mechanism-name
			 * payload.
			 */
			alg_size = sizeof (struct spd_attribute) *
			    (ATTRPERALG + alg->alg_nkey_sizes +
			    alg->alg_nblock_sizes) + CRYPTO_MAX_MECH_NAME;
			size += alg_size;
		}
	}

	ASSERT(ALIGNED64(size));

	m = allocb(size, BPRI_HI);
	if (m == NULL) {
		mutex_exit(&ipss->ipsec_alg_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	m->b_wptr = m->b_rptr + size;
	cur = m->b_rptr;

	/* Echo the request header, then overwrite length/error fields. */
	msg = (spd_msg_t *)cur;
	bcopy(mp->b_rptr, cur, sizeof (*msg));

	msg->spd_msg_len = SPD_8TO64(size);
	msg->spd_msg_errno = 0;
	msg->spd_msg_diagnostic = 0;

	cur += sizeof (*msg);

	act = (struct spd_ext_actions *)cur;
	cur += sizeof (*act);

	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
	act->spd_actions_exttype = SPD_EXT_ACTION;
	act->spd_actions_count = ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];
	act->spd_actions_reserved = 0;

	attr = (struct spd_attribute *)cur;

	/* Append one tag/value attribute and advance the cursor. */
#define	EMIT(tag, value) { \
	attr->spd_attr_tag = (tag); \
	attr->spd_attr_value = (value); \
	attr++; \
}

	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {

			algid = ipss->ipsec_sortlist[algtype][algidx];
			alg = ipss->ipsec_alglists[algtype][algid];

			/*
			 * If you change the number of EMIT's here, change
			 * ATTRPERALG above to match
			 */
			EMIT(SPD_ATTR_ALG_ID, algid);
			EMIT(SPD_ATTR_ALG_PROTO, algproto[algtype]);
			EMIT(SPD_ATTR_ALG_INCRBITS, alg->alg_increment);

			EMIT(SPD_ATTR_ALG_NKEYSIZES, alg->alg_nkey_sizes);
			for (i = 0; i < alg->alg_nkey_sizes; i++)
				EMIT(SPD_ATTR_ALG_KEYSIZE,
				    alg->alg_key_sizes[i]);

			EMIT(SPD_ATTR_ALG_NBLOCKSIZES, alg->alg_nblock_sizes);
			for (i = 0; i < alg->alg_nblock_sizes; i++)
				EMIT(SPD_ATTR_ALG_BLOCKSIZE,
				    alg->alg_block_sizes[i]);

			/*
			 * The mechanism name travels as the attribute's
			 * payload: the value is the payload length, the
			 * name bytes follow, and attr is advanced past
			 * them by hand.
			 */
			EMIT(SPD_ATTR_ALG_MECHNAME, CRYPTO_MAX_MECH_NAME);
			bcopy(alg->alg_mech_name, attr, CRYPTO_MAX_MECH_NAME);
			attr = (struct spd_attribute *)((char *)attr +
			    CRYPTO_MAX_MECH_NAME);

			EMIT(SPD_ATTR_NEXT, 0);
		}
	}

	mutex_exit(&ipss->ipsec_alg_lock);

#undef EMITALGATTRS
#undef EMIT
#undef ATTRPERALG

	/* Rewrite the final NEXT marker as END. */
	attr--;
	attr->spd_attr_tag = SPD_ATTR_END;

	freemsg(mp);
	qreply(q, m);
}

/*
 * Do the actual work of processing an SPD_UPDATEALGS request. Can
 * be invoked either once IPsec is loaded on a cached request, or
 * when a request is received while IPsec is loaded.
 */
/*
 * Parse an SPD_EXT_ACTION extension carrying algorithm descriptions and
 * rebuild spds->spds_algs, the staging table of ipsec_alginfo_t that
 * spdsock_merge_algs() folds into the live IPsec algorithm tables.
 * On error *diag is set to an SPD_DIAGNOSTIC_* code; on success it is
 * left at -1.  Caller must hold spds->spds_alg_lock.
 */
static void
spdsock_do_updatealg(spd_ext_t *extv[], int *diag, spd_stack_t *spds)
{
	struct spd_ext_actions *actp;
	struct spd_attribute *attr, *endattr;
	uint64_t *start, *end;
	ipsec_alginfo_t *alg = NULL;
	ipsec_algtype_t alg_type = 0;
	boolean_t skip_alg = B_TRUE, doing_proto = B_FALSE;
	uint_t i, cur_key, cur_block, algid;

	*diag = -1;
	ASSERT(MUTEX_HELD(&spds->spds_alg_lock));

	/* parse the message, building the list of algorithms */

	actp = (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
	if (actp == NULL) {
		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
		return;
	}

	/* spd_actions_len is in 64-bit words; endattr bounds the walk. */
	start = (uint64_t *)actp;
	end = (start + actp->spd_actions_len);
	endattr = (struct spd_attribute *)end;
	attr = (struct spd_attribute *)&actp[1];

	bzero(spds->spds_algs, IPSEC_NALGTYPES * IPSEC_MAX_ALGS *
	    sizeof (ipsec_alginfo_t *));

	alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);

	/* Allocation sizes include room for a trailing zero entry. */
#define	ALG_KEY_SIZES(a)   (((a)->alg_nkey_sizes + 1) * sizeof (uint16_t))
#define	ALG_BLOCK_SIZES(a) (((a)->alg_nblock_sizes + 1) * sizeof (uint16_t))

	while (attr < endattr) {
		switch (attr->spd_attr_tag) {
		case SPD_ATTR_NOP:
		case SPD_ATTR_EMPTY:
			break;
		case SPD_ATTR_END:
			/* Terminate the walk, then commit as for NEXT. */
			attr = endattr;
			/* FALLTHRU */
		case SPD_ATTR_NEXT:
			/*
			 * NEXT closes either a per-protocol description
			 * (doing_proto) or a per-algorithm one.  For the
			 * latter, commit the accumulated alg into the
			 * staging table unless its protocol was not
			 * recognized (skip_alg), replacing any previous
			 * entry for the same id.
			 */
			if (doing_proto) {
				doing_proto = B_FALSE;
				break;
			}
			if (skip_alg) {
				ipsec_alg_free(alg);
			} else {
				ipsec_alg_free(
				    spds->spds_algs[alg_type][alg->alg_id]);
				spds->spds_algs[alg_type][alg->alg_id] =
				    alg;
			}
			alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
			break;

		case SPD_ATTR_ALG_ID:
			if (attr->spd_attr_value >= IPSEC_MAX_ALGS) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "invalid alg id %d\n",
				    attr->spd_attr_value));
				*diag = SPD_DIAGNOSTIC_ALG_ID_RANGE;
				goto bail;
			}
			alg->alg_id = attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_PROTO:
			/* find the alg type */
			for (i = 0; i < NALGPROTOS; i++)
				if (algproto[i] == attr->spd_attr_value)
					break;
			skip_alg = (i == NALGPROTOS);
			if (!skip_alg)
				alg_type = i;
			break;

		case SPD_ATTR_ALG_INCRBITS:
			alg->alg_increment = attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_NKEYSIZES:
			if (alg->alg_key_sizes != NULL) {
				kmem_free(alg->alg_key_sizes,
				    ALG_KEY_SIZES(alg));
			}
			alg->alg_nkey_sizes = attr->spd_attr_value;
			/*
			 * Allocate room for the trailing zero key size
			 * value as well.
			 */
			alg->alg_key_sizes = kmem_zalloc(ALG_KEY_SIZES(alg),
			    KM_SLEEP);
			cur_key = 0;
			break;

		case SPD_ATTR_ALG_KEYSIZE:
			/*
			 * cur_key is only read once alg_key_sizes is
			 * non-NULL, i.e. after a NKEYSIZES attribute has
			 * initialized it, so it cannot be used
			 * uninitialized here.
			 */
			if (alg->alg_key_sizes == NULL ||
			    cur_key >= alg->alg_nkey_sizes) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "too many key sizes\n"));
				*diag = SPD_DIAGNOSTIC_ALG_NUM_KEY_SIZES;
				goto bail;
			}
			alg->alg_key_sizes[cur_key++] = attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_NBLOCKSIZES:
			if (alg->alg_block_sizes != NULL) {
				kmem_free(alg->alg_block_sizes,
				    ALG_BLOCK_SIZES(alg));
			}
			alg->alg_nblock_sizes = attr->spd_attr_value;
			/*
			 * Allocate room for the trailing zero block size
			 * value as well.
			 */
			alg->alg_block_sizes = kmem_zalloc(ALG_BLOCK_SIZES(alg),
			    KM_SLEEP);
			cur_block = 0;
			break;

		case SPD_ATTR_ALG_BLOCKSIZE:
			/* Same initialization-order argument as KEYSIZE. */
			if (alg->alg_block_sizes == NULL ||
			    cur_block >= alg->alg_nblock_sizes) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "too many block sizes\n"));
				*diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
				goto bail;
			}
			alg->alg_block_sizes[cur_block++] =
			    attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_MECHNAME: {
			char *mech_name;

			if (attr->spd_attr_value > CRYPTO_MAX_MECH_NAME) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "mech name too long\n"));
				*diag = SPD_DIAGNOSTIC_ALG_MECH_NAME_LEN;
				goto bail;
			}
			/*
			 * The name bytes follow the attribute header;
			 * spd_attr_value is the payload length and attr
			 * is advanced past the payload.  NOTE(review):
			 * this assumes spd_attr_value is a multiple of
			 * sizeof (struct spd_attribute) -- the in-kernel
			 * producer emits CRYPTO_MAX_MECH_NAME; confirm
			 * for user-space senders.
			 */
			mech_name = (char *)(attr + 1);
			bcopy(mech_name, alg->alg_mech_name,
			    attr->spd_attr_value);
			alg->alg_mech_name[CRYPTO_MAX_MECH_NAME-1] = '\0';
			attr = (struct spd_attribute *)((char *)attr +
			    attr->spd_attr_value);
			break;
		}

		case SPD_ATTR_PROTO_ID:
			doing_proto = B_TRUE;
			for (i = 0; i < NALGPROTOS; i++) {
				if (algproto[i] == attr->spd_attr_value) {
					alg_type = i;
					break;
				}
			}
			break;

		case SPD_ATTR_PROTO_EXEC_MODE:
			if (!doing_proto)
				break;
			for (i = 0; i < NEXECMODES; i++) {
				if (execmodes[i] == attr->spd_attr_value) {
					spds->spds_algs_exec_mode[alg_type] = i;
					break;
				}
			}
			break;
		}
		attr++;
	}

#undef ALG_KEY_SIZES
#undef ALG_BLOCK_SIZES

	/* update the algorithm tables */
	spdsock_merge_algs(spds);
bail:
	/*
	 * Cleanup: free the in-progress alg and whatever remains in the
	 * staging table (spdsock_merge_algs() leaves its entries behind;
	 * the error paths jump here with entries still present).
	 */
	ipsec_alg_free(alg);
	for (alg_type = 0; alg_type < IPSEC_NALGTYPES; alg_type++)
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++)
			if (spds->spds_algs[alg_type][algid] != NULL)
				ipsec_alg_free(spds->spds_algs[alg_type][algid]);
}

/*
 * Process an SPD_UPDATEALGS request.
If IPsec is not loaded, queue 2631 * the request until IPsec loads. If IPsec is loaded, act on it 2632 * immediately. 2633 */ 2634 2635 static void 2636 spdsock_updatealg(queue_t *q, mblk_t *mp, spd_ext_t *extv[]) 2637 { 2638 spdsock_t *ss = (spdsock_t *)q->q_ptr; 2639 spd_stack_t *spds = ss->spdsock_spds; 2640 ipsec_stack_t *ipss = spds->spds_netstack->netstack_ipsec; 2641 2642 if (!ipsec_loaded(ipss)) { 2643 /* 2644 * IPsec is not loaded, save request and return nicely, 2645 * the message will be processed once IPsec loads. 2646 */ 2647 mblk_t *new_mp; 2648 2649 /* last update message wins */ 2650 if ((new_mp = copymsg(mp)) == NULL) { 2651 spdsock_error(q, mp, ENOMEM, 0); 2652 return; 2653 } 2654 mutex_enter(&spds->spds_alg_lock); 2655 bcopy(extv, spds->spds_extv_algs, 2656 sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1)); 2657 if (spds->spds_mp_algs != NULL) 2658 freemsg(spds->spds_mp_algs); 2659 spds->spds_mp_algs = mp; 2660 spds->spds_algs_pending = B_TRUE; 2661 mutex_exit(&spds->spds_alg_lock); 2662 if (audit_active) 2663 audit_pf_policy(SPD_UPDATEALGS, DB_CRED(mp), 2664 spds->spds_netstack, NULL, B_TRUE, EAGAIN, 2665 DB_CPID(mp)); 2666 spd_echo(q, new_mp); 2667 } else { 2668 /* 2669 * IPsec is loaded, act on the message immediately. 2670 */ 2671 int diag; 2672 2673 mutex_enter(&spds->spds_alg_lock); 2674 spdsock_do_updatealg(extv, &diag, spds); 2675 mutex_exit(&spds->spds_alg_lock); 2676 if (diag == -1) { 2677 spd_echo(q, mp); 2678 if (audit_active) 2679 audit_pf_policy(SPD_UPDATEALGS, DB_CRED(mp), 2680 spds->spds_netstack, NULL, B_TRUE, 0, 2681 DB_CPID(mp)); 2682 } else { 2683 spdsock_diag(q, mp, diag); 2684 if (audit_active) 2685 audit_pf_policy(SPD_UPDATEALGS, DB_CRED(mp), 2686 spds->spds_netstack, NULL, B_TRUE, diag, 2687 DB_CPID(mp)); 2688 } 2689 } 2690 } 2691 2692 /* 2693 * With a reference-held ill, dig down and find an instance of "tun", and 2694 * assign its tunnel policy pointer, while reference-holding it. 
Also, 2695 * release ill's refrence when finished. 2696 * 2697 * We'll be messing with q_next, so be VERY careful. 2698 */ 2699 static void 2700 find_tun_and_set_itp(ill_t *ill, ipsec_tun_pol_t *itp) 2701 { 2702 queue_t *q; 2703 tun_t *tun; 2704 2705 /* Don't bother if this ill is going away. */ 2706 if (ill->ill_flags & ILL_CONDEMNED) { 2707 ill_refrele(ill); 2708 return; 2709 } 2710 2711 2712 q = ill->ill_wq; 2713 claimstr(q); /* Lighter-weight than freezestr(). */ 2714 2715 do { 2716 /* Use strcmp() because "tun" is bounded. */ 2717 if (strcmp(q->q_qinfo->qi_minfo->mi_idname, "tun") == 0) { 2718 /* Aha! Got it. */ 2719 tun = (tun_t *)q->q_ptr; 2720 if (tun != NULL) { 2721 mutex_enter(&tun->tun_lock); 2722 if (tun->tun_itp != itp) { 2723 ASSERT(tun->tun_itp == NULL); 2724 ITP_REFHOLD(itp); 2725 tun->tun_itp = itp; 2726 } 2727 mutex_exit(&tun->tun_lock); 2728 goto release_and_return; 2729 } 2730 /* 2731 * Else assume this is some other module named "tun" 2732 * and move on, hoping we find one that actually has 2733 * something in q_ptr. 2734 */ 2735 } 2736 q = q->q_next; 2737 } while (q != NULL); 2738 2739 release_and_return: 2740 releasestr(ill->ill_wq); 2741 ill_refrele(ill); 2742 } 2743 2744 /* 2745 * Sort through the mess of polhead options to retrieve an appropriate one. 2746 * Returns NULL if we send an spdsock error. Returns a valid pointer if we 2747 * found a valid polhead. Returns ALL_ACTIVE_POLHEADS (aka. -1) or 2748 * ALL_INACTIVE_POLHEADS (aka. -2) if the operation calls for the operation to 2749 * act on ALL policy heads. 
 */
static ipsec_policy_head_t *
get_appropriate_polhead(queue_t *q, mblk_t *mp, spd_if_t *tunname, int spdid,
    int msgtype, ipsec_tun_pol_t **itpp)
{
	ipsec_tun_pol_t *itp;
	ipsec_policy_head_t *iph;
	int errno;
	char *tname;
	boolean_t active;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	uint64_t gen;	/* Placeholder */
	ill_t *v4, *v6;

	active = (spdid == SPD_ACTIVE);
	*itpp = NULL;
	if (!active && spdid != SPD_STANDBY) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_SPDID);
		return (NULL);
	}

	if (tunname != NULL) {
		/* Acting on a tunnel's SPD. */
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* Handle all-polhead cases here. */
			if (msgtype != SPD_FLUSH && msgtype != SPD_DUMP) {
				spdsock_diag(q, mp,
				    SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
				return (NULL);
			}
			return (active ? ALL_ACTIVE_POLHEADS :
			    ALL_INACTIVE_POLHEADS);
		}

		itp = get_tunnel_policy(tname, ns);
		if (itp == NULL) {
			if (msgtype != SPD_ADDRULE) {
				/* "Tunnel not found" */
				spdsock_error(q, mp, ENOENT, 0);
				return (NULL);
			}

			/* First rule for this tunnel: create its polhead. */
			errno = 0;
			itp = create_tunnel_policy(tname, &errno, &gen, ns);
			if (itp == NULL) {
				/*
				 * Something very bad happened, most likely
				 * ENOMEM.  Return an indicator.
				 */
				spdsock_error(q, mp, errno, 0);
				return (NULL);
			}
		}
		/*
		 * Troll the plumbed tunnels and see if we have a
		 * match.  We need to do this always in case we add
		 * policy AFTER plumbing a tunnel.
		 */
		v4 = ill_lookup_on_name(tname, B_FALSE, B_FALSE, NULL,
		    NULL, NULL, &errno, NULL, ns->netstack_ip);
		if (v4 != NULL)
			find_tun_and_set_itp(v4, itp);
		v6 = ill_lookup_on_name(tname, B_FALSE, B_TRUE, NULL,
		    NULL, NULL, &errno, NULL, ns->netstack_ip);
		if (v6 != NULL)
			find_tun_and_set_itp(v6, itp);
		ASSERT(itp != NULL);
		*itpp = itp;
		/* For spdsock dump state, set the polhead's name. */
		if (msgtype == SPD_DUMP) {
			ITP_REFHOLD(itp);
			ss->spdsock_itp = itp;
			ss->spdsock_dump_tunnel = itp->itp_flags &
			    (active ? ITPF_P_TUNNEL : ITPF_I_TUNNEL);
		}
	} else {
		itp = NULL;
		/* For spdsock dump state, indicate it's global policy. */
		if (msgtype == SPD_DUMP)
			ss->spdsock_itp = NULL;
	}

	/* Pick the active or inactive head of the chosen policy. */
	if (active)
		iph = (itp == NULL) ? ipsec_system_policy(ns) : itp->itp_policy;
	else
		iph = (itp == NULL) ? ipsec_inactive_policy(ns) :
		    itp->itp_inactive;

	ASSERT(iph != NULL);
	/*
	 * Tunnel polheads need an explicit hold here; the global heads
	 * come back from ipsec_system_policy()/ipsec_inactive_policy().
	 */
	if (itp != NULL) {
		IPPH_REFHOLD(iph);
	}

	return (iph);
}

/*
 * Parse and dispatch one PF_POLICY request message: validate its size
 * and extensions, then route it to the handler for its message type.
 */
static void
spdsock_parse(queue_t *q, mblk_t *mp)
{
	spd_msg_t *spmsg;
	spd_ext_t *extv[SPD_EXT_MAX + 1];
	uint_t msgsize;
	ipsec_policy_head_t *iph;
	ipsec_tun_pol_t *itp;
	spd_if_t *tunname;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	netstack_t *ns = spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	/* Make sure nothing's below me. */
	ASSERT(WR(q)->q_next == NULL);

	spmsg = (spd_msg_t *)mp->b_rptr;

	msgsize = SPD_64TO8(spmsg->spd_msg_len);

	if (msgdsize(mp) != msgsize) {
		/*
		 * Message len incorrect w.r.t. actual size.  Send an error
		 * (EMSGSIZE).  It may be necessary to massage things a
		 * bit.  For example, if the spd_msg_type is hosed,
		 * I need to set it to SPD_RESERVED to get delivery to
		 * do the right thing.  Then again, maybe just letting
		 * the error delivery do the right thing.
		 */
		ss2dbg(spds,
		    ("mblk (%lu) and base (%d) message sizes don't jibe.\n",
		    msgdsize(mp), msgsize));
		spdsock_error(q, mp, EMSGSIZE, SPD_DIAGNOSTIC_NONE);
		return;
	}

	if (msgsize > (uint_t)(mp->b_wptr - mp->b_rptr)) {
		/* Get all message into one mblk. */
		if (pullupmsg(mp, -1) == 0) {
			/*
			 * Something screwy happened.
			 */
			ss3dbg(spds, ("spdsock_parse: pullupmsg() failed.\n"));
			return;
		} else {
			spmsg = (spd_msg_t *)mp->b_rptr;
		}
	}

	switch (spdsock_get_ext(extv, spmsg, msgsize)) {
	case KGE_DUP:
		/* Handle duplicate extension. */
		ss1dbg(spds, ("Got duplicate extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, dup_ext_diag[extv[0]->spd_ext_type]);
		return;
	case KGE_UNK:
		/* Handle unknown extension. */
		ss1dbg(spds, ("Got unknown extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_UNKNOWN_EXT);
		return;
	case KGE_LEN:
		/* Length error. */
		ss1dbg(spds, ("Length %d on extension type %d overrun or 0.\n",
		    extv[0]->spd_ext_len, extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_EXTLEN);
		return;
	case KGE_CHK:
		/* Reality check failed. */
		ss1dbg(spds, ("Reality check failed on extension type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, bad_ext_diag[extv[0]->spd_ext_type]);
		return;
	default:
		/* Default case is no errors. */
		break;
	}

	/*
	 * Special-case SPD_UPDATEALGS so as not to load IPsec.
	 */
	if (!ipsec_loaded(ipss) && spmsg->spd_msg_type != SPD_UPDATEALGS) {
		spdsock_t *ss = (spdsock_t *)q->q_ptr;

		ASSERT(ss != NULL);
		/*
		 * Kick off an IPsec load and poll for completion;
		 * spdsock_loadcheck() re-enters this function with the
		 * saved message.
		 */
		ipsec_loader_loadnow(ipss);
		ss->spdsock_timeout_arg = mp;
		ss->spdsock_timeout = qtimeout(q, spdsock_loadcheck,
		    q, LOADCHECK_INTERVAL);
		return;
	}

	/* First check for messages that need no polheads at all. */
	switch (spmsg->spd_msg_type) {
	case SPD_UPDATEALGS:
		spdsock_updatealg(q, mp, extv);
		return;
	case SPD_ALGLIST:
		spdsock_alglist(q, mp);
		return;
	case SPD_DUMPALGS:
		spdsock_dumpalgs(q, mp);
		return;
	}

	/*
	 * Then check for ones that need both primary/secondary polheads,
	 * finding the appropriate tunnel policy if need be.
	 */
	tunname = (spd_if_t *)extv[SPD_EXT_TUN_NAME];
	switch (spmsg->spd_msg_type) {
	case SPD_FLIP:
		spdsock_flip(q, mp, tunname);
		return;
	case SPD_CLONE:
		spdsock_clone(q, mp, tunname);
		return;
	}

	/*
	 * Finally, find ones that operate on exactly one polhead, or
	 * "all polheads" of a given type (active/inactive).
	 */
	iph = get_appropriate_polhead(q, mp, tunname, spmsg->spd_msg_spdid,
	    spmsg->spd_msg_type, &itp);
	if (iph == NULL)
		return;

	/* All-polheads-ready operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_FLUSH:
		if (itp != NULL) {
			/* Clear the per-tunnel policy flags as well. */
			mutex_enter(&itp->itp_lock);
			if (spmsg->spd_msg_spdid == SPD_ACTIVE)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
			mutex_exit(&itp->itp_lock);
			ITP_REFRELE(itp, ns);
		}
		spdsock_flush(q, iph, itp, mp);
		return;
	case SPD_DUMP:
		if (itp != NULL)
			ITP_REFRELE(itp, ns);
		spdsock_dump(q, iph, mp);
		return;
	}

	/* The remaining operations are not legal on "all polheads". */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
		return;
	}

	/* Single-polhead-only operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_ADDRULE:
		spdsock_addrule(q, iph, mp, extv, itp);
		break;
	case SPD_DELETERULE:
		spdsock_deleterule(q, iph, mp, extv, itp);
		break;
	case SPD_LOOKUP:
		spdsock_lookup(q, iph, mp, extv, itp);
		break;
	default:
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_MSG_TYPE);
		break;
	}

	/* Drop the holds taken by get_appropriate_polhead(). */
	IPPH_REFRELE(iph, ns);
	if (itp != NULL)
		ITP_REFRELE(itp, ns);
}

/*
 * If an algorithm mapping was received before IPsec was loaded, process it.
 * Called from the IPsec loader.
3028 */ 3029 void 3030 spdsock_update_pending_algs(netstack_t *ns) 3031 { 3032 spd_stack_t *spds = ns->netstack_spdsock; 3033 3034 mutex_enter(&spds->spds_alg_lock); 3035 if (spds->spds_algs_pending) { 3036 int diag; 3037 3038 spdsock_do_updatealg(spds->spds_extv_algs, &diag, 3039 spds); 3040 spds->spds_algs_pending = B_FALSE; 3041 } 3042 mutex_exit(&spds->spds_alg_lock); 3043 } 3044 3045 static void 3046 spdsock_loadcheck(void *arg) 3047 { 3048 queue_t *q = (queue_t *)arg; 3049 spdsock_t *ss = (spdsock_t *)q->q_ptr; 3050 mblk_t *mp; 3051 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec; 3052 3053 ASSERT(ss != NULL); 3054 3055 ss->spdsock_timeout = 0; 3056 mp = ss->spdsock_timeout_arg; 3057 ASSERT(mp != NULL); 3058 ss->spdsock_timeout_arg = NULL; 3059 if (ipsec_failed(ipss)) 3060 spdsock_error(q, mp, EPROTONOSUPPORT, 0); 3061 else 3062 spdsock_parse(q, mp); 3063 } 3064 3065 /* 3066 * Copy relevant state bits. 3067 */ 3068 static void 3069 spdsock_copy_info(struct T_info_ack *tap, spdsock_t *ss) 3070 { 3071 *tap = spdsock_g_t_info_ack; 3072 tap->CURRENT_state = ss->spdsock_state; 3073 tap->OPT_size = spdsock_max_optsize; 3074 } 3075 3076 /* 3077 * This routine responds to T_CAPABILITY_REQ messages. It is called by 3078 * spdsock_wput. Much of the T_CAPABILITY_ACK information is copied from 3079 * spdsock_g_t_info_ack. The current state of the stream is copied from 3080 * spdsock_state. 
3081 */ 3082 static void 3083 spdsock_capability_req(queue_t *q, mblk_t *mp) 3084 { 3085 spdsock_t *ss = (spdsock_t *)q->q_ptr; 3086 t_uscalar_t cap_bits1; 3087 struct T_capability_ack *tcap; 3088 3089 cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1; 3090 3091 mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack), 3092 mp->b_datap->db_type, T_CAPABILITY_ACK); 3093 if (mp == NULL) 3094 return; 3095 3096 tcap = (struct T_capability_ack *)mp->b_rptr; 3097 tcap->CAP_bits1 = 0; 3098 3099 if (cap_bits1 & TC1_INFO) { 3100 spdsock_copy_info(&tcap->INFO_ack, ss); 3101 tcap->CAP_bits1 |= TC1_INFO; 3102 } 3103 3104 qreply(q, mp); 3105 } 3106 3107 /* 3108 * This routine responds to T_INFO_REQ messages. It is called by 3109 * spdsock_wput_other. 3110 * Most of the T_INFO_ACK information is copied from spdsock_g_t_info_ack. 3111 * The current state of the stream is copied from spdsock_state. 3112 */ 3113 static void 3114 spdsock_info_req(q, mp) 3115 queue_t *q; 3116 mblk_t *mp; 3117 { 3118 mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO, 3119 T_INFO_ACK); 3120 if (mp == NULL) 3121 return; 3122 spdsock_copy_info((struct T_info_ack *)mp->b_rptr, 3123 (spdsock_t *)q->q_ptr); 3124 qreply(q, mp); 3125 } 3126 3127 /* 3128 * spdsock_err_ack. This routine creates a 3129 * T_ERROR_ACK message and passes it 3130 * upstream. 3131 */ 3132 static void 3133 spdsock_err_ack(q, mp, t_error, sys_error) 3134 queue_t *q; 3135 mblk_t *mp; 3136 int t_error; 3137 int sys_error; 3138 { 3139 if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL) 3140 qreply(q, mp); 3141 } 3142 3143 /* 3144 * This routine retrieves the current status of socket options. 3145 * It returns the size of the option retrieved. 
3146 */ 3147 /* ARGSUSED */ 3148 int 3149 spdsock_opt_get(queue_t *q, int level, int name, uchar_t *ptr) 3150 { 3151 int *i1 = (int *)ptr; 3152 3153 switch (level) { 3154 case SOL_SOCKET: 3155 switch (name) { 3156 case SO_TYPE: 3157 *i1 = SOCK_RAW; 3158 break; 3159 /* 3160 * The following two items can be manipulated, 3161 * but changing them should do nothing. 3162 */ 3163 case SO_SNDBUF: 3164 *i1 = (int)q->q_hiwat; 3165 break; 3166 case SO_RCVBUF: 3167 *i1 = (int)(RD(q)->q_hiwat); 3168 break; 3169 } 3170 break; 3171 default: 3172 return (0); 3173 } 3174 return (sizeof (int)); 3175 } 3176 3177 /* 3178 * This routine sets socket options. 3179 */ 3180 /* ARGSUSED */ 3181 int 3182 spdsock_opt_set(queue_t *q, uint_t mgmt_flags, int level, int name, 3183 uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp, 3184 void *thisdg_attrs, cred_t *cr, mblk_t *mblk) 3185 { 3186 int *i1 = (int *)invalp; 3187 spdsock_t *ss = (spdsock_t *)q->q_ptr; 3188 spd_stack_t *spds = ss->spdsock_spds; 3189 3190 switch (level) { 3191 case SOL_SOCKET: 3192 switch (name) { 3193 case SO_SNDBUF: 3194 if (*i1 > spds->spds_max_buf) 3195 return (ENOBUFS); 3196 q->q_hiwat = *i1; 3197 break; 3198 case SO_RCVBUF: 3199 if (*i1 > spds->spds_max_buf) 3200 return (ENOBUFS); 3201 RD(q)->q_hiwat = *i1; 3202 (void) mi_set_sth_hiwat(RD(q), *i1); 3203 break; 3204 } 3205 break; 3206 } 3207 return (0); 3208 } 3209 3210 3211 /* 3212 * Handle STREAMS messages. 
 */
static void
spdsock_wput_other(queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	int error;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	cred_t *cr;

	switch (mp->b_datap->db_type) {
	case M_PROTO:
	case M_PCPROTO:
		if ((mp->b_wptr - mp->b_rptr) < sizeof (long)) {
			ss3dbg(spds, (
			    "spdsock_wput_other: Not big enough M_PROTO\n"));
			freemsg(mp);
			return;
		}
		/*
		 * Use the owning zone's kernel credential as the default
		 * for option management; released via crfree() below.
		 */
		cr = zone_get_kcred(netstackid_to_zoneid(
		    spds->spds_netstack->netstack_stackid));
		ASSERT(cr != NULL);

		switch (((union T_primitives *)mp->b_rptr)->type) {
		case T_CAPABILITY_REQ:
			spdsock_capability_req(q, mp);
			break;
		case T_INFO_REQ:
			spdsock_info_req(q, mp);
			break;
		case T_SVR4_OPTMGMT_REQ:
			(void) svr4_optcom_req(q, mp, DB_CREDDEF(mp, cr),
			    &spdsock_opt_obj, B_FALSE);
			break;
		case T_OPTMGMT_REQ:
			(void) tpi_optcom_req(q, mp, DB_CREDDEF(mp, cr),
			    &spdsock_opt_obj, B_FALSE);
			break;
		case T_DATA_REQ:
		case T_EXDATA_REQ:
		case T_ORDREL_REQ:
			/* Illegal for spdsock. */
			freemsg(mp);
			(void) putnextctl1(RD(q), M_ERROR, EPROTO);
			break;
		default:
			/* Not supported by spdsock. */
			spdsock_err_ack(q, mp, TNOTSUPPORT, 0);
			break;
		}
		crfree(cr);
		return;
	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;
		error = EINVAL;

		switch (iocp->ioc_cmd) {
		case ND_SET:
		case ND_GET:
			/* nd_getset() readies mp as the reply on success. */
			if (nd_getset(q, spds->spds_g_nd, mp)) {
				qreply(q, mp);
				return;
			} else
				error = ENOENT;
			/* FALLTHRU */
		default:
			miocnak(q, mp, 0, error);
			return;
		}
	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			qreply(q, mp);
			return;
		}
		/* Else FALLTHRU */
	}

	/* If fell through, just black-hole the message. */
	freemsg(mp);
}

/*
 * Write-side put procedure: accept PF_POLICY requests as M_DATA (or
 * wrapped in a T_DATA_REQ) and hand them to spdsock_parse(); everything
 * else goes to spdsock_wput_other().
 */
static void
spdsock_wput(queue_t *q, mblk_t *mp)
{
	uint8_t *rptr = mp->b_rptr;
	mblk_t *mp1;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;

	/*
	 * If we're dumping, defer processing other messages until the
	 * dump completes.
	 */
	if (ss->spdsock_dump_req != NULL) {
		if (!putq(q, mp))
			freemsg(mp);
		return;
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:
		/*
		 * Silently discard.
		 */
		ss2dbg(spds, ("raw M_DATA in spdsock.\n"));
		freemsg(mp);
		return;
	case M_PROTO:
	case M_PCPROTO:
		if ((mp->b_wptr - rptr) >= sizeof (struct T_data_req)) {
			if (((union T_primitives *)rptr)->type == T_DATA_REQ) {
				if ((mp1 = mp->b_cont) == NULL) {
					/* No data after T_DATA_REQ. */
					ss2dbg(spds,
					    ("No data after DATA_REQ.\n"));
					freemsg(mp);
					return;
				}
				/* Strip the T_DATA_REQ header block. */
				freeb(mp);
				mp = mp1;
				ss2dbg(spds, ("T_DATA_REQ\n"));
				break;	/* Out of switch. */
			}
		}
		/* FALLTHRU */
	default:
		ss3dbg(spds, ("In default wput case (%d %d).\n",
		    mp->b_datap->db_type, ((union T_primitives *)rptr)->type));
		spdsock_wput_other(q, mp);
		return;
	}

	/* I now have a PF_POLICY message in an M_DATA block. */
	spdsock_parse(q, mp);
}

/*
 * Device open procedure, called when new queue pair created.
 * We are passed the read-side queue.
 */
/* ARGSUSED */
static int
spdsock_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	spdsock_t *ss;
	queue_t *oq = OTHERQ(q);
	minor_t ssminor;
	netstack_t *ns;
	spd_stack_t *spds;

	/* PF_POLICY is a privileged socket. */
	if (secpolicy_ip_config(credp, B_FALSE) != 0)
		return (EPERM);

	if (q->q_ptr != NULL)
		return (0);  /* Re-open of an already open instance. */

	if (sflag & MODOPEN)
		return (EINVAL);

	/*
	 * netstack_find_by_cred() returns with a hold on the netstack;
	 * it is released in spdsock_close() (or on the error paths below).
	 */
	ns = netstack_find_by_cred(credp);
	ASSERT(ns != NULL);
	spds = ns->netstack_spdsock;
	ASSERT(spds != NULL);

	ss2dbg(spds, ("Made it into PF_POLICY socket open.\n"));

	/* Allocate a minor number for this instance. */
	ssminor = (minor_t)(uintptr_t)vmem_alloc(spdsock_vmem, 1, VM_NOSLEEP);
	if (ssminor == 0) {
		netstack_rele(spds->spds_netstack);
		return (ENOMEM);
	}
	ss = kmem_zalloc(sizeof (spdsock_t), KM_NOSLEEP);
	if (ss == NULL) {
		vmem_free(spdsock_vmem, (void *)(uintptr_t)ssminor, 1);
		netstack_rele(spds->spds_netstack);
		return (ENOMEM);
	}

	ss->spdsock_minor = ssminor;
	ss->spdsock_state = TS_UNBND;
	ss->spdsock_dump_req = NULL;

	ss->spdsock_spds = spds;

	/* Both sides of the queue pair share the per-instance state. */
	q->q_ptr = ss;
	oq->q_ptr = ss;

	q->q_hiwat = spds->spds_recv_hiwat;

	oq->q_hiwat = spds->spds_xmit_hiwat;
	oq->q_lowat = spds->spds_xmit_lowat;

	qprocson(q);
	(void) mi_set_sth_hiwat(q, spds->spds_recv_hiwat);

	/* Clone ourselves onto the allocated minor number. */
	*devp = makedevice(getmajor(*devp), ss->spdsock_minor);
	return (0);
}

/* 3417 * Read-side service procedure, invoked when we get back-enabled 3418 * when buffer space becomes available. 3419 * 3420 * Dump another chunk if we were dumping before; when we finish, kick 3421 * the write-side queue in case it's waiting for read queue space. 3422 */ 3423 void 3424 spdsock_rsrv(queue_t *q) 3425 { 3426 spdsock_t *ss = q->q_ptr; 3427 3428 if (ss->spdsock_dump_req != NULL) 3429 spdsock_dump_some(q, ss); 3430 3431 if (ss->spdsock_dump_req == NULL) 3432 qenable(OTHERQ(q)); 3433 } 3434 3435 /* 3436 * Write-side service procedure, invoked when we defer processing 3437 * if another message is received while a dump is in progress. 3438 */ 3439 void 3440 spdsock_wsrv(queue_t *q) 3441 { 3442 spdsock_t *ss = q->q_ptr; 3443 mblk_t *mp; 3444 ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec; 3445 3446 if (ss->spdsock_dump_req != NULL) { 3447 qenable(OTHERQ(q)); 3448 return; 3449 } 3450 3451 while ((mp = getq(q)) != NULL) { 3452 if (ipsec_loaded(ipss)) { 3453 spdsock_wput(q, mp); 3454 if (ss->spdsock_dump_req != NULL) 3455 return; 3456 } else if (!ipsec_failed(ipss)) { 3457 (void) putq(q, mp); 3458 } else { 3459 spdsock_error(q, mp, EPFNOSUPPORT, 0); 3460 } 3461 } 3462 } 3463 3464 static int 3465 spdsock_close(queue_t *q) 3466 { 3467 spdsock_t *ss = q->q_ptr; 3468 spd_stack_t *spds = ss->spdsock_spds; 3469 3470 qprocsoff(q); 3471 3472 /* Safe assumption. */ 3473 ASSERT(ss != NULL); 3474 3475 if (ss->spdsock_timeout != 0) 3476 (void) quntimeout(q, ss->spdsock_timeout); 3477 3478 ss3dbg(spds, ("Driver close, PF_POLICY socket is going away.\n")); 3479 3480 vmem_free(spdsock_vmem, (void *)(uintptr_t)ss->spdsock_minor, 1); 3481 netstack_rele(ss->spdsock_spds->spds_netstack); 3482 3483 kmem_free(ss, sizeof (spdsock_t)); 3484 return (0); 3485 } 3486 3487 /* 3488 * Merge the IPsec algorithms tables with the received algorithm information. 
 */
void
spdsock_merge_algs(spd_stack_t *spds)
{
	ipsec_alginfo_t *alg, *oalg;
	ipsec_algtype_t algtype;
	uint_t algidx, algid, nalgs;
	crypto_mech_name_t *mechs;
	uint_t mech_count, mech_idx;
	netstack_t *ns = spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	/* Caller must hold the per-stack alg lock across the merge. */
	ASSERT(MUTEX_HELD(&spds->spds_alg_lock));

	/*
	 * Get the list of supported mechanisms from the crypto framework.
	 * If a mechanism is supported by KCF, resolve its mechanism
	 * id and mark it as being valid. This operation must be done
	 * without holding alg_lock, since it can cause a provider
	 * module to be loaded and the provider notification callback to
	 * be invoked.
	 */
	mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
			int algflags = 0;
			crypto_mech_type_t mt = CRYPTO_MECHANISM_INVALID;

			alg = spds->spds_algs[algtype][algid];
			if (alg == NULL)
				continue;

			/*
			 * The NULL encryption algorithm is a special
			 * case because there are no mechanisms, yet
			 * the algorithm is still valid.
			 */
			if (alg->alg_id == SADB_EALG_NULL) {
				alg->alg_mech_type = CRYPTO_MECHANISM_INVALID;
				alg->alg_flags = ALG_FLAG_VALID;
				continue;
			}

			/* Valid only if KCF knows the mechanism by name. */
			for (mech_idx = 0; mech_idx < mech_count; mech_idx++) {
				if (strncmp(alg->alg_mech_name, mechs[mech_idx],
				    CRYPTO_MAX_MECH_NAME) == 0) {
					mt = crypto_mech2id(alg->alg_mech_name);
					ASSERT(mt != CRYPTO_MECHANISM_INVALID);
					algflags = ALG_FLAG_VALID;
					break;
				}
			}
			alg->alg_mech_type = mt;
			alg->alg_flags = algflags;
		}
	}

	mutex_enter(&ipss->ipsec_alg_lock);

	/*
	 * For each algorithm currently defined, check if it is
	 * present in the new tables created from the SPD_UPDATEALGS
	 * message received from user-space.
	 * Delete the algorithm entries that are currently defined
	 * but not part of the new tables.
	 */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		nalgs = ipss->ipsec_nalgs[algtype];
		for (algidx = 0; algidx < nalgs; algidx++) {
			algid = ipss->ipsec_sortlist[algtype][algidx];
			if (spds->spds_algs[algtype][algid] == NULL)
				ipsec_alg_unreg(algtype, algid, ns);
		}
	}

	/*
	 * For each algorithm we just received, check if it is
	 * present in the currently defined tables. If it is, swap
	 * the entry with the one we just allocated.
	 * If the new algorithm is not in the current tables,
	 * add it.
	 */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
			alg = spds->spds_algs[algtype][algid];
			if (alg == NULL)
				continue;

			if ((oalg = ipss->ipsec_alglists[algtype][algid]) ==
			    NULL) {
				/*
				 * New algorithm, add it to the algorithm
				 * table.
				 */
				ipsec_alg_reg(algtype, alg, ns);
			} else {
				/*
				 * Algorithm is already in the table. Swap
				 * the existing entry with the new one.
				 */
				ipsec_alg_fix_min_max(alg, algtype, ns);
				ipss->ipsec_alglists[algtype][algid] = alg;
				ipsec_alg_free(oalg);
			}
			/* Ownership moved (or freed); clear the staging slot. */
			spds->spds_algs[algtype][algid] = NULL;
		}
	}

	/* Copy the per-type execution mode settings into the shared stack. */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		ipss->ipsec_algs_exec_mode[algtype] =
		    spds->spds_algs_exec_mode[algtype];
	}

	mutex_exit(&ipss->ipsec_alg_lock);

	crypto_free_mech_list(mechs, mech_count);

	/* Notify AH and ESP that the algorithm tables have changed. */
	ipsecah_algs_changed(ns);
	ipsecesp_algs_changed(ns);
}