xref: /titanic_51/usr/src/uts/common/inet/ip/spdsock.c (revision 565679070e884800f5d041d42d226813c0bbf6d8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/param.h>
27 #include <sys/types.h>
28 #include <sys/stream.h>
29 #include <sys/strsubr.h>
30 #include <sys/strsun.h>
31 #include <sys/stropts.h>
32 #include <sys/zone.h>
33 #include <sys/vnode.h>
34 #include <sys/sysmacros.h>
35 #define	_SUN_TPI_VERSION 2
36 #include <sys/tihdr.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/mkdev.h>
40 #include <sys/debug.h>
41 #include <sys/kmem.h>
42 #include <sys/cmn_err.h>
43 #include <sys/suntpi.h>
44 #include <sys/policy.h>
45 #include <sys/dls.h>
46 
47 #include <sys/socket.h>
48 #include <netinet/in.h>
49 #include <net/pfkeyv2.h>
50 #include <net/pfpolicy.h>
51 
52 #include <inet/common.h>
53 #include <netinet/ip6.h>
54 #include <inet/ip.h>
55 #include <inet/ip6.h>
56 #include <inet/mi.h>
57 #include <inet/proto_set.h>
58 #include <inet/nd.h>
59 #include <inet/ip_if.h>
60 #include <inet/optcom.h>
61 #include <inet/ipsec_info.h>
62 #include <inet/ipsec_impl.h>
63 #include <inet/spdsock.h>
64 #include <inet/sadb.h>
65 #include <inet/iptun.h>
66 #include <inet/iptun/iptun_impl.h>
67 
68 #include <sys/isa_defs.h>
69 
70 #include <c2/audit.h>
71 
72 /*
73  * This is a transport provider for the PF_POLICY IPsec policy
74  * management socket, which provides a management interface into the
75  * SPD, allowing policy rules to be added, deleted, and queried.
76  *
77  * This effectively replaces the old private SIOC*IPSECONFIG ioctls
78  * with an extensible interface which will hopefully be public some
79  * day.
80  *
81  * See <net/pfpolicy.h> for more details on the protocol.
82  *
83  * We link against drv/ip and call directly into it to manipulate the
84  * SPD; see ipsec_impl.h for the policy data structures and spd.c for
85  * the code which maintains them.
86  *
87  * The MT model of this is QPAIR with the addition of some explicit
88  * locking to protect system-wide policy data structures.
89  */
90 
91 static vmem_t *spdsock_vmem;		/* for minor numbers. */
92 
93 #define	ALIGNED64(x) IS_P2ALIGNED((x), sizeof (uint64_t))
94 
95 /* Default structure copied into T_INFO_ACK messages (from rts.c...) */
96 static struct T_info_ack spdsock_g_t_info_ack = {
97 	T_INFO_ACK,
98 	T_INFINITE,	/* TSDU_size. Maximum size messages. */
99 	T_INVALID,	/* ETSDU_size. No expedited data. */
100 	T_INVALID,	/* CDATA_size. No connect data. */
101 	T_INVALID,	/* DDATA_size. No disconnect data. */
102 	0,		/* ADDR_size. */
103 	0,		/* OPT_size. No user-settable options */
104 	64 * 1024,	/* TIDU_size. spdsock allows maximum size messages. */
105 	T_COTS,		/* SERV_type. spdsock supports connection oriented. */
106 	TS_UNBND,	/* CURRENT_state. This is set from spdsock_state. */
107 	(XPG4_1)	/* Provider flags */
108 };
109 
110 /* Named Dispatch Parameter Management Structure */
111 typedef struct spdsockparam_s {
112 	uint_t	spdsock_param_min;
113 	uint_t	spdsock_param_max;
114 	uint_t	spdsock_param_value;
115 	char *spdsock_param_name;
116 } spdsockparam_t;
117 
118 /*
119  * Table of NDD variables supported by spdsock. These are loaded into
120  * spdsock_g_nd in spdsock_init_nd.
121  * All of these are alterable, within the min/max values given, at run time.
122  */
123 static	spdsockparam_t	lcl_param_arr[] = {
124 	/* min	max	value	name */
125 	{ 4096, 65536,	8192,	"spdsock_xmit_hiwat"},
126 	{ 0,	65536,	1024,	"spdsock_xmit_lowat"},
127 	{ 4096, 65536,	8192,	"spdsock_recv_hiwat"},
128 	{ 65536, 1024*1024*1024, 256*1024,	"spdsock_max_buf"},
129 	{ 0,	3,	0,	"spdsock_debug"},
130 };
131 #define	spds_xmit_hiwat	spds_params[0].spdsock_param_value
132 #define	spds_xmit_lowat	spds_params[1].spdsock_param_value
133 #define	spds_recv_hiwat	spds_params[2].spdsock_param_value
134 #define	spds_max_buf	spds_params[3].spdsock_param_value
135 #define	spds_debug		spds_params[4].spdsock_param_value
136 
137 #define	ss0dbg(a)	printf a
138 /* NOTE:  != 0 instead of > 0 so lint doesn't complain. */
139 #define	ss1dbg(spds, a)	if (spds->spds_debug != 0) printf a
140 #define	ss2dbg(spds, a)	if (spds->spds_debug > 1) printf a
141 #define	ss3dbg(spds, a)	if (spds->spds_debug > 2) printf a
142 
143 #define	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph) { \
144 	ASSERT(RW_READ_HELD(&(iph)->iph_lock)); \
145 	(ss)->spdsock_dump_head = (iph); \
146 	(ss)->spdsock_dump_gen = (iph)->iph_gen; \
147 	(ss)->spdsock_dump_cur_type = 0; \
148 	(ss)->spdsock_dump_cur_af = IPSEC_AF_V4; \
149 	(ss)->spdsock_dump_cur_rule = NULL; \
150 	(ss)->spdsock_dump_count = 0; \
151 	(ss)->spdsock_dump_cur_chain = 0; \
152 }
153 
154 static int spdsock_close(queue_t *);
155 static int spdsock_open(queue_t *, dev_t *, int, int, cred_t *);
156 static void spdsock_wput(queue_t *, mblk_t *);
157 static void spdsock_wsrv(queue_t *);
158 static void spdsock_rsrv(queue_t *);
159 static void *spdsock_stack_init(netstackid_t stackid, netstack_t *ns);
160 static void spdsock_stack_fini(netstackid_t stackid, void *arg);
161 static void spdsock_loadcheck(void *);
162 static void spdsock_merge_algs(spd_stack_t *);
163 static void spdsock_flush_one(ipsec_policy_head_t *, netstack_t *);
164 static mblk_t *spdsock_dump_next_record(spdsock_t *);
165 
166 static struct module_info info = {
167 	5138, "spdsock", 1, INFPSZ, 512, 128
168 };
169 
170 static struct qinit rinit = {
171 	NULL, (pfi_t)spdsock_rsrv, spdsock_open, spdsock_close,
172 	NULL, &info
173 };
174 
175 static struct qinit winit = {
176 	(pfi_t)spdsock_wput, (pfi_t)spdsock_wsrv, NULL, NULL, NULL, &info
177 };
178 
179 struct streamtab spdsockinfo = {
180 	&rinit, &winit
181 };
182 
183 /* mapping from alg type to protocol number, as per RFC 2407 */
184 static const uint_t algproto[] = {
185 	PROTO_IPSEC_AH,
186 	PROTO_IPSEC_ESP,
187 };
188 
189 #define	NALGPROTOS	(sizeof (algproto) / sizeof (algproto[0]))
190 
191 /* mapping from kernel exec mode to spdsock exec mode */
192 static const uint_t execmodes[] = {
193 	SPD_ALG_EXEC_MODE_SYNC,
194 	SPD_ALG_EXEC_MODE_ASYNC
195 };
196 
197 #define	NEXECMODES	(sizeof (execmodes) / sizeof (execmodes[0]))
198 
199 #define	ALL_ACTIVE_POLHEADS ((ipsec_policy_head_t *)-1)
200 #define	ALL_INACTIVE_POLHEADS ((ipsec_policy_head_t *)-2)
201 
202 #define	ITP_NAME(itp) (itp != NULL ? itp->itp_name : NULL)
203 
204 /* ARGSUSED */
205 static int
206 spdsock_param_get(q, mp, cp, cr)
207 	queue_t	*q;
208 	mblk_t	*mp;
209 	caddr_t	cp;
210 	cred_t *cr;
211 {
212 	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
213 	uint_t value;
214 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
215 	spd_stack_t	*spds = ss->spdsock_spds;
216 
217 	mutex_enter(&spds->spds_param_lock);
218 	value = spdsockpa->spdsock_param_value;
219 	mutex_exit(&spds->spds_param_lock);
220 
221 	(void) mi_mpprintf(mp, "%u", value);
222 	return (0);
223 }
224 
225 /* This routine sets an NDD variable in a spdsockparam_t structure. */
226 /* ARGSUSED */
227 static int
228 spdsock_param_set(q, mp, value, cp, cr)
229 	queue_t	*q;
230 	mblk_t	*mp;
231 	char *value;
232 	caddr_t	cp;
233 	cred_t *cr;
234 {
235 	ulong_t	new_value;
236 	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
237 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
238 	spd_stack_t	*spds = ss->spdsock_spds;
239 
240 	/* Convert the value from a string into a long integer. */
241 	if (ddi_strtoul(value, NULL, 10, &new_value) != 0)
242 		return (EINVAL);
243 
244 	mutex_enter(&spds->spds_param_lock);
245 	/*
246 	 * Fail the request if the new value does not lie within the
247 	 * required bounds.
248 	 */
249 	if (new_value < spdsockpa->spdsock_param_min ||
250 	    new_value > spdsockpa->spdsock_param_max) {
251 		mutex_exit(&spds->spds_param_lock);
252 		return (EINVAL);
253 	}
254 
255 	/* Set the new value */
256 	spdsockpa->spdsock_param_value = new_value;
257 	mutex_exit(&spds->spds_param_lock);
258 
259 	return (0);
260 }
261 
/*
 * Initialize at module load time.  Sets up the option-size limit, the
 * minor-number arena, and the per-netstack create/destroy callbacks.
 * Returns B_TRUE unconditionally (no failure paths here).
 */
boolean_t
spdsock_ddi_init(void)
{
	spdsock_max_optsize = optcom_max_optsize(
	    spdsock_opt_obj.odb_opt_des_arr, spdsock_opt_obj.odb_opt_arr_cnt);

	/* Arena of minor device numbers; base 1 so 0 is never handed out. */
	spdsock_vmem = vmem_create("spdsock", (void *)1, MAXMIN, 1,
	    NULL, NULL, NULL, 1, VM_SLEEP | VMC_IDENTIFIER);

	/*
	 * We want to be informed each time a stack is created or
	 * destroyed in the kernel, so we can maintain the
	 * set of spd_stack_t's.
	 */
	netstack_register(NS_SPDSOCK, spdsock_stack_init, NULL,
	    spdsock_stack_fini);

	return (B_TRUE);
}
284 
285 /*
286  * Walk through the param array specified registering each element with the
287  * named dispatch handler.
288  */
289 static boolean_t
290 spdsock_param_register(IDP *ndp, spdsockparam_t *ssp, int cnt)
291 {
292 	for (; cnt-- > 0; ssp++) {
293 		if (ssp->spdsock_param_name != NULL &&
294 		    ssp->spdsock_param_name[0]) {
295 			if (!nd_load(ndp,
296 			    ssp->spdsock_param_name,
297 			    spdsock_param_get, spdsock_param_set,
298 			    (caddr_t)ssp)) {
299 				nd_free(ndp);
300 				return (B_FALSE);
301 			}
302 		}
303 	}
304 	return (B_TRUE);
305 }
306 
307 /*
308  * Initialize for each stack instance
309  */
310 /* ARGSUSED */
static void *
spdsock_stack_init(netstackid_t stackid, netstack_t *ns)
{
	spd_stack_t	*spds;
	spdsockparam_t	*ssp;

	spds = (spd_stack_t *)kmem_zalloc(sizeof (*spds), KM_SLEEP);
	spds->spds_netstack = ns;

	/* kmem_zalloc above guarantees this starts out NULL. */
	ASSERT(spds->spds_g_nd == NULL);

	/* Each stack gets its own writable copy of the tunable table. */
	ssp = (spdsockparam_t *)kmem_alloc(sizeof (lcl_param_arr), KM_SLEEP);
	spds->spds_params = ssp;
	bcopy(lcl_param_arr, ssp, sizeof (lcl_param_arr));

	(void) spdsock_param_register(&spds->spds_g_nd, ssp,
	    A_CNT(lcl_param_arr));

	mutex_init(&spds->spds_param_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spds->spds_alg_lock, NULL, MUTEX_DEFAULT, NULL);

	return (spds);
}
334 
/*
 * Undo spdsock_ddi_init() at module unload: release the minor-number
 * arena and stop receiving netstack create/destroy callbacks.
 */
void
spdsock_ddi_destroy(void)
{
	vmem_destroy(spdsock_vmem);

	netstack_unregister(NS_SPDSOCK);
}
342 
343 /* ARGSUSED */
static void
spdsock_stack_fini(netstackid_t stackid, void *arg)
{
	spd_stack_t *spds = (spd_stack_t *)arg;

	/* spds_mp_algs may still be NULL here; freemsg(NULL) is a no-op. */
	freemsg(spds->spds_mp_algs);
	mutex_destroy(&spds->spds_param_lock);
	mutex_destroy(&spds->spds_alg_lock);
	nd_free(&spds->spds_g_nd);
	kmem_free(spds->spds_params, sizeof (lcl_param_arr));
	spds->spds_params = NULL;

	kmem_free(spds, sizeof (*spds));
}
358 
359 /*
360  * NOTE: large quantities of this should be shared with keysock.
361  * Would be nice to combine some of this into a common module, but
362  * not possible given time pressures.
363  */
364 
365 /*
366  * High-level reality checking of extensions.
367  */
368 /* ARGSUSED */ /* XXX */
369 static boolean_t
370 ext_check(spd_ext_t *ext)
371 {
372 	spd_if_t *tunname = (spd_if_t *)ext;
373 	int i;
374 	char *idstr;
375 
376 	if (ext->spd_ext_type == SPD_EXT_TUN_NAME) {
377 		/* (NOTE:  Modified from SADB_EXT_IDENTITY..) */
378 
379 		/*
380 		 * Make sure the strings in these identities are
381 		 * null-terminated.  Let's "proactively" null-terminate the
382 		 * string at the last byte if it's not terminated sooner.
383 		 */
384 		i = SPD_64TO8(tunname->spd_if_len) - sizeof (spd_if_t);
385 		idstr = (char *)(tunname + 1);
386 		while (*idstr != '\0' && i > 0) {
387 			i--;
388 			idstr++;
389 		}
390 		if (i == 0) {
391 			/*
392 			 * I.e., if the bozo user didn't NULL-terminate the
393 			 * string...
394 			 */
395 			idstr--;
396 			*idstr = '\0';
397 		}
398 	}
399 	return (B_TRUE);	/* For now... */
400 }
401 
402 
403 
404 /* Return values for spdsock_get_ext(). */
405 #define	KGE_OK	0
406 #define	KGE_DUP	1
407 #define	KGE_UNK	2
408 #define	KGE_LEN	3
409 #define	KGE_CHK	4
410 
411 /*
412  * Parse basic extension headers and return in the passed-in pointer vector.
413  * Return values include:
414  *
415  *	KGE_OK	Everything's nice and parsed out.
416  *		If there are no extensions, place NULL in extv[0].
417  *	KGE_DUP	There is a duplicate extension.
418  *		First instance in appropriate bin.  First duplicate in
419  *		extv[0].
420  *	KGE_UNK	Unknown extension type encountered.  extv[0] contains
421  *		unknown header.
422  *	KGE_LEN	Extension length error.
423  *	KGE_CHK	High-level reality check failed on specific extension.
424  *
425  * My apologies for some of the pointer arithmetic in here.  I'm thinking
426  * like an assembly programmer, yet trying to make the compiler happy.
427  */
static int
spdsock_get_ext(spd_ext_t *extv[], spd_msg_t *basehdr, uint_t msgsize)
{
	/* extv must have SPD_EXT_MAX + 1 slots; slot 0 is scratch space. */
	bzero(extv, sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));

	/* Use extv[0] as the "current working pointer". */

	extv[0] = (spd_ext_t *)(basehdr + 1);

	while (extv[0] < (spd_ext_t *)(((uint8_t *)basehdr) + msgsize)) {
		/* Check for unknown headers. */
		if (extv[0]->spd_ext_type == 0 ||
		    extv[0]->spd_ext_type > SPD_EXT_MAX)
			return (KGE_UNK);

		/*
		 * Check length.  Use uint64_t because extlen is in units
		 * of 64-bit words.  If length goes beyond the msgsize,
		 * return an error.  (Zero length also qualifies here.)
		 */
		if (extv[0]->spd_ext_len == 0 ||
		    (void *)((uint64_t *)extv[0] + extv[0]->spd_ext_len) >
		    (void *)((uint8_t *)basehdr + msgsize))
			return (KGE_LEN);

		/* Check for redundant headers. */
		if (extv[extv[0]->spd_ext_type] != NULL)
			return (KGE_DUP);

		/*
		 * Reality check the extension if possible at the spdsock
		 * level.
		 */
		if (!ext_check(extv[0]))
			return (KGE_CHK);

		/* If I make it here, assign the appropriate bin. */
		extv[extv[0]->spd_ext_type] = extv[0];

		/* Advance pointer (See above for uint64_t ptr reasoning.) */
		extv[0] = (spd_ext_t *)
		    ((uint64_t *)extv[0] + extv[0]->spd_ext_len);
	}

	/* Everything's cool. */

	/*
	 * If extv[0] == NULL, then there are no extension headers in this
	 * message.  Ensure that this is the case.
	 */
	if (extv[0] == (spd_ext_t *)(basehdr + 1))
		extv[0] = NULL;

	return (KGE_OK);
}
483 
/*
 * Diagnostic codes issued when a selector-related extension is
 * malformed.  The slot order tracks the SPD_EXT_* numbering
 * (presumably indexed as ext_type - SPD_EXT_LCLPORT; confirm against
 * <net/pfpolicy.h> before relying on this).
 */
static const int bad_ext_diag[] = {
	SPD_DIAGNOSTIC_MALFORMED_LCLPORT,
	SPD_DIAGNOSTIC_MALFORMED_REMPORT,
	SPD_DIAGNOSTIC_MALFORMED_PROTO,
	SPD_DIAGNOSTIC_MALFORMED_LCLADDR,
	SPD_DIAGNOSTIC_MALFORMED_REMADDR,
	SPD_DIAGNOSTIC_MALFORMED_ACTION,
	SPD_DIAGNOSTIC_MALFORMED_RULE,
	SPD_DIAGNOSTIC_MALFORMED_RULESET,
	SPD_DIAGNOSTIC_MALFORMED_ICMP_TYPECODE
};

/* Parallel table for duplicated extensions, same ordering as above. */
static const int dup_ext_diag[] = {
	SPD_DIAGNOSTIC_DUPLICATE_LCLPORT,
	SPD_DIAGNOSTIC_DUPLICATE_REMPORT,
	SPD_DIAGNOSTIC_DUPLICATE_PROTO,
	SPD_DIAGNOSTIC_DUPLICATE_LCLADDR,
	SPD_DIAGNOSTIC_DUPLICATE_REMADDR,
	SPD_DIAGNOSTIC_DUPLICATE_ACTION,
	SPD_DIAGNOSTIC_DUPLICATE_RULE,
	SPD_DIAGNOSTIC_DUPLICATE_RULESET,
	SPD_DIAGNOSTIC_DUPLICATE_ICMP_TYPECODE
};
507 
508 /*
509  * Transmit a PF_POLICY error message to the instance either pointed to
510  * by ks, the instance with serial number serial, or more, depending.
511  *
512  * The faulty message (or a reasonable facsimile thereof) is in mp.
513  * This function will free mp or recycle it for delivery, thereby causing
514  * the stream head to free it.
515  */
516 static void
517 spdsock_error(queue_t *q, mblk_t *mp, int error, int diagnostic)
518 {
519 	spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
520 
521 	ASSERT(mp->b_datap->db_type == M_DATA);
522 
523 	if (spmsg->spd_msg_type < SPD_MIN ||
524 	    spmsg->spd_msg_type > SPD_MAX)
525 		spmsg->spd_msg_type = SPD_RESERVED;
526 
527 	/*
528 	 * Strip out extension headers.
529 	 */
530 	ASSERT(mp->b_rptr + sizeof (*spmsg) <= mp->b_datap->db_lim);
531 	mp->b_wptr = mp->b_rptr + sizeof (*spmsg);
532 	spmsg->spd_msg_len = SPD_8TO64(sizeof (spd_msg_t));
533 	spmsg->spd_msg_errno = (uint8_t)error;
534 	spmsg->spd_msg_diagnostic = (uint16_t)diagnostic;
535 
536 	qreply(q, mp);
537 }
538 
/* Convenience wrapper: all diagnostics are reported as EINVAL errors. */
static void
spdsock_diag(queue_t *q, mblk_t *mp, int diagnostic)
{
	spdsock_error(q, mp, EINVAL, diagnostic);
}
544 
/* Bounce a (successfully processed) request message back to the sender. */
static void
spd_echo(queue_t *q, mblk_t *mp)
{
	qreply(q, mp);
}
550 
551 /*
552  * Do NOT consume a reference to itp.
553  */
554 /*ARGSUSED*/
static void
spdsock_flush_node(ipsec_tun_pol_t *itp, void *cookie, netstack_t *ns)
{
	/* cookie smuggles a boolean_t through the itp_walk() interface. */
	boolean_t active = (boolean_t)cookie;
	ipsec_policy_head_t *iph;

	iph = active ? itp->itp_policy : itp->itp_inactive;
	/* Take a hold here; spdsock_flush_one() releases one reference. */
	IPPH_REFHOLD(iph);
	mutex_enter(&itp->itp_lock);
	spdsock_flush_one(iph, ns);
	/* Clear the flag set that corresponds to the flushed polhead. */
	if (active)
		itp->itp_flags &= ~ITPF_PFLAGS;
	else
		itp->itp_flags &= ~ITPF_IFLAGS;
	mutex_exit(&itp->itp_lock);
}
571 
572 /*
573  * Clear out one polhead.
574  */
static void
spdsock_flush_one(ipsec_policy_head_t *iph, netstack_t *ns)
{
	/* Drop every rule in the polhead under the writer lock. */
	rw_enter(&iph->iph_lock, RW_WRITER);
	ipsec_polhead_flush(iph, ns);
	rw_exit(&iph->iph_lock);
	/* Consumes the reference the caller holds on iph. */
	IPPH_REFRELE(iph, ns);
}
583 
584 static void
585 spdsock_flush(queue_t *q, ipsec_policy_head_t *iph, ipsec_tun_pol_t *itp,
586     mblk_t *mp)
587 {
588 	boolean_t active;
589 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
590 	netstack_t *ns = ss->spdsock_spds->spds_netstack;
591 
592 	if (iph != ALL_ACTIVE_POLHEADS && iph != ALL_INACTIVE_POLHEADS) {
593 		spdsock_flush_one(iph, ns);
594 		if (audit_active) {
595 			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
596 			cred_t *cr;
597 			pid_t cpid;
598 
599 			cr = msg_getcred(mp, &cpid);
600 			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
601 			audit_pf_policy(SPD_FLUSH, cr, ns,
602 			    ITP_NAME(itp), active, 0, cpid);
603 		}
604 	} else {
605 		active = (iph == ALL_ACTIVE_POLHEADS);
606 
607 		/* First flush the global policy. */
608 		spdsock_flush_one(active ? ipsec_system_policy(ns) :
609 		    ipsec_inactive_policy(ns), ns);
610 		if (audit_active) {
611 			cred_t *cr;
612 			pid_t cpid;
613 
614 			cr = msg_getcred(mp, &cpid);
615 			audit_pf_policy(SPD_FLUSH, cr, ns, NULL,
616 			    active, 0, cpid);
617 		}
618 		/* Then flush every tunnel's appropriate one. */
619 		itp_walk(spdsock_flush_node, (void *)active, ns);
620 		if (audit_active) {
621 			cred_t *cr;
622 			pid_t cpid;
623 
624 			cr = msg_getcred(mp, &cpid);
625 			audit_pf_policy(SPD_FLUSH, cr, ns,
626 			    "all tunnels", active, 0, cpid);
627 		}
628 	}
629 
630 	spd_echo(q, mp);
631 }
632 
/*
 * Build an ipsec_selkey_t from the parsed extension vector.  Returns
 * B_FALSE (setting *diag) on a bad address length or when both IPv4
 * and IPv6 addresses appear in the same rule.
 */
static boolean_t
spdsock_ext_to_sel(spd_ext_t **extv, ipsec_selkey_t *sel, int *diag)
{
	bzero(sel, sizeof (*sel));

	if (extv[SPD_EXT_PROTO] != NULL) {
		struct spd_proto *pr =
		    (struct spd_proto *)extv[SPD_EXT_PROTO];
		sel->ipsl_proto = pr->spd_proto_number;
		sel->ipsl_valid |= IPSL_PROTOCOL;
	}
	if (extv[SPD_EXT_LCLPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_LCLPORT];
		sel->ipsl_lport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_LOCAL_PORT;
	}
	if (extv[SPD_EXT_REMPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_REMPORT];
		sel->ipsl_rport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_REMOTE_PORT;
	}

	if (extv[SPD_EXT_ICMP_TYPECODE] != NULL) {
		struct spd_typecode *tc=
		    (struct spd_typecode *)extv[SPD_EXT_ICMP_TYPECODE];

		sel->ipsl_valid |= IPSL_ICMP_TYPE;
		sel->ipsl_icmp_type = tc->spd_typecode_type;
		/* An inverted range (end < start) collapses to start. */
		if (tc->spd_typecode_type_end < tc->spd_typecode_type)
			sel->ipsl_icmp_type_end = tc->spd_typecode_type;
		else
			sel->ipsl_icmp_type_end = tc->spd_typecode_type_end;

		/* Code 255 apparently means "any code" -- confirm in pfpolicy.h. */
		if (tc->spd_typecode_code != 255) {
			sel->ipsl_valid |= IPSL_ICMP_CODE;
			sel->ipsl_icmp_code = tc->spd_typecode_code;
			if (tc->spd_typecode_code_end < tc->spd_typecode_code)
				sel->ipsl_icmp_code_end = tc->spd_typecode_code;
			else
				sel->ipsl_icmp_code_end =
				    tc->spd_typecode_code_end;
		}
	}
/*
 * Copy one address extension into the selector: validate the extension
 * length against the address family's size, copy the address and prefix
 * length, and record both the selector bit and the address family.
 */
#define	ADDR2SEL(sel, extv, field, pfield, extn, bit)			      \
	if ((extv)[(extn)] != NULL) {					      \
		uint_t addrlen;						      \
		struct spd_address *ap = 				      \
			(struct spd_address *)((extv)[(extn)]); 	      \
		addrlen = (ap->spd_address_af == AF_INET6) ? 		      \
			IPV6_ADDR_LEN : IP_ADDR_LEN;			      \
		if (SPD_64TO8(ap->spd_address_len) < 			      \
			(addrlen + sizeof (*ap))) {			      \
			*diag = SPD_DIAGNOSTIC_BAD_ADDR_LEN;		      \
			return (B_FALSE);				      \
		}							      \
		bcopy((ap+1), &((sel)->field), addrlen);		      \
		(sel)->pfield = ap->spd_address_prefixlen;		      \
		(sel)->ipsl_valid |= (bit);				      \
		(sel)->ipsl_valid |= (ap->spd_address_af == AF_INET6) ?	      \
			IPSL_IPV6 : IPSL_IPV4;				      \
	}

	ADDR2SEL(sel, extv, ipsl_local, ipsl_local_pfxlen,
	    SPD_EXT_LCLADDR, IPSL_LOCAL_ADDR);
	ADDR2SEL(sel, extv, ipsl_remote, ipsl_remote_pfxlen,
	    SPD_EXT_REMADDR, IPSL_REMOTE_ADDR);

	/* Local and remote addresses must agree on the address family. */
	if ((sel->ipsl_valid & (IPSL_IPV6|IPSL_IPV4)) ==
	    (IPSL_IPV6|IPSL_IPV4)) {
		*diag = SPD_DIAGNOSTIC_MIXED_AF;
		return (B_FALSE);
	}

#undef ADDR2SEL

	return (B_TRUE);
}
712 
713 static boolean_t
714 spd_convert_type(uint32_t type, ipsec_act_t *act)
715 {
716 	switch (type) {
717 	case SPD_ACTTYPE_DROP:
718 		act->ipa_type = IPSEC_ACT_DISCARD;
719 		return (B_TRUE);
720 
721 	case SPD_ACTTYPE_PASS:
722 		act->ipa_type = IPSEC_ACT_CLEAR;
723 		return (B_TRUE);
724 
725 	case SPD_ACTTYPE_IPSEC:
726 		act->ipa_type = IPSEC_ACT_APPLY;
727 		return (B_TRUE);
728 	}
729 	return (B_FALSE);
730 }
731 
732 static boolean_t
733 spd_convert_flags(uint32_t flags, ipsec_act_t *act)
734 {
735 	/*
736 	 * Note use of !! for boolean canonicalization.
737 	 */
738 	act->ipa_apply.ipp_use_ah = !!(flags & SPD_APPLY_AH);
739 	act->ipa_apply.ipp_use_esp = !!(flags & SPD_APPLY_ESP);
740 	act->ipa_apply.ipp_use_espa = !!(flags & SPD_APPLY_ESPA);
741 	act->ipa_apply.ipp_use_se = !!(flags & SPD_APPLY_SE);
742 	act->ipa_apply.ipp_use_unique = !!(flags & SPD_APPLY_UNIQUE);
743 	return (B_TRUE);
744 }
745 
/*
 * Reset an action accumulator to its defaults: all-zero, with the
 * key-size ceilings opened up to the maximum.
 */
static void
spdsock_reset_act(ipsec_act_t *act)
{
	bzero(act, sizeof (*act));
	act->ipa_apply.ipp_espe_maxbits = IPSEC_MAX_KEYBITS;
	act->ipa_apply.ipp_espa_maxbits = IPSEC_MAX_KEYBITS;
	act->ipa_apply.ipp_ah_maxbits = IPSEC_MAX_KEYBITS;
}
754 
755 /*
756  * Sanity check action against reality, and shrink-wrap key sizes..
757  */
static boolean_t
spdsock_check_action(ipsec_act_t *act, boolean_t tunnel_polhead, int *diag,
    spd_stack_t *spds)
{
	/* Unique SAs cannot be requested on a tunnel policy head. */
	if (tunnel_polhead && act->ipa_apply.ipp_use_unique) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	/* IPsec apply-flags are only meaningful with an APPLY action. */
	if ((act->ipa_type != IPSEC_ACT_APPLY) &&
	    (act->ipa_apply.ipp_use_ah ||
	    act->ipa_apply.ipp_use_esp ||
	    act->ipa_apply.ipp_use_espa ||
	    act->ipa_apply.ipp_use_se ||
	    act->ipa_apply.ipp_use_unique)) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	/* An APPLY action must request at least one of AH or ESP. */
	if ((act->ipa_type == IPSEC_ACT_APPLY) &&
	    !act->ipa_apply.ipp_use_ah &&
	    !act->ipa_apply.ipp_use_esp) {
		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
		return (B_FALSE);
	}
	/* Defer algorithm and key-size validation to common IPsec code. */
	return (ipsec_check_action(act, diag, spds->spds_netstack));
}
783 
784 /*
785  * We may be short a few error checks here..
786  */
787 static boolean_t
788 spdsock_ext_to_actvec(spd_ext_t **extv, ipsec_act_t **actpp, uint_t *nactp,
789     int *diag, spd_stack_t *spds)
790 {
791 	struct spd_ext_actions *sactp =
792 	    (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
793 	ipsec_act_t act, *actp, *endactp;
794 	struct spd_attribute *attrp, *endattrp;
795 	uint64_t *endp;
796 	int nact;
797 	boolean_t tunnel_polhead;
798 
799 	tunnel_polhead = (extv[SPD_EXT_TUN_NAME] != NULL &&
800 	    (((struct spd_rule *)extv[SPD_EXT_RULE])->spd_rule_flags &
801 	    SPD_RULE_FLAG_TUNNEL));
802 
803 	*actpp = NULL;
804 	*nactp = 0;
805 
806 	if (sactp == NULL) {
807 		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
808 		return (B_FALSE);
809 	}
810 
811 	/*
812 	 * Parse the "action" extension and convert into an action chain.
813 	 */
814 
815 	nact = sactp->spd_actions_count;
816 
817 	endp = (uint64_t *)sactp;
818 	endp += sactp->spd_actions_len;
819 	endattrp = (struct spd_attribute *)endp;
820 
821 	actp = kmem_alloc(sizeof (*actp) * nact, KM_NOSLEEP);
822 	if (actp == NULL) {
823 		*diag = SPD_DIAGNOSTIC_ADD_NO_MEM;
824 		return (B_FALSE);
825 	}
826 	*actpp = actp;
827 	*nactp = nact;
828 	endactp = actp + nact;
829 
830 	spdsock_reset_act(&act);
831 	attrp = (struct spd_attribute *)(&sactp[1]);
832 
833 	for (; attrp < endattrp; attrp++) {
834 		switch (attrp->spd_attr_tag) {
835 		case SPD_ATTR_NOP:
836 			break;
837 
838 		case SPD_ATTR_EMPTY:
839 			spdsock_reset_act(&act);
840 			break;
841 
842 		case SPD_ATTR_END:
843 			attrp = endattrp;
844 			/* FALLTHRU */
845 		case SPD_ATTR_NEXT:
846 			if (actp >= endactp) {
847 				*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
848 				goto fail;
849 			}
850 			if (!spdsock_check_action(&act, tunnel_polhead,
851 			    diag, spds))
852 				goto fail;
853 			*actp++ = act;
854 			spdsock_reset_act(&act);
855 			break;
856 
857 		case SPD_ATTR_TYPE:
858 			if (!spd_convert_type(attrp->spd_attr_value, &act)) {
859 				*diag = SPD_DIAGNOSTIC_ADD_BAD_TYPE;
860 				goto fail;
861 			}
862 			break;
863 
864 		case SPD_ATTR_FLAGS:
865 			if (!tunnel_polhead && extv[SPD_EXT_TUN_NAME] != NULL) {
866 				/*
867 				 * Set "sa unique" for transport-mode
868 				 * tunnels whether we want to or not.
869 				 */
870 				attrp->spd_attr_value |= SPD_APPLY_UNIQUE;
871 			}
872 			if (!spd_convert_flags(attrp->spd_attr_value, &act)) {
873 				*diag = SPD_DIAGNOSTIC_ADD_BAD_FLAGS;
874 				goto fail;
875 			}
876 			break;
877 
878 		case SPD_ATTR_AH_AUTH:
879 			if (attrp->spd_attr_value == 0) {
880 				*diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
881 				goto fail;
882 			}
883 			act.ipa_apply.ipp_auth_alg = attrp->spd_attr_value;
884 			break;
885 
886 		case SPD_ATTR_ESP_ENCR:
887 			if (attrp->spd_attr_value == 0) {
888 				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
889 				goto fail;
890 			}
891 			act.ipa_apply.ipp_encr_alg = attrp->spd_attr_value;
892 			break;
893 
894 		case SPD_ATTR_ESP_AUTH:
895 			if (attrp->spd_attr_value == 0) {
896 				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
897 				goto fail;
898 			}
899 			act.ipa_apply.ipp_esp_auth_alg = attrp->spd_attr_value;
900 			break;
901 
902 		case SPD_ATTR_ENCR_MINBITS:
903 			act.ipa_apply.ipp_espe_minbits = attrp->spd_attr_value;
904 			break;
905 
906 		case SPD_ATTR_ENCR_MAXBITS:
907 			act.ipa_apply.ipp_espe_maxbits = attrp->spd_attr_value;
908 			break;
909 
910 		case SPD_ATTR_AH_MINBITS:
911 			act.ipa_apply.ipp_ah_minbits = attrp->spd_attr_value;
912 			break;
913 
914 		case SPD_ATTR_AH_MAXBITS:
915 			act.ipa_apply.ipp_ah_maxbits = attrp->spd_attr_value;
916 			break;
917 
918 		case SPD_ATTR_ESPA_MINBITS:
919 			act.ipa_apply.ipp_espa_minbits = attrp->spd_attr_value;
920 			break;
921 
922 		case SPD_ATTR_ESPA_MAXBITS:
923 			act.ipa_apply.ipp_espa_maxbits = attrp->spd_attr_value;
924 			break;
925 
926 		case SPD_ATTR_LIFE_SOFT_TIME:
927 		case SPD_ATTR_LIFE_HARD_TIME:
928 		case SPD_ATTR_LIFE_SOFT_BYTES:
929 		case SPD_ATTR_LIFE_HARD_BYTES:
930 			break;
931 
932 		case SPD_ATTR_KM_PROTO:
933 			act.ipa_apply.ipp_km_proto = attrp->spd_attr_value;
934 			break;
935 
936 		case SPD_ATTR_KM_COOKIE:
937 			act.ipa_apply.ipp_km_cookie = attrp->spd_attr_value;
938 			break;
939 
940 		case SPD_ATTR_REPLAY_DEPTH:
941 			act.ipa_apply.ipp_replay_depth = attrp->spd_attr_value;
942 			break;
943 		}
944 	}
945 	if (actp != endactp) {
946 		*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
947 		goto fail;
948 	}
949 
950 	return (B_TRUE);
951 fail:
952 	ipsec_actvec_free(*actpp, nact);
953 	*actpp = NULL;
954 	return (B_FALSE);
955 }
956 
/*
 * Per-rule bookkeeping used while adding a batch of rules: records the
 * created policy and its direction so the caller can track (and, on
 * failure, clean up) everything created so far.
 */
typedef struct
{
	ipsec_policy_t *pol;
	int dir;
} tmprule_t;
962 
static int
mkrule(ipsec_policy_head_t *iph, struct spd_rule *rule,
    ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t af,
    tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
{
	ipsec_policy_t *pol;

	/* Force the selector to exactly the requested address family. */
	sel->ipsl_valid &= ~(IPSL_IPV6|IPSL_IPV4);
	sel->ipsl_valid |= af;

	pol = ipsec_policy_create(sel, actp, nact, rule->spd_rule_priority,
	    index, spds->spds_netstack);
	if (pol == NULL)
		return (ENOMEM);

	/*
	 * Record the new policy in the caller's tmprule list *before*
	 * the check below, so the caller can still clean it up when we
	 * return EEXIST.
	 */
	(*rp)->pol = pol;
	(*rp)->dir = dir;
	(*rp)++;

	if (!ipsec_check_policy(iph, pol, dir))
		return (EEXIST);

	/* Report the assigned index back to the requester's rule. */
	rule->spd_rule_index = pol->ipsp_index;
	return (0);
}
988 
989 static int
990 mkrulepair(ipsec_policy_head_t *iph, struct spd_rule *rule,
991     ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t afs,
992     tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
993 {
994 	int error;
995 
996 	if (afs & IPSL_IPV4) {
997 		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV4, rp,
998 		    index, spds);
999 		if (error != 0)
1000 			return (error);
1001 	}
1002 	if (afs & IPSL_IPV6) {
1003 		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV6, rp,
1004 		    index, spds);
1005 		if (error != 0)
1006 			return (error);
1007 	}
1008 	return (0);
1009 }
1010 
1011 
/*
 * Service an SPD_ADDRULE request against policy head "iph".  The rule
 * extension is expanded into up to four concrete rules (the cross product
 * of requested directions and address families), which are first built
 * into the local "rules" array via mkrulepair() and only entered into the
 * polhead once all of them were created -- so a failure partway through
 * leaves the polhead untouched.  On success the request is echoed back;
 * on failure an error (plus diagnostic) is returned.  "itp", if non-NULL,
 * is the tunnel policy node owning iph; its flags are updated to reflect
 * tunnel/transport mode and per-port security.  Every exit path emits an
 * audit record when auditing is active.
 */
static void
spdsock_addrule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	ipsec_act_t *actp;
	uint_t nact;
	int diag = 0, error, afs;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	tmprule_t rules[4], *rulep = &rules[0];
	boolean_t tunnel_mode, empty_itp, active;
	uint64_t *index = (itp == NULL) ? NULL : &itp->itp_next_policy_index;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;

	/* An ADDRULE request without a rule extension is malformed. */
	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (audit_active) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_ADDRULE, cr,
			    spds->spds_netstack, ITP_NAME(itp), active,
			    SPD_DIAGNOSTIC_NO_RULE_EXT, cpid);
		}
		return;
	}

	tunnel_mode = (rule->spd_rule_flags & SPD_RULE_FLAG_TUNNEL);

	if (itp != NULL) {
		mutex_enter(&itp->itp_lock);
		ASSERT(itp->itp_policy == iph || itp->itp_inactive == iph);
		active = (itp->itp_policy == iph);
		if (ITP_P_ISACTIVE(itp, iph)) {
			/* Check for mix-and-match of tunnel/transport. */
			if ((tunnel_mode && !ITP_P_ISTUNNEL(itp, iph)) ||
			    (!tunnel_mode && ITP_P_ISTUNNEL(itp, iph))) {
				mutex_exit(&itp->itp_lock);
				spdsock_error(q, mp, EBUSY, 0);
				return;
			}
			empty_itp = B_FALSE;
		} else {
			/*
			 * First rule on this polhead: mark it active and
			 * record tunnel mode.  empty_itp lets the failure
			 * paths below undo these flag settings.
			 */
			empty_itp = B_TRUE;
			itp->itp_flags = active ? ITPF_P_ACTIVE : ITPF_I_ACTIVE;
			if (tunnel_mode)
				itp->itp_flags |= active ? ITPF_P_TUNNEL :
				    ITPF_I_TUNNEL;
		}
	} else {
		empty_itp = B_FALSE;
	}

	/* Callers may not pick their own rule indices. */
	if (rule->spd_rule_index != 0) {
		diag = SPD_DIAGNOSTIC_INVALID_RULE_INDEX;
		error = EINVAL;
		goto fail2;
	}

	if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
		error = EINVAL;
		goto fail2;
	}

	if (itp != NULL) {
		if (tunnel_mode) {
			/* Port selectors imply per-port security. */
			if (sel.ipsl_valid &
			    (IPSL_REMOTE_PORT | IPSL_LOCAL_PORT)) {
				itp->itp_flags |= active ?
				    ITPF_P_PER_PORT_SECURITY :
				    ITPF_I_PER_PORT_SECURITY;
			}
		} else {
			/*
			 * For now, we don't allow transport-mode on a tunnel
			 * with ANY specific selectors.  Bail if we have such
			 * a request.
			 */
			if (sel.ipsl_valid & IPSL_WILDCARD) {
				diag = SPD_DIAGNOSTIC_NO_TUNNEL_SELECTORS;
				error = EINVAL;
				goto fail2;
			}
		}
	}

	if (!spdsock_ext_to_actvec(extv, &actp, &nact, &diag, spds)) {
		error = EINVAL;
		goto fail2;
	}
	/*
	 * If no addresses were specified, add both.
	 */
	afs = sel.ipsl_valid & (IPSL_IPV6|IPSL_IPV4);
	if (afs == 0)
		afs = (IPSL_IPV6|IPSL_IPV4);

	rw_enter(&iph->iph_lock, RW_WRITER);

	if (rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_OUTBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	if (rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_INBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	/*
	 * All requested rules were built successfully; commit them to the
	 * polhead while still holding iph_lock as writer.
	 */
	while ((--rulep) >= &rules[0]) {
		ipsec_enter_policy(iph, rulep->pol, rulep->dir,
		    spds->spds_netstack);
	}
	rw_exit(&iph->iph_lock);
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);

	ipsec_actvec_free(actp, nact);
	spd_echo(q, mp);
	if (audit_active) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
		    ITP_NAME(itp), active, 0, cpid);
	}
	return;

fail:
	/* Drop the refs on any rules built before the failure. */
	rw_exit(&iph->iph_lock);
	while ((--rulep) >= &rules[0]) {
		IPPOL_REFRELE(rulep->pol, spds->spds_netstack);
	}
	ipsec_actvec_free(actp, nact);
fail2:
	if (itp != NULL) {
		/* Undo the flags we set on a previously-empty polhead. */
		if (empty_itp)
			itp->itp_flags = 0;
		mutex_exit(&itp->itp_lock);
	}
	spdsock_error(q, mp, error, diag);
	if (audit_active) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, cr, spds->spds_netstack,
		    ITP_NAME(itp), active, error, cpid);
	}
}
1175 
/*
 * Service an SPD_DELETERULE request against policy head "iph".  A nonzero
 * rule index deletes that exact rule; otherwise the selector key from the
 * extensions is matched per requested direction.  ESRCH is returned when
 * nothing matched, EINVAL (with diagnostic) for a bad selector.  When the
 * owning tunnel node "itp" is supplied and the polhead becomes empty, the
 * corresponding active/inactive flag bits are cleared.  Audit records are
 * emitted on both success and failure paths.
 */
void
spdsock_deleterule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	int err, diag = 0;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	/* A DELETERULE request without a rule extension is malformed. */
	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_DELETERULE, cr, ns,
			    ITP_NAME(itp), active, SPD_DIAGNOSTIC_NO_RULE_EXT,
			    cpid);
		}
		return;
	}

	/*
	 * Must enter itp_lock first to avoid deadlock.  See tun.c's
	 * set_sec_simple() for the other case of itp_lock and iph_lock.
	 */
	if (itp != NULL)
		mutex_enter(&itp->itp_lock);

	if (rule->spd_rule_index != 0) {
		/* Delete by explicit rule index. */
		if (ipsec_policy_delete_index(iph, rule->spd_rule_index, ns) !=
		    0) {
			err = ESRCH;
			goto fail;
		}
	} else {
		/* Delete by selector match, per requested direction. */
		if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
			err = EINVAL;	/* diag already set... */
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_INBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_OUTBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}
	}

	if (itp != NULL) {
		ASSERT(iph == itp->itp_policy || iph == itp->itp_inactive);
		rw_enter(&iph->iph_lock, RW_READER);
		/* If the polhead is now empty, clear its flag bits. */
		if (avl_numnodes(&iph->iph_rulebyid) == 0) {
			if (iph == itp->itp_policy)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
		}
		/* Can exit locks in any order. */
		rw_exit(&iph->iph_lock);
		mutex_exit(&itp->itp_lock);
	}
	spd_echo(q, mp);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, 0, cpid);
	}
	return;
fail:
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);
	spdsock_error(q, mp, err, diag);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
		cred_t *cr;
		pid_t cpid;

		cr = msg_getcred(mp, &cpid);
		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, cr, ns, ITP_NAME(itp),
		    active, err, cpid);
	}
}
1277 
1278 /* Do NOT consume a reference to itp. */
1279 /* ARGSUSED */
/*
 * itp_walk() callback: swap one tunnel's active and inactive policy.
 * Flips the ITPF_* active/inactive flag bits and exchanges the contents
 * of itp_policy and itp_inactive, all under itp_lock.  "ignoreme" is the
 * unused walker cookie.  "ns" is passed through to ipsec_swap_policy();
 * the single-tunnel caller (spdsock_flip) passes NULL here -- presumably
 * tolerated by ipsec_swap_policy(), verify.
 */
static void
spdsock_flip_node(ipsec_tun_pol_t *itp, void *ignoreme, netstack_t *ns)
{
	mutex_enter(&itp->itp_lock);
	ITPF_SWAP(itp->itp_flags);
	ipsec_swap_policy(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
}
1288 
/*
 * Service an SPD_FLIP request: exchange active and inactive policy.
 * A NULL tunname flips only the global polheads.  An empty tunnel name
 * flips the global polheads and then walks and flips every tunnel
 * polhead.  A specific tunnel name flips just that tunnel, failing with
 * ESRCH if it does not exist.  Each action is audited when auditing is
 * active, and the request is echoed back on completion.
 */
void
spdsock_flip(queue_t *q, mblk_t *mp, spd_if_t *tunname)
{
	char *tname;
	ipsec_tun_pol_t *itp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (tunname != NULL) {
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* can't fail */
			ipsec_swap_global_policy(ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    NULL, active, 0, cpid);
			}
			/* Empty name means "and all tunnels", too. */
			itp_walk(spdsock_flip_node, NULL, ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    "all tunnels", active, 0, cpid);
			}
		} else {
			itp = get_tunnel_policy(tname, ns);
			if (itp == NULL) {
				/* Better idea for "tunnel not found"? */
				spdsock_error(q, mp, ESRCH, 0);
				if (audit_active) {
					boolean_t active;
					spd_msg_t *spmsg =
					    (spd_msg_t *)mp->b_rptr;
					cred_t *cr;
					pid_t cpid;

					cr = msg_getcred(mp, &cpid);
					active = (spmsg->spd_msg_spdid ==
					    SPD_ACTIVE);
					audit_pf_policy(SPD_FLIP, cr, ns,
					    ITP_NAME(itp), active,
					    ESRCH, cpid);
				}
				return;
			}
			/*
			 * NOTE(review): the netstack argument is NULL here;
			 * assumed acceptable to ipsec_swap_policy() in the
			 * direct-call case -- verify.
			 */
			spdsock_flip_node(itp, NULL, NULL);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, cr, ns,
				    ITP_NAME(itp), active, 0, cpid);
			}
			ITP_REFRELE(itp, ns);
		}
	} else {
		ipsec_swap_global_policy(ns);	/* can't fail */
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
			cred_t *cr;
			pid_t cpid;

			cr = msg_getcred(mp, &cpid);
			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLIP, cr,
			    ns, NULL, active, 0, cpid);
		}
	}
	spd_echo(q, mp);
}
1376 
1377 /*
1378  * Unimplemented feature
1379  */
1380 /* ARGSUSED */
static void
spdsock_lookup(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	/* SPD_LOOKUP is not implemented; reject the request outright. */
	spdsock_error(q, mp, EINVAL, 0);
}
1387 
1388 
1389 static mblk_t *
1390 spdsock_dump_ruleset(mblk_t *req, ipsec_policy_head_t *iph,
1391     uint32_t count, uint16_t error)
1392 {
1393 	size_t len = sizeof (spd_ruleset_ext_t) + sizeof (spd_msg_t);
1394 	spd_msg_t *msg;
1395 	spd_ruleset_ext_t *ruleset;
1396 	mblk_t *m = allocb(len, BPRI_HI);
1397 
1398 	ASSERT(RW_READ_HELD(&iph->iph_lock));
1399 
1400 	if (m == NULL) {
1401 		return (NULL);
1402 	}
1403 	msg = (spd_msg_t *)m->b_rptr;
1404 	ruleset = (spd_ruleset_ext_t *)(&msg[1]);
1405 
1406 	m->b_wptr = (uint8_t *)&ruleset[1];
1407 
1408 	*msg = *(spd_msg_t *)(req->b_rptr);
1409 	msg->spd_msg_len = SPD_8TO64(len);
1410 	msg->spd_msg_errno = error;
1411 
1412 	ruleset->spd_ruleset_len = SPD_8TO64(sizeof (*ruleset));
1413 	ruleset->spd_ruleset_type = SPD_EXT_RULESET;
1414 	ruleset->spd_ruleset_count = count;
1415 	ruleset->spd_ruleset_version = iph->iph_gen;
1416 	return (m);
1417 }
1418 
/*
 * Terminate an in-progress dump: build the closing ruleset record
 * (carrying the total dumped-rule count and "error"), then release the
 * polhead and tunnel-node references held for the dump and free the
 * saved request message.  Returns the closing record, or NULL on
 * allocation failure.
 */
static mblk_t *
spdsock_dump_finish(spdsock_t *ss, int error)
{
	mblk_t *m;
	ipsec_policy_head_t *iph = ss->spdsock_dump_head;
	mblk_t *req = ss->spdsock_dump_req;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	rw_enter(&iph->iph_lock, RW_READER);
	m = spdsock_dump_ruleset(req, iph, ss->spdsock_dump_count, error);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ns);
	/* Drop the tunnel-node ref the dump iterator may still hold. */
	if (ss->spdsock_itp != NULL) {
		ITP_REFRELE(ss->spdsock_itp, ns);
		ss->spdsock_itp = NULL;
	}
	ss->spdsock_dump_req = NULL;
	freemsg(req);

	return (m);
}
1440 
1441 /*
1442  * Rule encoding functions.
1443  * We do a two-pass encode.
1444  * If base != NULL, fill in encoded rule part starting at base+offset.
1445  * Always return "offset" plus length of to-be-encoded data.
1446  */
1447 static uint_t
1448 spdsock_encode_typecode(uint8_t *base, uint_t offset, uint8_t type,
1449     uint8_t type_end, uint8_t code, uint8_t code_end)
1450 {
1451 	struct spd_typecode *tcp;
1452 
1453 	ASSERT(ALIGNED64(offset));
1454 
1455 	if (base != NULL) {
1456 		tcp = (struct spd_typecode *)(base + offset);
1457 		tcp->spd_typecode_len = SPD_8TO64(sizeof (*tcp));
1458 		tcp->spd_typecode_exttype = SPD_EXT_ICMP_TYPECODE;
1459 		tcp->spd_typecode_code = code;
1460 		tcp->spd_typecode_type = type;
1461 		tcp->spd_typecode_type_end = type_end;
1462 		tcp->spd_typecode_code_end = code_end;
1463 	}
1464 	offset += sizeof (*tcp);
1465 
1466 	ASSERT(ALIGNED64(offset));
1467 
1468 	return (offset);
1469 }
1470 
1471 static uint_t
1472 spdsock_encode_proto(uint8_t *base, uint_t offset, uint8_t proto)
1473 {
1474 	struct spd_proto *spp;
1475 
1476 	ASSERT(ALIGNED64(offset));
1477 
1478 	if (base != NULL) {
1479 		spp = (struct spd_proto *)(base + offset);
1480 		spp->spd_proto_len = SPD_8TO64(sizeof (*spp));
1481 		spp->spd_proto_exttype = SPD_EXT_PROTO;
1482 		spp->spd_proto_number = proto;
1483 		spp->spd_proto_reserved1 = 0;
1484 		spp->spd_proto_reserved2 = 0;
1485 	}
1486 	offset += sizeof (*spp);
1487 
1488 	ASSERT(ALIGNED64(offset));
1489 
1490 	return (offset);
1491 }
1492 
1493 static uint_t
1494 spdsock_encode_port(uint8_t *base, uint_t offset, uint16_t ext, uint16_t port)
1495 {
1496 	struct spd_portrange *spp;
1497 
1498 	ASSERT(ALIGNED64(offset));
1499 
1500 	if (base != NULL) {
1501 		spp = (struct spd_portrange *)(base + offset);
1502 		spp->spd_ports_len = SPD_8TO64(sizeof (*spp));
1503 		spp->spd_ports_exttype = ext;
1504 		spp->spd_ports_minport = port;
1505 		spp->spd_ports_maxport = port;
1506 	}
1507 	offset += sizeof (*spp);
1508 
1509 	ASSERT(ALIGNED64(offset));
1510 
1511 	return (offset);
1512 }
1513 
1514 static uint_t
1515 spdsock_encode_addr(uint8_t *base, uint_t offset, uint16_t ext,
1516     const ipsec_selkey_t *sel, const ipsec_addr_t *addr, uint_t pfxlen)
1517 {
1518 	struct spd_address *sae;
1519 	ipsec_addr_t *spdaddr;
1520 	uint_t start = offset;
1521 	uint_t addrlen;
1522 	uint_t af;
1523 
1524 	if (sel->ipsl_valid & IPSL_IPV4) {
1525 		af = AF_INET;
1526 		addrlen = IP_ADDR_LEN;
1527 	} else {
1528 		af = AF_INET6;
1529 		addrlen = IPV6_ADDR_LEN;
1530 	}
1531 
1532 	ASSERT(ALIGNED64(offset));
1533 
1534 	if (base != NULL) {
1535 		sae = (struct spd_address *)(base + offset);
1536 		sae->spd_address_exttype = ext;
1537 		sae->spd_address_af = af;
1538 		sae->spd_address_prefixlen = pfxlen;
1539 		sae->spd_address_reserved2 = 0;
1540 
1541 		spdaddr = (ipsec_addr_t *)(&sae[1]);
1542 		bcopy(addr, spdaddr, addrlen);
1543 	}
1544 	offset += sizeof (*sae);
1545 	addrlen = roundup(addrlen, sizeof (uint64_t));
1546 	offset += addrlen;
1547 
1548 	ASSERT(ALIGNED64(offset));
1549 
1550 	if (base != NULL)
1551 		sae->spd_address_len = SPD_8TO64(offset - start);
1552 	return (offset);
1553 }
1554 
1555 static uint_t
1556 spdsock_encode_sel(uint8_t *base, uint_t offset, const ipsec_sel_t *sel)
1557 {
1558 	const ipsec_selkey_t *selkey = &sel->ipsl_key;
1559 
1560 	if (selkey->ipsl_valid & IPSL_PROTOCOL)
1561 		offset = spdsock_encode_proto(base, offset, selkey->ipsl_proto);
1562 	if (selkey->ipsl_valid & IPSL_LOCAL_PORT)
1563 		offset = spdsock_encode_port(base, offset, SPD_EXT_LCLPORT,
1564 		    selkey->ipsl_lport);
1565 	if (selkey->ipsl_valid & IPSL_REMOTE_PORT)
1566 		offset = spdsock_encode_port(base, offset, SPD_EXT_REMPORT,
1567 		    selkey->ipsl_rport);
1568 	if (selkey->ipsl_valid & IPSL_REMOTE_ADDR)
1569 		offset = spdsock_encode_addr(base, offset, SPD_EXT_REMADDR,
1570 		    selkey, &selkey->ipsl_remote, selkey->ipsl_remote_pfxlen);
1571 	if (selkey->ipsl_valid & IPSL_LOCAL_ADDR)
1572 		offset = spdsock_encode_addr(base, offset, SPD_EXT_LCLADDR,
1573 		    selkey, &selkey->ipsl_local, selkey->ipsl_local_pfxlen);
1574 	if (selkey->ipsl_valid & IPSL_ICMP_TYPE) {
1575 		offset = spdsock_encode_typecode(base, offset,
1576 		    selkey->ipsl_icmp_type, selkey->ipsl_icmp_type_end,
1577 		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1578 		    selkey->ipsl_icmp_code : 255,
1579 		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1580 		    selkey->ipsl_icmp_code_end : 255);
1581 	}
1582 	return (offset);
1583 }
1584 
1585 static uint_t
1586 spdsock_encode_actattr(uint8_t *base, uint_t offset, uint32_t tag,
1587     uint32_t value)
1588 {
1589 	struct spd_attribute *attr;
1590 
1591 	ASSERT(ALIGNED64(offset));
1592 
1593 	if (base != NULL) {
1594 		attr = (struct spd_attribute *)(base + offset);
1595 		attr->spd_attr_tag = tag;
1596 		attr->spd_attr_value = value;
1597 	}
1598 	offset += sizeof (struct spd_attribute);
1599 
1600 	ASSERT(ALIGNED64(offset));
1601 
1602 	return (offset);
1603 }
1604 
1605 
1606 #define	EMIT(t, v) offset = spdsock_encode_actattr(base, offset, (t), (v))
1607 
1608 static uint_t
1609 spdsock_encode_action(uint8_t *base, uint_t offset, const ipsec_action_t *ap)
1610 {
1611 	const struct ipsec_act *act = &(ap->ipa_act);
1612 	uint_t flags;
1613 
1614 	EMIT(SPD_ATTR_EMPTY, 0);
1615 	switch (act->ipa_type) {
1616 	case IPSEC_ACT_DISCARD:
1617 	case IPSEC_ACT_REJECT:
1618 		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_DROP);
1619 		break;
1620 	case IPSEC_ACT_BYPASS:
1621 	case IPSEC_ACT_CLEAR:
1622 		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_PASS);
1623 		break;
1624 
1625 	case IPSEC_ACT_APPLY:
1626 		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_IPSEC);
1627 		flags = 0;
1628 		if (act->ipa_apply.ipp_use_ah)
1629 			flags |= SPD_APPLY_AH;
1630 		if (act->ipa_apply.ipp_use_esp)
1631 			flags |= SPD_APPLY_ESP;
1632 		if (act->ipa_apply.ipp_use_espa)
1633 			flags |= SPD_APPLY_ESPA;
1634 		if (act->ipa_apply.ipp_use_se)
1635 			flags |= SPD_APPLY_SE;
1636 		if (act->ipa_apply.ipp_use_unique)
1637 			flags |= SPD_APPLY_UNIQUE;
1638 		EMIT(SPD_ATTR_FLAGS, flags);
1639 		if (flags & SPD_APPLY_AH) {
1640 			EMIT(SPD_ATTR_AH_AUTH, act->ipa_apply.ipp_auth_alg);
1641 			EMIT(SPD_ATTR_AH_MINBITS,
1642 			    act->ipa_apply.ipp_ah_minbits);
1643 			EMIT(SPD_ATTR_AH_MAXBITS,
1644 			    act->ipa_apply.ipp_ah_maxbits);
1645 		}
1646 		if (flags & SPD_APPLY_ESP) {
1647 			EMIT(SPD_ATTR_ESP_ENCR, act->ipa_apply.ipp_encr_alg);
1648 			EMIT(SPD_ATTR_ENCR_MINBITS,
1649 			    act->ipa_apply.ipp_espe_minbits);
1650 			EMIT(SPD_ATTR_ENCR_MAXBITS,
1651 			    act->ipa_apply.ipp_espe_maxbits);
1652 			if (flags & SPD_APPLY_ESPA) {
1653 				EMIT(SPD_ATTR_ESP_AUTH,
1654 				    act->ipa_apply.ipp_esp_auth_alg);
1655 				EMIT(SPD_ATTR_ESPA_MINBITS,
1656 				    act->ipa_apply.ipp_espa_minbits);
1657 				EMIT(SPD_ATTR_ESPA_MAXBITS,
1658 				    act->ipa_apply.ipp_espa_maxbits);
1659 			}
1660 		}
1661 		if (act->ipa_apply.ipp_km_proto != 0)
1662 			EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_proto);
1663 		if (act->ipa_apply.ipp_km_cookie != 0)
1664 			EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_cookie);
1665 		if (act->ipa_apply.ipp_replay_depth != 0)
1666 			EMIT(SPD_ATTR_REPLAY_DEPTH,
1667 			    act->ipa_apply.ipp_replay_depth);
1668 		/* Add more here */
1669 		break;
1670 	}
1671 
1672 	return (offset);
1673 }
1674 
/*
 * Encode a rule's full action chain as one SPD_EXT_ACTION extension:
 * a spd_ext_actions header, then each action's attribute run (joined by
 * SPD_ATTR_NEXT), terminated by SPD_ATTR_END.  Two-pass: base == NULL
 * just measures.  The header's count and length are backpatched once
 * the end of the chain is known.  Returns the updated offset.
 */
static uint_t
spdsock_encode_action_list(uint8_t *base, uint_t offset,
    const ipsec_action_t *ap)
{
	struct spd_ext_actions *act;
	uint_t nact = 0;
	uint_t start = offset;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		act = (struct spd_ext_actions *)(base + offset);
		act->spd_actions_len = 0;
		act->spd_actions_exttype = SPD_EXT_ACTION;
		act->spd_actions_count = 0;
		act->spd_actions_reserved = 0;
	}

	offset += sizeof (*act);

	ASSERT(ALIGNED64(offset));

	/* Walk the chain, separating consecutive actions with NEXT. */
	while (ap != NULL) {
		offset = spdsock_encode_action(base, offset, ap);
		ap = ap->ipa_next;
		nact++;
		if (ap != NULL) {
			EMIT(SPD_ATTR_NEXT, 0);
		}
	}
	EMIT(SPD_ATTR_END, 0);

	ASSERT(ALIGNED64(offset));

	/* Backpatch count and total length into the header. */
	if (base != NULL) {
		act->spd_actions_count = nact;
		act->spd_actions_len = SPD_8TO64(offset - start);
	}

	return (offset);
}
1716 
1717 #undef EMIT
1718 
1719 /* ARGSUSED */
1720 static uint_t
1721 spdsock_rule_flags(uint_t dir, uint_t af)
1722 {
1723 	uint_t flags = 0;
1724 
1725 	if (dir == IPSEC_TYPE_INBOUND)
1726 		flags |= SPD_RULE_FLAG_INBOUND;
1727 	if (dir == IPSEC_TYPE_OUTBOUND)
1728 		flags |= SPD_RULE_FLAG_OUTBOUND;
1729 
1730 	return (flags);
1731 }
1732 
1733 
/*
 * Encode one policy rule as a self-contained SPD_DUMP message: an
 * spd_msg header, a spd_rule extension, an optional SPD_EXT_TUN_NAME
 * (when the rule came from a tunnel polhead), then the selector and
 * action-list extensions.  Two-pass: with base == NULL nothing is
 * written and only the total length is computed; otherwise the record
 * is written at base+offset.  Returns the updated offset.
 */
static uint_t
spdsock_encode_rule_head(uint8_t *base, uint_t offset, spd_msg_t *req,
    const ipsec_policy_t *rule, uint_t dir, uint_t af, char *name,
    boolean_t tunnel)
{
	struct spd_msg *spmsg;
	struct spd_rule *spr;
	spd_if_t *sid;

	uint_t start = offset;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spmsg = (struct spd_msg *)(base + offset);
		bzero(spmsg, sizeof (*spmsg));
		spmsg->spd_msg_version = PF_POLICY_V1;
		spmsg->spd_msg_type = SPD_DUMP;
		/* Echo sequence and pid so the consumer can match replies. */
		spmsg->spd_msg_seq = req->spd_msg_seq;
		spmsg->spd_msg_pid = req->spd_msg_pid;
	}
	offset += sizeof (struct spd_msg);

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spr = (struct spd_rule *)(base + offset);
		spr->spd_rule_type = SPD_EXT_RULE;
		spr->spd_rule_priority = rule->ipsp_prio;
		spr->spd_rule_flags = spdsock_rule_flags(dir, af);
		if (tunnel)
			spr->spd_rule_flags |= SPD_RULE_FLAG_TUNNEL;
		spr->spd_rule_unused = 0;
		spr->spd_rule_len = SPD_8TO64(sizeof (*spr));
		spr->spd_rule_index = rule->ipsp_index;
	}
	offset += sizeof (struct spd_rule);

	/*
	 * If we have an interface name (i.e. if this policy head came from
	 * a tunnel), add the SPD_EXT_TUN_NAME extension.
	 */
	if (name != NULL) {

		ASSERT(ALIGNED64(offset));

		if (base != NULL) {
			sid = (spd_if_t *)(base + offset);
			sid->spd_if_exttype = SPD_EXT_TUN_NAME;
			/*
			 * NOTE(review): the "- 4" presumably accounts for
			 * name bytes embedded in spd_if_t itself -- confirm
			 * against the spd_if_t definition in pfpolicy.h.
			 */
			sid->spd_if_len = SPD_8TO64(sizeof (spd_if_t) +
			    roundup((strlen(name) - 4), 8));
			(void) strlcpy((char *)sid->spd_if_name, name,
			    LIFNAMSIZ);
		}

		offset += sizeof (spd_if_t) + roundup((strlen(name) - 4), 8);
	}

	offset = spdsock_encode_sel(base, offset, rule->ipsp_sel);
	offset = spdsock_encode_action_list(base, offset, rule->ipsp_act);

	ASSERT(ALIGNED64(offset));

	/* Backpatch the total message length now that it is known. */
	if (base != NULL) {
		spmsg->spd_msg_len = SPD_8TO64(offset - start);
	}
	return (offset);
}
1802 
1803 /* ARGSUSED */
1804 static mblk_t *
1805 spdsock_encode_rule(mblk_t *req, const ipsec_policy_t *rule,
1806     uint_t dir, uint_t af, char *name, boolean_t tunnel)
1807 {
1808 	mblk_t *m;
1809 	uint_t len;
1810 	spd_msg_t *mreq = (spd_msg_t *)req->b_rptr;
1811 
1812 	/*
1813 	 * Figure out how much space we'll need.
1814 	 */
1815 	len = spdsock_encode_rule_head(NULL, 0, mreq, rule, dir, af, name,
1816 	    tunnel);
1817 
1818 	/*
1819 	 * Allocate mblk.
1820 	 */
1821 	m = allocb(len, BPRI_HI);
1822 	if (m == NULL)
1823 		return (NULL);
1824 
1825 	/*
1826 	 * Fill it in..
1827 	 */
1828 	m->b_wptr = m->b_rptr + len;
1829 	bzero(m->b_rptr, len);
1830 	(void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule, dir, af,
1831 	    name, tunnel);
1832 	return (m);
1833 }
1834 
1835 static ipsec_policy_t *
1836 spdsock_dump_next_in_chain(spdsock_t *ss, ipsec_policy_head_t *iph,
1837     ipsec_policy_t *cur)
1838 {
1839 	ASSERT(RW_READ_HELD(&iph->iph_lock));
1840 
1841 	ss->spdsock_dump_count++;
1842 	ss->spdsock_dump_cur_rule = cur->ipsp_hash.hash_next;
1843 	return (cur);
1844 }
1845 
/*
 * Return the next rule of the polhead being dumped and advance the
 * per-socket cursor, or NULL when the polhead is exhausted.  Iteration
 * order: within each policy type, every hash chain, then the non-hashed
 * rules per address family; then on to the next type.  Caller holds
 * iph_lock as reader (asserted).
 */
static ipsec_policy_t *
spdsock_dump_next_rule(spdsock_t *ss, ipsec_policy_head_t *iph)
{
	ipsec_policy_t *cur;
	ipsec_policy_root_t *ipr;
	int chain, nchains, type, af;

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	cur = ss->spdsock_dump_cur_rule;

	/* Still in the middle of a chain from the previous call? */
	if (cur != NULL)
		return (spdsock_dump_next_in_chain(ss, iph, cur));

	type = ss->spdsock_dump_cur_type;

next:
	chain = ss->spdsock_dump_cur_chain;
	ipr = &iph->iph_root[type];
	nchains = ipr->ipr_nchains;

	/* Scan forward for the next non-empty hash chain. */
	while (chain < nchains) {
		cur = ipr->ipr_hash[chain].hash_head;
		chain++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_chain = chain;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}
	ss->spdsock_dump_cur_chain = nchains;

	/* Hash chains done; now the per-AF non-hashed rule lists. */
	af = ss->spdsock_dump_cur_af;
	while (af < IPSEC_NAF) {
		cur = ipr->ipr_nonhash[af];
		af++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_af = af;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}

	/* This type is exhausted; reset the cursor for the next type. */
	type++;
	if (type >= IPSEC_NTYPES)
		return (NULL);

	ss->spdsock_dump_cur_chain = 0;
	ss->spdsock_dump_cur_type = type;
	ss->spdsock_dump_cur_af = IPSEC_AF_V4;
	goto next;

}
1897 
1898 /*
1899  * If we're done with one policy head, but have more to go, we iterate through
1900  * another IPsec tunnel policy head (itp).  Return NULL if it is an error
1901  * worthy of returning EAGAIN via PF_POLICY.
1902  */
static ipsec_tun_pol_t *
spdsock_dump_iterate_next_tunnel(spdsock_t *ss, ipsec_stack_t *ipss)
{
	ipsec_tun_pol_t *itp;

	ASSERT(RW_READ_HELD(&ipss->ipsec_tunnel_policy_lock));
	if (ipss->ipsec_tunnel_policy_gen > ss->spdsock_dump_tun_gen) {
		/* Oops, state of the tunnel polheads changed. */
		itp = NULL;
	} else if (ss->spdsock_itp == NULL) {
		/* Just finished global, find first node. */
		itp = avl_first(&ipss->ipsec_tunnel_policies);
	} else {
		/* We just finished current polhead, find the next one. */
		itp = AVL_NEXT(&ipss->ipsec_tunnel_policies, ss->spdsock_itp);
	}
	/*
	 * Hold the new node before releasing the old one, then record
	 * the new node as the dump's current tunnel.
	 */
	if (itp != NULL) {
		ITP_REFHOLD(itp);
	}
	if (ss->spdsock_itp != NULL) {
		ITP_REFRELE(ss->spdsock_itp, ipss->ipsec_netstack);
	}
	ss->spdsock_itp = itp;
	return (itp);
}
1928 
/*
 * Produce the next encoded rule record of an in-progress dump, or a
 * closing record via spdsock_dump_finish() when the dump ends: EAGAIN
 * if policy changed mid-dump, ENOMEM on allocation failure, 0 on a
 * clean finish.  When the current polhead runs dry and more (tunnel)
 * polheads remain, the dump state is repointed at the next tunnel's
 * polhead and iteration continues.
 */
static mblk_t *
spdsock_dump_next_record(spdsock_t *ss)
{
	ipsec_policy_head_t *iph;
	ipsec_policy_t *rule;
	mblk_t *m;
	ipsec_tun_pol_t *itp;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	iph = ss->spdsock_dump_head;

	ASSERT(iph != NULL);

	rw_enter(&iph->iph_lock, RW_READER);

	/* Polhead changed underneath the dump?  Tell the consumer. */
	if (iph->iph_gen != ss->spdsock_dump_gen) {
		rw_exit(&iph->iph_lock);
		return (spdsock_dump_finish(ss, EAGAIN));
	}

	while ((rule = spdsock_dump_next_rule(ss, iph)) == NULL) {
		rw_exit(&iph->iph_lock);
		if (--(ss->spdsock_dump_remaining_polheads) == 0)
			return (spdsock_dump_finish(ss, 0));


		/*
		 * If we reach here, we have more policy heads (tunnel
		 * entries) to dump.  Let's reset to a new policy head
		 * and get some more rules.
		 *
		 * An empty policy head will have spdsock_dump_next_rule()
		 * return NULL, and we loop (while dropping the number of
		 * remaining polheads).  If we loop to 0, we finish.  We
		 * keep looping until we hit 0 or until we have a rule to
		 * encode.
		 *
		 * NOTE:  No need for ITP_REF*() macros here as we're only
		 * going after and refholding the policy head itself.
		 */
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		itp = spdsock_dump_iterate_next_tunnel(ss, ipss);
		if (itp == NULL) {
			rw_exit(&ipss->ipsec_tunnel_policy_lock);
			return (spdsock_dump_finish(ss, EAGAIN));
		}

		/* Reset other spdsock_dump thingies. */
		IPPH_REFRELE(ss->spdsock_dump_head, ns);
		if (ss->spdsock_dump_active) {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_P_TUNNEL;
			iph = itp->itp_policy;
		} else {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_I_TUNNEL;
			iph = itp->itp_inactive;
		}
		IPPH_REFHOLD(iph);
		rw_exit(&ipss->ipsec_tunnel_policy_lock);

		rw_enter(&iph->iph_lock, RW_READER);
		RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);
	}

	m = spdsock_encode_rule(ss->spdsock_dump_req, rule,
	    ss->spdsock_dump_cur_type, ss->spdsock_dump_cur_af,
	    (ss->spdsock_itp == NULL) ? NULL : ss->spdsock_itp->itp_name,
	    ss->spdsock_dump_tunnel);
	rw_exit(&iph->iph_lock);

	if (m == NULL)
		return (spdsock_dump_finish(ss, ENOMEM));
	return (m);
}
2005 
2006 /*
2007  * Dump records until we run into flow-control back-pressure.
2008  */
static void
spdsock_dump_some(queue_t *q, spdsock_t *ss)
{
	mblk_t *m, *dataind;

	/*
	 * Emit one T_DATA_IND per encoded rule while a dump is in
	 * progress and the downstream queue can accept more messages.
	 */
	while ((ss->spdsock_dump_req != NULL) && canputnext(q)) {
		m = spdsock_dump_next_record(ss);
		if (m == NULL)
			return;
		/*
		 * NOTE(review): sized with T_data_req but used as a
		 * T_data_ind below; this assumes the two structures are
		 * the same size -- confirm against sys/tihdr.h.
		 */
		dataind = allocb(sizeof (struct T_data_req), BPRI_HI);
		if (dataind == NULL) {
			freemsg(m);
			return;
		}
		dataind->b_cont = m;
		dataind->b_wptr += sizeof (struct T_data_req);
		((struct T_data_ind *)dataind->b_rptr)->PRIM_type = T_DATA_IND;
		((struct T_data_ind *)dataind->b_rptr)->MORE_flag = 0;
		dataind->b_datap->db_type = M_PROTO;
		putnext(q, dataind);
	}
}
2031 
2032 /*
2033  * Start dumping.
2034  * Format a start-of-dump record, and set up the stream and kick the rsrv
2035  * procedure to continue the job..
2036  */
/* ARGSUSED */
static void
spdsock_dump(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp)
{
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;
	mblk_t *mr;

	/* spdsock_open() already set spdsock_itp to NULL. */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		/*
		 * Dump the global polhead plus every tunnel polhead;
		 * remember the tunnel-table generation so a mid-dump
		 * change can be detected.
		 */
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		ss->spdsock_dump_remaining_polheads = 1 +
		    avl_numnodes(&ipss->ipsec_tunnel_policies);
		ss->spdsock_dump_tun_gen = ipss->ipsec_tunnel_policy_gen;
		rw_exit(&ipss->ipsec_tunnel_policy_lock);
		if (iph == ALL_ACTIVE_POLHEADS) {
			iph = ipsec_system_policy(ns);
			ss->spdsock_dump_active = B_TRUE;
		} else {
			iph = ipsec_inactive_policy(ns);
			ss->spdsock_dump_active = B_FALSE;
		}
		ASSERT(ss->spdsock_itp == NULL);
	} else {
		ss->spdsock_dump_remaining_polheads = 1;
	}

	rw_enter(&iph->iph_lock, RW_READER);

	/* Send the start-of-dump ruleset record first. */
	mr = spdsock_dump_ruleset(mp, iph, 0, 0);

	if (!mr) {
		rw_exit(&iph->iph_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	/* Stash the request and point the dump cursor at this polhead. */
	ss->spdsock_dump_req = mp;
	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);

	rw_exit(&iph->iph_lock);

	qreply(q, mr);
	/* The read-side service procedure streams the remaining rules. */
	qenable(OTHERQ(q));
}
2083 
2084 /* Do NOT consume a reference to ITP. */
void
spdsock_clone_node(ipsec_tun_pol_t *itp, void *ep, netstack_t *ns)
{
	/*
	 * itp_walk() callback: copy a tunnel's active polhead over its
	 * inactive one (SPD_CLONE semantics) under itp_lock.  "ep"
	 * points to a shared int error accumulator; once any node has
	 * failed, later nodes are skipped so the first error is kept.
	 * Callers must pass the accumulator initialized to 0.
	 */
	int *errptr = (int *)ep;

	if (*errptr != 0)
		return;	/* We've failed already for some reason. */
	mutex_enter(&itp->itp_lock);
	ITPF_CLONE(itp->itp_flags);
	*errptr = ipsec_copy_polhead(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
}
2097 
2098 void
2099 spdsock_clone(queue_t *q, mblk_t *mp, spd_if_t *tunname)
2100 {
2101 	int error;
2102 	char *tname;
2103 	ipsec_tun_pol_t *itp;
2104 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
2105 	netstack_t *ns = ss->spdsock_spds->spds_netstack;
2106 
2107 	if (tunname != NULL) {
2108 		tname = (char *)tunname->spd_if_name;
2109 		if (*tname == '\0') {
2110 			error = ipsec_clone_system_policy(ns);
2111 			if (audit_active) {
2112 				boolean_t active;
2113 				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2114 				cred_t *cr;
2115 				pid_t cpid;
2116 
2117 				cr = msg_getcred(mp, &cpid);
2118 				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2119 				audit_pf_policy(SPD_CLONE, cr, ns,
2120 				    NULL, active, error, cpid);
2121 			}
2122 			if (error == 0) {
2123 				itp_walk(spdsock_clone_node, &error, ns);
2124 				if (audit_active) {
2125 					boolean_t active;
2126 					spd_msg_t *spmsg =
2127 					    (spd_msg_t *)mp->b_rptr;
2128 					cred_t *cr;
2129 					pid_t cpid;
2130 
2131 					cr = msg_getcred(mp, &cpid);
2132 					active = (spmsg->spd_msg_spdid ==
2133 					    SPD_ACTIVE);
2134 					audit_pf_policy(SPD_CLONE, cr,
2135 					    ns, "all tunnels", active, 0,
2136 					    cpid);
2137 				}
2138 			}
2139 		} else {
2140 			itp = get_tunnel_policy(tname, ns);
2141 			if (itp == NULL) {
2142 				spdsock_error(q, mp, ENOENT, 0);
2143 				if (audit_active) {
2144 					boolean_t active;
2145 					spd_msg_t *spmsg =
2146 					    (spd_msg_t *)mp->b_rptr;
2147 					cred_t *cr;
2148 					pid_t cpid;
2149 
2150 					cr = msg_getcred(mp, &cpid);
2151 					active = (spmsg->spd_msg_spdid ==
2152 					    SPD_ACTIVE);
2153 					audit_pf_policy(SPD_CLONE, cr,
2154 					    ns, NULL, active, ENOENT, cpid);
2155 				}
2156 				return;
2157 			}
2158 			spdsock_clone_node(itp, &error, NULL);
2159 			if (audit_active) {
2160 				boolean_t active;
2161 				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2162 				cred_t *cr;
2163 				pid_t cpid;
2164 
2165 				cr = msg_getcred(mp, &cpid);
2166 				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2167 				audit_pf_policy(SPD_CLONE, cr, ns,
2168 				    ITP_NAME(itp), active, error, cpid);
2169 			}
2170 			ITP_REFRELE(itp, ns);
2171 		}
2172 	} else {
2173 		error = ipsec_clone_system_policy(ns);
2174 		if (audit_active) {
2175 			boolean_t active;
2176 			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2177 			cred_t *cr;
2178 			pid_t cpid;
2179 
2180 			cr = msg_getcred(mp, &cpid);
2181 			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2182 			audit_pf_policy(SPD_CLONE, cr, ns, NULL,
2183 			    active, error, cpid);
2184 		}
2185 	}
2186 
2187 	if (error != 0)
2188 		spdsock_error(q, mp, error, 0);
2189 	else
2190 		spd_echo(q, mp);
2191 }
2192 
2193 /*
2194  * Process a SPD_ALGLIST request. The caller expects separate alg entries
2195  * for AH authentication, ESP authentication, and ESP encryption.
2196  * The same distinction is then used when setting the min and max key
2197  * sizes when defining policies.
2198  */
2199 
#define	SPDSOCK_AH_AUTH		0
#define	SPDSOCK_ESP_AUTH	1
#define	SPDSOCK_ESP_ENCR	2
#define	SPDSOCK_NTYPES		3

/*
 * Lookup tables mapping the SPDSOCK_* usage types above to the
 * corresponding SPD_ATTR_* tags emitted in SPD_ALGLIST replies.
 */
static const uint_t algattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_AUTH,
	SPD_ATTR_ESP_AUTH,
	SPD_ATTR_ESP_ENCR
};
/* Minimum key/auth size tags, per usage type. */
static const uint_t minbitsattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_MINBITS,
	SPD_ATTR_ESPA_MINBITS,
	SPD_ATTR_ENCR_MINBITS
};
/* Maximum key/auth size tags, per usage type. */
static const uint_t maxbitsattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_MAXBITS,
	SPD_ATTR_ESPA_MAXBITS,
	SPD_ATTR_ENCR_MAXBITS
};
/* Default key size tags, per usage type. */
static const uint_t defbitsattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_DEFBITS,
	SPD_ATTR_ESPA_DEFBITS,
	SPD_ATTR_ENCR_DEFBITS
};
/* Key size increment tags, per usage type. */
static const uint_t incrbitsattr[SPDSOCK_NTYPES] = {
	SPD_ATTR_AH_INCRBITS,
	SPD_ATTR_ESPA_INCRBITS,
	SPD_ATTR_ENCR_INCRBITS
};

#define	ATTRPERALG	6	/* fixed attributes per algs */
2232 
/*
 * Handle an SPD_ALGLIST request: reply with a single SPD_EXT_ACTION
 * extension enumerating every registered algorithm, with separate entries
 * for AH auth, ESP auth, and ESP encryption usages, each carrying its
 * min/max/default/increment key-size attributes.  Consumes "mp".
 */
void
spdsock_alglist(queue_t *q, mblk_t *mp)
{
	uint_t algtype;
	uint_t algidx;
	uint_t algcount;
	uint_t size;
	mblk_t *m;
	uint8_t *cur;
	spd_msg_t *msg;
	struct spd_ext_actions *act;
	struct spd_attribute *attr;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	/* Hold the alg lock across both the sizing pass and the emit pass. */
	mutex_enter(&ipss->ipsec_alg_lock);
	/*
	 * The SPD client expects to receive separate entries for
	 * AH authentication and ESP authentication supported algorithms.
	 *
	 * Don't return the "any" algorithms, if defined, as no
	 * kernel policies can be set for these algorithms.
	 */
	algcount = 2 * ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];

	if (ipss->ipsec_alglists[IPSEC_ALG_AUTH][SADB_AALG_NONE] != NULL)
		algcount--;
	if (ipss->ipsec_alglists[IPSEC_ALG_ENCR][SADB_EALG_NONE] != NULL)
		algcount--;

	/*
	 * For each algorithm, we encode:
	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
	 */

	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions) +
	    ATTRPERALG * sizeof (struct spd_attribute) * algcount;

	/* PF_POLICY messages are expressed in 64-bit units. */
	ASSERT(ALIGNED64(size));

	m = allocb(size, BPRI_HI);
	if (m == NULL) {
		mutex_exit(&ipss->ipsec_alg_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	m->b_wptr = m->b_rptr + size;
	cur = m->b_rptr;

	/* Echo the request header back, then overwrite length/status. */
	msg = (spd_msg_t *)cur;
	bcopy(mp->b_rptr, cur, sizeof (*msg));

	msg->spd_msg_len = SPD_8TO64(size);
	msg->spd_msg_errno = 0;
	msg->spd_msg_diagnostic = 0;

	cur += sizeof (*msg);

	act = (struct spd_ext_actions *)cur;
	cur += sizeof (*act);

	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
	act->spd_actions_exttype = SPD_EXT_ACTION;
	act->spd_actions_count = algcount;
	act->spd_actions_reserved = 0;

	attr = (struct spd_attribute *)cur;

#define	EMIT(tag, value) {					\
		attr->spd_attr_tag = (tag); 			\
		attr->spd_attr_value = (value); 		\
		attr++;			  			\
	}

	/*
	 * If you change the number of EMIT's here, change
	 * ATTRPERALG above to match
	 */
#define	EMITALGATTRS(_type) {					\
		EMIT(algattr[_type], algid); 		/* 1 */	\
		EMIT(minbitsattr[_type], minbits);	/* 2 */	\
		EMIT(maxbitsattr[_type], maxbits);	/* 3 */	\
		EMIT(defbitsattr[_type], defbits);	/* 4 */	\
		EMIT(incrbitsattr[_type], incr);	/* 5 */	\
		EMIT(SPD_ATTR_NEXT, 0);			/* 6 */	\
	}

	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {
			int algid = ipss->ipsec_sortlist[algtype][algidx];
			ipsec_alginfo_t *alg =
			    ipss->ipsec_alglists[algtype][algid];
			uint_t minbits = alg->alg_minbits;
			uint_t maxbits = alg->alg_maxbits;
			uint_t defbits = alg->alg_default_bits;
			uint_t incr = alg->alg_increment;

			if (algtype == IPSEC_ALG_AUTH) {
				/* "NONE" algs were excluded from the count. */
				if (algid == SADB_AALG_NONE)
					continue;
				/* Auth algs serve both AH and ESP auth. */
				EMITALGATTRS(SPDSOCK_AH_AUTH);
				EMITALGATTRS(SPDSOCK_ESP_AUTH);
			} else {
				if (algid == SADB_EALG_NONE)
					continue;
				ASSERT(algtype == IPSEC_ALG_ENCR);
				EMITALGATTRS(SPDSOCK_ESP_ENCR);
			}
		}
	}

	mutex_exit(&ipss->ipsec_alg_lock);

#undef EMITALGATTRS
#undef EMIT
#undef ATTRPERALG

	/*
	 * Turn the last attribute's NEXT tag into the terminating END.
	 * NOTE(review): if algcount were 0, this "attr--" would back up
	 * into the spd_ext_actions header; spdsock_dumpalgs() guards the
	 * zero-algorithm case explicitly but this function does not --
	 * presumably it relies on at least one algorithm being registered.
	 * TODO confirm.
	 */
	attr--;
	attr->spd_attr_tag = SPD_ATTR_END;

	freemsg(mp);
	qreply(q, m);
}
2359 
2360 /*
2361  * Process a SPD_DUMPALGS request.
2362  */
2363 
#define	ATTRPERALG	9	/* fixed attributes per algs */

/*
 * Handle an SPD_DUMPALGS request: reply with a single SPD_EXT_ACTION
 * extension carrying the full description of every registered algorithm
 * (id, protocol, key/block sizes, params, mechanism name, flags).
 * Consumes "mp".
 */
void
spdsock_dumpalgs(queue_t *q, mblk_t *mp)
{
	uint_t algtype;
	uint_t algidx;
	uint_t size;
	mblk_t *m;
	uint8_t *cur;
	spd_msg_t *msg;
	struct spd_ext_actions *act;
	struct spd_attribute *attr;
	ipsec_alginfo_t *alg;
	uint_t algid;
	uint_t i;
	uint_t alg_size;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	/* Hold the alg lock across both the sizing pass and the emit pass. */
	mutex_enter(&ipss->ipsec_alg_lock);

	/*
	 * For each algorithm, we encode:
	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
	 *
	 * ALG_ID / ALG_PROTO / ALG_INCRBITS / ALG_NKEYSIZES / ALG_KEYSIZE*
	 * ALG_NBLOCKSIZES / ALG_BLOCKSIZE* / ALG_NPARAMS / ALG_PARAMS* /
	 * ALG_MECHNAME / ALG_FLAGS / {END, NEXT}
	 */

	/*
	 * Compute the size of the SPD message.
	 */
	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions);

	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {
			algid = ipss->ipsec_sortlist[algtype][algidx];
			alg = ipss->ipsec_alglists[algtype][algid];
			/*
			 * Fixed attributes, plus one attribute per key
			 * size, block size, and parameter, plus the inline
			 * mechanism-name payload.
			 */
			alg_size = sizeof (struct spd_attribute) *
			    (ATTRPERALG + alg->alg_nkey_sizes +
			    alg->alg_nblock_sizes + alg->alg_nparams) +
			    CRYPTO_MAX_MECH_NAME;
			size += alg_size;
		}
	}

	ASSERT(ALIGNED64(size));

	m = allocb(size, BPRI_HI);
	if (m == NULL) {
		mutex_exit(&ipss->ipsec_alg_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	m->b_wptr = m->b_rptr + size;
	cur = m->b_rptr;

	/* Echo the request header back, then overwrite length/status. */
	msg = (spd_msg_t *)cur;
	bcopy(mp->b_rptr, cur, sizeof (*msg));

	msg->spd_msg_len = SPD_8TO64(size);
	msg->spd_msg_errno = 0;
	/* The reply is tagged SPD_ALGLIST, not SPD_DUMPALGS. */
	msg->spd_msg_type = SPD_ALGLIST;

	msg->spd_msg_diagnostic = 0;

	cur += sizeof (*msg);

	act = (struct spd_ext_actions *)cur;
	cur += sizeof (*act);

	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
	act->spd_actions_exttype = SPD_EXT_ACTION;
	act->spd_actions_count = ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];
	act->spd_actions_reserved = 0;

	/*
	 * If there aren't any algorithms registered, return an empty message.
	 * spdsock_get_ext() knows how to deal with this.
	 */
	if (act->spd_actions_count == 0) {
		act->spd_actions_len = 0;
		mutex_exit(&ipss->ipsec_alg_lock);
		goto error;
	}

	attr = (struct spd_attribute *)cur;

#define	EMIT(tag, value) {					\
		attr->spd_attr_tag = (tag); 			\
		attr->spd_attr_value = (value); 		\
		attr++;			  			\
	}

	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {

			algid = ipss->ipsec_sortlist[algtype][algidx];
			alg = ipss->ipsec_alglists[algtype][algid];

			/*
			 * If you change the number of EMIT's here, change
			 * ATTRPERALG above to match
			 */
			EMIT(SPD_ATTR_ALG_ID, algid);
			EMIT(SPD_ATTR_ALG_PROTO, algproto[algtype]);
			EMIT(SPD_ATTR_ALG_INCRBITS, alg->alg_increment);
			EMIT(SPD_ATTR_ALG_NKEYSIZES, alg->alg_nkey_sizes);
			for (i = 0; i < alg->alg_nkey_sizes; i++)
				EMIT(SPD_ATTR_ALG_KEYSIZE,
				    alg->alg_key_sizes[i]);

			EMIT(SPD_ATTR_ALG_NBLOCKSIZES, alg->alg_nblock_sizes);
			for (i = 0; i < alg->alg_nblock_sizes; i++)
				EMIT(SPD_ATTR_ALG_BLOCKSIZE,
				    alg->alg_block_sizes[i]);

			EMIT(SPD_ATTR_ALG_NPARAMS, alg->alg_nparams);
			for (i = 0; i < alg->alg_nparams; i++)
				EMIT(SPD_ATTR_ALG_PARAMS,
				    alg->alg_params[i]);

			EMIT(SPD_ATTR_ALG_FLAGS, alg->alg_flags);

			/*
			 * MECHNAME's value is the payload length; the name
			 * itself follows inline, occupying the space of
			 * CRYPTO_MAX_MECH_NAME bytes worth of attributes.
			 */
			EMIT(SPD_ATTR_ALG_MECHNAME, CRYPTO_MAX_MECH_NAME);
			bcopy(alg->alg_mech_name, attr, CRYPTO_MAX_MECH_NAME);
			attr = (struct spd_attribute *)((char *)attr +
			    CRYPTO_MAX_MECH_NAME);

			EMIT(SPD_ATTR_NEXT, 0);
		}
	}

	mutex_exit(&ipss->ipsec_alg_lock);

#undef EMITALGATTRS
#undef EMIT
#undef ATTRPERALG

	/* Turn the last attribute's NEXT tag into the terminating END. */
	attr--;
	attr->spd_attr_tag = SPD_ATTR_END;

error:
	freemsg(mp);
	qreply(q, m);
}
2516 
2517 /*
2518  * Do the actual work of processing an SPD_UPDATEALGS request. Can
2519  * be invoked either once IPsec is loaded on a cached request, or
2520  * when a request is received while IPsec is loaded.
2521  */
static void
spdsock_do_updatealg(spd_ext_t *extv[], int *diag, spd_stack_t *spds)
{
	struct spd_ext_actions *actp;
	struct spd_attribute *attr, *endattr;
	uint64_t *start, *end;
	ipsec_alginfo_t *alg = NULL;
	ipsec_algtype_t alg_type = 0;
	boolean_t skip_alg = B_TRUE, doing_proto = B_FALSE;
	uint_t i, cur_key, cur_block, algid;

	/* *diag == -1 signals success to the caller. */
	*diag = -1;
	ASSERT(MUTEX_HELD(&spds->spds_alg_lock));

	/* parse the message, building the list of algorithms */

	actp = (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
	if (actp == NULL) {
		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
		return;
	}

	/* Attribute stream runs from just past the header to the ext end. */
	start = (uint64_t *)actp;
	end = (start + actp->spd_actions_len);
	endattr = (struct spd_attribute *)end;
	attr = (struct spd_attribute *)&actp[1];

	/* Staging table; entries are installed by spdsock_merge_algs(). */
	bzero(spds->spds_algs, IPSEC_NALGTYPES * IPSEC_MAX_ALGS *
	    sizeof (ipsec_alginfo_t *));

	alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);

#define	ALG_KEY_SIZES(a)   (((a)->alg_nkey_sizes + 1) * sizeof (uint16_t))
#define	ALG_BLOCK_SIZES(a) (((a)->alg_nblock_sizes + 1) * sizeof (uint16_t))

	while (attr < endattr) {
		switch (attr->spd_attr_tag) {
		case SPD_ATTR_NOP:
		case SPD_ATTR_EMPTY:
			break;
		case SPD_ATTR_END:
			/* Force loop exit, then finish the current alg. */
			attr = endattr;
			/* FALLTHRU */
		case SPD_ATTR_NEXT:
			/*
			 * NEXT/END terminate either a protocol description
			 * or an algorithm description; commit the alg to
			 * the staging table unless its protocol was
			 * unrecognized (skip_alg).
			 */
			if (doing_proto) {
				doing_proto = B_FALSE;
				break;
			}
			if (skip_alg) {
				ipsec_alg_free(alg);
			} else {
				ipsec_alg_free(
				    spds->spds_algs[alg_type][alg->alg_id]);
				spds->spds_algs[alg_type][alg->alg_id] =
				    alg;
			}
			alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
			break;

		case SPD_ATTR_ALG_ID:
			if (attr->spd_attr_value >= IPSEC_MAX_ALGS) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "invalid alg id %d\n",
				    attr->spd_attr_value));
				*diag = SPD_DIAGNOSTIC_ALG_ID_RANGE;
				goto bail;
			}
			alg->alg_id = attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_PROTO:
			/* find the alg type */
			for (i = 0; i < NALGPROTOS; i++)
				if (algproto[i] == attr->spd_attr_value)
					break;
			skip_alg = (i == NALGPROTOS);
			if (!skip_alg)
				alg_type = i;
			break;

		case SPD_ATTR_ALG_INCRBITS:
			alg->alg_increment = attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_NKEYSIZES:
			/* Replace any previously supplied key-size array. */
			if (alg->alg_key_sizes != NULL) {
				kmem_free(alg->alg_key_sizes,
				    ALG_KEY_SIZES(alg));
			}
			alg->alg_nkey_sizes = attr->spd_attr_value;
			/*
			 * Allocate room for the trailing zero key size
			 * value as well.
			 */
			alg->alg_key_sizes = kmem_zalloc(ALG_KEY_SIZES(alg),
			    KM_SLEEP);
			cur_key = 0;
			break;

		case SPD_ATTR_ALG_KEYSIZE:
			if (alg->alg_key_sizes == NULL ||
			    cur_key >= alg->alg_nkey_sizes) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "too many key sizes\n"));
				*diag = SPD_DIAGNOSTIC_ALG_NUM_KEY_SIZES;
				goto bail;
			}
			alg->alg_key_sizes[cur_key++] = attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_FLAGS:
			/*
			 * Flags (bit mask). The alg_flags element of
			 * ipsecalg_flags_t is only 8 bits wide. The
			 * user can set the VALID bit, but we will ignore it
			 * and make the decision is the algorithm is valid.
			 */
			alg->alg_flags |= (uint8_t)attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_NBLOCKSIZES:
			/* Replace any previously supplied block-size array. */
			if (alg->alg_block_sizes != NULL) {
				kmem_free(alg->alg_block_sizes,
				    ALG_BLOCK_SIZES(alg));
			}
			alg->alg_nblock_sizes = attr->spd_attr_value;
			/*
			 * Allocate room for the trailing zero block size
			 * value as well.
			 */
			alg->alg_block_sizes = kmem_zalloc(ALG_BLOCK_SIZES(alg),
			    KM_SLEEP);
			cur_block = 0;
			break;

		case SPD_ATTR_ALG_BLOCKSIZE:
			if (alg->alg_block_sizes == NULL ||
			    cur_block >= alg->alg_nblock_sizes) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "too many block sizes\n"));
				*diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
				goto bail;
			}
			alg->alg_block_sizes[cur_block++] =
			    attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_NPARAMS:
			/*
			 * NOTE(review): this case sizes both the free and
			 * the allocation of alg_params with
			 * ALG_BLOCK_SIZES() -- i.e. by alg_nblock_sizes,
			 * not alg_nparams -- while the bound check in
			 * SPD_ATTR_ALG_PARAMS below uses alg_nparams.  If
			 * nparams and nblock_sizes can differ, the free
			 * size may mismatch the allocation and the params
			 * array may overflow.  TODO confirm whether callers
			 * guarantee these counts match.
			 */
			if (alg->alg_params != NULL) {
				kmem_free(alg->alg_params,
				    ALG_BLOCK_SIZES(alg));
			}
			alg->alg_nparams = attr->spd_attr_value;
			/*
			 * Allocate room for the trailing zero parameter
			 * value as well.
			 */
			alg->alg_params = kmem_zalloc(ALG_BLOCK_SIZES(alg),
			    KM_SLEEP);
			cur_block = 0;
			break;

		case SPD_ATTR_ALG_PARAMS:
			if (alg->alg_params == NULL ||
			    cur_block >= alg->alg_nparams) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "too many params\n"));
				*diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
				goto bail;
			}
			/*
			 * Array contains: iv_len, icv_len, salt_len
			 * Any additional parameters are currently ignored.
			 */
			alg->alg_params[cur_block++] =
			    attr->spd_attr_value;
			break;

		case SPD_ATTR_ALG_MECHNAME: {
			char *mech_name;

			if (attr->spd_attr_value > CRYPTO_MAX_MECH_NAME) {
				ss1dbg(spds, ("spdsock_do_updatealg: "
				    "mech name too long\n"));
				*diag = SPD_DIAGNOSTIC_ALG_MECH_NAME_LEN;
				goto bail;
			}
			/* The name follows the attribute inline. */
			mech_name = (char *)(attr + 1);
			bcopy(mech_name, alg->alg_mech_name,
			    attr->spd_attr_value);
			alg->alg_mech_name[CRYPTO_MAX_MECH_NAME-1] = '\0';
			/* Skip over the inline payload. */
			attr = (struct spd_attribute *)((char *)attr +
			    attr->spd_attr_value);
			break;
		}

		case SPD_ATTR_PROTO_ID:
			doing_proto = B_TRUE;
			for (i = 0; i < NALGPROTOS; i++) {
				if (algproto[i] == attr->spd_attr_value) {
					alg_type = i;
					break;
				}
			}
			break;

		case SPD_ATTR_PROTO_EXEC_MODE:
			if (!doing_proto)
				break;
			for (i = 0; i < NEXECMODES; i++) {
				if (execmodes[i] == attr->spd_attr_value) {
					spds->spds_algs_exec_mode[alg_type] = i;
					break;
				}
			}
			break;
		}
		attr++;
	}

#undef	ALG_KEY_SIZES
#undef	ALG_BLOCK_SIZES

	/* update the algorithm tables */
	spdsock_merge_algs(spds);
bail:
	/* cleanup */
	ipsec_alg_free(alg);
	for (alg_type = 0; alg_type < IPSEC_NALGTYPES; alg_type++)
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++)
		if (spds->spds_algs[alg_type][algid] != NULL)
			ipsec_alg_free(spds->spds_algs[alg_type][algid]);
}
2755 
2756 /*
2757  * Process an SPD_UPDATEALGS request. If IPsec is not loaded, queue
2758  * the request until IPsec loads. If IPsec is loaded, act on it
2759  * immediately.
2760  */
2761 
static void
spdsock_updatealg(queue_t *q, mblk_t *mp, spd_ext_t *extv[])
{
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;
	ipsec_stack_t	*ipss = spds->spds_netstack->netstack_ipsec;

	if (!ipsec_loaded(ipss)) {
		/*
		 * IPsec is not loaded, save request and return nicely,
		 * the message will be processed once IPsec loads.
		 */
		mblk_t *new_mp;

		/* last update message wins */
		if ((new_mp = copymsg(mp)) == NULL) {
			spdsock_error(q, mp, ENOMEM, 0);
			return;
		}
		/*
		 * Stash the original mp (the extv pointers reference its
		 * data) and echo the copy back to the client; any earlier
		 * pending update is discarded.
		 */
		mutex_enter(&spds->spds_alg_lock);
		bcopy(extv, spds->spds_extv_algs,
		    sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));
		if (spds->spds_mp_algs != NULL)
			freemsg(spds->spds_mp_algs);
		spds->spds_mp_algs = mp;
		spds->spds_algs_pending = B_TRUE;
		mutex_exit(&spds->spds_alg_lock);
		if (audit_active) {
			cred_t *cr;
			pid_t cpid;

			/* EAGAIN records that the update was deferred. */
			cr = msg_getcred(mp, &cpid);
			audit_pf_policy(SPD_UPDATEALGS, cr,
			    spds->spds_netstack, NULL, B_TRUE, EAGAIN,
			    cpid);
		}
		spd_echo(q, new_mp);
	} else {
		/*
		 * IPsec is loaded, act on the message immediately.
		 */
		int diag;

		mutex_enter(&spds->spds_alg_lock);
		spdsock_do_updatealg(extv, &diag, spds);
		mutex_exit(&spds->spds_alg_lock);
		/* diag == -1 means spdsock_do_updatealg() succeeded. */
		if (diag == -1) {
			spd_echo(q, mp);
			if (audit_active) {
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				audit_pf_policy(SPD_UPDATEALGS, cr,
				    spds->spds_netstack, NULL, B_TRUE, 0,
				    cpid);
			}
		} else {
			spdsock_diag(q, mp, diag);
			if (audit_active) {
				cred_t *cr;
				pid_t cpid;

				cr = msg_getcred(mp, &cpid);
				audit_pf_policy(SPD_UPDATEALGS, cr,
				    spds->spds_netstack, NULL, B_TRUE, diag,
				    cpid);
			}
		}
	}
}
2833 
2834 /*
2835  * Sort through the mess of polhead options to retrieve an appropriate one.
2836  * Returns NULL if we send an spdsock error.  Returns a valid pointer if we
2837  * found a valid polhead.  Returns ALL_ACTIVE_POLHEADS (aka. -1) or
2838  * ALL_INACTIVE_POLHEADS (aka. -2) if the operation calls for the operation to
2839  * act on ALL policy heads.
2840  */
static ipsec_policy_head_t *
get_appropriate_polhead(queue_t *q, mblk_t *mp, spd_if_t *tunname, int spdid,
    int msgtype, ipsec_tun_pol_t **itpp)
{
	ipsec_tun_pol_t *itp;
	ipsec_policy_head_t *iph;
	int errno;
	char *tname;
	boolean_t active;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	uint64_t gen;	/* Placeholder */
	datalink_id_t linkid;

	/* Only SPD_ACTIVE and SPD_STANDBY are legal polhead selectors. */
	active = (spdid == SPD_ACTIVE);
	*itpp = NULL;
	if (!active && spdid != SPD_STANDBY) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_SPDID);
		return (NULL);
	}

	if (tunname != NULL) {
		/* Acting on a tunnel's SPD. */
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* Handle all-polhead cases here. */
			if (msgtype != SPD_FLUSH && msgtype != SPD_DUMP) {
				spdsock_diag(q, mp,
				    SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
				return (NULL);
			}
			/* Sentinel values, not dereferenceable pointers. */
			return (active ? ALL_ACTIVE_POLHEADS :
			    ALL_INACTIVE_POLHEADS);
		}

		itp = get_tunnel_policy(tname, ns);
		if (itp == NULL) {
			if (msgtype != SPD_ADDRULE) {
				/* "Tunnel not found" */
				spdsock_error(q, mp, ENOENT, 0);
				return (NULL);
			}

			/* SPD_ADDRULE creates the tunnel polhead on demand. */
			errno = 0;
			itp = create_tunnel_policy(tname, &errno, &gen, ns);
			if (itp == NULL) {
				/*
				 * Something very bad happened, most likely
				 * ENOMEM.  Return an indicator.
				 */
				spdsock_error(q, mp, errno, 0);
				return (NULL);
			}
		}
		/*
		 * Troll the plumbed tunnels and see if we have a match.  We
		 * need to do this always in case we add policy AFTER plumbing
		 * a tunnel.
		 */
		if (dls_mgmt_get_linkid(tname, &linkid) == 0)
			iptun_set_policy(linkid, itp);

		*itpp = itp;
		/* For spdsock dump state, set the polhead's name. */
		if (msgtype == SPD_DUMP) {
			/* Extra hold for the stream's dump state. */
			ITP_REFHOLD(itp);
			ss->spdsock_itp = itp;
			ss->spdsock_dump_tunnel = itp->itp_flags &
			    (active ? ITPF_P_TUNNEL : ITPF_I_TUNNEL);
		}
	} else {
		itp = NULL;
		/* For spdsock dump state, indicate it's global policy. */
		if (msgtype == SPD_DUMP)
			ss->spdsock_itp = NULL;
	}

	/* Select active vs. inactive, global vs. per-tunnel. */
	if (active)
		iph = (itp == NULL) ? ipsec_system_policy(ns) : itp->itp_policy;
	else
		iph = (itp == NULL) ? ipsec_inactive_policy(ns) :
		    itp->itp_inactive;

	ASSERT(iph != NULL);
	/* System polheads come pre-held; tunnel polheads need a hold. */
	if (itp != NULL) {
		IPPH_REFHOLD(iph);
	}

	return (iph);
}
2931 
/*
 * Top-level dispatcher for inbound PF_POLICY messages: validate the
 * message length and extensions, then route the request to the handler
 * appropriate for how many policy heads it operates on.  Consumes "mp"
 * (directly or via the handler it dispatches to).
 */
static void
spdsock_parse(queue_t *q, mblk_t *mp)
{
	spd_msg_t *spmsg;
	spd_ext_t *extv[SPD_EXT_MAX + 1];
	uint_t msgsize;
	ipsec_policy_head_t *iph;
	ipsec_tun_pol_t *itp;
	spd_if_t *tunname;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	netstack_t *ns = spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	/* Make sure nothing's below me. */
	ASSERT(WR(q)->q_next == NULL);

	spmsg = (spd_msg_t *)mp->b_rptr;

	msgsize = SPD_64TO8(spmsg->spd_msg_len);

	if (msgdsize(mp) != msgsize) {
		/*
		 * Message len incorrect w.r.t. actual size.  Send an error
		 * (EMSGSIZE).	It may be necessary to massage things a
		 * bit.	 For example, if the spd_msg_type is hosed,
		 * I need to set it to SPD_RESERVED to get delivery to
		 * do the right thing.	Then again, maybe just letting
		 * the error delivery do the right thing.
		 */
		ss2dbg(spds,
		    ("mblk (%lu) and base (%d) message sizes don't jibe.\n",
		    msgdsize(mp), msgsize));
		spdsock_error(q, mp, EMSGSIZE, SPD_DIAGNOSTIC_NONE);
		return;
	}

	if (msgsize > (uint_t)(mp->b_wptr - mp->b_rptr)) {
		/* Get all message into one mblk. */
		if (pullupmsg(mp, -1) == 0) {
			/*
			 * Something screwy happened.
			 */
			ss3dbg(spds, ("spdsock_parse: pullupmsg() failed.\n"));
			return;
		} else {
			spmsg = (spd_msg_t *)mp->b_rptr;
		}
	}

	/* Parse the extension vector; reject malformed messages. */
	switch (spdsock_get_ext(extv, spmsg, msgsize)) {
	case KGE_DUP:
		/* Handle duplicate extension. */
		ss1dbg(spds, ("Got duplicate extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, dup_ext_diag[extv[0]->spd_ext_type]);
		return;
	case KGE_UNK:
		/* Handle unknown extension. */
		ss1dbg(spds, ("Got unknown extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_UNKNOWN_EXT);
		return;
	case KGE_LEN:
		/* Length error. */
		ss1dbg(spds, ("Length %d on extension type %d overrun or 0.\n",
		    extv[0]->spd_ext_len, extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_EXTLEN);
		return;
	case KGE_CHK:
		/* Reality check failed. */
		ss1dbg(spds, ("Reality check failed on extension type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, bad_ext_diag[extv[0]->spd_ext_type]);
		return;
	default:
		/* Default case is no errors. */
		break;
	}

	/*
	 * Special-case SPD_UPDATEALGS so as not to load IPsec.
	 */
	if (!ipsec_loaded(ipss) && spmsg->spd_msg_type != SPD_UPDATEALGS) {
		spdsock_t *ss = (spdsock_t *)q->q_ptr;

		ASSERT(ss != NULL);
		/* Kick off the load and retry this message from a timeout. */
		ipsec_loader_loadnow(ipss);
		ss->spdsock_timeout_arg = mp;
		ss->spdsock_timeout = qtimeout(q, spdsock_loadcheck,
		    q, LOADCHECK_INTERVAL);
		return;
	}

	/* First check for messages that need no polheads at all. */
	switch (spmsg->spd_msg_type) {
	case SPD_UPDATEALGS:
		spdsock_updatealg(q, mp, extv);
		return;
	case SPD_ALGLIST:
		spdsock_alglist(q, mp);
		return;
	case SPD_DUMPALGS:
		spdsock_dumpalgs(q, mp);
		return;
	}

	/*
	 * Then check for ones that need both primary/secondary polheads,
	 * finding the appropriate tunnel policy if need be.
	 */
	tunname = (spd_if_t *)extv[SPD_EXT_TUN_NAME];
	switch (spmsg->spd_msg_type) {
	case SPD_FLIP:
		spdsock_flip(q, mp, tunname);
		return;
	case SPD_CLONE:
		spdsock_clone(q, mp, tunname);
		return;
	}

	/*
	 * Finally, find ones that operate on exactly one polhead, or
	 * "all polheads" of a given type (active/inactive).
	 */
	iph = get_appropriate_polhead(q, mp, tunname, spmsg->spd_msg_spdid,
	    spmsg->spd_msg_type, &itp);
	if (iph == NULL)
		return;

	/* All-polheads-ready operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_FLUSH:
		if (itp != NULL) {
			/* Clear the per-tunnel flags for the flushed side. */
			mutex_enter(&itp->itp_lock);
			if (spmsg->spd_msg_spdid == SPD_ACTIVE)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
			mutex_exit(&itp->itp_lock);
			ITP_REFRELE(itp, ns);
		}
		spdsock_flush(q, iph, itp, mp);
		return;
	case SPD_DUMP:
		if (itp != NULL)
			ITP_REFRELE(itp, ns);
		spdsock_dump(q, iph, mp);
		return;
	}

	/* Sentinel polheads are only legal for FLUSH/DUMP, handled above. */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
		return;
	}

	/* Single-polhead-only operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_ADDRULE:
		spdsock_addrule(q, iph, mp, extv, itp);
		break;
	case SPD_DELETERULE:
		spdsock_deleterule(q, iph, mp, extv, itp);
		break;
	case SPD_LOOKUP:
		spdsock_lookup(q, iph, mp, extv, itp);
		break;
	default:
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_MSG_TYPE);
		break;
	}

	/* Drop the holds taken by get_appropriate_polhead(). */
	IPPH_REFRELE(iph, ns);
	if (itp != NULL)
		ITP_REFRELE(itp, ns);
}
3108 
3109 /*
3110  * If an algorithm mapping was received before IPsec was loaded, process it.
3111  * Called from the IPsec loader.
3112  */
3113 void
3114 spdsock_update_pending_algs(netstack_t *ns)
3115 {
3116 	spd_stack_t *spds = ns->netstack_spdsock;
3117 
3118 	mutex_enter(&spds->spds_alg_lock);
3119 	if (spds->spds_algs_pending) {
3120 		int diag;
3121 
3122 		spdsock_do_updatealg(spds->spds_extv_algs, &diag,
3123 		    spds);
3124 		spds->spds_algs_pending = B_FALSE;
3125 	}
3126 	mutex_exit(&spds->spds_alg_lock);
3127 }
3128 
3129 static void
3130 spdsock_loadcheck(void *arg)
3131 {
3132 	queue_t *q = (queue_t *)arg;
3133 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
3134 	mblk_t *mp;
3135 	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
3136 
3137 	ASSERT(ss != NULL);
3138 
3139 	ss->spdsock_timeout = 0;
3140 	mp = ss->spdsock_timeout_arg;
3141 	ASSERT(mp != NULL);
3142 	ss->spdsock_timeout_arg = NULL;
3143 	if (ipsec_failed(ipss))
3144 		spdsock_error(q, mp, EPROTONOSUPPORT, 0);
3145 	else
3146 		spdsock_parse(q, mp);
3147 }
3148 
3149 /*
3150  * Copy relevant state bits.
3151  */
3152 static void
3153 spdsock_copy_info(struct T_info_ack *tap, spdsock_t *ss)
3154 {
3155 	*tap = spdsock_g_t_info_ack;
3156 	tap->CURRENT_state = ss->spdsock_state;
3157 	tap->OPT_size = spdsock_max_optsize;
3158 }
3159 
3160 /*
3161  * This routine responds to T_CAPABILITY_REQ messages.  It is called by
3162  * spdsock_wput.  Much of the T_CAPABILITY_ACK information is copied from
3163  * spdsock_g_t_info_ack.  The current state of the stream is copied from
3164  * spdsock_state.
3165  */
3166 static void
3167 spdsock_capability_req(queue_t *q, mblk_t *mp)
3168 {
3169 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
3170 	t_uscalar_t cap_bits1;
3171 	struct T_capability_ack	*tcap;
3172 
3173 	cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
3174 
3175 	mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
3176 	    mp->b_datap->db_type, T_CAPABILITY_ACK);
3177 	if (mp == NULL)
3178 		return;
3179 
3180 	tcap = (struct T_capability_ack *)mp->b_rptr;
3181 	tcap->CAP_bits1 = 0;
3182 
3183 	if (cap_bits1 & TC1_INFO) {
3184 		spdsock_copy_info(&tcap->INFO_ack, ss);
3185 		tcap->CAP_bits1 |= TC1_INFO;
3186 	}
3187 
3188 	qreply(q, mp);
3189 }
3190 
3191 /*
3192  * This routine responds to T_INFO_REQ messages. It is called by
3193  * spdsock_wput_other.
3194  * Most of the T_INFO_ACK information is copied from spdsock_g_t_info_ack.
3195  * The current state of the stream is copied from spdsock_state.
3196  */
3197 static void
3198 spdsock_info_req(q, mp)
3199 	queue_t	*q;
3200 	mblk_t	*mp;
3201 {
3202 	mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
3203 	    T_INFO_ACK);
3204 	if (mp == NULL)
3205 		return;
3206 	spdsock_copy_info((struct T_info_ack *)mp->b_rptr,
3207 	    (spdsock_t *)q->q_ptr);
3208 	qreply(q, mp);
3209 }
3210 
3211 /*
3212  * spdsock_err_ack. This routine creates a
3213  * T_ERROR_ACK message and passes it
3214  * upstream.
3215  */
3216 static void
3217 spdsock_err_ack(q, mp, t_error, sys_error)
3218 	queue_t	*q;
3219 	mblk_t	*mp;
3220 	int	t_error;
3221 	int	sys_error;
3222 {
3223 	if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
3224 		qreply(q, mp);
3225 }
3226 
3227 /*
3228  * This routine retrieves the current status of socket options.
3229  * It returns the size of the option retrieved.
3230  */
3231 /* ARGSUSED */
3232 int
3233 spdsock_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
3234 {
3235 	int *i1 = (int *)ptr;
3236 
3237 	switch (level) {
3238 	case SOL_SOCKET:
3239 		switch (name) {
3240 		case SO_TYPE:
3241 			*i1 = SOCK_RAW;
3242 			break;
3243 		/*
3244 		 * The following two items can be manipulated,
3245 		 * but changing them should do nothing.
3246 		 */
3247 		case SO_SNDBUF:
3248 			*i1 = (int)q->q_hiwat;
3249 			break;
3250 		case SO_RCVBUF:
3251 			*i1 = (int)(RD(q)->q_hiwat);
3252 			break;
3253 		}
3254 		break;
3255 	default:
3256 		return (0);
3257 	}
3258 	return (sizeof (int));
3259 }
3260 
3261 /*
3262  * This routine sets socket options.
3263  */
3264 /* ARGSUSED */
3265 int
3266 spdsock_opt_set(queue_t *q, uint_t mgmt_flags, int level, int name,
3267     uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
3268     void *thisdg_attrs, cred_t *cr, mblk_t *mblk)
3269 {
3270 	int *i1 = (int *)invalp;
3271 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
3272 	spd_stack_t	*spds = ss->spdsock_spds;
3273 
3274 	switch (level) {
3275 	case SOL_SOCKET:
3276 		switch (name) {
3277 		case SO_SNDBUF:
3278 			if (*i1 > spds->spds_max_buf)
3279 				return (ENOBUFS);
3280 			q->q_hiwat = *i1;
3281 			break;
3282 		case SO_RCVBUF:
3283 			if (*i1 > spds->spds_max_buf)
3284 				return (ENOBUFS);
3285 			RD(q)->q_hiwat = *i1;
3286 			(void) proto_set_rx_hiwat(RD(q), NULL, *i1);
3287 			break;
3288 		}
3289 		break;
3290 	}
3291 	return (0);
3292 }
3293 
3294 
3295 /*
3296  * Handle STREAMS messages.
3297  */
3298 static void
3299 spdsock_wput_other(queue_t *q, mblk_t *mp)
3300 {
3301 	struct iocblk *iocp;
3302 	int error;
3303 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
3304 	spd_stack_t	*spds = ss->spdsock_spds;
3305 	cred_t		*cr;
3306 
3307 	switch (mp->b_datap->db_type) {
3308 	case M_PROTO:
3309 	case M_PCPROTO:
3310 		if ((mp->b_wptr - mp->b_rptr) < sizeof (long)) {
3311 			ss3dbg(spds, (
3312 			    "spdsock_wput_other: Not big enough M_PROTO\n"));
3313 			freemsg(mp);
3314 			return;
3315 		}
3316 		switch (((union T_primitives *)mp->b_rptr)->type) {
3317 		case T_CAPABILITY_REQ:
3318 			spdsock_capability_req(q, mp);
3319 			break;
3320 		case T_INFO_REQ:
3321 			spdsock_info_req(q, mp);
3322 			break;
3323 		case T_SVR4_OPTMGMT_REQ:
3324 		case T_OPTMGMT_REQ:
3325 			/*
3326 			 * All Solaris components should pass a db_credp
3327 			 * for this TPI message, hence we ASSERT.
3328 			 * But in case there is some other M_PROTO that looks
3329 			 * like a TPI message sent by some other kernel
3330 			 * component, we check and return an error.
3331 			 */
3332 			cr = msg_getcred(mp, NULL);
3333 			ASSERT(cr != NULL);
3334 			if (cr == NULL) {
3335 				spdsock_err_ack(q, mp, TSYSERR, EINVAL);
3336 				return;
3337 			}
3338 			if (((union T_primitives *)mp->b_rptr)->type ==
3339 			    T_SVR4_OPTMGMT_REQ) {
3340 				(void) svr4_optcom_req(q, mp, cr,
3341 				    &spdsock_opt_obj, B_FALSE);
3342 			} else {
3343 				(void) tpi_optcom_req(q, mp, cr,
3344 				    &spdsock_opt_obj, B_FALSE);
3345 			}
3346 			break;
3347 		case T_DATA_REQ:
3348 		case T_EXDATA_REQ:
3349 		case T_ORDREL_REQ:
3350 			/* Illegal for spdsock. */
3351 			freemsg(mp);
3352 			(void) putnextctl1(RD(q), M_ERROR, EPROTO);
3353 			break;
3354 		default:
3355 			/* Not supported by spdsock. */
3356 			spdsock_err_ack(q, mp, TNOTSUPPORT, 0);
3357 			break;
3358 		}
3359 		return;
3360 	case M_IOCTL:
3361 		iocp = (struct iocblk *)mp->b_rptr;
3362 		error = EINVAL;
3363 
3364 		switch (iocp->ioc_cmd) {
3365 		case ND_SET:
3366 		case ND_GET:
3367 			if (nd_getset(q, spds->spds_g_nd, mp)) {
3368 				qreply(q, mp);
3369 				return;
3370 			} else
3371 				error = ENOENT;
3372 			/* FALLTHRU */
3373 		default:
3374 			miocnak(q, mp, 0, error);
3375 			return;
3376 		}
3377 	case M_FLUSH:
3378 		if (*mp->b_rptr & FLUSHW) {
3379 			flushq(q, FLUSHALL);
3380 			*mp->b_rptr &= ~FLUSHW;
3381 		}
3382 		if (*mp->b_rptr & FLUSHR) {
3383 			qreply(q, mp);
3384 			return;
3385 		}
3386 		/* Else FALLTHRU */
3387 	}
3388 
3389 	/* If fell through, just black-hole the message. */
3390 	freemsg(mp);
3391 }
3392 
/*
 * Write-side put procedure.  PF_POLICY requests arrive either as raw
 * M_DATA (discarded) or wrapped in a T_DATA_REQ; everything else is
 * routed to spdsock_wput_other.  While a dump is in progress, all
 * messages are deferred to the service procedure (spdsock_wsrv).
 */
static void
spdsock_wput(queue_t *q, mblk_t *mp)
{
	uint8_t *rptr = mp->b_rptr;
	mblk_t *mp1;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;

	/*
	 * If we're dumping, defer processing other messages until the
	 * dump completes.
	 */
	if (ss->spdsock_dump_req != NULL) {
		if (!putq(q, mp))
			freemsg(mp);
		return;
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:
		/*
		 * Silently discard.
		 */
		ss2dbg(spds, ("raw M_DATA in spdsock.\n"));
		freemsg(mp);
		return;
	case M_PROTO:
	case M_PCPROTO:
		if ((mp->b_wptr - rptr) >= sizeof (struct T_data_req)) {
			if (((union T_primitives *)rptr)->type == T_DATA_REQ) {
				if ((mp1 = mp->b_cont) == NULL) {
					/* No data after T_DATA_REQ. */
					ss2dbg(spds,
					    ("No data after DATA_REQ.\n"));
					freemsg(mp);
					return;
				}
				/* Strip the T_DATA_REQ header, keep payload. */
				freeb(mp);
				mp = mp1;
				ss2dbg(spds, ("T_DATA_REQ\n"));
				break;	/* Out of switch. */
			}
		}
		/* FALLTHRU */
	default:
		ss3dbg(spds, ("In default wput case (%d %d).\n",
		    mp->b_datap->db_type, ((union T_primitives *)rptr)->type));
		spdsock_wput_other(q, mp);
		return;
	}

	/* I now have a PF_POLICY message in an M_DATA block. */
	spdsock_parse(q, mp);
}
3447 
3448 /*
3449  * Device open procedure, called when new queue pair created.
3450  * We are passed the read-side queue.
3451  */
3452 /* ARGSUSED */
3453 static int
3454 spdsock_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
3455 {
3456 	spdsock_t *ss;
3457 	queue_t *oq = OTHERQ(q);
3458 	minor_t ssminor;
3459 	netstack_t *ns;
3460 	spd_stack_t *spds;
3461 
3462 	if (secpolicy_ip_config(credp, B_FALSE) != 0)
3463 		return (EPERM);
3464 
3465 	if (q->q_ptr != NULL)
3466 		return (0);  /* Re-open of an already open instance. */
3467 
3468 	if (sflag & MODOPEN)
3469 		return (EINVAL);
3470 
3471 	ns = netstack_find_by_cred(credp);
3472 	ASSERT(ns != NULL);
3473 	spds = ns->netstack_spdsock;
3474 	ASSERT(spds != NULL);
3475 
3476 	ss2dbg(spds, ("Made it into PF_POLICY socket open.\n"));
3477 
3478 	ssminor = (minor_t)(uintptr_t)vmem_alloc(spdsock_vmem, 1, VM_NOSLEEP);
3479 	if (ssminor == 0) {
3480 		netstack_rele(spds->spds_netstack);
3481 		return (ENOMEM);
3482 	}
3483 	ss = kmem_zalloc(sizeof (spdsock_t), KM_NOSLEEP);
3484 	if (ss == NULL) {
3485 		vmem_free(spdsock_vmem, (void *)(uintptr_t)ssminor, 1);
3486 		netstack_rele(spds->spds_netstack);
3487 		return (ENOMEM);
3488 	}
3489 
3490 	ss->spdsock_minor = ssminor;
3491 	ss->spdsock_state = TS_UNBND;
3492 	ss->spdsock_dump_req = NULL;
3493 
3494 	ss->spdsock_spds = spds;
3495 
3496 	q->q_ptr = ss;
3497 	oq->q_ptr = ss;
3498 
3499 	q->q_hiwat = spds->spds_recv_hiwat;
3500 
3501 	oq->q_hiwat = spds->spds_xmit_hiwat;
3502 	oq->q_lowat = spds->spds_xmit_lowat;
3503 
3504 	qprocson(q);
3505 	(void) proto_set_rx_hiwat(q, NULL, spds->spds_recv_hiwat);
3506 
3507 	*devp = makedevice(getmajor(*devp), ss->spdsock_minor);
3508 	return (0);
3509 }
3510 
3511 /*
3512  * Read-side service procedure, invoked when we get back-enabled
3513  * when buffer space becomes available.
3514  *
3515  * Dump another chunk if we were dumping before; when we finish, kick
3516  * the write-side queue in case it's waiting for read queue space.
3517  */
3518 void
3519 spdsock_rsrv(queue_t *q)
3520 {
3521 	spdsock_t *ss = q->q_ptr;
3522 
3523 	if (ss->spdsock_dump_req != NULL)
3524 		spdsock_dump_some(q, ss);
3525 
3526 	if (ss->spdsock_dump_req == NULL)
3527 		qenable(OTHERQ(q));
3528 }
3529 
3530 /*
3531  * Write-side service procedure, invoked when we defer processing
3532  * if another message is received while a dump is in progress.
3533  */
3534 void
3535 spdsock_wsrv(queue_t *q)
3536 {
3537 	spdsock_t *ss = q->q_ptr;
3538 	mblk_t *mp;
3539 	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
3540 
3541 	if (ss->spdsock_dump_req != NULL) {
3542 		qenable(OTHERQ(q));
3543 		return;
3544 	}
3545 
3546 	while ((mp = getq(q)) != NULL) {
3547 		if (ipsec_loaded(ipss)) {
3548 			spdsock_wput(q, mp);
3549 			if (ss->spdsock_dump_req != NULL)
3550 				return;
3551 		} else if (!ipsec_failed(ipss)) {
3552 			(void) putq(q, mp);
3553 		} else {
3554 			spdsock_error(q, mp, EPFNOSUPPORT, 0);
3555 		}
3556 	}
3557 }
3558 
3559 static int
3560 spdsock_close(queue_t *q)
3561 {
3562 	spdsock_t *ss = q->q_ptr;
3563 	spd_stack_t	*spds = ss->spdsock_spds;
3564 
3565 	qprocsoff(q);
3566 
3567 	/* Safe assumption. */
3568 	ASSERT(ss != NULL);
3569 
3570 	if (ss->spdsock_timeout != 0)
3571 		(void) quntimeout(q, ss->spdsock_timeout);
3572 
3573 	ss3dbg(spds, ("Driver close, PF_POLICY socket is going away.\n"));
3574 
3575 	vmem_free(spdsock_vmem, (void *)(uintptr_t)ss->spdsock_minor, 1);
3576 	netstack_rele(ss->spdsock_spds->spds_netstack);
3577 
3578 	kmem_free(ss, sizeof (spdsock_t));
3579 	return (0);
3580 }
3581 
3582 /*
3583  * Merge the IPsec algorithms tables with the received algorithm information.
3584  */
3585 void
3586 spdsock_merge_algs(spd_stack_t *spds)
3587 {
3588 	ipsec_alginfo_t *alg, *oalg;
3589 	ipsec_algtype_t algtype;
3590 	uint_t algidx, algid, nalgs;
3591 	crypto_mech_name_t *mechs;
3592 	uint_t mech_count, mech_idx;
3593 	netstack_t	*ns = spds->spds_netstack;
3594 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
3595 
3596 	ASSERT(MUTEX_HELD(&spds->spds_alg_lock));
3597 
3598 	/*
3599 	 * Get the list of supported mechanisms from the crypto framework.
3600 	 * If a mechanism is supported by KCF, resolve its mechanism
3601 	 * id and mark it as being valid. This operation must be done
3602 	 * without holding alg_lock, since it can cause a provider
3603 	 * module to be loaded and the provider notification callback to
3604 	 * be invoked.
3605 	 */
3606 	mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
3607 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3608 		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
3609 			int algflags = 0;
3610 			crypto_mech_type_t mt = CRYPTO_MECHANISM_INVALID;
3611 
3612 			alg = spds->spds_algs[algtype][algid];
3613 			if (alg == NULL)
3614 				continue;
3615 
3616 			/*
3617 			 * The NULL encryption algorithm is a special
3618 			 * case because there are no mechanisms, yet
3619 			 * the algorithm is still valid.
3620 			 */
3621 			if (alg->alg_id == SADB_EALG_NULL) {
3622 				alg->alg_mech_type = CRYPTO_MECHANISM_INVALID;
3623 				alg->alg_flags |= ALG_FLAG_VALID;
3624 				continue;
3625 			}
3626 
3627 			for (mech_idx = 0; mech_idx < mech_count; mech_idx++) {
3628 				if (strncmp(alg->alg_mech_name, mechs[mech_idx],
3629 				    CRYPTO_MAX_MECH_NAME) == 0) {
3630 					mt = crypto_mech2id(alg->alg_mech_name);
3631 					ASSERT(mt != CRYPTO_MECHANISM_INVALID);
3632 					algflags = ALG_FLAG_VALID;
3633 					break;
3634 				}
3635 			}
3636 			alg->alg_mech_type = mt;
3637 			alg->alg_flags |= algflags;
3638 		}
3639 	}
3640 
3641 	mutex_enter(&ipss->ipsec_alg_lock);
3642 
3643 	/*
3644 	 * For each algorithm currently defined, check if it is
3645 	 * present in the new tables created from the SPD_UPDATEALGS
3646 	 * message received from user-space.
3647 	 * Delete the algorithm entries that are currently defined
3648 	 * but not part of the new tables.
3649 	 */
3650 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3651 		nalgs = ipss->ipsec_nalgs[algtype];
3652 		for (algidx = 0; algidx < nalgs; algidx++) {
3653 			algid = ipss->ipsec_sortlist[algtype][algidx];
3654 			if (spds->spds_algs[algtype][algid] == NULL)
3655 				ipsec_alg_unreg(algtype, algid, ns);
3656 		}
3657 	}
3658 
3659 	/*
3660 	 * For each algorithm we just received, check if it is
3661 	 * present in the currently defined tables. If it is, swap
3662 	 * the entry with the one we just allocated.
3663 	 * If the new algorithm is not in the current tables,
3664 	 * add it.
3665 	 */
3666 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3667 		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
3668 			alg = spds->spds_algs[algtype][algid];
3669 			if (alg == NULL)
3670 				continue;
3671 
3672 			if ((oalg = ipss->ipsec_alglists[algtype][algid]) ==
3673 			    NULL) {
3674 				/*
3675 				 * New algorithm, add it to the algorithm
3676 				 * table.
3677 				 */
3678 				ipsec_alg_reg(algtype, alg, ns);
3679 			} else {
3680 				/*
3681 				 * Algorithm is already in the table. Swap
3682 				 * the existing entry with the new one.
3683 				 */
3684 				ipsec_alg_fix_min_max(alg, algtype, ns);
3685 				ipss->ipsec_alglists[algtype][algid] = alg;
3686 				ipsec_alg_free(oalg);
3687 			}
3688 			spds->spds_algs[algtype][algid] = NULL;
3689 		}
3690 	}
3691 
3692 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3693 		ipss->ipsec_algs_exec_mode[algtype] =
3694 		    spds->spds_algs_exec_mode[algtype];
3695 	}
3696 
3697 	mutex_exit(&ipss->ipsec_alg_lock);
3698 
3699 	crypto_free_mech_list(mechs, mech_count);
3700 
3701 	ipsecah_algs_changed(ns);
3702 	ipsecesp_algs_changed(ns);
3703 }
3704