xref: /titanic_52/usr/src/uts/common/inet/ip/spdsock.c (revision 5b08e637db3c0e4201b09b542c766da0f66129e8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/param.h>
27 #include <sys/types.h>
28 #include <sys/stream.h>
29 #include <sys/strsubr.h>
30 #include <sys/strsun.h>
31 #include <sys/stropts.h>
32 #include <sys/zone.h>
33 #include <sys/vnode.h>
34 #include <sys/sysmacros.h>
35 #define	_SUN_TPI_VERSION 2
36 #include <sys/tihdr.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/mkdev.h>
40 #include <sys/debug.h>
41 #include <sys/kmem.h>
42 #include <sys/cmn_err.h>
43 #include <sys/suntpi.h>
44 #include <sys/policy.h>
45 
46 #include <sys/socket.h>
47 #include <netinet/in.h>
48 #include <net/pfkeyv2.h>
49 #include <net/pfpolicy.h>
50 
51 #include <inet/common.h>
52 #include <netinet/ip6.h>
53 #include <inet/ip.h>
54 #include <inet/ip6.h>
55 #include <inet/mi.h>
56 #include <inet/proto_set.h>
57 #include <inet/nd.h>
58 #include <inet/ip_if.h>
59 #include <inet/tun.h>
60 #include <inet/optcom.h>
61 #include <inet/ipsec_info.h>
62 #include <inet/ipsec_impl.h>
63 #include <inet/spdsock.h>
64 #include <inet/sadb.h>
65 
66 #include <sys/isa_defs.h>
67 
68 #include <c2/audit.h>
69 
70 /*
71  * This is a transport provider for the PF_POLICY IPsec policy
72  * management socket, which provides a management interface into the
73  * SPD, allowing policy rules to be added, deleted, and queried.
74  *
75  * This effectively replaces the old private SIOC*IPSECONFIG ioctls
76  * with an extensible interface which will hopefully be public some
77  * day.
78  *
79  * See <net/pfpolicy.h> for more details on the protocol.
80  *
81  * We link against drv/ip and call directly into it to manipulate the
82  * SPD; see ipsec_impl.h for the policy data structures and spd.c for
83  * the code which maintains them.
84  *
85  * The MT model of this is QPAIR with the addition of some explicit
86  * locking to protect system-wide policy data structures.
87  */
88 
static vmem_t *spdsock_vmem;		/* for minor numbers. */

/* True iff (x) is aligned on a 64-bit (uint64_t) boundary. */
#define	ALIGNED64(x) IS_P2ALIGNED((x), sizeof (uint64_t))

/* Default structure copied into T_INFO_ACK messages (from rts.c...) */
static struct T_info_ack spdsock_g_t_info_ack = {
	T_INFO_ACK,
	T_INFINITE,	/* TSDU_size. Maximum size messages. */
	T_INVALID,	/* ETSDU_size. No expedited data. */
	T_INVALID,	/* CDATA_size. No connect data. */
	T_INVALID,	/* DDATA_size. No disconnect data. */
	0,		/* ADDR_size. */
	0,		/* OPT_size. No user-settable options */
	64 * 1024,	/* TIDU_size. spdsock allows maximum size messages. */
	T_COTS,		/* SERV_type. spdsock supports connection oriented. */
	TS_UNBND,	/* CURRENT_state. This is set from spdsock_state. */
	(XPG4_1)	/* Provider flags */
};
107 
108 /* Named Dispatch Parameter Management Structure */
typedef struct spdsockparam_s {
	uint_t	spdsock_param_min;	/* lower bound accepted by ndd set */
	uint_t	spdsock_param_max;	/* upper bound accepted by ndd set */
	uint_t	spdsock_param_value;	/* current value of the tunable */
	char *spdsock_param_name;	/* name exposed via named dispatch */
} spdsockparam_t;
115 
116 /*
117  * Table of NDD variables supported by spdsock. These are loaded into
118  * spdsock_g_nd in spdsock_init_nd.
119  * All of these are alterable, within the min/max values given, at run time.
120  */
static	spdsockparam_t	lcl_param_arr[] = {
	/* min	max	value	name */
	{ 4096, 65536,	8192,	"spdsock_xmit_hiwat"},
	{ 0,	65536,	1024,	"spdsock_xmit_lowat"},
	{ 4096, 65536,	8192,	"spdsock_recv_hiwat"},
	{ 65536, 1024*1024*1024, 256*1024,	"spdsock_max_buf"},
	{ 0,	3,	0,	"spdsock_debug"},
};
/*
 * Convenience accessors into the per-stack copy of the table above
 * (spds_params); index order must match lcl_param_arr exactly.
 */
#define	spds_xmit_hiwat	spds_params[0].spdsock_param_value
#define	spds_xmit_lowat	spds_params[1].spdsock_param_value
#define	spds_recv_hiwat	spds_params[2].spdsock_param_value
#define	spds_max_buf	spds_params[3].spdsock_param_value
#define	spds_debug		spds_params[4].spdsock_param_value
134 
/* Debug printf macros; level gated by the spdsock_debug ndd tunable. */
#define	ss0dbg(a)	printf a
/* NOTE:  != 0 instead of > 0 so lint doesn't complain. */
#define	ss1dbg(spds, a)	if (spds->spds_debug != 0) printf a
#define	ss2dbg(spds, a)	if (spds->spds_debug > 1) printf a
#define	ss3dbg(spds, a)	if (spds->spds_debug > 2) printf a

/*
 * Reset a socket's dump cursor to the start of policy head (iph),
 * recording iph_gen so a concurrent policy change can be detected.
 * Caller must hold iph_lock at least as reader (asserted).
 */
#define	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph) { \
	ASSERT(RW_READ_HELD(&(iph)->iph_lock)); \
	(ss)->spdsock_dump_head = (iph); \
	(ss)->spdsock_dump_gen = (iph)->iph_gen; \
	(ss)->spdsock_dump_cur_type = 0; \
	(ss)->spdsock_dump_cur_af = IPSEC_AF_V4; \
	(ss)->spdsock_dump_cur_rule = NULL; \
	(ss)->spdsock_dump_count = 0; \
	(ss)->spdsock_dump_cur_chain = 0; \
}
151 
/* Forward declarations. */
static int spdsock_close(queue_t *);
static int spdsock_open(queue_t *, dev_t *, int, int, cred_t *);
static void spdsock_wput(queue_t *, mblk_t *);
static void spdsock_wsrv(queue_t *);
static void spdsock_rsrv(queue_t *);
static void *spdsock_stack_init(netstackid_t stackid, netstack_t *ns);
static void spdsock_stack_fini(netstackid_t stackid, void *arg);
static void spdsock_loadcheck(void *);
static void spdsock_merge_algs(spd_stack_t *);
static void spdsock_flush_one(ipsec_policy_head_t *, netstack_t *);
static mblk_t *spdsock_dump_next_record(spdsock_t *);

/* STREAMS module information: id 5138, name "spdsock". */
static struct module_info info = {
	5138, "spdsock", 1, INFPSZ, 512, 128
};

/* Read side: no put procedure, service routine plus open/close. */
static struct qinit rinit = {
	NULL, (pfi_t)spdsock_rsrv, spdsock_open, spdsock_close,
	NULL, &info
};

/* Write side: put and service routines handle inbound requests. */
static struct qinit winit = {
	(pfi_t)spdsock_wput, (pfi_t)spdsock_wsrv, NULL, NULL, NULL, &info
};

/* Stream tab pairing the two qinit halves; consumed by driver glue. */
struct streamtab spdsockinfo = {
	&rinit, &winit
};
180 
/* mapping from alg type to protocol number, as per RFC 2407 */
static const uint_t algproto[] = {
	PROTO_IPSEC_AH,
	PROTO_IPSEC_ESP,
};

/* Number of entries in algproto[]. */
#define	NALGPROTOS	(sizeof (algproto) / sizeof (algproto[0]))

/* mapping from kernel exec mode to spdsock exec mode */
static const uint_t execmodes[] = {
	SPD_ALG_EXEC_MODE_SYNC,
	SPD_ALG_EXEC_MODE_ASYNC
};

/* Number of entries in execmodes[]. */
#define	NEXECMODES	(sizeof (execmodes) / sizeof (execmodes[0]))

/* Sentinel "polhead" values meaning every active/inactive polhead. */
#define	ALL_ACTIVE_POLHEADS ((ipsec_policy_head_t *)-1)
#define	ALL_INACTIVE_POLHEADS ((ipsec_policy_head_t *)-2)

/* Tunnel name for audit records, or NULL when no tunnel is involved. */
#define	ITP_NAME(itp) (itp != NULL ? itp->itp_name : NULL)
201 
202 /* ARGSUSED */
203 static int
204 spdsock_param_get(q, mp, cp, cr)
205 	queue_t	*q;
206 	mblk_t	*mp;
207 	caddr_t	cp;
208 	cred_t *cr;
209 {
210 	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
211 	uint_t value;
212 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
213 	spd_stack_t	*spds = ss->spdsock_spds;
214 
215 	mutex_enter(&spds->spds_param_lock);
216 	value = spdsockpa->spdsock_param_value;
217 	mutex_exit(&spds->spds_param_lock);
218 
219 	(void) mi_mpprintf(mp, "%u", value);
220 	return (0);
221 }
222 
223 /* This routine sets an NDD variable in a spdsockparam_t structure. */
224 /* ARGSUSED */
225 static int
226 spdsock_param_set(q, mp, value, cp, cr)
227 	queue_t	*q;
228 	mblk_t	*mp;
229 	char *value;
230 	caddr_t	cp;
231 	cred_t *cr;
232 {
233 	ulong_t	new_value;
234 	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
235 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
236 	spd_stack_t	*spds = ss->spdsock_spds;
237 
238 	/* Convert the value from a string into a long integer. */
239 	if (ddi_strtoul(value, NULL, 10, &new_value) != 0)
240 		return (EINVAL);
241 
242 	mutex_enter(&spds->spds_param_lock);
243 	/*
244 	 * Fail the request if the new value does not lie within the
245 	 * required bounds.
246 	 */
247 	if (new_value < spdsockpa->spdsock_param_min ||
248 	    new_value > spdsockpa->spdsock_param_max) {
249 		mutex_exit(&spds->spds_param_lock);
250 		return (EINVAL);
251 	}
252 
253 	/* Set the new value */
254 	spdsockpa->spdsock_param_value = new_value;
255 	mutex_exit(&spds->spds_param_lock);
256 
257 	return (0);
258 }
259 
260 /*
261  * Initialize at module load time
262  */
boolean_t
spdsock_ddi_init(void)
{
	/* Precompute the largest option buffer we will ever need. */
	spdsock_max_optsize = optcom_max_optsize(
	    spdsock_opt_obj.odb_opt_des_arr, spdsock_opt_obj.odb_opt_arr_cnt);

	/* Arena handing out unique minor numbers, starting at 1. */
	spdsock_vmem = vmem_create("spdsock", (void *)1, MAXMIN, 1,
	    NULL, NULL, NULL, 1, VM_SLEEP | VMC_IDENTIFIER);

	/*
	 * We want to be informed each time a stack is created or
	 * destroyed in the kernel, so we can maintain the
	 * set of spd_stack_t's.
	 */
	netstack_register(NS_SPDSOCK, spdsock_stack_init, NULL,
	    spdsock_stack_fini);

	return (B_TRUE);
}
282 
283 /*
284  * Walk through the param array specified registering each element with the
285  * named dispatch handler.
286  */
287 static boolean_t
288 spdsock_param_register(IDP *ndp, spdsockparam_t *ssp, int cnt)
289 {
290 	for (; cnt-- > 0; ssp++) {
291 		if (ssp->spdsock_param_name != NULL &&
292 		    ssp->spdsock_param_name[0]) {
293 			if (!nd_load(ndp,
294 			    ssp->spdsock_param_name,
295 			    spdsock_param_get, spdsock_param_set,
296 			    (caddr_t)ssp)) {
297 				nd_free(ndp);
298 				return (B_FALSE);
299 			}
300 		}
301 	}
302 	return (B_TRUE);
303 }
304 
305 /*
306  * Initialize for each stack instance
307  */
308 /* ARGSUSED */
static void *
spdsock_stack_init(netstackid_t stackid, netstack_t *ns)
{
	spd_stack_t	*spds;
	spdsockparam_t	*ssp;

	/* Per-netstack state, zeroed so all pointers start out NULL. */
	spds = (spd_stack_t *)kmem_zalloc(sizeof (*spds), KM_SLEEP);
	spds->spds_netstack = ns;

	ASSERT(spds->spds_g_nd == NULL);

	/* Give this stack its own writable copy of the tunable table. */
	ssp = (spdsockparam_t *)kmem_alloc(sizeof (lcl_param_arr), KM_SLEEP);
	spds->spds_params = ssp;
	bcopy(lcl_param_arr, ssp, sizeof (lcl_param_arr));

	/* Register the copies with named dispatch (ndd). */
	(void) spdsock_param_register(&spds->spds_g_nd, ssp,
	    A_CNT(lcl_param_arr));

	mutex_init(&spds->spds_param_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spds->spds_alg_lock, NULL, MUTEX_DEFAULT, NULL);

	return (spds);
}
332 
void
spdsock_ddi_destroy(void)
{
	/* Tear down module-global state created in spdsock_ddi_init(). */
	vmem_destroy(spdsock_vmem);

	netstack_unregister(NS_SPDSOCK);
}
340 
341 /* ARGSUSED */
static void
spdsock_stack_fini(netstackid_t stackid, void *arg)
{
	spd_stack_t *spds = (spd_stack_t *)arg;

	/* freemsg() tolerates NULL if no algorithm message was cached. */
	freemsg(spds->spds_mp_algs);
	mutex_destroy(&spds->spds_param_lock);
	mutex_destroy(&spds->spds_alg_lock);
	nd_free(&spds->spds_g_nd);
	kmem_free(spds->spds_params, sizeof (lcl_param_arr));
	spds->spds_params = NULL;

	kmem_free(spds, sizeof (*spds));
}
356 
357 /*
358  * NOTE: large quantities of this should be shared with keysock.
359  * Would be nice to combine some of this into a common module, but
360  * not possible given time pressures.
361  */
362 
363 /*
364  * High-level reality checking of extensions.
365  */
366 /* ARGSUSED */ /* XXX */
367 static boolean_t
368 ext_check(spd_ext_t *ext)
369 {
370 	spd_if_t *tunname = (spd_if_t *)ext;
371 	int i;
372 	char *idstr;
373 
374 	if (ext->spd_ext_type == SPD_EXT_TUN_NAME) {
375 		/* (NOTE:  Modified from SADB_EXT_IDENTITY..) */
376 
377 		/*
378 		 * Make sure the strings in these identities are
379 		 * null-terminated.  Let's "proactively" null-terminate the
380 		 * string at the last byte if it's not terminated sooner.
381 		 */
382 		i = SPD_64TO8(tunname->spd_if_len) - sizeof (spd_if_t);
383 		idstr = (char *)(tunname + 1);
384 		while (*idstr != '\0' && i > 0) {
385 			i--;
386 			idstr++;
387 		}
388 		if (i == 0) {
389 			/*
390 			 * I.e., if the bozo user didn't NULL-terminate the
391 			 * string...
392 			 */
393 			idstr--;
394 			*idstr = '\0';
395 		}
396 	}
397 	return (B_TRUE);	/* For now... */
398 }
399 
400 
401 
402 /* Return values for spdsock_get_ext(). */
403 #define	KGE_OK	0
404 #define	KGE_DUP	1
405 #define	KGE_UNK	2
406 #define	KGE_LEN	3
407 #define	KGE_CHK	4
408 
409 /*
410  * Parse basic extension headers and return in the passed-in pointer vector.
411  * Return values include:
412  *
413  *	KGE_OK	Everything's nice and parsed out.
414  *		If there are no extensions, place NULL in extv[0].
415  *	KGE_DUP	There is a duplicate extension.
416  *		First instance in appropriate bin.  First duplicate in
417  *		extv[0].
418  *	KGE_UNK	Unknown extension type encountered.  extv[0] contains
419  *		unknown header.
420  *	KGE_LEN	Extension length error.
421  *	KGE_CHK	High-level reality check failed on specific extension.
422  *
423  * My apologies for some of the pointer arithmetic in here.  I'm thinking
424  * like an assembly programmer, yet trying to make the compiler happy.
425  */
static int
spdsock_get_ext(spd_ext_t *extv[], spd_msg_t *basehdr, uint_t msgsize)
{
	bzero(extv, sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));

	/* Use extv[0] as the "current working pointer". */

	extv[0] = (spd_ext_t *)(basehdr + 1);

	/* Walk extensions until the cursor passes the end of the message. */
	while (extv[0] < (spd_ext_t *)(((uint8_t *)basehdr) + msgsize)) {
		/* Check for unknown headers. */
		if (extv[0]->spd_ext_type == 0 ||
		    extv[0]->spd_ext_type > SPD_EXT_MAX)
			return (KGE_UNK);

		/*
		 * Check length.  Use uint64_t because extlen is in units
		 * of 64-bit words.  If length goes beyond the msgsize,
		 * return an error.  (Zero length also qualifies here.)
		 */
		if (extv[0]->spd_ext_len == 0 ||
		    (void *)((uint64_t *)extv[0] + extv[0]->spd_ext_len) >
		    (void *)((uint8_t *)basehdr + msgsize))
			return (KGE_LEN);

		/* Check for redundant headers. */
		if (extv[extv[0]->spd_ext_type] != NULL)
			return (KGE_DUP);

		/*
		 * Reality check the extension if possible at the spdsock
		 * level.
		 */
		if (!ext_check(extv[0]))
			return (KGE_CHK);

		/* If I make it here, assign the appropriate bin. */
		extv[extv[0]->spd_ext_type] = extv[0];

		/* Advance pointer (See above for uint64_t ptr reasoning.) */
		extv[0] = (spd_ext_t *)
		    ((uint64_t *)extv[0] + extv[0]->spd_ext_len);
	}

	/* Everything's cool. */

	/*
	 * If extv[0] == NULL, then there are no extension headers in this
	 * message.  Ensure that this is the case.
	 */
	if (extv[0] == (spd_ext_t *)(basehdr + 1))
		extv[0] = NULL;

	return (KGE_OK);
}
481 
/*
 * Per-extension-type diagnostics for malformed extensions; indexed by
 * (spd_ext_type - 1), so the order must track the SPD_EXT_* numbering.
 */
static const int bad_ext_diag[] = {
	SPD_DIAGNOSTIC_MALFORMED_LCLPORT,
	SPD_DIAGNOSTIC_MALFORMED_REMPORT,
	SPD_DIAGNOSTIC_MALFORMED_PROTO,
	SPD_DIAGNOSTIC_MALFORMED_LCLADDR,
	SPD_DIAGNOSTIC_MALFORMED_REMADDR,
	SPD_DIAGNOSTIC_MALFORMED_ACTION,
	SPD_DIAGNOSTIC_MALFORMED_RULE,
	SPD_DIAGNOSTIC_MALFORMED_RULESET,
	SPD_DIAGNOSTIC_MALFORMED_ICMP_TYPECODE
};

/* Same ordering as above, for duplicated extensions. */
static const int dup_ext_diag[] = {
	SPD_DIAGNOSTIC_DUPLICATE_LCLPORT,
	SPD_DIAGNOSTIC_DUPLICATE_REMPORT,
	SPD_DIAGNOSTIC_DUPLICATE_PROTO,
	SPD_DIAGNOSTIC_DUPLICATE_LCLADDR,
	SPD_DIAGNOSTIC_DUPLICATE_REMADDR,
	SPD_DIAGNOSTIC_DUPLICATE_ACTION,
	SPD_DIAGNOSTIC_DUPLICATE_RULE,
	SPD_DIAGNOSTIC_DUPLICATE_RULESET,
	SPD_DIAGNOSTIC_DUPLICATE_ICMP_TYPECODE
};
505 
506 /*
507  * Transmit a PF_POLICY error message to the instance either pointed to
508  * by ks, the instance with serial number serial, or more, depending.
509  *
510  * The faulty message (or a reasonable facsimile thereof) is in mp.
511  * This function will free mp or recycle it for delivery, thereby causing
512  * the stream head to free it.
513  */
514 static void
515 spdsock_error(queue_t *q, mblk_t *mp, int error, int diagnostic)
516 {
517 	spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
518 
519 	ASSERT(mp->b_datap->db_type == M_DATA);
520 
521 	if (spmsg->spd_msg_type < SPD_MIN ||
522 	    spmsg->spd_msg_type > SPD_MAX)
523 		spmsg->spd_msg_type = SPD_RESERVED;
524 
525 	/*
526 	 * Strip out extension headers.
527 	 */
528 	ASSERT(mp->b_rptr + sizeof (*spmsg) <= mp->b_datap->db_lim);
529 	mp->b_wptr = mp->b_rptr + sizeof (*spmsg);
530 	spmsg->spd_msg_len = SPD_8TO64(sizeof (spd_msg_t));
531 	spmsg->spd_msg_errno = (uint8_t)error;
532 	spmsg->spd_msg_diagnostic = (uint16_t)diagnostic;
533 
534 	qreply(q, mp);
535 }
536 
/* Convenience wrapper: EINVAL error with a specific diagnostic code. */
static void
spdsock_diag(queue_t *q, mblk_t *mp, int diagnostic)
{
	spdsock_error(q, mp, EINVAL, diagnostic);
}
542 
/* Echo a successful request back to the sender unchanged. */
static void
spd_echo(queue_t *q, mblk_t *mp)
{
	qreply(q, mp);
}
548 
549 /*
550  * Do NOT consume a reference to itp.
551  */
552 /*ARGSUSED*/
553 static void
554 spdsock_flush_node(ipsec_tun_pol_t *itp, void *cookie, netstack_t *ns)
555 {
556 	boolean_t active = (boolean_t)cookie;
557 	ipsec_policy_head_t *iph;
558 
559 	iph = active ? itp->itp_policy : itp->itp_inactive;
560 	IPPH_REFHOLD(iph);
561 	mutex_enter(&itp->itp_lock);
562 	spdsock_flush_one(iph, ns);
563 	if (active)
564 		itp->itp_flags &= ~ITPF_PFLAGS;
565 	else
566 		itp->itp_flags &= ~ITPF_IFLAGS;
567 	mutex_exit(&itp->itp_lock);
568 }
569 
570 /*
571  * Clear out one polhead.
572  */
static void
spdsock_flush_one(ipsec_policy_head_t *iph, netstack_t *ns)
{
	/* Flush under the writer lock, then drop the caller's reference. */
	rw_enter(&iph->iph_lock, RW_WRITER);
	ipsec_polhead_flush(iph, ns);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ns);
}
581 
/*
 * Handle an SPD_FLUSH request: flush either a single polhead, or —
 * when iph is one of the ALL_*_POLHEADS sentinels — the global
 * polhead plus the matching polhead of every tunnel.  Emits audit
 * records when auditing is enabled, then echoes the request back.
 */
static void
spdsock_flush(queue_t *q, ipsec_policy_head_t *iph, ipsec_tun_pol_t *itp,
    mblk_t *mp)
{
	boolean_t active;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (iph != ALL_ACTIVE_POLHEADS && iph != ALL_INACTIVE_POLHEADS) {
		/* Single polhead (global or one tunnel's). */
		spdsock_flush_one(iph, ns);
		if (audit_active) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLUSH, DB_CRED(mp), ns,
			    ITP_NAME(itp), active, 0, DB_CPID(mp));
		}
	} else {
		active = (iph == ALL_ACTIVE_POLHEADS);

		/* First flush the global policy. */
		spdsock_flush_one(active ? ipsec_system_policy(ns) :
		    ipsec_inactive_policy(ns), ns);
		if (audit_active) {
			audit_pf_policy(SPD_FLUSH, DB_CRED(mp), ns, NULL,
			    active, 0, DB_CPID(mp));
		}
		/* Then flush every tunnel's appropriate one. */
		itp_walk(spdsock_flush_node, (void *)active, ns);
		if (audit_active)
			audit_pf_policy(SPD_FLUSH, DB_CRED(mp), ns,
			    "all tunnels", active, 0, DB_CPID(mp));
	}

	spd_echo(q, mp);
}
618 
/*
 * Convert the parsed extension vector into an ipsec_selkey_t,
 * setting the corresponding IPSL_* bit for each selector present.
 * Returns B_FALSE with *diag set on malformed addresses or a
 * mixed IPv4/IPv6 address pair.
 */
static boolean_t
spdsock_ext_to_sel(spd_ext_t **extv, ipsec_selkey_t *sel, int *diag)
{
	bzero(sel, sizeof (*sel));

	if (extv[SPD_EXT_PROTO] != NULL) {
		struct spd_proto *pr =
		    (struct spd_proto *)extv[SPD_EXT_PROTO];
		sel->ipsl_proto = pr->spd_proto_number;
		sel->ipsl_valid |= IPSL_PROTOCOL;
	}
	if (extv[SPD_EXT_LCLPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_LCLPORT];
		sel->ipsl_lport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_LOCAL_PORT;
	}
	if (extv[SPD_EXT_REMPORT] != NULL) {
		struct spd_portrange *pr =
		    (struct spd_portrange *)extv[SPD_EXT_REMPORT];
		sel->ipsl_rport = pr->spd_ports_minport;
		sel->ipsl_valid |= IPSL_REMOTE_PORT;
	}

	if (extv[SPD_EXT_ICMP_TYPECODE] != NULL) {
		struct spd_typecode *tc=
		    (struct spd_typecode *)extv[SPD_EXT_ICMP_TYPECODE];

		sel->ipsl_valid |= IPSL_ICMP_TYPE;
		sel->ipsl_icmp_type = tc->spd_typecode_type;
		/* Degenerate (end < start) ranges collapse to the start. */
		if (tc->spd_typecode_type_end < tc->spd_typecode_type)
			sel->ipsl_icmp_type_end = tc->spd_typecode_type;
		else
			sel->ipsl_icmp_type_end = tc->spd_typecode_type_end;

		/* Code 255 means "match any code" — leave IPSL_ICMP_CODE off. */
		if (tc->spd_typecode_code != 255) {
			sel->ipsl_valid |= IPSL_ICMP_CODE;
			sel->ipsl_icmp_code = tc->spd_typecode_code;
			if (tc->spd_typecode_code_end < tc->spd_typecode_code)
				sel->ipsl_icmp_code_end = tc->spd_typecode_code;
			else
				sel->ipsl_icmp_code_end =
				    tc->spd_typecode_code_end;
		}
	}
/*
 * Copy one address extension into the selkey, validating that the
 * extension is long enough for the address family it claims.
 */
#define	ADDR2SEL(sel, extv, field, pfield, extn, bit)			      \
	if ((extv)[(extn)] != NULL) {					      \
		uint_t addrlen;						      \
		struct spd_address *ap = 				      \
			(struct spd_address *)((extv)[(extn)]); 	      \
		addrlen = (ap->spd_address_af == AF_INET6) ? 		      \
			IPV6_ADDR_LEN : IP_ADDR_LEN;			      \
		if (SPD_64TO8(ap->spd_address_len) < 			      \
			(addrlen + sizeof (*ap))) {			      \
			*diag = SPD_DIAGNOSTIC_BAD_ADDR_LEN;		      \
			return (B_FALSE);				      \
		}							      \
		bcopy((ap+1), &((sel)->field), addrlen);		      \
		(sel)->pfield = ap->spd_address_prefixlen;		      \
		(sel)->ipsl_valid |= (bit);				      \
		(sel)->ipsl_valid |= (ap->spd_address_af == AF_INET6) ?	      \
			IPSL_IPV6 : IPSL_IPV4;				      \
	}

	ADDR2SEL(sel, extv, ipsl_local, ipsl_local_pfxlen,
	    SPD_EXT_LCLADDR, IPSL_LOCAL_ADDR);
	ADDR2SEL(sel, extv, ipsl_remote, ipsl_remote_pfxlen,
	    SPD_EXT_REMADDR, IPSL_REMOTE_ADDR);

	/* Local and remote addresses must agree on the address family. */
	if ((sel->ipsl_valid & (IPSL_IPV6|IPSL_IPV4)) ==
	    (IPSL_IPV6|IPSL_IPV4)) {
		*diag = SPD_DIAGNOSTIC_MIXED_AF;
		return (B_FALSE);
	}

#undef ADDR2SEL

	return (B_TRUE);
}
698 
699 static boolean_t
700 spd_convert_type(uint32_t type, ipsec_act_t *act)
701 {
702 	switch (type) {
703 	case SPD_ACTTYPE_DROP:
704 		act->ipa_type = IPSEC_ACT_DISCARD;
705 		return (B_TRUE);
706 
707 	case SPD_ACTTYPE_PASS:
708 		act->ipa_type = IPSEC_ACT_CLEAR;
709 		return (B_TRUE);
710 
711 	case SPD_ACTTYPE_IPSEC:
712 		act->ipa_type = IPSEC_ACT_APPLY;
713 		return (B_TRUE);
714 	}
715 	return (B_FALSE);
716 }
717 
718 static boolean_t
719 spd_convert_flags(uint32_t flags, ipsec_act_t *act)
720 {
721 	/*
722 	 * Note use of !! for boolean canonicalization.
723 	 */
724 	act->ipa_apply.ipp_use_ah = !!(flags & SPD_APPLY_AH);
725 	act->ipa_apply.ipp_use_esp = !!(flags & SPD_APPLY_ESP);
726 	act->ipa_apply.ipp_use_espa = !!(flags & SPD_APPLY_ESPA);
727 	act->ipa_apply.ipp_use_se = !!(flags & SPD_APPLY_SE);
728 	act->ipa_apply.ipp_use_unique = !!(flags & SPD_APPLY_UNIQUE);
729 	return (B_TRUE);
730 }
731 
/* Reset an action to its defaults: all-zero, key maxima wide open. */
static void
spdsock_reset_act(ipsec_act_t *act)
{
	bzero(act, sizeof (*act));
	act->ipa_apply.ipp_espe_maxbits = IPSEC_MAX_KEYBITS;
	act->ipa_apply.ipp_espa_maxbits = IPSEC_MAX_KEYBITS;
	act->ipa_apply.ipp_ah_maxbits = IPSEC_MAX_KEYBITS;
}
740 
741 /*
742  * Sanity check action against reality, and shrink-wrap key sizes..
743  */
744 static boolean_t
745 spdsock_check_action(ipsec_act_t *act, boolean_t tunnel_polhead, int *diag,
746     spd_stack_t *spds)
747 {
748 	if (tunnel_polhead && act->ipa_apply.ipp_use_unique) {
749 		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
750 		return (B_FALSE);
751 	}
752 	if ((act->ipa_type != IPSEC_ACT_APPLY) &&
753 	    (act->ipa_apply.ipp_use_ah ||
754 	    act->ipa_apply.ipp_use_esp ||
755 	    act->ipa_apply.ipp_use_espa ||
756 	    act->ipa_apply.ipp_use_se ||
757 	    act->ipa_apply.ipp_use_unique)) {
758 		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
759 		return (B_FALSE);
760 	}
761 	if ((act->ipa_type == IPSEC_ACT_APPLY) &&
762 	    !act->ipa_apply.ipp_use_ah &&
763 	    !act->ipa_apply.ipp_use_esp) {
764 		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
765 		return (B_FALSE);
766 	}
767 	return (ipsec_check_action(act, diag, spds->spds_netstack));
768 }
769 
770 /*
771  * We may be short a few error checks here..
772  */
static boolean_t
spdsock_ext_to_actvec(spd_ext_t **extv, ipsec_act_t **actpp, uint_t *nactp,
    int *diag, spd_stack_t *spds)
{
	struct spd_ext_actions *sactp =
	    (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
	ipsec_act_t act, *actp, *endactp;
	struct spd_attribute *attrp, *endattrp;
	uint64_t *endp;
	int nact;
	boolean_t tunnel_polhead;

	/* Tunnel-mode policy: tunnel name present AND the TUNNEL rule flag. */
	tunnel_polhead = (extv[SPD_EXT_TUN_NAME] != NULL &&
	    (((struct spd_rule *)extv[SPD_EXT_RULE])->spd_rule_flags &
	    SPD_RULE_FLAG_TUNNEL));

	*actpp = NULL;
	*nactp = 0;

	if (sactp == NULL) {
		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
		return (B_FALSE);
	}

	/*
	 * Parse the "action" extension and convert into an action chain.
	 */

	nact = sactp->spd_actions_count;

	endp = (uint64_t *)sactp;
	endp += sactp->spd_actions_len;
	endattrp = (struct spd_attribute *)endp;

	/*
	 * NOTE(review): nact comes straight from the (user-supplied)
	 * message; the multiplication below is presumably bounded by
	 * earlier message-length validation — verify against callers.
	 */
	actp = kmem_alloc(sizeof (*actp) * nact, KM_NOSLEEP);
	if (actp == NULL) {
		*diag = SPD_DIAGNOSTIC_ADD_NO_MEM;
		return (B_FALSE);
	}
	*actpp = actp;
	*nactp = nact;
	endactp = actp + nact;

	/* `act' accumulates attributes until NEXT/END commits it. */
	spdsock_reset_act(&act);
	attrp = (struct spd_attribute *)(&sactp[1]);

	for (; attrp < endattrp; attrp++) {
		switch (attrp->spd_attr_tag) {
		case SPD_ATTR_NOP:
			break;

		case SPD_ATTR_EMPTY:
			spdsock_reset_act(&act);
			break;

		case SPD_ATTR_END:
			/* END terminates the loop after committing `act'. */
			attrp = endattrp;
			/* FALLTHRU */
		case SPD_ATTR_NEXT:
			if (actp >= endactp) {
				*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
				goto fail;
			}
			if (!spdsock_check_action(&act, tunnel_polhead,
			    diag, spds))
				goto fail;
			*actp++ = act;
			spdsock_reset_act(&act);
			break;

		case SPD_ATTR_TYPE:
			if (!spd_convert_type(attrp->spd_attr_value, &act)) {
				*diag = SPD_DIAGNOSTIC_ADD_BAD_TYPE;
				goto fail;
			}
			break;

		case SPD_ATTR_FLAGS:
			if (!tunnel_polhead && extv[SPD_EXT_TUN_NAME] != NULL) {
				/*
				 * Set "sa unique" for transport-mode
				 * tunnels whether we want to or not.
				 */
				attrp->spd_attr_value |= SPD_APPLY_UNIQUE;
			}
			if (!spd_convert_flags(attrp->spd_attr_value, &act)) {
				*diag = SPD_DIAGNOSTIC_ADD_BAD_FLAGS;
				goto fail;
			}
			break;

		case SPD_ATTR_AH_AUTH:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_auth_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESP_ENCR:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_encr_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESP_AUTH:
			if (attrp->spd_attr_value == 0) {
				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
				goto fail;
			}
			act.ipa_apply.ipp_esp_auth_alg = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ENCR_MINBITS:
			act.ipa_apply.ipp_espe_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ENCR_MAXBITS:
			act.ipa_apply.ipp_espe_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_AH_MINBITS:
			act.ipa_apply.ipp_ah_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_AH_MAXBITS:
			act.ipa_apply.ipp_ah_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESPA_MINBITS:
			act.ipa_apply.ipp_espa_minbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_ESPA_MAXBITS:
			act.ipa_apply.ipp_espa_maxbits = attrp->spd_attr_value;
			break;

		case SPD_ATTR_LIFE_SOFT_TIME:
		case SPD_ATTR_LIFE_HARD_TIME:
		case SPD_ATTR_LIFE_SOFT_BYTES:
		case SPD_ATTR_LIFE_HARD_BYTES:
			/* Lifetime attributes are accepted but ignored here. */
			break;

		case SPD_ATTR_KM_PROTO:
			act.ipa_apply.ipp_km_proto = attrp->spd_attr_value;
			break;

		case SPD_ATTR_KM_COOKIE:
			act.ipa_apply.ipp_km_cookie = attrp->spd_attr_value;
			break;

		case SPD_ATTR_REPLAY_DEPTH:
			act.ipa_apply.ipp_replay_depth = attrp->spd_attr_value;
			break;
		}
	}
	/* Every declared action slot must have been filled — no more, no less. */
	if (actp != endactp) {
		*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
		goto fail;
	}

	return (B_TRUE);
fail:
	ipsec_actvec_free(*actpp, nact);
	*actpp = NULL;
	return (B_FALSE);
}
942 
/* Scratch record pairing a created policy with its direction. */
typedef struct
{
	ipsec_policy_t *pol;	/* policy created by mkrule() */
	int dir;		/* IPSEC_TYPE_INBOUND or _OUTBOUND */
} tmprule_t;
948 
/*
 * Create one policy entry for a single address family (af) and
 * direction (dir), validating it against the polhead.  The new
 * policy is recorded in *rp BEFORE ipsec_check_policy() runs, so
 * presumably the caller can unwind all recorded policies on error —
 * verify against spdsock_addrule.  Returns 0, ENOMEM, or EEXIST.
 */
static int
mkrule(ipsec_policy_head_t *iph, struct spd_rule *rule,
    ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t af,
    tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
{
	ipsec_policy_t *pol;

	/* Force the selector to exactly the requested address family. */
	sel->ipsl_valid &= ~(IPSL_IPV6|IPSL_IPV4);
	sel->ipsl_valid |= af;

	pol = ipsec_policy_create(sel, actp, nact, rule->spd_rule_priority,
	    index, spds->spds_netstack);
	if (pol == NULL)
		return (ENOMEM);

	(*rp)->pol = pol;
	(*rp)->dir = dir;
	(*rp)++;

	if (!ipsec_check_policy(iph, pol, dir))
		return (EEXIST);

	/* Report the assigned index back to the requester's rule. */
	rule->spd_rule_index = pol->ipsp_index;
	return (0);
}
974 
975 static int
976 mkrulepair(ipsec_policy_head_t *iph, struct spd_rule *rule,
977     ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t afs,
978     tmprule_t **rp, uint64_t *index, spd_stack_t *spds)
979 {
980 	int error;
981 
982 	if (afs & IPSL_IPV4) {
983 		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV4, rp,
984 		    index, spds);
985 		if (error != 0)
986 			return (error);
987 	}
988 	if (afs & IPSL_IPV6) {
989 		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV6, rp,
990 		    index, spds);
991 		if (error != 0)
992 			return (error);
993 	}
994 	return (0);
995 }
996 
997 
/*
 * Service an SPD_ADDRULE request against policy head "iph".  Decodes the
 * rule, selector, and action extensions from extv, builds up to four
 * candidate rules (inbound/outbound x IPv4/IPv6) via mkrulepair() into
 * the on-stack rules[] array, and only enters them into the policy head
 * once ALL of them were built successfully -- so a partial failure
 * leaves the policy head unchanged.  "itp" is non-NULL when the head
 * belongs to a tunnel; its per-tunnel flags are kept consistent with
 * the first rule added (tunnel vs. transport mode).  Replies with an
 * echo of the request on success or an error+diagnostic on failure, and
 * emits an audit record either way when auditing is active.
 */
static void
spdsock_addrule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	ipsec_act_t *actp;
	uint_t nact;
	int diag = 0, error, afs;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	tmprule_t rules[4], *rulep = &rules[0];
	boolean_t tunnel_mode, empty_itp, active;
	uint64_t *index = (itp == NULL) ? NULL : &itp->itp_next_policy_index;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;

	/* A rule extension is mandatory for ADDRULE. */
	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (audit_active) {
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_ADDRULE, DB_CRED(mp),
			    spds->spds_netstack, ITP_NAME(itp), active,
			    SPD_DIAGNOSTIC_NO_RULE_EXT, DB_CPID(mp));
		}
		return;
	}

	tunnel_mode = (rule->spd_rule_flags & SPD_RULE_FLAG_TUNNEL);

	if (itp != NULL) {
		/* itp_lock is held until success or the fail2 path. */
		mutex_enter(&itp->itp_lock);
		ASSERT(itp->itp_policy == iph || itp->itp_inactive == iph);
		active = (itp->itp_policy == iph);
		if (ITP_P_ISACTIVE(itp, iph)) {
			/* Check for mix-and-match of tunnel/transport. */
			if ((tunnel_mode && !ITP_P_ISTUNNEL(itp, iph)) ||
			    (!tunnel_mode && ITP_P_ISTUNNEL(itp, iph))) {
				mutex_exit(&itp->itp_lock);
				spdsock_error(q, mp, EBUSY, 0);
				return;
			}
			empty_itp = B_FALSE;
		} else {
			/*
			 * First rule on this head; remember that so a
			 * failure below can roll the flags back to zero.
			 */
			empty_itp = B_TRUE;
			itp->itp_flags = active ? ITPF_P_ACTIVE : ITPF_I_ACTIVE;
			if (tunnel_mode)
				itp->itp_flags |= active ? ITPF_P_TUNNEL :
				    ITPF_I_TUNNEL;
		}
	} else {
		empty_itp = B_FALSE;
	}

	/* The kernel assigns indices; a caller-supplied one is invalid. */
	if (rule->spd_rule_index != 0) {
		diag = SPD_DIAGNOSTIC_INVALID_RULE_INDEX;
		error = EINVAL;
		goto fail2;
	}

	if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
		error = EINVAL;
		goto fail2;
	}

	if (itp != NULL) {
		if (tunnel_mode) {
			if (sel.ipsl_valid &
			    (IPSL_REMOTE_PORT | IPSL_LOCAL_PORT)) {
				itp->itp_flags |= active ?
				    ITPF_P_PER_PORT_SECURITY :
				    ITPF_I_PER_PORT_SECURITY;
			}
		} else {
			/*
			 * For now, we don't allow transport-mode on a tunnel
			 * with ANY specific selectors.  Bail if we have such
			 * a request.
			 */
			if (sel.ipsl_valid & IPSL_WILDCARD) {
				diag = SPD_DIAGNOSTIC_NO_TUNNEL_SELECTORS;
				error = EINVAL;
				goto fail2;
			}
		}
	}

	if (!spdsock_ext_to_actvec(extv, &actp, &nact, &diag, spds)) {
		error = EINVAL;
		goto fail2;
	}
	/*
	 * If no addresses were specified, add both.
	 */
	afs = sel.ipsl_valid & (IPSL_IPV6|IPSL_IPV4);
	if (afs == 0)
		afs = (IPSL_IPV6|IPSL_IPV4);

	rw_enter(&iph->iph_lock, RW_WRITER);

	if (rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_OUTBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	if (rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) {
		error = mkrulepair(iph, rule, &sel, actp, nact,
		    IPSEC_TYPE_INBOUND, afs, &rulep, index, spds);
		if (error != 0)
			goto fail;
	}

	/* All candidates built; commit them to the policy head. */
	while ((--rulep) >= &rules[0]) {
		ipsec_enter_policy(iph, rulep->pol, rulep->dir,
		    spds->spds_netstack);
	}
	rw_exit(&iph->iph_lock);
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);

	ipsec_actvec_free(actp, nact);
	spd_echo(q, mp);
	if (audit_active) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, DB_CRED(mp), spds->spds_netstack,
		    ITP_NAME(itp), active, 0, DB_CPID(mp));
	}
	return;

fail:
	/* "fail" undoes built-but-uncommitted rules; "fail2" does not. */
	rw_exit(&iph->iph_lock);
	while ((--rulep) >= &rules[0]) {
		IPPOL_REFRELE(rulep->pol, spds->spds_netstack);
	}
	ipsec_actvec_free(actp, nact);
fail2:
	if (itp != NULL) {
		if (empty_itp)
			itp->itp_flags = 0;
		mutex_exit(&itp->itp_lock);
	}
	spdsock_error(q, mp, error, diag);
	if (audit_active) {
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_ADDRULE, DB_CRED(mp), spds->spds_netstack,
		    ITP_NAME(itp), active, error, DB_CPID(mp));
	}
}
1152 
/*
 * Service an SPD_DELETERULE request.  A nonzero spd_rule_index selects
 * deletion by index; otherwise rules are matched by the decoded
 * selector, separately for the inbound and/or outbound directions named
 * in spd_rule_flags.  On the last rule of a tunnel policy head the
 * corresponding per-tunnel ITPF flags are cleared.  Replies with an
 * echo on success, ESRCH/EINVAL on failure, and audits when active.
 */
void
spdsock_deleterule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	ipsec_selkey_t sel;
	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
	int err, diag = 0;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	/* A rule extension is mandatory for DELETERULE. */
	if (rule == NULL) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_DELETERULE, DB_CRED(mp), ns,
			    ITP_NAME(itp), active, SPD_DIAGNOSTIC_NO_RULE_EXT,
			    DB_CPID(mp));
		}
		return;
	}

	/*
	 * Must enter itp_lock first to avoid deadlock.  See tun.c's
	 * set_sec_simple() for the other case of itp_lock and iph_lock.
	 */
	if (itp != NULL)
		mutex_enter(&itp->itp_lock);

	if (rule->spd_rule_index != 0) {
		/* Delete-by-index; nonzero return means no such rule. */
		if (ipsec_policy_delete_index(iph, rule->spd_rule_index, ns) !=
		    0) {
			err = ESRCH;
			goto fail;
		}
	} else {
		if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
			err = EINVAL;	/* diag already set... */
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_INBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}

		if ((rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) &&
		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_OUTBOUND, ns)) {
			err = ESRCH;
			goto fail;
		}
	}

	if (itp != NULL) {
		ASSERT(iph == itp->itp_policy || iph == itp->itp_inactive);
		rw_enter(&iph->iph_lock, RW_READER);
		/* Head now empty?  Drop the matching active/inactive flags. */
		if (avl_numnodes(&iph->iph_rulebyid) == 0) {
			if (iph == itp->itp_policy)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
		}
		/* Can exit locks in any order. */
		rw_exit(&iph->iph_lock);
		mutex_exit(&itp->itp_lock);
	}
	spd_echo(q, mp);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, DB_CRED(mp), ns, ITP_NAME(itp),
		    active, 0, DB_CPID(mp));
	}
	return;
fail:
	if (itp != NULL)
		mutex_exit(&itp->itp_lock);
	spdsock_error(q, mp, err, diag);
	if (audit_active) {
		boolean_t active;
		spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

		active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
		audit_pf_policy(SPD_DELETERULE, DB_CRED(mp), ns, ITP_NAME(itp),
		    active, err, DB_CPID(mp));
	}
}
1245 
1246 /* Do NOT consume a reference to itp. */
1247 /* ARGSUSED */
static void
spdsock_flip_node(ipsec_tun_pol_t *itp, void *ignoreme, netstack_t *ns)
{
	/*
	 * Swap one tunnel node's active and inactive policy heads under
	 * its lock, flipping the ITPF_* flag bits to match.  Signature
	 * fits the itp_walk() callback; "ignoreme" is unused.
	 * NOTE(review): spdsock_flip() calls this with ns == NULL for a
	 * single named tunnel -- presumably ipsec_swap_policy() tolerates
	 * a NULL netstack for non-global heads; confirm.
	 */
	mutex_enter(&itp->itp_lock);
	ITPF_SWAP(itp->itp_flags);
	ipsec_swap_policy(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
}
1256 
/*
 * Service an SPD_FLIP request: exchange the active and inactive policy
 * sets.  Three cases: a NULL tunname flips only the global policy; an
 * empty tunnel name flips the global policy AND every tunnel policy
 * node; a specific name flips just that tunnel (ESRCH if not found).
 * An echo reply is sent on success; audit records are emitted for each
 * flip when auditing is active.
 */
void
spdsock_flip(queue_t *q, mblk_t *mp, spd_if_t *tunname)
{
	char *tname;
	ipsec_tun_pol_t *itp;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;

	if (tunname != NULL) {
		tname = (char *)tunname->spd_if_name;
		if (*tname == '\0') {
			/* can't fail */
			ipsec_swap_global_policy(ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, DB_CRED(mp),
				    ns, NULL, active, 0, DB_CPID(mp));
			}
			/* Empty name means "all tunnels" as well. */
			itp_walk(spdsock_flip_node, NULL, ns);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, DB_CRED(mp), ns,
				    "all tunnels", active, 0, DB_CPID(mp));
			}
		} else {
			/* Returns a held reference, released below. */
			itp = get_tunnel_policy(tname, ns);
			if (itp == NULL) {
				/* Better idea for "tunnel not found"? */
				spdsock_error(q, mp, ESRCH, 0);
				if (audit_active) {
					boolean_t active;
					spd_msg_t *spmsg =
					    (spd_msg_t *)mp->b_rptr;

					active = (spmsg->spd_msg_spdid ==
					    SPD_ACTIVE);
					audit_pf_policy(SPD_FLIP, DB_CRED(mp),
					    ns, ITP_NAME(itp), active,
					    ESRCH, DB_CPID(mp));
				}
				return;
			}
			spdsock_flip_node(itp, NULL, NULL);
			if (audit_active) {
				boolean_t active;
				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
				audit_pf_policy(SPD_FLIP, DB_CRED(mp), ns,
				    ITP_NAME(itp), active, 0, DB_CPID(mp));
			}
			/* Release only after the audit dereferenced itp. */
			ITP_REFRELE(itp, ns);
		}
	} else {
		ipsec_swap_global_policy(ns);	/* can't fail */
		if (audit_active) {
			boolean_t active;
			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;

			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
			audit_pf_policy(SPD_FLIP, DB_CRED(mp),
			    ns, NULL, active, 0, DB_CPID(mp));
		}
	}
	spd_echo(q, mp);
}
1329 
1330 /*
1331  * Unimplemented feature
1332  */
1333 /* ARGSUSED */
static void
spdsock_lookup(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
    spd_ext_t **extv, ipsec_tun_pol_t *itp)
{
	/* SPD_LOOKUP is unimplemented; always reject with EINVAL. */
	spdsock_error(q, mp, EINVAL, 0);
}
1340 
1341 
1342 static mblk_t *
1343 spdsock_dump_ruleset(mblk_t *req, ipsec_policy_head_t *iph,
1344     uint32_t count, uint16_t error)
1345 {
1346 	size_t len = sizeof (spd_ruleset_ext_t) + sizeof (spd_msg_t);
1347 	spd_msg_t *msg;
1348 	spd_ruleset_ext_t *ruleset;
1349 	mblk_t *m = allocb(len, BPRI_HI);
1350 
1351 	ASSERT(RW_READ_HELD(&iph->iph_lock));
1352 
1353 	if (m == NULL) {
1354 		return (NULL);
1355 	}
1356 	msg = (spd_msg_t *)m->b_rptr;
1357 	ruleset = (spd_ruleset_ext_t *)(&msg[1]);
1358 
1359 	m->b_wptr = (uint8_t *)&ruleset[1];
1360 
1361 	*msg = *(spd_msg_t *)(req->b_rptr);
1362 	msg->spd_msg_len = SPD_8TO64(len);
1363 	msg->spd_msg_errno = error;
1364 
1365 	ruleset->spd_ruleset_len = SPD_8TO64(sizeof (*ruleset));
1366 	ruleset->spd_ruleset_type = SPD_EXT_RULESET;
1367 	ruleset->spd_ruleset_count = count;
1368 	ruleset->spd_ruleset_version = iph->iph_gen;
1369 	return (m);
1370 }
1371 
/*
 * Terminate an in-progress dump with the given errno (0 on normal
 * completion): build the trailing ruleset record under the policy-head
 * lock, then tear down the dump state -- drop the head reference and
 * free the saved request mblk.  Returns the trailing record, or NULL
 * if its allocation failed.
 */
static mblk_t *
spdsock_dump_finish(spdsock_t *ss, int error)
{
	mblk_t *m;
	ipsec_policy_head_t *iph = ss->spdsock_dump_head;
	mblk_t *req = ss->spdsock_dump_req;

	rw_enter(&iph->iph_lock, RW_READER);
	m = spdsock_dump_ruleset(req, iph, ss->spdsock_dump_count, error);
	rw_exit(&iph->iph_lock);
	IPPH_REFRELE(iph, ss->spdsock_spds->spds_netstack);
	/* NULL spdsock_dump_req marks "no dump in progress". */
	ss->spdsock_dump_req = NULL;
	freemsg(req);

	return (m);
}
1388 
1389 /*
1390  * Rule encoding functions.
1391  * We do a two-pass encode.
1392  * If base != NULL, fill in encoded rule part starting at base+offset.
1393  * Always return "offset" plus length of to-be-encoded data.
1394  */
1395 static uint_t
1396 spdsock_encode_typecode(uint8_t *base, uint_t offset, uint8_t type,
1397     uint8_t type_end, uint8_t code, uint8_t code_end)
1398 {
1399 	struct spd_typecode *tcp;
1400 
1401 	ASSERT(ALIGNED64(offset));
1402 
1403 	if (base != NULL) {
1404 		tcp = (struct spd_typecode *)(base + offset);
1405 		tcp->spd_typecode_len = SPD_8TO64(sizeof (*tcp));
1406 		tcp->spd_typecode_exttype = SPD_EXT_ICMP_TYPECODE;
1407 		tcp->spd_typecode_code = code;
1408 		tcp->spd_typecode_type = type;
1409 		tcp->spd_typecode_type_end = type_end;
1410 		tcp->spd_typecode_code_end = code_end;
1411 	}
1412 	offset += sizeof (*tcp);
1413 
1414 	ASSERT(ALIGNED64(offset));
1415 
1416 	return (offset);
1417 }
1418 
1419 static uint_t
1420 spdsock_encode_proto(uint8_t *base, uint_t offset, uint8_t proto)
1421 {
1422 	struct spd_proto *spp;
1423 
1424 	ASSERT(ALIGNED64(offset));
1425 
1426 	if (base != NULL) {
1427 		spp = (struct spd_proto *)(base + offset);
1428 		spp->spd_proto_len = SPD_8TO64(sizeof (*spp));
1429 		spp->spd_proto_exttype = SPD_EXT_PROTO;
1430 		spp->spd_proto_number = proto;
1431 		spp->spd_proto_reserved1 = 0;
1432 		spp->spd_proto_reserved2 = 0;
1433 	}
1434 	offset += sizeof (*spp);
1435 
1436 	ASSERT(ALIGNED64(offset));
1437 
1438 	return (offset);
1439 }
1440 
1441 static uint_t
1442 spdsock_encode_port(uint8_t *base, uint_t offset, uint16_t ext, uint16_t port)
1443 {
1444 	struct spd_portrange *spp;
1445 
1446 	ASSERT(ALIGNED64(offset));
1447 
1448 	if (base != NULL) {
1449 		spp = (struct spd_portrange *)(base + offset);
1450 		spp->spd_ports_len = SPD_8TO64(sizeof (*spp));
1451 		spp->spd_ports_exttype = ext;
1452 		spp->spd_ports_minport = port;
1453 		spp->spd_ports_maxport = port;
1454 	}
1455 	offset += sizeof (*spp);
1456 
1457 	ASSERT(ALIGNED64(offset));
1458 
1459 	return (offset);
1460 }
1461 
1462 static uint_t
1463 spdsock_encode_addr(uint8_t *base, uint_t offset, uint16_t ext,
1464     const ipsec_selkey_t *sel, const ipsec_addr_t *addr, uint_t pfxlen)
1465 {
1466 	struct spd_address *sae;
1467 	ipsec_addr_t *spdaddr;
1468 	uint_t start = offset;
1469 	uint_t addrlen;
1470 	uint_t af;
1471 
1472 	if (sel->ipsl_valid & IPSL_IPV4) {
1473 		af = AF_INET;
1474 		addrlen = IP_ADDR_LEN;
1475 	} else {
1476 		af = AF_INET6;
1477 		addrlen = IPV6_ADDR_LEN;
1478 	}
1479 
1480 	ASSERT(ALIGNED64(offset));
1481 
1482 	if (base != NULL) {
1483 		sae = (struct spd_address *)(base + offset);
1484 		sae->spd_address_exttype = ext;
1485 		sae->spd_address_af = af;
1486 		sae->spd_address_prefixlen = pfxlen;
1487 		sae->spd_address_reserved2 = 0;
1488 
1489 		spdaddr = (ipsec_addr_t *)(&sae[1]);
1490 		bcopy(addr, spdaddr, addrlen);
1491 	}
1492 	offset += sizeof (*sae);
1493 	addrlen = roundup(addrlen, sizeof (uint64_t));
1494 	offset += addrlen;
1495 
1496 	ASSERT(ALIGNED64(offset));
1497 
1498 	if (base != NULL)
1499 		sae->spd_address_len = SPD_8TO64(offset - start);
1500 	return (offset);
1501 }
1502 
1503 static uint_t
1504 spdsock_encode_sel(uint8_t *base, uint_t offset, const ipsec_sel_t *sel)
1505 {
1506 	const ipsec_selkey_t *selkey = &sel->ipsl_key;
1507 
1508 	if (selkey->ipsl_valid & IPSL_PROTOCOL)
1509 		offset = spdsock_encode_proto(base, offset, selkey->ipsl_proto);
1510 	if (selkey->ipsl_valid & IPSL_LOCAL_PORT)
1511 		offset = spdsock_encode_port(base, offset, SPD_EXT_LCLPORT,
1512 		    selkey->ipsl_lport);
1513 	if (selkey->ipsl_valid & IPSL_REMOTE_PORT)
1514 		offset = spdsock_encode_port(base, offset, SPD_EXT_REMPORT,
1515 		    selkey->ipsl_rport);
1516 	if (selkey->ipsl_valid & IPSL_REMOTE_ADDR)
1517 		offset = spdsock_encode_addr(base, offset, SPD_EXT_REMADDR,
1518 		    selkey, &selkey->ipsl_remote, selkey->ipsl_remote_pfxlen);
1519 	if (selkey->ipsl_valid & IPSL_LOCAL_ADDR)
1520 		offset = spdsock_encode_addr(base, offset, SPD_EXT_LCLADDR,
1521 		    selkey, &selkey->ipsl_local, selkey->ipsl_local_pfxlen);
1522 	if (selkey->ipsl_valid & IPSL_ICMP_TYPE) {
1523 		offset = spdsock_encode_typecode(base, offset,
1524 		    selkey->ipsl_icmp_type, selkey->ipsl_icmp_type_end,
1525 		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1526 		    selkey->ipsl_icmp_code : 255,
1527 		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1528 		    selkey->ipsl_icmp_code_end : 255);
1529 	}
1530 	return (offset);
1531 }
1532 
1533 static uint_t
1534 spdsock_encode_actattr(uint8_t *base, uint_t offset, uint32_t tag,
1535     uint32_t value)
1536 {
1537 	struct spd_attribute *attr;
1538 
1539 	ASSERT(ALIGNED64(offset));
1540 
1541 	if (base != NULL) {
1542 		attr = (struct spd_attribute *)(base + offset);
1543 		attr->spd_attr_tag = tag;
1544 		attr->spd_attr_value = value;
1545 	}
1546 	offset += sizeof (struct spd_attribute);
1547 
1548 	ASSERT(ALIGNED64(offset));
1549 
1550 	return (offset);
1551 }
1552 
1553 
1554 #define	EMIT(t, v) offset = spdsock_encode_actattr(base, offset, (t), (v))
1555 
1556 static uint_t
1557 spdsock_encode_action(uint8_t *base, uint_t offset, const ipsec_action_t *ap)
1558 {
1559 	const struct ipsec_act *act = &(ap->ipa_act);
1560 	uint_t flags;
1561 
1562 	EMIT(SPD_ATTR_EMPTY, 0);
1563 	switch (act->ipa_type) {
1564 	case IPSEC_ACT_DISCARD:
1565 	case IPSEC_ACT_REJECT:
1566 		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_DROP);
1567 		break;
1568 	case IPSEC_ACT_BYPASS:
1569 	case IPSEC_ACT_CLEAR:
1570 		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_PASS);
1571 		break;
1572 
1573 	case IPSEC_ACT_APPLY:
1574 		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_IPSEC);
1575 		flags = 0;
1576 		if (act->ipa_apply.ipp_use_ah)
1577 			flags |= SPD_APPLY_AH;
1578 		if (act->ipa_apply.ipp_use_esp)
1579 			flags |= SPD_APPLY_ESP;
1580 		if (act->ipa_apply.ipp_use_espa)
1581 			flags |= SPD_APPLY_ESPA;
1582 		if (act->ipa_apply.ipp_use_se)
1583 			flags |= SPD_APPLY_SE;
1584 		if (act->ipa_apply.ipp_use_unique)
1585 			flags |= SPD_APPLY_UNIQUE;
1586 		EMIT(SPD_ATTR_FLAGS, flags);
1587 		if (flags & SPD_APPLY_AH) {
1588 			EMIT(SPD_ATTR_AH_AUTH, act->ipa_apply.ipp_auth_alg);
1589 			EMIT(SPD_ATTR_AH_MINBITS,
1590 			    act->ipa_apply.ipp_ah_minbits);
1591 			EMIT(SPD_ATTR_AH_MAXBITS,
1592 			    act->ipa_apply.ipp_ah_maxbits);
1593 		}
1594 		if (flags & SPD_APPLY_ESP) {
1595 			EMIT(SPD_ATTR_ESP_ENCR, act->ipa_apply.ipp_encr_alg);
1596 			EMIT(SPD_ATTR_ENCR_MINBITS,
1597 			    act->ipa_apply.ipp_espe_minbits);
1598 			EMIT(SPD_ATTR_ENCR_MAXBITS,
1599 			    act->ipa_apply.ipp_espe_maxbits);
1600 			if (flags & SPD_APPLY_ESPA) {
1601 				EMIT(SPD_ATTR_ESP_AUTH,
1602 				    act->ipa_apply.ipp_esp_auth_alg);
1603 				EMIT(SPD_ATTR_ESPA_MINBITS,
1604 				    act->ipa_apply.ipp_espa_minbits);
1605 				EMIT(SPD_ATTR_ESPA_MAXBITS,
1606 				    act->ipa_apply.ipp_espa_maxbits);
1607 			}
1608 		}
1609 		if (act->ipa_apply.ipp_km_proto != 0)
1610 			EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_proto);
1611 		if (act->ipa_apply.ipp_km_cookie != 0)
1612 			EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_cookie);
1613 		if (act->ipa_apply.ipp_replay_depth != 0)
1614 			EMIT(SPD_ATTR_REPLAY_DEPTH,
1615 			    act->ipa_apply.ipp_replay_depth);
1616 		/* Add more here */
1617 		break;
1618 	}
1619 
1620 	return (offset);
1621 }
1622 
1623 static uint_t
1624 spdsock_encode_action_list(uint8_t *base, uint_t offset,
1625     const ipsec_action_t *ap)
1626 {
1627 	struct spd_ext_actions *act;
1628 	uint_t nact = 0;
1629 	uint_t start = offset;
1630 
1631 	ASSERT(ALIGNED64(offset));
1632 
1633 	if (base != NULL) {
1634 		act = (struct spd_ext_actions *)(base + offset);
1635 		act->spd_actions_len = 0;
1636 		act->spd_actions_exttype = SPD_EXT_ACTION;
1637 		act->spd_actions_count = 0;
1638 		act->spd_actions_reserved = 0;
1639 	}
1640 
1641 	offset += sizeof (*act);
1642 
1643 	ASSERT(ALIGNED64(offset));
1644 
1645 	while (ap != NULL) {
1646 		offset = spdsock_encode_action(base, offset, ap);
1647 		ap = ap->ipa_next;
1648 		nact++;
1649 		if (ap != NULL) {
1650 			EMIT(SPD_ATTR_NEXT, 0);
1651 		}
1652 	}
1653 	EMIT(SPD_ATTR_END, 0);
1654 
1655 	ASSERT(ALIGNED64(offset));
1656 
1657 	if (base != NULL) {
1658 		act->spd_actions_count = nact;
1659 		act->spd_actions_len = SPD_8TO64(offset - start);
1660 	}
1661 
1662 	return (offset);
1663 }
1664 
1665 #undef EMIT
1666 
1667 /* ARGSUSED */
1668 static uint_t
1669 spdsock_rule_flags(uint_t dir, uint_t af)
1670 {
1671 	uint_t flags = 0;
1672 
1673 	if (dir == IPSEC_TYPE_INBOUND)
1674 		flags |= SPD_RULE_FLAG_INBOUND;
1675 	if (dir == IPSEC_TYPE_OUTBOUND)
1676 		flags |= SPD_RULE_FLAG_OUTBOUND;
1677 
1678 	return (flags);
1679 }
1680 
1681 
/*
 * Encode one complete dump record for "rule": an SPD_DUMP spd_msg
 * header, the spd_rule extension, an optional SPD_EXT_TUN_NAME when
 * "name" is non-NULL (tunnel policy heads), then the selector and
 * action-list extensions.  Two-pass: base == NULL computes size only.
 * The message length is back-patched at the end.  Returns the offset
 * just past the record.
 */
static uint_t
spdsock_encode_rule_head(uint8_t *base, uint_t offset, spd_msg_t *req,
    const ipsec_policy_t *rule, uint_t dir, uint_t af, char *name,
    boolean_t tunnel)
{
	struct spd_msg *spmsg;
	struct spd_rule *spr;
	spd_if_t *sid;

	uint_t start = offset;

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spmsg = (struct spd_msg *)(base + offset);
		bzero(spmsg, sizeof (*spmsg));
		spmsg->spd_msg_version = PF_POLICY_V1;
		spmsg->spd_msg_type = SPD_DUMP;
		/* Echo the requester's sequence and pid back to it. */
		spmsg->spd_msg_seq = req->spd_msg_seq;
		spmsg->spd_msg_pid = req->spd_msg_pid;
	}
	offset += sizeof (struct spd_msg);

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spr = (struct spd_rule *)(base + offset);
		spr->spd_rule_type = SPD_EXT_RULE;
		spr->spd_rule_priority = rule->ipsp_prio;
		spr->spd_rule_flags = spdsock_rule_flags(dir, af);
		if (tunnel)
			spr->spd_rule_flags |= SPD_RULE_FLAG_TUNNEL;
		spr->spd_rule_unused = 0;
		spr->spd_rule_len = SPD_8TO64(sizeof (*spr));
		spr->spd_rule_index = rule->ipsp_index;
	}
	offset += sizeof (struct spd_rule);

	/*
	 * If we have an interface name (i.e. if this policy head came from
	 * a tunnel), add the SPD_EXT_TUN_NAME extension.
	 */
	if (name != NULL) {

		ASSERT(ALIGNED64(offset));

		if (base != NULL) {
			sid = (spd_if_t *)(base + offset);
			sid->spd_if_exttype = SPD_EXT_TUN_NAME;
			/*
			 * spd_if_t already holds the first 4 name bytes;
			 * only the remainder is appended, rounded to a
			 * 64-bit boundary.  NOTE(review): strlen(name) - 4
			 * underflows for names shorter than 4 chars --
			 * presumably tunnel names are always longer;
			 * confirm.
			 */
			sid->spd_if_len = SPD_8TO64(sizeof (spd_if_t) +
			    roundup((strlen(name) - 4), 8));
			(void) strlcpy((char *)sid->spd_if_name, name,
			    LIFNAMSIZ);
		}

		/* Must match the sizing logic used for spd_if_len above. */
		offset += sizeof (spd_if_t) + roundup((strlen(name) - 4), 8);
	}

	offset = spdsock_encode_sel(base, offset, rule->ipsp_sel);
	offset = spdsock_encode_action_list(base, offset, rule->ipsp_act);

	ASSERT(ALIGNED64(offset));

	if (base != NULL) {
		spmsg->spd_msg_len = SPD_8TO64(offset - start);
	}
	return (offset);
}
1750 
1751 /* ARGSUSED */
1752 static mblk_t *
1753 spdsock_encode_rule(mblk_t *req, const ipsec_policy_t *rule,
1754     uint_t dir, uint_t af, char *name, boolean_t tunnel)
1755 {
1756 	mblk_t *m;
1757 	uint_t len;
1758 	spd_msg_t *mreq = (spd_msg_t *)req->b_rptr;
1759 
1760 	/*
1761 	 * Figure out how much space we'll need.
1762 	 */
1763 	len = spdsock_encode_rule_head(NULL, 0, mreq, rule, dir, af, name,
1764 	    tunnel);
1765 
1766 	/*
1767 	 * Allocate mblk.
1768 	 */
1769 	m = allocb(len, BPRI_HI);
1770 	if (m == NULL)
1771 		return (NULL);
1772 
1773 	/*
1774 	 * Fill it in..
1775 	 */
1776 	m->b_wptr = m->b_rptr + len;
1777 	bzero(m->b_rptr, len);
1778 	(void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule, dir, af,
1779 	    name, tunnel);
1780 	return (m);
1781 }
1782 
/*
 * Account for "cur" being emitted: bump the per-socket dump counter,
 * remember its hash-chain successor as the resume point, and hand
 * "cur" back for encoding.
 */
static ipsec_policy_t *
spdsock_dump_next_in_chain(spdsock_t *ss, ipsec_policy_head_t *iph,
    ipsec_policy_t *cur)
{
	ASSERT(RW_READ_HELD(&iph->iph_lock));

	ss->spdsock_dump_count++;
	ss->spdsock_dump_cur_rule = cur->ipsp_hash.hash_next;
	return (cur);
}
1793 
/*
 * Resumable iterator over one policy head.  Walks, in order: the rest
 * of the current hash chain, the remaining hash chains of the current
 * type, that type's non-hashed (per-af) lists, then advances to the
 * next IPSEC_TYPE_* and repeats.  The cursor lives in the spdsock_t
 * (cur_rule/cur_chain/cur_af/cur_type) so successive calls pick up
 * where the last one stopped.  Returns NULL once the head is exhausted.
 */
static ipsec_policy_t *
spdsock_dump_next_rule(spdsock_t *ss, ipsec_policy_head_t *iph)
{
	ipsec_policy_t *cur;
	ipsec_policy_root_t *ipr;
	int chain, nchains, type, af;

	ASSERT(RW_READ_HELD(&iph->iph_lock));

	cur = ss->spdsock_dump_cur_rule;

	/* Still mid-chain from the previous call?  Just continue it. */
	if (cur != NULL)
		return (spdsock_dump_next_in_chain(ss, iph, cur));

	type = ss->spdsock_dump_cur_type;

next:
	chain = ss->spdsock_dump_cur_chain;
	ipr = &iph->iph_root[type];
	nchains = ipr->ipr_nchains;

	/* Find the next non-empty hash chain for this type. */
	while (chain < nchains) {
		cur = ipr->ipr_hash[chain].hash_head;
		chain++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_chain = chain;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}
	ss->spdsock_dump_cur_chain = nchains;

	/* Hash chains done; walk the non-hashed per-af lists. */
	af = ss->spdsock_dump_cur_af;
	while (af < IPSEC_NAF) {
		cur = ipr->ipr_nonhash[af];
		af++;
		if (cur != NULL) {
			ss->spdsock_dump_cur_af = af;
			return (spdsock_dump_next_in_chain(ss, iph, cur));
		}
	}

	/* This type is exhausted; move on to the next, if any. */
	type++;
	if (type >= IPSEC_NTYPES)
		return (NULL);

	ss->spdsock_dump_cur_chain = 0;
	ss->spdsock_dump_cur_type = type;
	ss->spdsock_dump_cur_af = IPSEC_AF_V4;
	goto next;

}
1845 
1846 /*
1847  * If we're done with one policy head, but have more to go, we iterate through
1848  * another IPsec tunnel policy head (itp).  Return NULL if it is an error
1849  * worthy of returning EAGAIN via PF_POLICY.
1850  */
static ipsec_tun_pol_t *
spdsock_dump_iterate_next_tunnel(spdsock_t *ss, ipsec_stack_t *ipss)
{
	ipsec_tun_pol_t *itp;

	ASSERT(RW_READ_HELD(&ipss->ipsec_tunnel_policy_lock));
	if (ipss->ipsec_tunnel_policy_gen > ss->spdsock_dump_tun_gen) {
		/* Oops, state of the tunnel polheads changed. */
		itp = NULL;
	} else if (ss->spdsock_itp == NULL) {
		/* Just finished global, find first node. */
		itp = avl_first(&ipss->ipsec_tunnel_policies);
	} else {
		/* We just finished current polhead, find the next one. */
		itp = AVL_NEXT(&ipss->ipsec_tunnel_policies, ss->spdsock_itp);
	}
	/* Hold the new node before releasing the old one. */
	if (itp != NULL) {
		ITP_REFHOLD(itp);
	}
	if (ss->spdsock_itp != NULL) {
		ITP_REFRELE(ss->spdsock_itp, ipss->ipsec_netstack);
	}
	/* The socket keeps one reference on the node being dumped. */
	ss->spdsock_itp = itp;
	return (itp);
}
1876 
/*
 * Produce the next dump record: fetch the next rule from the current
 * policy head (advancing through tunnel policy heads as each one is
 * exhausted) and encode it into an mblk.  Terminates the dump with
 * EAGAIN if the head's generation changed under us, with 0 when all
 * polheads are done, and with ENOMEM on allocation failure.
 */
static mblk_t *
spdsock_dump_next_record(spdsock_t *ss)
{
	ipsec_policy_head_t *iph;
	ipsec_policy_t *rule;
	mblk_t *m;
	ipsec_tun_pol_t *itp;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	iph = ss->spdsock_dump_head;

	ASSERT(iph != NULL);

	rw_enter(&iph->iph_lock, RW_READER);

	/* Policy changed mid-dump; the client must restart. */
	if (iph->iph_gen != ss->spdsock_dump_gen) {
		rw_exit(&iph->iph_lock);
		return (spdsock_dump_finish(ss, EAGAIN));
	}

	while ((rule = spdsock_dump_next_rule(ss, iph)) == NULL) {
		rw_exit(&iph->iph_lock);
		if (--(ss->spdsock_dump_remaining_polheads) == 0)
			return (spdsock_dump_finish(ss, 0));


		/*
		 * If we reach here, we have more policy heads (tunnel
		 * entries) to dump.  Let's reset to a new policy head
		 * and get some more rules.
		 *
		 * An empty policy head will have spdsock_dump_next_rule()
		 * return NULL, and we loop (while dropping the number of
		 * remaining polheads).  If we loop to 0, we finish.  We
		 * keep looping until we hit 0 or until we have a rule to
		 * encode.
		 *
		 * NOTE:  No need for ITP_REF*() macros here as we're only
		 * going after and refholding the policy head itself.
		 */
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		itp = spdsock_dump_iterate_next_tunnel(ss, ipss);
		if (itp == NULL) {
			rw_exit(&ipss->ipsec_tunnel_policy_lock);
			return (spdsock_dump_finish(ss, EAGAIN));
		}

		/* Reset other spdsock_dump thingies. */
		IPPH_REFRELE(ss->spdsock_dump_head, ns);
		if (ss->spdsock_dump_active) {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_P_TUNNEL;
			iph = itp->itp_policy;
		} else {
			ss->spdsock_dump_tunnel =
			    itp->itp_flags & ITPF_I_TUNNEL;
			iph = itp->itp_inactive;
		}
		IPPH_REFHOLD(iph);
		rw_exit(&ipss->ipsec_tunnel_policy_lock);

		rw_enter(&iph->iph_lock, RW_READER);
		RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);
	}

	m = spdsock_encode_rule(ss->spdsock_dump_req, rule,
	    ss->spdsock_dump_cur_type, ss->spdsock_dump_cur_af,
	    (ss->spdsock_itp == NULL) ? NULL : ss->spdsock_itp->itp_name,
	    ss->spdsock_dump_tunnel);
	rw_exit(&iph->iph_lock);

	if (m == NULL)
		return (spdsock_dump_finish(ss, ENOMEM));
	return (m);
}
1953 
1954 /*
1955  * Dump records until we run into flow-control back-pressure.
1956  */
static void
spdsock_dump_some(queue_t *q, spdsock_t *ss)
{
	mblk_t *m, *dataind;

	/* Loop while a dump is in progress and downstream can accept. */
	while ((ss->spdsock_dump_req != NULL) && canputnext(q)) {
		m = spdsock_dump_next_record(ss);
		if (m == NULL)
			return;
		/*
		 * Wrap each record in a T_DATA_IND.  NOTE(review): the
		 * allocation is sized by T_data_req but written as
		 * T_data_ind -- presumably the two structs have identical
		 * layout (PRIM_type + MORE_flag); confirm against tihdr.h.
		 */
		dataind = allocb(sizeof (struct T_data_req), BPRI_HI);
		if (dataind == NULL) {
			freemsg(m);
			return;
		}
		dataind->b_cont = m;
		dataind->b_wptr += sizeof (struct T_data_req);
		((struct T_data_ind *)dataind->b_rptr)->PRIM_type = T_DATA_IND;
		((struct T_data_ind *)dataind->b_rptr)->MORE_flag = 0;
		dataind->b_datap->db_type = M_PROTO;
		putnext(q, dataind);
	}
}
1979 
1980 /*
1981  * Start dumping.
1982  * Format a start-of-dump record, and set up the stream and kick the rsrv
1983  * procedure to continue the job..
1984  */
1985 /* ARGSUSED */
static void
spdsock_dump(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp)
{
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	netstack_t *ns = ss->spdsock_spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;
	mblk_t *mr;

	/* spdsock_open() already set spdsock_itp to NULL. */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		/*
		 * "All polheads" request: dump the global head plus one
		 * head per tunnel node, and snapshot the tunnel-set
		 * generation so a later change can abort with EAGAIN.
		 */
		rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
		ss->spdsock_dump_remaining_polheads = 1 +
		    avl_numnodes(&ipss->ipsec_tunnel_policies);
		ss->spdsock_dump_tun_gen = ipss->ipsec_tunnel_policy_gen;
		rw_exit(&ipss->ipsec_tunnel_policy_lock);
		if (iph == ALL_ACTIVE_POLHEADS) {
			iph = ipsec_system_policy(ns);
			ss->spdsock_dump_active = B_TRUE;
		} else {
			iph = ipsec_inactive_policy(ns);
			ss->spdsock_dump_active = B_FALSE;
		}
		ASSERT(ss->spdsock_itp == NULL);
	} else {
		ss->spdsock_dump_remaining_polheads = 1;
	}

	rw_enter(&iph->iph_lock, RW_READER);

	/* Leading ruleset record announcing the dump. */
	mr = spdsock_dump_ruleset(mp, iph, 0, 0);

	if (!mr) {
		rw_exit(&iph->iph_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	/* Saving mp marks the dump as in-progress for the rsrv side. */
	ss->spdsock_dump_req = mp;
	RESET_SPDSOCK_DUMP_POLHEAD(ss, iph);

	rw_exit(&iph->iph_lock);

	qreply(q, mr);
	qenable(OTHERQ(q));
}
2031 
2032 /* Do NOT consume a reference to ITP. */
void
spdsock_clone_node(ipsec_tun_pol_t *itp, void *ep, netstack_t *ns)
{
	/*
	 * itp_walk() callback: copy one tunnel node's active policy head
	 * to its inactive head under the node lock, mirroring the flag
	 * bits via ITPF_CLONE.  "ep" points at a shared int error; a
	 * prior failure short-circuits the rest of the walk.
	 */
	int *errptr = (int *)ep;

	if (*errptr != 0)
		return;	/* We've failed already for some reason. */
	mutex_enter(&itp->itp_lock);
	ITPF_CLONE(itp->itp_flags);
	*errptr = ipsec_copy_polhead(itp->itp_policy, itp->itp_inactive, ns);
	mutex_exit(&itp->itp_lock);
}
2045 
2046 void
2047 spdsock_clone(queue_t *q, mblk_t *mp, spd_if_t *tunname)
2048 {
2049 	int error;
2050 	char *tname;
2051 	ipsec_tun_pol_t *itp;
2052 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
2053 	netstack_t *ns = ss->spdsock_spds->spds_netstack;
2054 
2055 	if (tunname != NULL) {
2056 		tname = (char *)tunname->spd_if_name;
2057 		if (*tname == '\0') {
2058 			error = ipsec_clone_system_policy(ns);
2059 			if (audit_active) {
2060 				boolean_t active;
2061 				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2062 
2063 				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2064 				audit_pf_policy(SPD_CLONE, DB_CRED(mp), ns,
2065 				    NULL, active, error, DB_CPID(mp));
2066 			}
2067 			if (error == 0) {
2068 				itp_walk(spdsock_clone_node, &error, ns);
2069 				if (audit_active) {
2070 					boolean_t active;
2071 					spd_msg_t *spmsg =
2072 					    (spd_msg_t *)mp->b_rptr;
2073 
2074 					active = (spmsg->spd_msg_spdid ==
2075 					    SPD_ACTIVE);
2076 					audit_pf_policy(SPD_CLONE, DB_CRED(mp),
2077 					    ns, "all tunnels", active, 0,
2078 					    DB_CPID(mp));
2079 				}
2080 			}
2081 		} else {
2082 			itp = get_tunnel_policy(tname, ns);
2083 			if (itp == NULL) {
2084 				spdsock_error(q, mp, ENOENT, 0);
2085 				if (audit_active) {
2086 					boolean_t active;
2087 					spd_msg_t *spmsg =
2088 					    (spd_msg_t *)mp->b_rptr;
2089 
2090 					active = (spmsg->spd_msg_spdid ==
2091 					    SPD_ACTIVE);
2092 					audit_pf_policy(SPD_CLONE, DB_CRED(mp),
2093 					    ns, ITP_NAME(itp), active, ENOENT,
2094 					    DB_CPID(mp));
2095 				}
2096 				return;
2097 			}
2098 			spdsock_clone_node(itp, &error, NULL);
2099 			ITP_REFRELE(itp, ns);
2100 			if (audit_active) {
2101 				boolean_t active;
2102 				spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2103 
2104 				active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2105 				audit_pf_policy(SPD_CLONE, DB_CRED(mp), ns,
2106 				    ITP_NAME(itp), active, error, DB_CPID(mp));
2107 			}
2108 		}
2109 	} else {
2110 		error = ipsec_clone_system_policy(ns);
2111 		if (audit_active) {
2112 			boolean_t active;
2113 			spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
2114 
2115 			active = (spmsg->spd_msg_spdid == SPD_ACTIVE);
2116 			audit_pf_policy(SPD_CLONE, DB_CRED(mp), ns, NULL,
2117 			    active, error, DB_CPID(mp));
2118 		}
2119 	}
2120 
2121 	if (error != 0)
2122 		spdsock_error(q, mp, error, 0);
2123 	else
2124 		spd_echo(q, mp);
2125 }
2126 
2127 /*
2128  * Process a SPD_ALGLIST request. The caller expects separate alg entries
2129  * for AH authentication, ESP authentication, and ESP encryption.
2130  * The same distinction is then used when setting the min and max key
2131  * sizes when defining policies.
2132  */
2133 
2134 #define	SPDSOCK_AH_AUTH		0
2135 #define	SPDSOCK_ESP_AUTH	1
2136 #define	SPDSOCK_ESP_ENCR	2
2137 #define	SPDSOCK_NTYPES		3
2138 
2139 static const uint_t algattr[SPDSOCK_NTYPES] = {
2140 	SPD_ATTR_AH_AUTH,
2141 	SPD_ATTR_ESP_AUTH,
2142 	SPD_ATTR_ESP_ENCR
2143 };
2144 static const uint_t minbitsattr[SPDSOCK_NTYPES] = {
2145 	SPD_ATTR_AH_MINBITS,
2146 	SPD_ATTR_ESPA_MINBITS,
2147 	SPD_ATTR_ENCR_MINBITS
2148 };
2149 static const uint_t maxbitsattr[SPDSOCK_NTYPES] = {
2150 	SPD_ATTR_AH_MAXBITS,
2151 	SPD_ATTR_ESPA_MAXBITS,
2152 	SPD_ATTR_ENCR_MAXBITS
2153 };
2154 static const uint_t defbitsattr[SPDSOCK_NTYPES] = {
2155 	SPD_ATTR_AH_DEFBITS,
2156 	SPD_ATTR_ESPA_DEFBITS,
2157 	SPD_ATTR_ENCR_DEFBITS
2158 };
2159 static const uint_t incrbitsattr[SPDSOCK_NTYPES] = {
2160 	SPD_ATTR_AH_INCRBITS,
2161 	SPD_ATTR_ESPA_INCRBITS,
2162 	SPD_ATTR_ENCR_INCRBITS
2163 };
2164 
2165 #define	ATTRPERALG	6	/* fixed attributes per algs */
2166 
/*
 * Build and send the SPD_ALGLIST reply: one action extension containing,
 * for every usable algorithm, the six attributes described by ATTRPERALG
 * (alg id, min/max/default key bits, increment, and an END/NEXT marker).
 * AH auth algorithms are emitted twice: once as AH auth, once as ESP auth.
 */
void
spdsock_alglist(queue_t *q, mblk_t *mp)
{
	uint_t algtype;
	uint_t algidx;
	uint_t algcount;
	uint_t size;
	mblk_t *m;
	uint8_t *cur;
	spd_msg_t *msg;
	struct spd_ext_actions *act;
	struct spd_attribute *attr;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	/* Hold the alg lock across both the sizing and the emit passes. */
	mutex_enter(&ipss->ipsec_alg_lock);
	/*
	 * The SPD client expects to receive separate entries for
	 * AH authentication and ESP authentication supported algorithms.
	 *
	 * Don't return the "any" algorithms, if defined, as no
	 * kernel policies can be set for these algorithms.
	 */
	algcount = 2 * ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];

	if (ipss->ipsec_alglists[IPSEC_ALG_AUTH][SADB_AALG_NONE] != NULL)
		algcount--;
	if (ipss->ipsec_alglists[IPSEC_ALG_ENCR][SADB_EALG_NONE] != NULL)
		algcount--;

	/*
	 * For each algorithm, we encode:
	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
	 */

	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions) +
	    ATTRPERALG * sizeof (struct spd_attribute) * algcount;

	ASSERT(ALIGNED64(size));

	m = allocb(size, BPRI_HI);
	if (m == NULL) {
		mutex_exit(&ipss->ipsec_alg_lock);
		spdsock_error(q, mp, ENOMEM, 0);
		return;
	}

	m->b_wptr = m->b_rptr + size;
	cur = m->b_rptr;

	/* Echo the request header back, then overwrite length/status. */
	msg = (spd_msg_t *)cur;
	bcopy(mp->b_rptr, cur, sizeof (*msg));

	msg->spd_msg_len = SPD_8TO64(size);
	msg->spd_msg_errno = 0;
	msg->spd_msg_diagnostic = 0;

	cur += sizeof (*msg);

	act = (struct spd_ext_actions *)cur;
	cur += sizeof (*act);

	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
	act->spd_actions_exttype = SPD_EXT_ACTION;
	act->spd_actions_count = algcount;
	act->spd_actions_reserved = 0;

	attr = (struct spd_attribute *)cur;

#define	EMIT(tag, value) {					\
		attr->spd_attr_tag = (tag); 			\
		attr->spd_attr_value = (value); 		\
		attr++;			  			\
	}

	/*
	 * If you change the number of EMIT's here, change
	 * ATTRPERALG above to match
	 */
#define	EMITALGATTRS(_type) {					\
		EMIT(algattr[_type], algid); 		/* 1 */	\
		EMIT(minbitsattr[_type], minbits);	/* 2 */	\
		EMIT(maxbitsattr[_type], maxbits);	/* 3 */	\
		EMIT(defbitsattr[_type], defbits);	/* 4 */	\
		EMIT(incrbitsattr[_type], incr);	/* 5 */	\
		EMIT(SPD_ATTR_NEXT, 0);			/* 6 */	\
	}

	/* Walk algorithms in sorted order, skipping the "none" entries. */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
		    algidx++) {
			int algid = ipss->ipsec_sortlist[algtype][algidx];
			ipsec_alginfo_t *alg =
			    ipss->ipsec_alglists[algtype][algid];
			uint_t minbits = alg->alg_minbits;
			uint_t maxbits = alg->alg_maxbits;
			uint_t defbits = alg->alg_default_bits;
			uint_t incr = alg->alg_increment;

			if (algtype == IPSEC_ALG_AUTH) {
				if (algid == SADB_AALG_NONE)
					continue;
				EMITALGATTRS(SPDSOCK_AH_AUTH);
				EMITALGATTRS(SPDSOCK_ESP_AUTH);
			} else {
				if (algid == SADB_EALG_NONE)
					continue;
				ASSERT(algtype == IPSEC_ALG_ENCR);
				EMITALGATTRS(SPDSOCK_ESP_ENCR);
			}
		}
	}

	mutex_exit(&ipss->ipsec_alg_lock);

#undef EMITALGATTRS
#undef EMIT
#undef ATTRPERALG

	/*
	 * Turn the last NEXT marker into END.
	 * NOTE(review): assumes algcount > 0; with zero algorithms the
	 * attr-- below would back up into the action header — confirm
	 * that can't happen in practice.
	 */
	attr--;
	attr->spd_attr_tag = SPD_ATTR_END;

	freemsg(mp);
	qreply(q, m);
}
2293 
2294 /*
2295  * Process a SPD_DUMPALGS request.
2296  */
2297 
2298 #define	ATTRPERALG	7	/* fixed attributes per algs */
2299 
2300 void
2301 spdsock_dumpalgs(queue_t *q, mblk_t *mp)
2302 {
2303 	uint_t algtype;
2304 	uint_t algidx;
2305 	uint_t size;
2306 	mblk_t *m;
2307 	uint8_t *cur;
2308 	spd_msg_t *msg;
2309 	struct spd_ext_actions *act;
2310 	struct spd_attribute *attr;
2311 	ipsec_alginfo_t *alg;
2312 	uint_t algid;
2313 	uint_t i;
2314 	uint_t alg_size;
2315 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
2316 	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
2317 
2318 	mutex_enter(&ipss->ipsec_alg_lock);
2319 
2320 	/*
2321 	 * For each algorithm, we encode:
2322 	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
2323 	 *
2324 	 * ALG_ID / ALG_PROTO / ALG_INCRBITS / ALG_NKEYSIZES / ALG_KEYSIZE*
2325 	 * ALG_NBLOCKSIZES / ALG_BLOCKSIZE* / ALG_MECHNAME / {END, NEXT}
2326 	 */
2327 
2328 	/*
2329 	 * Compute the size of the SPD message.
2330 	 */
2331 	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions);
2332 
2333 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2334 		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
2335 		    algidx++) {
2336 			algid = ipss->ipsec_sortlist[algtype][algidx];
2337 			alg = ipss->ipsec_alglists[algtype][algid];
2338 			alg_size = sizeof (struct spd_attribute) *
2339 			    (ATTRPERALG + alg->alg_nkey_sizes +
2340 			    alg->alg_nblock_sizes) + CRYPTO_MAX_MECH_NAME;
2341 			size += alg_size;
2342 		}
2343 	}
2344 
2345 	ASSERT(ALIGNED64(size));
2346 
2347 	m = allocb(size, BPRI_HI);
2348 	if (m == NULL) {
2349 		mutex_exit(&ipss->ipsec_alg_lock);
2350 		spdsock_error(q, mp, ENOMEM, 0);
2351 		return;
2352 	}
2353 
2354 	m->b_wptr = m->b_rptr + size;
2355 	cur = m->b_rptr;
2356 
2357 	msg = (spd_msg_t *)cur;
2358 	bcopy(mp->b_rptr, cur, sizeof (*msg));
2359 
2360 	msg->spd_msg_len = SPD_8TO64(size);
2361 	msg->spd_msg_errno = 0;
2362 	msg->spd_msg_diagnostic = 0;
2363 
2364 	cur += sizeof (*msg);
2365 
2366 	act = (struct spd_ext_actions *)cur;
2367 	cur += sizeof (*act);
2368 
2369 	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
2370 	act->spd_actions_exttype = SPD_EXT_ACTION;
2371 	act->spd_actions_count = ipss->ipsec_nalgs[IPSEC_ALG_AUTH] +
2372 	    ipss->ipsec_nalgs[IPSEC_ALG_ENCR];
2373 	act->spd_actions_reserved = 0;
2374 
2375 	attr = (struct spd_attribute *)cur;
2376 
2377 #define	EMIT(tag, value) {					\
2378 		attr->spd_attr_tag = (tag); 			\
2379 		attr->spd_attr_value = (value); 		\
2380 		attr++;			  			\
2381 	}
2382 
2383 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2384 		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
2385 		    algidx++) {
2386 
2387 			algid = ipss->ipsec_sortlist[algtype][algidx];
2388 			alg = ipss->ipsec_alglists[algtype][algid];
2389 
2390 			/*
2391 			 * If you change the number of EMIT's here, change
2392 			 * ATTRPERALG above to match
2393 			 */
2394 			EMIT(SPD_ATTR_ALG_ID, algid);
2395 			EMIT(SPD_ATTR_ALG_PROTO, algproto[algtype]);
2396 			EMIT(SPD_ATTR_ALG_INCRBITS, alg->alg_increment);
2397 
2398 			EMIT(SPD_ATTR_ALG_NKEYSIZES, alg->alg_nkey_sizes);
2399 			for (i = 0; i < alg->alg_nkey_sizes; i++)
2400 				EMIT(SPD_ATTR_ALG_KEYSIZE,
2401 				    alg->alg_key_sizes[i]);
2402 
2403 			EMIT(SPD_ATTR_ALG_NBLOCKSIZES, alg->alg_nblock_sizes);
2404 			for (i = 0; i < alg->alg_nblock_sizes; i++)
2405 				EMIT(SPD_ATTR_ALG_BLOCKSIZE,
2406 				    alg->alg_block_sizes[i]);
2407 
2408 			EMIT(SPD_ATTR_ALG_MECHNAME, CRYPTO_MAX_MECH_NAME);
2409 			bcopy(alg->alg_mech_name, attr, CRYPTO_MAX_MECH_NAME);
2410 			attr = (struct spd_attribute *)((char *)attr +
2411 			    CRYPTO_MAX_MECH_NAME);
2412 
2413 			EMIT(SPD_ATTR_NEXT, 0);
2414 		}
2415 	}
2416 
2417 	mutex_exit(&ipss->ipsec_alg_lock);
2418 
2419 #undef EMITALGATTRS
2420 #undef EMIT
2421 #undef ATTRPERALG
2422 
2423 	attr--;
2424 	attr->spd_attr_tag = SPD_ATTR_END;
2425 
2426 	freemsg(mp);
2427 	qreply(q, m);
2428 }
2429 
2430 /*
2431  * Do the actual work of processing an SPD_UPDATEALGS request. Can
2432  * be invoked either once IPsec is loaded on a cached request, or
2433  * when a request is received while IPsec is loaded.
2434  */
2435 static void
2436 spdsock_do_updatealg(spd_ext_t *extv[], int *diag, spd_stack_t *spds)
2437 {
2438 	struct spd_ext_actions *actp;
2439 	struct spd_attribute *attr, *endattr;
2440 	uint64_t *start, *end;
2441 	ipsec_alginfo_t *alg = NULL;
2442 	ipsec_algtype_t alg_type = 0;
2443 	boolean_t skip_alg = B_TRUE, doing_proto = B_FALSE;
2444 	uint_t i, cur_key, cur_block, algid;
2445 
2446 	*diag = -1;
2447 	ASSERT(MUTEX_HELD(&spds->spds_alg_lock));
2448 
2449 	/* parse the message, building the list of algorithms */
2450 
2451 	actp = (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
2452 	if (actp == NULL) {
2453 		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
2454 		return;
2455 	}
2456 
2457 	start = (uint64_t *)actp;
2458 	end = (start + actp->spd_actions_len);
2459 	endattr = (struct spd_attribute *)end;
2460 	attr = (struct spd_attribute *)&actp[1];
2461 
2462 	bzero(spds->spds_algs, IPSEC_NALGTYPES * IPSEC_MAX_ALGS *
2463 	    sizeof (ipsec_alginfo_t *));
2464 
2465 	alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
2466 
2467 #define	ALG_KEY_SIZES(a)   (((a)->alg_nkey_sizes + 1) * sizeof (uint16_t))
2468 #define	ALG_BLOCK_SIZES(a) (((a)->alg_nblock_sizes + 1) * sizeof (uint16_t))
2469 
2470 	while (attr < endattr) {
2471 		switch (attr->spd_attr_tag) {
2472 		case SPD_ATTR_NOP:
2473 		case SPD_ATTR_EMPTY:
2474 			break;
2475 		case SPD_ATTR_END:
2476 			attr = endattr;
2477 			/* FALLTHRU */
2478 		case SPD_ATTR_NEXT:
2479 			if (doing_proto) {
2480 				doing_proto = B_FALSE;
2481 				break;
2482 			}
2483 			if (skip_alg) {
2484 				ipsec_alg_free(alg);
2485 			} else {
2486 				ipsec_alg_free(
2487 				    spds->spds_algs[alg_type][alg->alg_id]);
2488 				spds->spds_algs[alg_type][alg->alg_id] =
2489 				    alg;
2490 			}
2491 			alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
2492 			break;
2493 
2494 		case SPD_ATTR_ALG_ID:
2495 			if (attr->spd_attr_value >= IPSEC_MAX_ALGS) {
2496 				ss1dbg(spds, ("spdsock_do_updatealg: "
2497 				    "invalid alg id %d\n",
2498 				    attr->spd_attr_value));
2499 				*diag = SPD_DIAGNOSTIC_ALG_ID_RANGE;
2500 				goto bail;
2501 			}
2502 			alg->alg_id = attr->spd_attr_value;
2503 			break;
2504 
2505 		case SPD_ATTR_ALG_PROTO:
2506 			/* find the alg type */
2507 			for (i = 0; i < NALGPROTOS; i++)
2508 				if (algproto[i] == attr->spd_attr_value)
2509 					break;
2510 			skip_alg = (i == NALGPROTOS);
2511 			if (!skip_alg)
2512 				alg_type = i;
2513 			break;
2514 
2515 		case SPD_ATTR_ALG_INCRBITS:
2516 			alg->alg_increment = attr->spd_attr_value;
2517 			break;
2518 
2519 		case SPD_ATTR_ALG_NKEYSIZES:
2520 			if (alg->alg_key_sizes != NULL) {
2521 				kmem_free(alg->alg_key_sizes,
2522 				    ALG_KEY_SIZES(alg));
2523 			}
2524 			alg->alg_nkey_sizes = attr->spd_attr_value;
2525 			/*
2526 			 * Allocate room for the trailing zero key size
2527 			 * value as well.
2528 			 */
2529 			alg->alg_key_sizes = kmem_zalloc(ALG_KEY_SIZES(alg),
2530 			    KM_SLEEP);
2531 			cur_key = 0;
2532 			break;
2533 
2534 		case SPD_ATTR_ALG_KEYSIZE:
2535 			if (alg->alg_key_sizes == NULL ||
2536 			    cur_key >= alg->alg_nkey_sizes) {
2537 				ss1dbg(spds, ("spdsock_do_updatealg: "
2538 				    "too many key sizes\n"));
2539 				*diag = SPD_DIAGNOSTIC_ALG_NUM_KEY_SIZES;
2540 				goto bail;
2541 			}
2542 			alg->alg_key_sizes[cur_key++] = attr->spd_attr_value;
2543 			break;
2544 
2545 		case SPD_ATTR_ALG_NBLOCKSIZES:
2546 			if (alg->alg_block_sizes != NULL) {
2547 				kmem_free(alg->alg_block_sizes,
2548 				    ALG_BLOCK_SIZES(alg));
2549 			}
2550 			alg->alg_nblock_sizes = attr->spd_attr_value;
2551 			/*
2552 			 * Allocate room for the trailing zero block size
2553 			 * value as well.
2554 			 */
2555 			alg->alg_block_sizes = kmem_zalloc(ALG_BLOCK_SIZES(alg),
2556 			    KM_SLEEP);
2557 			cur_block = 0;
2558 			break;
2559 
2560 		case SPD_ATTR_ALG_BLOCKSIZE:
2561 			if (alg->alg_block_sizes == NULL ||
2562 			    cur_block >= alg->alg_nblock_sizes) {
2563 				ss1dbg(spds, ("spdsock_do_updatealg: "
2564 				    "too many block sizes\n"));
2565 				*diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
2566 				goto bail;
2567 			}
2568 			alg->alg_block_sizes[cur_block++] =
2569 			    attr->spd_attr_value;
2570 			break;
2571 
2572 		case SPD_ATTR_ALG_MECHNAME: {
2573 			char *mech_name;
2574 
2575 			if (attr->spd_attr_value > CRYPTO_MAX_MECH_NAME) {
2576 				ss1dbg(spds, ("spdsock_do_updatealg: "
2577 				    "mech name too long\n"));
2578 				*diag = SPD_DIAGNOSTIC_ALG_MECH_NAME_LEN;
2579 				goto bail;
2580 			}
2581 			mech_name = (char *)(attr + 1);
2582 			bcopy(mech_name, alg->alg_mech_name,
2583 			    attr->spd_attr_value);
2584 			alg->alg_mech_name[CRYPTO_MAX_MECH_NAME-1] = '\0';
2585 			attr = (struct spd_attribute *)((char *)attr +
2586 			    attr->spd_attr_value);
2587 			break;
2588 		}
2589 
2590 		case SPD_ATTR_PROTO_ID:
2591 			doing_proto = B_TRUE;
2592 			for (i = 0; i < NALGPROTOS; i++) {
2593 				if (algproto[i] == attr->spd_attr_value) {
2594 					alg_type = i;
2595 					break;
2596 				}
2597 			}
2598 			break;
2599 
2600 		case SPD_ATTR_PROTO_EXEC_MODE:
2601 			if (!doing_proto)
2602 				break;
2603 			for (i = 0; i < NEXECMODES; i++) {
2604 				if (execmodes[i] == attr->spd_attr_value) {
2605 					spds->spds_algs_exec_mode[alg_type] = i;
2606 					break;
2607 				}
2608 			}
2609 			break;
2610 		}
2611 		attr++;
2612 	}
2613 
2614 #undef	ALG_KEY_SIZES
2615 #undef	ALG_BLOCK_SIZES
2616 
2617 	/* update the algorithm tables */
2618 	spdsock_merge_algs(spds);
2619 bail:
2620 	/* cleanup */
2621 	ipsec_alg_free(alg);
2622 	for (alg_type = 0; alg_type < IPSEC_NALGTYPES; alg_type++)
2623 		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++)
2624 		if (spds->spds_algs[alg_type][algid] != NULL)
2625 			ipsec_alg_free(spds->spds_algs[alg_type][algid]);
2626 }
2627 
2628 /*
2629  * Process an SPD_UPDATEALGS request. If IPsec is not loaded, queue
2630  * the request until IPsec loads. If IPsec is loaded, act on it
2631  * immediately.
2632  */
2633 
2634 static void
2635 spdsock_updatealg(queue_t *q, mblk_t *mp, spd_ext_t *extv[])
2636 {
2637 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
2638 	spd_stack_t	*spds = ss->spdsock_spds;
2639 	ipsec_stack_t	*ipss = spds->spds_netstack->netstack_ipsec;
2640 
2641 	if (!ipsec_loaded(ipss)) {
2642 		/*
2643 		 * IPsec is not loaded, save request and return nicely,
2644 		 * the message will be processed once IPsec loads.
2645 		 */
2646 		mblk_t *new_mp;
2647 
2648 		/* last update message wins */
2649 		if ((new_mp = copymsg(mp)) == NULL) {
2650 			spdsock_error(q, mp, ENOMEM, 0);
2651 			return;
2652 		}
2653 		mutex_enter(&spds->spds_alg_lock);
2654 		bcopy(extv, spds->spds_extv_algs,
2655 		    sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));
2656 		if (spds->spds_mp_algs != NULL)
2657 			freemsg(spds->spds_mp_algs);
2658 		spds->spds_mp_algs = mp;
2659 		spds->spds_algs_pending = B_TRUE;
2660 		mutex_exit(&spds->spds_alg_lock);
2661 		if (audit_active)
2662 			audit_pf_policy(SPD_UPDATEALGS, DB_CRED(mp),
2663 			    spds->spds_netstack, NULL, B_TRUE, EAGAIN,
2664 			    DB_CPID(mp));
2665 		spd_echo(q, new_mp);
2666 	} else {
2667 		/*
2668 		 * IPsec is loaded, act on the message immediately.
2669 		 */
2670 		int diag;
2671 
2672 		mutex_enter(&spds->spds_alg_lock);
2673 		spdsock_do_updatealg(extv, &diag, spds);
2674 		mutex_exit(&spds->spds_alg_lock);
2675 		if (diag == -1) {
2676 			spd_echo(q, mp);
2677 		if (audit_active)
2678 			audit_pf_policy(SPD_UPDATEALGS, DB_CRED(mp),
2679 			    spds->spds_netstack, NULL, B_TRUE, 0,
2680 			    DB_CPID(mp));
2681 		} else {
2682 			spdsock_diag(q, mp, diag);
2683 		if (audit_active)
2684 			audit_pf_policy(SPD_UPDATEALGS, DB_CRED(mp),
2685 			    spds->spds_netstack, NULL, B_TRUE, diag,
2686 			    DB_CPID(mp));
2687 		}
2688 	}
2689 }
2690 
2691 /*
2692  * With a reference-held ill, dig down and find an instance of "tun", and
2693  * assign its tunnel policy pointer, while reference-holding it.  Also,
2694  * release ill's refrence when finished.
2695  *
2696  * We'll be messing with q_next, so be VERY careful.
2697  */
static void
find_tun_and_set_itp(ill_t *ill, ipsec_tun_pol_t *itp)
{
	queue_t *q;
	tun_t *tun;

	/* Don't bother if this ill is going away. */
	if (ill->ill_flags & ILL_CONDEMNED) {
		ill_refrele(ill);
		return;
	}


	/* Pin the stream so the q_next walk below is safe. */
	q = ill->ill_wq;
	claimstr(q);	/* Lighter-weight than freezestr(). */

	do {
		/* Use strcmp() because "tun" is bounded. */
		if (strcmp(q->q_qinfo->qi_minfo->mi_idname, "tun") == 0) {
			/* Aha!  Got it. */
			tun = (tun_t *)q->q_ptr;
			if (tun != NULL) {
				mutex_enter(&tun->tun_lock);
				/* Only install if not already set. */
				if (tun->tun_itp != itp) {
					ASSERT(tun->tun_itp == NULL);
					ITP_REFHOLD(itp);
					tun->tun_itp = itp;
				}
				mutex_exit(&tun->tun_lock);
				goto release_and_return;
			}
			/*
			 * Else assume this is some other module named "tun"
			 * and move on, hoping we find one that actually has
			 * something in q_ptr.
			 */
		}
		q = q->q_next;
	} while (q != NULL);

release_and_return:
	/* Release the same stream claimed above (q may have advanced). */
	releasestr(ill->ill_wq);
	ill_refrele(ill);
}
2742 
2743 /*
2744  * Sort through the mess of polhead options to retrieve an appropriate one.
2745  * Returns NULL if we send an spdsock error.  Returns a valid pointer if we
2746  * found a valid polhead.  Returns ALL_ACTIVE_POLHEADS (aka. -1) or
2747  * ALL_INACTIVE_POLHEADS (aka. -2) if the operation calls for the operation to
2748  * act on ALL policy heads.
2749  */
2750 static ipsec_policy_head_t *
2751 get_appropriate_polhead(queue_t *q, mblk_t *mp, spd_if_t *tunname, int spdid,
2752     int msgtype, ipsec_tun_pol_t **itpp)
2753 {
2754 	ipsec_tun_pol_t *itp;
2755 	ipsec_policy_head_t *iph;
2756 	int errno;
2757 	char *tname;
2758 	boolean_t active;
2759 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
2760 	netstack_t *ns = ss->spdsock_spds->spds_netstack;
2761 	uint64_t gen;	/* Placeholder */
2762 	ill_t *v4, *v6;
2763 
2764 	active = (spdid == SPD_ACTIVE);
2765 	*itpp = NULL;
2766 	if (!active && spdid != SPD_STANDBY) {
2767 		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_SPDID);
2768 		return (NULL);
2769 	}
2770 
2771 	if (tunname != NULL) {
2772 		/* Acting on a tunnel's SPD. */
2773 		tname = (char *)tunname->spd_if_name;
2774 		if (*tname == '\0') {
2775 			/* Handle all-polhead cases here. */
2776 			if (msgtype != SPD_FLUSH && msgtype != SPD_DUMP) {
2777 				spdsock_diag(q, mp,
2778 				    SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
2779 				return (NULL);
2780 			}
2781 			return (active ? ALL_ACTIVE_POLHEADS :
2782 			    ALL_INACTIVE_POLHEADS);
2783 		}
2784 
2785 		itp = get_tunnel_policy(tname, ns);
2786 		if (itp == NULL) {
2787 			if (msgtype != SPD_ADDRULE) {
2788 				/* "Tunnel not found" */
2789 				spdsock_error(q, mp, ENOENT, 0);
2790 				return (NULL);
2791 			}
2792 
2793 			errno = 0;
2794 			itp = create_tunnel_policy(tname, &errno, &gen, ns);
2795 			if (itp == NULL) {
2796 				/*
2797 				 * Something very bad happened, most likely
2798 				 * ENOMEM.  Return an indicator.
2799 				 */
2800 				spdsock_error(q, mp, errno, 0);
2801 				return (NULL);
2802 			}
2803 		}
2804 		/*
2805 		 * Troll the plumbed tunnels and see if we have a
2806 		 * match.  We need to do this always in case we add
2807 		 * policy AFTER plumbing a tunnel.
2808 		 */
2809 		v4 = ill_lookup_on_name(tname, B_FALSE, B_FALSE, NULL,
2810 		    NULL, NULL, &errno, NULL, ns->netstack_ip);
2811 		if (v4 != NULL)
2812 			find_tun_and_set_itp(v4, itp);
2813 		v6 = ill_lookup_on_name(tname, B_FALSE, B_TRUE, NULL,
2814 		    NULL, NULL, &errno, NULL, ns->netstack_ip);
2815 		if (v6 != NULL)
2816 			find_tun_and_set_itp(v6, itp);
2817 		ASSERT(itp != NULL);
2818 		*itpp = itp;
2819 		/* For spdsock dump state, set the polhead's name. */
2820 		if (msgtype == SPD_DUMP) {
2821 			ITP_REFHOLD(itp);
2822 			ss->spdsock_itp = itp;
2823 			ss->spdsock_dump_tunnel = itp->itp_flags &
2824 			    (active ? ITPF_P_TUNNEL : ITPF_I_TUNNEL);
2825 		}
2826 	} else {
2827 		itp = NULL;
2828 		/* For spdsock dump state, indicate it's global policy. */
2829 		if (msgtype == SPD_DUMP)
2830 			ss->spdsock_itp = NULL;
2831 	}
2832 
2833 	if (active)
2834 		iph = (itp == NULL) ? ipsec_system_policy(ns) : itp->itp_policy;
2835 	else
2836 		iph = (itp == NULL) ? ipsec_inactive_policy(ns) :
2837 		    itp->itp_inactive;
2838 
2839 	ASSERT(iph != NULL);
2840 	if (itp != NULL) {
2841 		IPPH_REFHOLD(iph);
2842 	}
2843 
2844 	return (iph);
2845 }
2846 
/*
 * Top-level parser/dispatcher for inbound PF_POLICY messages: validate
 * the message, extract its extensions, find the right policy head(s),
 * and hand off to the per-operation handler.
 */
static void
spdsock_parse(queue_t *q, mblk_t *mp)
{
	spd_msg_t *spmsg;
	spd_ext_t *extv[SPD_EXT_MAX + 1];
	uint_t msgsize;
	ipsec_policy_head_t *iph;
	ipsec_tun_pol_t *itp;
	spd_if_t *tunname;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t *spds = ss->spdsock_spds;
	netstack_t *ns = spds->spds_netstack;
	ipsec_stack_t *ipss = ns->netstack_ipsec;

	/* Make sure nothing's below me. */
	ASSERT(WR(q)->q_next == NULL);

	spmsg = (spd_msg_t *)mp->b_rptr;

	msgsize = SPD_64TO8(spmsg->spd_msg_len);

	if (msgdsize(mp) != msgsize) {
		/*
		 * Message len incorrect w.r.t. actual size.  Send an error
		 * (EMSGSIZE).	It may be necessary to massage things a
		 * bit.	 For example, if the spd_msg_type is hosed,
		 * I need to set it to SPD_RESERVED to get delivery to
		 * do the right thing.	Then again, maybe just letting
		 * the error delivery do the right thing.
		 */
		ss2dbg(spds,
		    ("mblk (%lu) and base (%d) message sizes don't jibe.\n",
		    msgdsize(mp), msgsize));
		spdsock_error(q, mp, EMSGSIZE, SPD_DIAGNOSTIC_NONE);
		return;
	}

	if (msgsize > (uint_t)(mp->b_wptr - mp->b_rptr)) {
		/* Get all message into one mblk. */
		if (pullupmsg(mp, -1) == 0) {
			/*
			 * Something screwy happened.
			 */
			ss3dbg(spds, ("spdsock_parse: pullupmsg() failed.\n"));
			return;
		} else {
			/* Pullup may have reallocated; refresh the header. */
			spmsg = (spd_msg_t *)mp->b_rptr;
		}
	}

	/* Extract extensions into extv[]; on error, extv[0] is the culprit. */
	switch (spdsock_get_ext(extv, spmsg, msgsize)) {
	case KGE_DUP:
		/* Handle duplicate extension. */
		ss1dbg(spds, ("Got duplicate extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, dup_ext_diag[extv[0]->spd_ext_type]);
		return;
	case KGE_UNK:
		/* Handle unknown extension. */
		ss1dbg(spds, ("Got unknown extension of type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_UNKNOWN_EXT);
		return;
	case KGE_LEN:
		/* Length error. */
		ss1dbg(spds, ("Length %d on extension type %d overrun or 0.\n",
		    extv[0]->spd_ext_len, extv[0]->spd_ext_type));
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_EXTLEN);
		return;
	case KGE_CHK:
		/* Reality check failed. */
		ss1dbg(spds, ("Reality check failed on extension type %d.\n",
		    extv[0]->spd_ext_type));
		spdsock_diag(q, mp, bad_ext_diag[extv[0]->spd_ext_type]);
		return;
	default:
		/* Default case is no errors. */
		break;
	}

	/*
	 * Special-case SPD_UPDATEALGS so as not to load IPsec.
	 */
	if (!ipsec_loaded(ipss) && spmsg->spd_msg_type != SPD_UPDATEALGS) {
		spdsock_t *ss = (spdsock_t *)q->q_ptr;

		ASSERT(ss != NULL);
		/* Kick off the load and retry via spdsock_loadcheck(). */
		ipsec_loader_loadnow(ipss);
		ss->spdsock_timeout_arg = mp;
		ss->spdsock_timeout = qtimeout(q, spdsock_loadcheck,
		    q, LOADCHECK_INTERVAL);
		return;
	}

	/* First check for messages that need no polheads at all. */
	switch (spmsg->spd_msg_type) {
	case SPD_UPDATEALGS:
		spdsock_updatealg(q, mp, extv);
		return;
	case SPD_ALGLIST:
		spdsock_alglist(q, mp);
		return;
	case SPD_DUMPALGS:
		spdsock_dumpalgs(q, mp);
		return;
	}

	/*
	 * Then check for ones that need both primary/secondary polheads,
	 * finding the appropriate tunnel policy if need be.
	 */
	tunname = (spd_if_t *)extv[SPD_EXT_TUN_NAME];
	switch (spmsg->spd_msg_type) {
	case SPD_FLIP:
		spdsock_flip(q, mp, tunname);
		return;
	case SPD_CLONE:
		spdsock_clone(q, mp, tunname);
		return;
	}

	/*
	 * Finally, find ones that operate on exactly one polhead, or
	 * "all polheads" of a given type (active/inactive).
	 */
	iph = get_appropriate_polhead(q, mp, tunname, spmsg->spd_msg_spdid,
	    spmsg->spd_msg_type, &itp);
	if (iph == NULL)
		return;

	/* All-polheads-ready operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_FLUSH:
		if (itp != NULL) {
			mutex_enter(&itp->itp_lock);
			/* Clear active or inactive per-tunnel flags. */
			if (spmsg->spd_msg_spdid == SPD_ACTIVE)
				itp->itp_flags &= ~ITPF_PFLAGS;
			else
				itp->itp_flags &= ~ITPF_IFLAGS;
			mutex_exit(&itp->itp_lock);
			ITP_REFRELE(itp, ns);
		}
		spdsock_flush(q, iph, itp, mp);
		return;
	case SPD_DUMP:
		if (itp != NULL)
			ITP_REFRELE(itp, ns);
		spdsock_dump(q, iph, mp);
		return;
	}

	/* Sentinel "all polheads" values are invalid past this point. */
	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
		return;
	}

	/* Single-polhead-only operations. */
	switch (spmsg->spd_msg_type) {
	case SPD_ADDRULE:
		spdsock_addrule(q, iph, mp, extv, itp);
		break;
	case SPD_DELETERULE:
		spdsock_deleterule(q, iph, mp, extv, itp);
		break;
	case SPD_LOOKUP:
		spdsock_lookup(q, iph, mp, extv, itp);
		break;
	default:
		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_MSG_TYPE);
		break;
	}

	/* Drop the holds taken by get_appropriate_polhead(). */
	IPPH_REFRELE(iph, ns);
	if (itp != NULL)
		ITP_REFRELE(itp, ns);
}
3023 
3024 /*
3025  * If an algorithm mapping was received before IPsec was loaded, process it.
3026  * Called from the IPsec loader.
3027  */
3028 void
3029 spdsock_update_pending_algs(netstack_t *ns)
3030 {
3031 	spd_stack_t *spds = ns->netstack_spdsock;
3032 
3033 	mutex_enter(&spds->spds_alg_lock);
3034 	if (spds->spds_algs_pending) {
3035 		int diag;
3036 
3037 		spdsock_do_updatealg(spds->spds_extv_algs, &diag,
3038 		    spds);
3039 		spds->spds_algs_pending = B_FALSE;
3040 	}
3041 	mutex_exit(&spds->spds_alg_lock);
3042 }
3043 
3044 static void
3045 spdsock_loadcheck(void *arg)
3046 {
3047 	queue_t *q = (queue_t *)arg;
3048 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
3049 	mblk_t *mp;
3050 	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;
3051 
3052 	ASSERT(ss != NULL);
3053 
3054 	ss->spdsock_timeout = 0;
3055 	mp = ss->spdsock_timeout_arg;
3056 	ASSERT(mp != NULL);
3057 	ss->spdsock_timeout_arg = NULL;
3058 	if (ipsec_failed(ipss))
3059 		spdsock_error(q, mp, EPROTONOSUPPORT, 0);
3060 	else
3061 		spdsock_parse(q, mp);
3062 }
3063 
3064 /*
3065  * Copy relevant state bits.
3066  */
3067 static void
3068 spdsock_copy_info(struct T_info_ack *tap, spdsock_t *ss)
3069 {
3070 	*tap = spdsock_g_t_info_ack;
3071 	tap->CURRENT_state = ss->spdsock_state;
3072 	tap->OPT_size = spdsock_max_optsize;
3073 }
3074 
3075 /*
3076  * This routine responds to T_CAPABILITY_REQ messages.  It is called by
3077  * spdsock_wput.  Much of the T_CAPABILITY_ACK information is copied from
3078  * spdsock_g_t_info_ack.  The current state of the stream is copied from
3079  * spdsock_state.
3080  */
3081 static void
3082 spdsock_capability_req(queue_t *q, mblk_t *mp)
3083 {
3084 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
3085 	t_uscalar_t cap_bits1;
3086 	struct T_capability_ack	*tcap;
3087 
3088 	cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
3089 
3090 	mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
3091 	    mp->b_datap->db_type, T_CAPABILITY_ACK);
3092 	if (mp == NULL)
3093 		return;
3094 
3095 	tcap = (struct T_capability_ack *)mp->b_rptr;
3096 	tcap->CAP_bits1 = 0;
3097 
3098 	if (cap_bits1 & TC1_INFO) {
3099 		spdsock_copy_info(&tcap->INFO_ack, ss);
3100 		tcap->CAP_bits1 |= TC1_INFO;
3101 	}
3102 
3103 	qreply(q, mp);
3104 }
3105 
3106 /*
3107  * This routine responds to T_INFO_REQ messages. It is called by
3108  * spdsock_wput_other.
3109  * Most of the T_INFO_ACK information is copied from spdsock_g_t_info_ack.
3110  * The current state of the stream is copied from spdsock_state.
3111  */
3112 static void
3113 spdsock_info_req(q, mp)
3114 	queue_t	*q;
3115 	mblk_t	*mp;
3116 {
3117 	mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
3118 	    T_INFO_ACK);
3119 	if (mp == NULL)
3120 		return;
3121 	spdsock_copy_info((struct T_info_ack *)mp->b_rptr,
3122 	    (spdsock_t *)q->q_ptr);
3123 	qreply(q, mp);
3124 }
3125 
3126 /*
3127  * spdsock_err_ack. This routine creates a
3128  * T_ERROR_ACK message and passes it
3129  * upstream.
3130  */
3131 static void
3132 spdsock_err_ack(q, mp, t_error, sys_error)
3133 	queue_t	*q;
3134 	mblk_t	*mp;
3135 	int	t_error;
3136 	int	sys_error;
3137 {
3138 	if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
3139 		qreply(q, mp);
3140 }
3141 
3142 /*
3143  * This routine retrieves the current status of socket options.
3144  * It returns the size of the option retrieved.
3145  */
3146 /* ARGSUSED */
3147 int
3148 spdsock_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
3149 {
3150 	int *i1 = (int *)ptr;
3151 
3152 	switch (level) {
3153 	case SOL_SOCKET:
3154 		switch (name) {
3155 		case SO_TYPE:
3156 			*i1 = SOCK_RAW;
3157 			break;
3158 		/*
3159 		 * The following two items can be manipulated,
3160 		 * but changing them should do nothing.
3161 		 */
3162 		case SO_SNDBUF:
3163 			*i1 = (int)q->q_hiwat;
3164 			break;
3165 		case SO_RCVBUF:
3166 			*i1 = (int)(RD(q)->q_hiwat);
3167 			break;
3168 		}
3169 		break;
3170 	default:
3171 		return (0);
3172 	}
3173 	return (sizeof (int));
3174 }
3175 
3176 /*
3177  * This routine sets socket options.
3178  */
3179 /* ARGSUSED */
3180 int
3181 spdsock_opt_set(queue_t *q, uint_t mgmt_flags, int level, int name,
3182     uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
3183     void *thisdg_attrs, cred_t *cr, mblk_t *mblk)
3184 {
3185 	int *i1 = (int *)invalp;
3186 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
3187 	spd_stack_t	*spds = ss->spdsock_spds;
3188 
3189 	switch (level) {
3190 	case SOL_SOCKET:
3191 		switch (name) {
3192 		case SO_SNDBUF:
3193 			if (*i1 > spds->spds_max_buf)
3194 				return (ENOBUFS);
3195 			q->q_hiwat = *i1;
3196 			break;
3197 		case SO_RCVBUF:
3198 			if (*i1 > spds->spds_max_buf)
3199 				return (ENOBUFS);
3200 			RD(q)->q_hiwat = *i1;
3201 			(void) proto_set_rx_hiwat(RD(q), NULL, *i1);
3202 			break;
3203 		}
3204 		break;
3205 	}
3206 	return (0);
3207 }
3208 
3209 
3210 /*
3211  * Handle STREAMS messages.
3212  */
static void
spdsock_wput_other(queue_t *q, mblk_t *mp)
{
	struct iocblk *iocp;
	int error;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;
	cred_t		*cr;

	switch (mp->b_datap->db_type) {
	case M_PROTO:
	case M_PCPROTO:
		/*
		 * A TPI primitive's type occupies the first longword of
		 * the message; anything shorter is malformed.
		 */
		if ((mp->b_wptr - mp->b_rptr) < sizeof (long)) {
			ss3dbg(spds, (
			    "spdsock_wput_other: Not big enough M_PROTO\n"));
			freemsg(mp);
			return;
		}
		/*
		 * Fall back to the kernel credential of this stack's zone
		 * when the message itself carries no credential (see the
		 * DB_CREDDEF() uses below).  Released at the end of the
		 * M_PROTO/M_PCPROTO case.
		 */
		cr = zone_get_kcred(netstackid_to_zoneid(
		    spds->spds_netstack->netstack_stackid));
		ASSERT(cr != NULL);

		switch (((union T_primitives *)mp->b_rptr)->type) {
		case T_CAPABILITY_REQ:
			spdsock_capability_req(q, mp);
			break;
		case T_INFO_REQ:
			spdsock_info_req(q, mp);
			break;
		case T_SVR4_OPTMGMT_REQ:
			(void) svr4_optcom_req(q, mp, DB_CREDDEF(mp, cr),
			    &spdsock_opt_obj, B_FALSE);
			break;
		case T_OPTMGMT_REQ:
			(void) tpi_optcom_req(q, mp, DB_CREDDEF(mp, cr),
			    &spdsock_opt_obj, B_FALSE);
			break;
		case T_DATA_REQ:
		case T_EXDATA_REQ:
		case T_ORDREL_REQ:
			/* Illegal for spdsock. */
			freemsg(mp);
			(void) putnextctl1(RD(q), M_ERROR, EPROTO);
			break;
		default:
			/* Not supported by spdsock. */
			spdsock_err_ack(q, mp, TNOTSUPPORT, 0);
			break;
		}
		crfree(cr);
		return;
	case M_IOCTL:
		iocp = (struct iocblk *)mp->b_rptr;
		error = EINVAL;

		switch (iocp->ioc_cmd) {
		case ND_SET:
		case ND_GET:
			/* nd_getset() builds the reply in place on success. */
			if (nd_getset(q, spds->spds_g_nd, mp)) {
				qreply(q, mp);
				return;
			} else
				error = ENOENT;
			/* FALLTHRU */
		default:
			/* NAK every other ioctl. */
			miocnak(q, mp, 0, error);
			return;
		}
	case M_FLUSH:
		/* Standard STREAMS flush protocol: write side first. */
		if (*mp->b_rptr & FLUSHW) {
			flushq(q, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR) {
			/* Turn the message around for the read side. */
			qreply(q, mp);
			return;
		}
		/* Else FALLTHRU */
	}

	/* If fell through, just black-hole the message. */
	freemsg(mp);
}
3296 
static void
spdsock_wput(queue_t *q, mblk_t *mp)
{
	uint8_t *rptr = mp->b_rptr;
	mblk_t *mp1;
	spdsock_t *ss = (spdsock_t *)q->q_ptr;
	spd_stack_t	*spds = ss->spdsock_spds;

	/*
	 * If we're dumping, defer processing other messages until the
	 * dump completes.  spdsock_wsrv() will replay queued messages
	 * once the dump is done.
	 */
	if (ss->spdsock_dump_req != NULL) {
		if (!putq(q, mp))
			freemsg(mp);
		return;
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:
		/*
		 * Silently discard.
		 */
		ss2dbg(spds, ("raw M_DATA in spdsock.\n"));
		freemsg(mp);
		return;
	case M_PROTO:
	case M_PCPROTO:
		if ((mp->b_wptr - rptr) >= sizeof (struct T_data_req)) {
			if (((union T_primitives *)rptr)->type == T_DATA_REQ) {
				if ((mp1 = mp->b_cont) == NULL) {
					/* No data after T_DATA_REQ. */
					ss2dbg(spds,
					    ("No data after DATA_REQ.\n"));
					freemsg(mp);
					return;
				}
				/*
				 * Strip the T_DATA_REQ header block; the
				 * PF_POLICY payload continues in mp1.
				 */
				freeb(mp);
				mp = mp1;
				ss2dbg(spds, ("T_DATA_REQ\n"));
				break;	/* Out of switch. */
			}
		}
		/* FALLTHRU */
	default:
		/* Everything else is a STREAMS/TPI control message. */
		ss3dbg(spds, ("In default wput case (%d %d).\n",
		    mp->b_datap->db_type, ((union T_primitives *)rptr)->type));
		spdsock_wput_other(q, mp);
		return;
	}

	/* I now have a PF_POLICY message in an M_DATA block. */
	spdsock_parse(q, mp);
}
3351 
3352 /*
3353  * Device open procedure, called when new queue pair created.
3354  * We are passed the read-side queue.
3355  */
/* ARGSUSED */
static int
spdsock_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	spdsock_t *ss;
	queue_t *oq = OTHERQ(q);
	minor_t ssminor;
	netstack_t *ns;
	spd_stack_t *spds;

	/* Only callers with network-configuration privilege may open. */
	if (secpolicy_ip_config(credp, B_FALSE) != 0)
		return (EPERM);

	if (q->q_ptr != NULL)
		return (0);  /* Re-open of an already open instance. */

	/* spdsock is a driver; it cannot be pushed as a module. */
	if (sflag & MODOPEN)
		return (EINVAL);

	/*
	 * Takes a hold on the netstack; released in spdsock_close(),
	 * or below if setup fails.
	 */
	ns = netstack_find_by_cred(credp);
	ASSERT(ns != NULL);
	spds = ns->netstack_spdsock;
	ASSERT(spds != NULL);

	ss2dbg(spds, ("Made it into PF_POLICY socket open.\n"));

	/* Allocate a minor number; NULL (i.e. 0) means the arena is full. */
	ssminor = (minor_t)(uintptr_t)vmem_alloc(spdsock_vmem, 1, VM_NOSLEEP);
	if (ssminor == 0) {
		netstack_rele(spds->spds_netstack);
		return (ENOMEM);
	}
	ss = kmem_zalloc(sizeof (spdsock_t), KM_NOSLEEP);
	if (ss == NULL) {
		/* Unwind the minor number and the netstack hold. */
		vmem_free(spdsock_vmem, (void *)(uintptr_t)ssminor, 1);
		netstack_rele(spds->spds_netstack);
		return (ENOMEM);
	}

	ss->spdsock_minor = ssminor;
	ss->spdsock_state = TS_UNBND;
	ss->spdsock_dump_req = NULL;

	ss->spdsock_spds = spds;

	/* Both halves of the queue pair share the per-instance state. */
	q->q_ptr = ss;
	oq->q_ptr = ss;

	q->q_hiwat = spds->spds_recv_hiwat;

	oq->q_hiwat = spds->spds_xmit_hiwat;
	oq->q_lowat = spds->spds_xmit_lowat;

	qprocson(q);
	(void) proto_set_rx_hiwat(q, NULL, spds->spds_recv_hiwat);

	/* Return a device node carrying the freshly allocated minor. */
	*devp = makedevice(getmajor(*devp), ss->spdsock_minor);
	return (0);
}
3414 
3415 /*
3416  * Read-side service procedure, invoked when we get back-enabled
3417  * when buffer space becomes available.
3418  *
3419  * Dump another chunk if we were dumping before; when we finish, kick
3420  * the write-side queue in case it's waiting for read queue space.
3421  */
3422 void
3423 spdsock_rsrv(queue_t *q)
3424 {
3425 	spdsock_t *ss = q->q_ptr;
3426 
3427 	if (ss->spdsock_dump_req != NULL)
3428 		spdsock_dump_some(q, ss);
3429 
3430 	if (ss->spdsock_dump_req == NULL)
3431 		qenable(OTHERQ(q));
3432 }
3433 
3434 /*
3435  * Write-side service procedure, invoked when we defer processing
3436  * if another message is received while a dump is in progress.
3437  */
void
spdsock_wsrv(queue_t *q)
{
	spdsock_t *ss = q->q_ptr;
	mblk_t *mp;
	ipsec_stack_t *ipss = ss->spdsock_spds->spds_netstack->netstack_ipsec;

	/*
	 * A dump is still in progress; let the read side drive it.
	 * spdsock_rsrv() re-enables this queue once the dump finishes.
	 */
	if (ss->spdsock_dump_req != NULL) {
		qenable(OTHERQ(q));
		return;
	}

	while ((mp = getq(q)) != NULL) {
		if (ipsec_loaded(ipss)) {
			spdsock_wput(q, mp);
			/* wput may have started a new dump; stop if so. */
			if (ss->spdsock_dump_req != NULL)
				return;
		} else if (!ipsec_failed(ipss)) {
			/* IPsec is still loading; requeue until it settles. */
			(void) putq(q, mp);
		} else {
			/* IPsec failed to load; bounce the request. */
			spdsock_error(q, mp, EPFNOSUPPORT, 0);
		}
	}
}
3462 
3463 static int
3464 spdsock_close(queue_t *q)
3465 {
3466 	spdsock_t *ss = q->q_ptr;
3467 	spd_stack_t	*spds = ss->spdsock_spds;
3468 
3469 	qprocsoff(q);
3470 
3471 	/* Safe assumption. */
3472 	ASSERT(ss != NULL);
3473 
3474 	if (ss->spdsock_timeout != 0)
3475 		(void) quntimeout(q, ss->spdsock_timeout);
3476 
3477 	ss3dbg(spds, ("Driver close, PF_POLICY socket is going away.\n"));
3478 
3479 	vmem_free(spdsock_vmem, (void *)(uintptr_t)ss->spdsock_minor, 1);
3480 	netstack_rele(ss->spdsock_spds->spds_netstack);
3481 
3482 	kmem_free(ss, sizeof (spdsock_t));
3483 	return (0);
3484 }
3485 
3486 /*
3487  * Merge the IPsec algorithms tables with the received algorithm information.
3488  */
void
spdsock_merge_algs(spd_stack_t *spds)
{
	ipsec_alginfo_t *alg, *oalg;
	ipsec_algtype_t algtype;
	uint_t algidx, algid, nalgs;
	crypto_mech_name_t *mechs;
	uint_t mech_count, mech_idx;
	netstack_t	*ns = spds->spds_netstack;
	ipsec_stack_t	*ipss = ns->netstack_ipsec;

	/* Caller must hold spds_alg_lock to stabilize spds_algs[][]. */
	ASSERT(MUTEX_HELD(&spds->spds_alg_lock));

	/*
	 * Get the list of supported mechanisms from the crypto framework.
	 * If a mechanism is supported by KCF, resolve its mechanism
	 * id and mark it as being valid. This operation must be done
	 * without holding alg_lock, since it can cause a provider
	 * module to be loaded and the provider notification callback to
	 * be invoked.
	 */
	mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
			int algflags = 0;
			crypto_mech_type_t mt = CRYPTO_MECHANISM_INVALID;

			alg = spds->spds_algs[algtype][algid];
			if (alg == NULL)
				continue;

			/*
			 * The NULL encryption algorithm is a special
			 * case because there are no mechanisms, yet
			 * the algorithm is still valid.
			 */
			if (alg->alg_id == SADB_EALG_NULL) {
				alg->alg_mech_type = CRYPTO_MECHANISM_INVALID;
				alg->alg_flags = ALG_FLAG_VALID;
				continue;
			}

			/* Linear search of KCF's mechanism list by name. */
			for (mech_idx = 0; mech_idx < mech_count; mech_idx++) {
				if (strncmp(alg->alg_mech_name, mechs[mech_idx],
				    CRYPTO_MAX_MECH_NAME) == 0) {
					mt = crypto_mech2id(alg->alg_mech_name);
					ASSERT(mt != CRYPTO_MECHANISM_INVALID);
					algflags = ALG_FLAG_VALID;
					break;
				}
			}
			/* Unresolved mechanisms leave the alg marked invalid. */
			alg->alg_mech_type = mt;
			alg->alg_flags = algflags;
		}
	}

	mutex_enter(&ipss->ipsec_alg_lock);

	/*
	 * For each algorithm currently defined, check if it is
	 * present in the new tables created from the SPD_UPDATEALGS
	 * message received from user-space.
	 * Delete the algorithm entries that are currently defined
	 * but not part of the new tables.
	 */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		nalgs = ipss->ipsec_nalgs[algtype];
		for (algidx = 0; algidx < nalgs; algidx++) {
			algid = ipss->ipsec_sortlist[algtype][algidx];
			if (spds->spds_algs[algtype][algid] == NULL)
				ipsec_alg_unreg(algtype, algid, ns);
		}
	}

	/*
	 * For each algorithm we just received, check if it is
	 * present in the currently defined tables. If it is, swap
	 * the entry with the one we just allocated.
	 * If the new algorithm is not in the current tables,
	 * add it.
	 */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
			alg = spds->spds_algs[algtype][algid];
			if (alg == NULL)
				continue;

			if ((oalg = ipss->ipsec_alglists[algtype][algid]) ==
			    NULL) {
				/*
				 * New algorithm, add it to the algorithm
				 * table.
				 */
				ipsec_alg_reg(algtype, alg, ns);
			} else {
				/*
				 * Algorithm is already in the table. Swap
				 * the existing entry with the new one.
				 */
				ipsec_alg_fix_min_max(alg, algtype, ns);
				ipss->ipsec_alglists[algtype][algid] = alg;
				ipsec_alg_free(oalg);
			}
			/* Entry handed off above; drop our reference. */
			spds->spds_algs[algtype][algid] = NULL;
		}
	}

	/* Propagate the per-type execution modes from the update. */
	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
		ipss->ipsec_algs_exec_mode[algtype] =
		    spds->spds_algs_exec_mode[algtype];
	}

	mutex_exit(&ipss->ipsec_alg_lock);

	crypto_free_mech_list(mechs, mech_count);

	/* Notify AH and ESP that the usable algorithm set has changed. */
	ipsecah_algs_changed(ns);
	ipsecesp_algs_changed(ns);
}
3608