1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/param.h>
29 #include <sys/types.h>
30 #include <sys/stream.h>
31 #include <sys/strsubr.h>
32 #include <sys/strsun.h>
33 #include <sys/stropts.h>
34 #include <sys/vnode.h>
35 #include <sys/sysmacros.h>
36 #define	_SUN_TPI_VERSION 2
37 #include <sys/tihdr.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/mkdev.h>
41 #include <sys/debug.h>
42 #include <sys/kmem.h>
43 #include <sys/cmn_err.h>
44 #include <sys/suntpi.h>
45 #include <sys/policy.h>
46 
47 #include <sys/socket.h>
48 #include <netinet/in.h>
49 #include <net/pfkeyv2.h>
50 #include <net/pfpolicy.h>
51 
52 #include <inet/common.h>
53 #include <netinet/ip6.h>
54 #include <inet/ip.h>
55 #include <inet/ip6.h>
56 #include <inet/mi.h>
57 #include <inet/nd.h>
58 #include <inet/ip_if.h>
59 #include <inet/tun.h>
60 #include <inet/optcom.h>
61 #include <inet/ipsec_info.h>
62 #include <inet/ipsec_impl.h>
63 #include <inet/spdsock.h>
64 #include <inet/sadb.h>
65 
66 #include <sys/isa_defs.h>
67 
68 /*
69  * This is a transport provider for the PF_POLICY IPsec policy
70  * management socket, which provides a management interface into the
71  * SPD, allowing policy rules to be added, deleted, and queried.
72  *
73  * This effectively replaces the old private SIOC*IPSECONFIG ioctls
74  * with an extensible interface which will hopefully be public some
75  * day.
76  *
77  * See <net/pfpolicy.h> for more details on the protocol.
78  *
79  * We link against drv/ip and call directly into it to manipulate the
80  * SPD; see ipsec_impl.h for the policy data structures and spd.c for
81  * the code which maintains them.
82  *
83  * The MT model of this is QPAIR with the addition of some explicit
84  * locking to protect system-wide policy data structures.
85  */
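
/*
 * Each PF_POLICY request is an spd_msg_t base header followed by zero or
 * more 64-bit-aligned extensions (spd_ext_t and friends); spdsock_get_ext()
 * below does the parsing.  See <net/pfpolicy.h> for the exact layouts.
 */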
86 
87 static vmem_t *spdsock_vmem;		/* for minor numbers. */
88 
89 #define	ALIGNED64(x) IS_P2ALIGNED((x), sizeof (uint64_t))
90 
91 /* Default structure copied into T_INFO_ACK messages (from rts.c...) */
92 static struct T_info_ack spdsock_g_t_info_ack = {
93 	T_INFO_ACK,
94 	T_INFINITE,	/* TSDU_size. Maximum size messages. */
95 	T_INVALID,	/* ETSDU_size. No expedited data. */
96 	T_INVALID,	/* CDATA_size. No connect data. */
97 	T_INVALID,	/* DDATA_size. No disconnect data. */
98 	0,		/* ADDR_size. */
99 	0,		/* OPT_size. No user-settable options */
100 	64 * 1024,	/* TIDU_size. spdsock allows maximum size messages. */
101 	T_COTS,		/* SERV_type. spdsock supports connection oriented. */
102 	TS_UNBND,	/* CURRENT_state. This is set from spdsock_state. */
103 	(XPG4_1)	/* Provider flags */
104 };
105 
106 /* Named Dispatch Parameter Management Structure */
107 typedef struct spdsockparam_s {
108 	uint_t	spdsock_param_min;
109 	uint_t	spdsock_param_max;
110 	uint_t	spdsock_param_value;
111 	char *spdsock_param_name;
112 } spdsockparam_t;
113 
114 /*
115  * Table of NDD variables supported by spdsock. These are loaded into
116  * spdsock_g_nd in spdsock_init_nd.
117  * All of these are alterable, within the min/max values given, at run time.
118  */
119 static	spdsockparam_t	spdsock_param_arr[] = {
120 	/* min	max	value	name */
121 	{ 4096, 65536,	8192,	"spdsock_xmit_hiwat"},
122 	{ 0,	65536,	1024,	"spdsock_xmit_lowat"},
123 	{ 4096, 65536,	8192,	"spdsock_recv_hiwat"},
124 	{ 65536, 1024*1024*1024, 256*1024,	"spdsock_max_buf"},
125 	{ 0,	3,	0,	"spdsock_debug"},
126 };
127 #define	spdsock_xmit_hiwat	spdsock_param_arr[0].spdsock_param_value
128 #define	spdsock_xmit_lowat	spdsock_param_arr[1].spdsock_param_value
129 #define	spdsock_recv_hiwat	spdsock_param_arr[2].spdsock_param_value
130 #define	spdsock_max_buf		spdsock_param_arr[3].spdsock_param_value
131 #define	spdsock_debug		spdsock_param_arr[4].spdsock_param_value
132 
133 kmutex_t spdsock_param_lock;	/* Protects the NDD variables. */
134 
135 /*
136  * State used to save algorithm update messages, which are processed
137  * only after IPsec is loaded.
138  */
139 static spd_ext_t *spdsock_extv_algs[SPD_EXT_MAX + 1];
140 static mblk_t *spdsock_mp_algs = NULL;
141 static boolean_t spdsock_algs_pending = B_FALSE;
142 static ipsec_alginfo_t *spdsock_algs[IPSEC_NALGTYPES][IPSEC_MAX_ALGS];
143 static ipsec_algs_exec_mode_t spdsock_algs_exec_mode[IPSEC_NALGTYPES];
144 static kmutex_t spdsock_alg_lock;
145 
146 #define	ss0dbg(a)	printf a
147 /* NOTE:  != 0 instead of > 0 so lint doesn't complain. */
148 #define	ss1dbg(a)	if (spdsock_debug != 0) printf a
149 #define	ss2dbg(a)	if (spdsock_debug > 1) printf a
150 #define	ss3dbg(a)	if (spdsock_debug > 2) printf a
151 
152 static IDP spdsock_g_nd;
153 
154 static int spdsock_close(queue_t *);
155 static int spdsock_open(queue_t *, dev_t *, int, int, cred_t *);
156 static void spdsock_wput(queue_t *, mblk_t *);
157 static void spdsock_wsrv(queue_t *);
158 static void spdsock_rsrv(queue_t *);
159 static void spdsock_loadcheck(void *);
160 static void spdsock_merge_algs(void);
161 static void spdsock_flush_one(ipsec_policy_head_t *);
162 static mblk_t *spdsock_dump_next_record(spdsock_t *);
163 
164 static struct module_info info = {
165 	5138, "spdsock", 1, INFPSZ, 512, 128
166 };
167 
168 static struct qinit rinit = {
169 	NULL, (pfi_t)spdsock_rsrv, spdsock_open, spdsock_close,
170 	NULL, &info
171 };
172 
173 static struct qinit winit = {
174 	(pfi_t)spdsock_wput, (pfi_t)spdsock_wsrv, NULL, NULL, NULL, &info
175 };
176 
177 struct streamtab spdsockinfo = {
178 	&rinit, &winit
179 };
180 
181 /* mapping from alg type to protocol number, as per RFC 2407 */
182 static const uint_t algproto[] = {
183 	PROTO_IPSEC_AH,
184 	PROTO_IPSEC_ESP,
185 };
186 
187 #define	NALGPROTOS	(sizeof (algproto) / sizeof (algproto[0]))
188 
189 /* mapping from kernel exec mode to spdsock exec mode */
190 static const uint_t execmodes[] = {
191 	SPD_ALG_EXEC_MODE_SYNC,
192 	SPD_ALG_EXEC_MODE_ASYNC
193 };
194 
195 #define	NEXECMODES	(sizeof (execmodes) / sizeof (execmodes[0]))
196 
197 #define	ALL_ACTIVE_POLHEADS ((ipsec_policy_head_t *)-1)
198 #define	ALL_INACTIVE_POLHEADS ((ipsec_policy_head_t *)-2)
199 
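/* This routine reports the current value of an NDD variable. */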
200 /* ARGSUSED */
201 static int
202 spdsock_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
203 {
208 	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
209 	uint_t value;
210 
211 	mutex_enter(&spdsock_param_lock);
212 	value = spdsockpa->spdsock_param_value;
213 	mutex_exit(&spdsock_param_lock);
214 
215 	(void) mi_mpprintf(mp, "%u", value);
216 	return (0);
217 }
218 
219 /* This routine sets an NDD variable in a spdsockparam_t structure. */
220 /* ARGSUSED */
221 static int
222 spdsock_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
223     cred_t *cr)
224 {
229 	ulong_t	new_value;
230 	spdsockparam_t	*spdsockpa = (spdsockparam_t *)cp;
231 
232 	/* Convert the value from a string into a long integer. */
233 	if (ddi_strtoul(value, NULL, 10, &new_value) != 0)
234 		return (EINVAL);
235 
236 	mutex_enter(&spdsock_param_lock);
237 	/*
238 	 * Fail the request if the new value does not lie within the
239 	 * required bounds.
240 	 */
241 	if (new_value < spdsockpa->spdsock_param_min ||
242 	    new_value > spdsockpa->spdsock_param_max) {
243 		mutex_exit(&spdsock_param_lock);
244 		return (EINVAL);
245 	}
246 
247 	/* Set the new value */
248 	spdsockpa->spdsock_param_value = new_value;
249 	mutex_exit(&spdsock_param_lock);
250 
251 	return (0);
252 }
253 
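/*
 * Global (non-per-instance) initialization: load the NDD variables,
 * compute the maximum option buffer size, and create the minor-number
 * arena and the locks used by this module.
 */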
254 boolean_t
255 spdsock_ddi_init(void)
256 {
257 	spdsockparam_t *ssp = spdsock_param_arr;
258 	int count = A_CNT(spdsock_param_arr);
259 
260 	if (!spdsock_g_nd) {
261 		for (; count-- > 0; ssp++) {
262 			if (ssp->spdsock_param_name != NULL &&
263 			    (ssp->spdsock_param_name[0] != '\0')) {
264 				if (!nd_load(&spdsock_g_nd,
265 				    ssp->spdsock_param_name,
266 				    spdsock_param_get, spdsock_param_set,
267 				    (caddr_t)ssp)) {
268 					nd_free(&spdsock_g_nd);
269 					return (B_FALSE);
270 				}
271 			}
272 		}
273 	}
274 
275 	spdsock_max_optsize = optcom_max_optsize(
276 	    spdsock_opt_obj.odb_opt_des_arr, spdsock_opt_obj.odb_opt_arr_cnt);
277 
278 	spdsock_vmem = vmem_create("spdsock", (void *)1, MAXMIN, 1,
279 	    NULL, NULL, NULL, 1, VM_SLEEP | VMC_IDENTIFIER);
280 
281 	mutex_init(&spdsock_param_lock, NULL, MUTEX_DEFAULT, NULL);
282 	mutex_init(&spdsock_alg_lock, NULL, MUTEX_DEFAULT, NULL);
283 
284 	return (B_TRUE);
285 }
286 
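/* Undo everything spdsock_ddi_init() set up. */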
287 void
288 spdsock_ddi_destroy(void)
289 {
290 	vmem_destroy(spdsock_vmem);
291 	mutex_destroy(&spdsock_param_lock);
292 	mutex_destroy(&spdsock_alg_lock);
293 	nd_free(&spdsock_g_nd);
294 }
295 
296 /*
297  * NOTE: large quantities of this should be shared with keysock.
298  * Would be nice to combine some of this into a common module, but
299  * not possible given time pressures.
300  */
301 
302 /*
303  * High-level reality checking of extensions.
304  */
305 /* ARGSUSED */ /* XXX */
306 static boolean_t
307 ext_check(spd_ext_t *ext)
308 {
309 	spd_if_t *tunname = (spd_if_t *)ext;
310 	int i;
311 	char *idstr;
312 
313 	if (ext->spd_ext_type == SPD_EXT_TUN_NAME) {
314 		/* (NOTE:  Modified from SADB_EXT_IDENTITY..) */
315 
316 		/*
317 		 * Make sure the strings in these identities are
318 		 * null-terminated.  Let's "proactively" null-terminate the
319 		 * string at the last byte if it's not terminated sooner.
320 		 */
321 		i = SPD_64TO8(tunname->spd_if_len) - sizeof (spd_if_t);
322 		idstr = (char *)(tunname + 1);
323 		while (*idstr != '\0' && i > 0) {
324 			i--;
325 			idstr++;
326 		}
327 		if (i == 0) {
328 			/*
329 			 * I.e., if the bozo user didn't NULL-terminate the
330 			 * string...
331 			 */
332 			idstr--;
333 			*idstr = '\0';
334 		}
335 	}
336 	return (B_TRUE);	/* For now... */
337 }
338 
339 
340 
341 /* Return values for spdsock_get_ext(). */
342 #define	KGE_OK	0
343 #define	KGE_DUP	1
344 #define	KGE_UNK	2
345 #define	KGE_LEN	3
346 #define	KGE_CHK	4
347 
348 /*
349  * Parse basic extension headers and return in the passed-in pointer vector.
350  * Return values include:
351  *
352  *	KGE_OK	Everything's nice and parsed out.
353  *		If there are no extensions, place NULL in extv[0].
354  *	KGE_DUP	There is a duplicate extension.
355  *		First instance in appropriate bin.  First duplicate in
356  *		extv[0].
357  *	KGE_UNK	Unknown extension type encountered.  extv[0] contains
358  *		unknown header.
359  *	KGE_LEN	Extension length error.
360  *	KGE_CHK	High-level reality check failed on specific extension.
361  *
362  * My apologies for some of the pointer arithmetic in here.  I'm thinking
363  * like an assembly programmer, yet trying to make the compiler happy.
364  */
365 static int
366 spdsock_get_ext(spd_ext_t *extv[], spd_msg_t *basehdr, uint_t msgsize)
367 {
368 	bzero(extv, sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));
369 
370 	/* Use extv[0] as the "current working pointer". */
371 
372 	extv[0] = (spd_ext_t *)(basehdr + 1);
373 
374 	while (extv[0] < (spd_ext_t *)(((uint8_t *)basehdr) + msgsize)) {
375 		/* Check for unknown headers. */
376 		if (extv[0]->spd_ext_type == 0 ||
377 		    extv[0]->spd_ext_type > SPD_EXT_MAX)
378 			return (KGE_UNK);
379 
380 		/*
381 		 * Check length.  Use uint64_t because extlen is in units
382 		 * of 64-bit words.  If length goes beyond the msgsize,
383 		 * return an error.  (Zero length also qualifies here.)
384 		 */
385 		if (extv[0]->spd_ext_len == 0 ||
386 		    (void *)((uint64_t *)extv[0] + extv[0]->spd_ext_len) >
387 		    (void *)((uint8_t *)basehdr + msgsize))
388 			return (KGE_LEN);
389 
390 		/* Check for redundant headers. */
391 		if (extv[extv[0]->spd_ext_type] != NULL)
392 			return (KGE_DUP);
393 
394 		/*
395 		 * Reality check the extension if possible at the spdsock
396 		 * level.
397 		 */
398 		if (!ext_check(extv[0]))
399 			return (KGE_CHK);
400 
401 		/* If I make it here, assign the appropriate bin. */
402 		extv[extv[0]->spd_ext_type] = extv[0];
403 
404 		/* Advance pointer (See above for uint64_t ptr reasoning.) */
405 		extv[0] = (spd_ext_t *)
406 		    ((uint64_t *)extv[0] + extv[0]->spd_ext_len);
407 	}
408 
409 	/* Everything's cool. */
410 
411 	/*
412 	 * If there were no extension headers, extv[0] still points just past
413 	 * the base header; reset it to NULL so callers see "no extensions".
414 	 */
415 	if (extv[0] == (spd_ext_t *)(basehdr + 1))
416 		extv[0] = NULL;
417 
418 	return (KGE_OK);
419 }
420 
421 static const int bad_ext_diag[] = {
422 	SPD_DIAGNOSTIC_MALFORMED_LCLPORT,
423 	SPD_DIAGNOSTIC_MALFORMED_REMPORT,
424 	SPD_DIAGNOSTIC_MALFORMED_PROTO,
425 	SPD_DIAGNOSTIC_MALFORMED_LCLADDR,
426 	SPD_DIAGNOSTIC_MALFORMED_REMADDR,
427 	SPD_DIAGNOSTIC_MALFORMED_ACTION,
428 	SPD_DIAGNOSTIC_MALFORMED_RULE,
429 	SPD_DIAGNOSTIC_MALFORMED_RULESET,
430 	SPD_DIAGNOSTIC_MALFORMED_ICMP_TYPECODE
431 };
432 
433 static const int dup_ext_diag[] = {
434 	SPD_DIAGNOSTIC_DUPLICATE_LCLPORT,
435 	SPD_DIAGNOSTIC_DUPLICATE_REMPORT,
436 	SPD_DIAGNOSTIC_DUPLICATE_PROTO,
437 	SPD_DIAGNOSTIC_DUPLICATE_LCLADDR,
438 	SPD_DIAGNOSTIC_DUPLICATE_REMADDR,
439 	SPD_DIAGNOSTIC_DUPLICATE_ACTION,
440 	SPD_DIAGNOSTIC_DUPLICATE_RULE,
441 	SPD_DIAGNOSTIC_DUPLICATE_RULESET,
442 	SPD_DIAGNOSTIC_DUPLICATE_ICMP_TYPECODE
443 };
444 
445 /*
446  * Transmit a PF_POLICY error message back upstream on the queue on
447  * which the faulty request arrived.
448  *
449  * The faulty message (or a reasonable facsimile thereof) is in mp.
450  * This function will free mp or recycle it for delivery, thereby causing
451  * the stream head to free it.
452  */
453 static void
454 spdsock_error(queue_t *q, mblk_t *mp, int error, int diagnostic)
455 {
456 	spd_msg_t *spmsg = (spd_msg_t *)mp->b_rptr;
457 
458 	ASSERT(mp->b_datap->db_type == M_DATA);
459 
460 	if (spmsg->spd_msg_type < SPD_MIN ||
461 	    spmsg->spd_msg_type > SPD_MAX)
462 		spmsg->spd_msg_type = SPD_RESERVED;
463 
464 	/*
465 	 * Strip out extension headers.
466 	 */
467 	ASSERT(mp->b_rptr + sizeof (*spmsg) <= mp->b_datap->db_lim);
468 	mp->b_wptr = mp->b_rptr + sizeof (*spmsg);
469 	spmsg->spd_msg_len = SPD_8TO64(sizeof (spd_msg_t));
470 	spmsg->spd_msg_errno = (uint8_t)error;
471 	spmsg->spd_msg_diagnostic = (uint16_t)diagnostic;
472 
473 	qreply(q, mp);
474 }
475 
476 static void
477 spdsock_diag(queue_t *q, mblk_t *mp, int diagnostic)
478 {
479 	spdsock_error(q, mp, EINVAL, diagnostic);
480 }
481 
482 static void
483 spd_echo(queue_t *q, mblk_t *mp)
484 {
485 	qreply(q, mp);
486 }
487 
488 /*
489  * Do NOT consume a reference to itp.
490  */
491 static void
492 spdsock_flush_node(ipsec_tun_pol_t *itp, void *cookie)
493 {
494 	boolean_t active = (boolean_t)cookie;
495 	ipsec_policy_head_t *iph;
496 
497 	iph = active ? itp->itp_policy : itp->itp_inactive;
498 	IPPH_REFHOLD(iph);
499 	mutex_enter(&itp->itp_lock);
500 	spdsock_flush_one(iph);
501 	if (active)
502 		itp->itp_flags &= ~ITPF_PFLAGS;
503 	else
504 		itp->itp_flags &= ~ITPF_IFLAGS;
505 	mutex_exit(&itp->itp_lock);
506 }
507 
508 /*
509  * Clear out one polhead.
510  */
511 static void
512 spdsock_flush_one(ipsec_policy_head_t *iph)
513 {
514 	rw_enter(&iph->iph_lock, RW_WRITER);
515 	ipsec_polhead_flush(iph);
516 	rw_exit(&iph->iph_lock);
517 	IPPH_REFRELE(iph);
518 }
519 
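/*
 * Handle an SPD_FLUSH request: clear either the polhead the message was
 * addressed to, or (for the ALL_*_POLHEADS sentinels) the global polhead
 * plus the matching polhead of every configured tunnel, then echo the
 * request back to the caller as an acknowledgment.
 */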
520 static void
521 spdsock_flush(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp)
522 {
523 	boolean_t active;
524 
525 	if (iph != ALL_ACTIVE_POLHEADS && iph != ALL_INACTIVE_POLHEADS) {
526 		spdsock_flush_one(iph);
527 	} else {
528 		active = (iph == ALL_ACTIVE_POLHEADS);
529 
530 		/* First flush the global policy. */
531 		spdsock_flush_one(active ? ipsec_system_policy() :
532 		    ipsec_inactive_policy());
533 
534 		/* Then flush every tunnel's appropriate one. */
535 		itp_walk(spdsock_flush_node, (void *)active);
536 	}
537 
538 	spd_echo(q, mp);
539 }
540 
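/*
 * Convert the selector-related extensions (protocol, ports, addresses,
 * ICMP type/code) of a request into an ipsec_selkey_t.  Returns B_FALSE
 * and sets *diag on malformed input.
 */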
541 static boolean_t
542 spdsock_ext_to_sel(spd_ext_t **extv, ipsec_selkey_t *sel, int *diag)
543 {
544 	bzero(sel, sizeof (*sel));
545 
546 	if (extv[SPD_EXT_PROTO] != NULL) {
547 		struct spd_proto *pr =
548 		    (struct spd_proto *)extv[SPD_EXT_PROTO];
549 		sel->ipsl_proto = pr->spd_proto_number;
550 		sel->ipsl_valid |= IPSL_PROTOCOL;
551 	}
552 	if (extv[SPD_EXT_LCLPORT] != NULL) {
553 		struct spd_portrange *pr =
554 		    (struct spd_portrange *)extv[SPD_EXT_LCLPORT];
555 		sel->ipsl_lport = pr->spd_ports_minport;
556 		sel->ipsl_valid |= IPSL_LOCAL_PORT;
557 	}
558 	if (extv[SPD_EXT_REMPORT] != NULL) {
559 		struct spd_portrange *pr =
560 		    (struct spd_portrange *)extv[SPD_EXT_REMPORT];
561 		sel->ipsl_rport = pr->spd_ports_minport;
562 		sel->ipsl_valid |= IPSL_REMOTE_PORT;
563 	}
564 
565 	if (extv[SPD_EXT_ICMP_TYPECODE] != NULL) {
566 		struct spd_typecode *tc =
567 		    (struct spd_typecode *)extv[SPD_EXT_ICMP_TYPECODE];
568 
569 		sel->ipsl_valid |= IPSL_ICMP_TYPE;
570 		sel->ipsl_icmp_type = tc->spd_typecode_type;
571 		if (tc->spd_typecode_type_end < tc->spd_typecode_type)
572 			sel->ipsl_icmp_type_end = tc->spd_typecode_type;
573 		else
574 			sel->ipsl_icmp_type_end = tc->spd_typecode_type_end;
575 
576 		if (tc->spd_typecode_code != 255) {
577 			sel->ipsl_valid |= IPSL_ICMP_CODE;
578 			sel->ipsl_icmp_code = tc->spd_typecode_code;
579 			if (tc->spd_typecode_code_end < tc->spd_typecode_code)
580 				sel->ipsl_icmp_code_end = tc->spd_typecode_code;
581 			else
582 				sel->ipsl_icmp_code_end =
583 				    tc->spd_typecode_code_end;
584 		}
585 	}
586 #define	ADDR2SEL(sel, extv, field, pfield, extn, bit)			      \
587 	if ((extv)[(extn)] != NULL) {					      \
588 		uint_t addrlen;						      \
589 		struct spd_address *ap = 				      \
590 			(struct spd_address *)((extv)[(extn)]); 	      \
591 		addrlen = (ap->spd_address_af == AF_INET6) ? 		      \
592 			IPV6_ADDR_LEN : IP_ADDR_LEN;			      \
593 		if (SPD_64TO8(ap->spd_address_len) < 			      \
594 			(addrlen + sizeof (*ap))) {			      \
595 			*diag = SPD_DIAGNOSTIC_BAD_ADDR_LEN;		      \
596 			return (B_FALSE);				      \
597 		}							      \
598 		bcopy((ap+1), &((sel)->field), addrlen);		      \
599 		(sel)->pfield = ap->spd_address_prefixlen;		      \
600 		(sel)->ipsl_valid |= (bit);				      \
601 		(sel)->ipsl_valid |= (ap->spd_address_af == AF_INET6) ?	      \
602 			IPSL_IPV6 : IPSL_IPV4;				      \
603 	}
604 
605 	ADDR2SEL(sel, extv, ipsl_local, ipsl_local_pfxlen,
606 	    SPD_EXT_LCLADDR, IPSL_LOCAL_ADDR);
607 	ADDR2SEL(sel, extv, ipsl_remote, ipsl_remote_pfxlen,
608 	    SPD_EXT_REMADDR, IPSL_REMOTE_ADDR);
609 
610 	if ((sel->ipsl_valid & (IPSL_IPV6|IPSL_IPV4)) ==
611 	    (IPSL_IPV6|IPSL_IPV4)) {
612 		*diag = SPD_DIAGNOSTIC_MIXED_AF;
613 		return (B_FALSE);
614 	}
615 
616 #undef ADDR2SEL
617 
618 	return (B_TRUE);
619 }
620 
621 static boolean_t
622 spd_convert_type(uint32_t type, ipsec_act_t *act)
623 {
624 	switch (type) {
625 	case SPD_ACTTYPE_DROP:
626 		act->ipa_type = IPSEC_ACT_DISCARD;
627 		return (B_TRUE);
628 
629 	case SPD_ACTTYPE_PASS:
630 		act->ipa_type = IPSEC_ACT_CLEAR;
631 		return (B_TRUE);
632 
633 	case SPD_ACTTYPE_IPSEC:
634 		act->ipa_type = IPSEC_ACT_APPLY;
635 		return (B_TRUE);
636 	}
637 	return (B_FALSE);
638 }
639 
640 static boolean_t
641 spd_convert_flags(uint32_t flags, ipsec_act_t *act)
642 {
643 	/*
644 	 * Note use of !! for boolean canonicalization.
645 	 */
646 	act->ipa_apply.ipp_use_ah = !!(flags & SPD_APPLY_AH);
647 	act->ipa_apply.ipp_use_esp = !!(flags & SPD_APPLY_ESP);
648 	act->ipa_apply.ipp_use_espa = !!(flags & SPD_APPLY_ESPA);
649 	act->ipa_apply.ipp_use_se = !!(flags & SPD_APPLY_SE);
650 	act->ipa_apply.ipp_use_unique = !!(flags & SPD_APPLY_UNIQUE);
651 	return (B_TRUE);
652 }
653 
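/* Reset an action to its default state: all zeros, with key-size maxima wide open. */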
654 static void
655 spdsock_reset_act(ipsec_act_t *act)
656 {
657 	bzero(act, sizeof (*act));
658 	act->ipa_apply.ipp_espe_maxbits = IPSEC_MAX_KEYBITS;
659 	act->ipa_apply.ipp_espa_maxbits = IPSEC_MAX_KEYBITS;
660 	act->ipa_apply.ipp_ah_maxbits = IPSEC_MAX_KEYBITS;
661 }
662 
663 /*
664  * Sanity check action against reality, and shrink-wrap key sizes..
665  */
666 static boolean_t
667 spdsock_check_action(ipsec_act_t *act, boolean_t tunnel_polhead, int *diag)
668 {
669 	if (tunnel_polhead && act->ipa_apply.ipp_use_unique) {
670 		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
671 		return (B_FALSE);
672 	}
673 	if ((act->ipa_type != IPSEC_ACT_APPLY) &&
674 	    (act->ipa_apply.ipp_use_ah ||
675 		act->ipa_apply.ipp_use_esp ||
676 		act->ipa_apply.ipp_use_espa ||
677 		act->ipa_apply.ipp_use_se ||
678 		act->ipa_apply.ipp_use_unique)) {
679 		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
680 		return (B_FALSE);
681 	}
682 	if ((act->ipa_type == IPSEC_ACT_APPLY) &&
683 	    !act->ipa_apply.ipp_use_ah &&
684 	    !act->ipa_apply.ipp_use_esp) {
685 		*diag = SPD_DIAGNOSTIC_ADD_INCON_FLAGS;
686 		return (B_FALSE);
687 	}
688 	return (ipsec_check_action(act, diag));
689 }
690 
691 /*
692  * We may be short a few error checks here..
693  */
694 static boolean_t
695 spdsock_ext_to_actvec(spd_ext_t **extv, ipsec_act_t **actpp, uint_t *nactp,
696     int *diag)
697 {
698 	struct spd_ext_actions *sactp =
699 	    (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
700 	ipsec_act_t act, *actp, *endactp;
701 	struct spd_attribute *attrp, *endattrp;
702 	uint64_t *endp;
703 	int nact;
704 	boolean_t tunnel_polhead;
705 
706 	tunnel_polhead = (extv[SPD_EXT_TUN_NAME] != NULL &&
707 	    (((struct spd_rule *)extv[SPD_EXT_RULE])->spd_rule_flags &
708 		SPD_RULE_FLAG_TUNNEL));
709 
710 	*actpp = NULL;
711 	*nactp = 0;
712 
713 	if (sactp == NULL) {
714 		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
715 		return (B_FALSE);
716 	}
717 
718 	/*
719 	 * Parse the "action" extension and convert into an action chain.
720 	 */
721 
722 	nact = sactp->spd_actions_count;
723 
724 	endp = (uint64_t *)sactp;
725 	endp += sactp->spd_actions_len;
726 	endattrp = (struct spd_attribute *)endp;
727 
728 	actp = kmem_alloc(sizeof (*actp) * nact, KM_NOSLEEP);
729 	if (actp == NULL) {
730 		*diag = SPD_DIAGNOSTIC_ADD_NO_MEM;
731 		return (B_FALSE);
732 	}
733 	*actpp = actp;
734 	*nactp = nact;
735 	endactp = actp + nact;
736 
737 	spdsock_reset_act(&act);
738 	attrp = (struct spd_attribute *)(&sactp[1]);
739 
740 	for (; attrp < endattrp; attrp++) {
741 		switch (attrp->spd_attr_tag) {
742 		case SPD_ATTR_NOP:
743 			break;
744 
745 		case SPD_ATTR_EMPTY:
746 			spdsock_reset_act(&act);
747 			break;
748 
749 		case SPD_ATTR_END:
750 			attrp = endattrp;
751 			/* FALLTHRU */
752 		case SPD_ATTR_NEXT:
753 			if (actp >= endactp) {
754 				*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
755 				goto fail;
756 			}
757 			if (!spdsock_check_action(&act, tunnel_polhead, diag))
758 				goto fail;
759 			*actp++ = act;
760 			spdsock_reset_act(&act);
761 			break;
762 
763 		case SPD_ATTR_TYPE:
764 			if (!spd_convert_type(attrp->spd_attr_value, &act)) {
765 				*diag = SPD_DIAGNOSTIC_ADD_BAD_TYPE;
766 				goto fail;
767 			}
768 			break;
769 
770 		case SPD_ATTR_FLAGS:
771 			if (!tunnel_polhead && extv[SPD_EXT_TUN_NAME] != NULL) {
772 				/*
773 				 * Set "sa unique" for transport-mode
774 				 * tunnels whether we want to or not.
775 				 */
776 				attrp->spd_attr_value |= SPD_APPLY_UNIQUE;
777 			}
778 			if (!spd_convert_flags(attrp->spd_attr_value, &act)) {
779 				*diag = SPD_DIAGNOSTIC_ADD_BAD_FLAGS;
780 				goto fail;
781 			}
782 			break;
783 
784 		case SPD_ATTR_AH_AUTH:
785 			if (attrp->spd_attr_value == 0) {
786 				*diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
787 				goto fail;
788 			}
789 			act.ipa_apply.ipp_auth_alg = attrp->spd_attr_value;
790 			break;
791 
792 		case SPD_ATTR_ESP_ENCR:
793 			if (attrp->spd_attr_value == 0) {
794 				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
795 				goto fail;
796 			}
797 			act.ipa_apply.ipp_encr_alg = attrp->spd_attr_value;
798 			break;
799 
800 		case SPD_ATTR_ESP_AUTH:
801 			if (attrp->spd_attr_value == 0) {
802 				*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
803 				goto fail;
804 			}
805 			act.ipa_apply.ipp_esp_auth_alg = attrp->spd_attr_value;
806 			break;
807 
808 		case SPD_ATTR_ENCR_MINBITS:
809 			act.ipa_apply.ipp_espe_minbits = attrp->spd_attr_value;
810 			break;
811 
812 		case SPD_ATTR_ENCR_MAXBITS:
813 			act.ipa_apply.ipp_espe_maxbits = attrp->spd_attr_value;
814 			break;
815 
816 		case SPD_ATTR_AH_MINBITS:
817 			act.ipa_apply.ipp_ah_minbits = attrp->spd_attr_value;
818 			break;
819 
820 		case SPD_ATTR_AH_MAXBITS:
821 			act.ipa_apply.ipp_ah_maxbits = attrp->spd_attr_value;
822 			break;
823 
824 		case SPD_ATTR_ESPA_MINBITS:
825 			act.ipa_apply.ipp_espa_minbits = attrp->spd_attr_value;
826 			break;
827 
828 		case SPD_ATTR_ESPA_MAXBITS:
829 			act.ipa_apply.ipp_espa_maxbits = attrp->spd_attr_value;
830 			break;
831 
832 		case SPD_ATTR_LIFE_SOFT_TIME:
833 		case SPD_ATTR_LIFE_HARD_TIME:
834 		case SPD_ATTR_LIFE_SOFT_BYTES:
835 		case SPD_ATTR_LIFE_HARD_BYTES:
836 			break;
837 
838 		case SPD_ATTR_KM_PROTO:
839 			act.ipa_apply.ipp_km_proto = attrp->spd_attr_value;
840 			break;
841 
842 		case SPD_ATTR_KM_COOKIE:
843 			act.ipa_apply.ipp_km_cookie = attrp->spd_attr_value;
844 			break;
845 
846 		case SPD_ATTR_REPLAY_DEPTH:
847 			act.ipa_apply.ipp_replay_depth = attrp->spd_attr_value;
848 			break;
849 		}
850 	}
851 	if (actp != endactp) {
852 		*diag = SPD_DIAGNOSTIC_ADD_WRONG_ACT_COUNT;
853 		goto fail;
854 	}
855 
856 	return (B_TRUE);
857 fail:
858 	ipsec_actvec_free(*actpp, nact);
859 	*actpp = NULL;
860 	return (B_FALSE);
861 }
862 
863 typedef struct
864 {
865 	ipsec_policy_t *pol;
866 	int dir;
867 } tmprule_t;
868 
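/*
 * Create one ipsec_policy_t for the given selector/action/direction/AF
 * and stash it in the caller's tmprule_t array; it is not entered into
 * the polhead until the whole request has been validated.
 */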
869 static int
870 mkrule(ipsec_policy_head_t *iph, struct spd_rule *rule,
871     ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t af,
872     tmprule_t **rp, uint64_t *index)
873 {
874 	ipsec_policy_t *pol;
875 
876 	sel->ipsl_valid &= ~(IPSL_IPV6|IPSL_IPV4);
877 	sel->ipsl_valid |= af;
878 
879 	pol = ipsec_policy_create(sel, actp, nact, rule->spd_rule_priority,
880 	    index);
881 	if (pol == NULL)
882 		return (ENOMEM);
883 
884 	(*rp)->pol = pol;
885 	(*rp)->dir = dir;
886 	(*rp)++;
887 
888 	if (!ipsec_check_policy(iph, pol, dir))
889 		return (EEXIST);
890 
891 	rule->spd_rule_index = pol->ipsp_index;
892 	return (0);
893 }
894 
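/* Call mkrule() once for each address family requested in "afs". */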
895 static int
896 mkrulepair(ipsec_policy_head_t *iph, struct spd_rule *rule,
897     ipsec_selkey_t *sel, ipsec_act_t *actp, int nact, uint_t dir, uint_t afs,
898     tmprule_t **rp, uint64_t *index)
899 {
900 	int error;
901 
902 	if (afs & IPSL_IPV4) {
903 		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV4, rp,
904 		    index);
905 		if (error != 0)
906 			return (error);
907 	}
908 	if (afs & IPSL_IPV6) {
909 		error = mkrule(iph, rule, sel, actp, nact, dir, IPSL_IPV6, rp,
910 		    index);
911 		if (error != 0)
912 			return (error);
913 	}
914 	return (0);
915 }
916 
917 
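/*
 * Handle an SPD_ADDRULE request: validate the rule and action extensions,
 * build the policy entries (one per requested direction and address
 * family), and enter them all into the polhead under iph_lock only if
 * everything succeeds.
 */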
918 static void
919 spdsock_addrule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
920     spd_ext_t **extv, ipsec_tun_pol_t *itp)
921 {
922 	ipsec_selkey_t sel;
923 	ipsec_act_t *actp;
924 	uint_t nact;
925 	int diag = 0, error, afs;
926 	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
927 	tmprule_t rules[4], *rulep = &rules[0];
928 	boolean_t tunnel_mode, empty_itp, active;
929 	uint64_t *index = (itp == NULL) ? NULL : &itp->itp_next_policy_index;
930 
931 	if (rule == NULL) {
932 		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
933 		return;
934 	}
935 
936 	tunnel_mode = (rule->spd_rule_flags & SPD_RULE_FLAG_TUNNEL);
937 
938 	if (itp != NULL) {
939 		mutex_enter(&itp->itp_lock);
940 		ASSERT(itp->itp_policy == iph || itp->itp_inactive == iph);
941 		active = (itp->itp_policy == iph);
942 		if (ITP_P_ISACTIVE(itp, iph)) {
943 			/* Check for mix-and-match of tunnel/transport. */
944 			if ((tunnel_mode && !ITP_P_ISTUNNEL(itp, iph)) ||
945 			    (!tunnel_mode && ITP_P_ISTUNNEL(itp, iph))) {
946 				mutex_exit(&itp->itp_lock);
947 				spdsock_error(q, mp, EBUSY, 0);
948 				return;
949 			}
950 			empty_itp = B_FALSE;
951 		} else {
952 			empty_itp = B_TRUE;
953 			itp->itp_flags = active ? ITPF_P_ACTIVE : ITPF_I_ACTIVE;
954 			if (tunnel_mode)
955 				itp->itp_flags |= active ? ITPF_P_TUNNEL :
956 				    ITPF_I_TUNNEL;
957 		}
958 	} else {
959 		empty_itp = B_FALSE;
960 	}
961 
962 	if (rule->spd_rule_index != 0) {
963 		diag = SPD_DIAGNOSTIC_INVALID_RULE_INDEX;
964 		error = EINVAL;
965 		goto fail2;
966 	}
967 
968 	if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
969 		error = EINVAL;
970 		goto fail2;
971 	}
972 
973 	if (itp != NULL) {
974 		if (tunnel_mode) {
975 			if (sel.ipsl_valid &
976 			    (IPSL_REMOTE_PORT | IPSL_LOCAL_PORT)) {
977 				itp->itp_flags |= active ?
978 				    ITPF_P_PER_PORT_SECURITY :
979 				    ITPF_I_PER_PORT_SECURITY;
980 			}
981 		} else {
982 			/*
983 			 * For now, we don't allow transport-mode on a tunnel
984 			 * with ANY specific selectors.  Bail if we have such
985 			 * a request.
986 			 */
987 			if (sel.ipsl_valid & IPSL_WILDCARD) {
988 				diag = SPD_DIAGNOSTIC_NO_TUNNEL_SELECTORS;
989 				error = EINVAL;
990 				goto fail2;
991 			}
992 		}
993 	}
994 
995 	if (!spdsock_ext_to_actvec(extv, &actp, &nact, &diag)) {
996 		error = EINVAL;
997 		goto fail2;
998 	}
999 	/*
1000 	 * If no addresses were specified, add both.
1001 	 */
1002 	afs = sel.ipsl_valid & (IPSL_IPV6|IPSL_IPV4);
1003 	if (afs == 0)
1004 		afs = (IPSL_IPV6|IPSL_IPV4);
1005 
1006 	rw_enter(&iph->iph_lock, RW_WRITER);
1007 
1008 	if (rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) {
1009 		error = mkrulepair(iph, rule, &sel, actp, nact,
1010 		    IPSEC_TYPE_OUTBOUND, afs, &rulep, index);
1011 		if (error != 0)
1012 			goto fail;
1013 	}
1014 
1015 	if (rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) {
1016 		error = mkrulepair(iph, rule, &sel, actp, nact,
1017 		    IPSEC_TYPE_INBOUND, afs, &rulep, index);
1018 		if (error != 0)
1019 			goto fail;
1020 	}
1021 
1022 	while ((--rulep) >= &rules[0])
1023 		ipsec_enter_policy(iph, rulep->pol, rulep->dir);
1024 
1025 	rw_exit(&iph->iph_lock);
1026 	if (itp != NULL)
1027 		mutex_exit(&itp->itp_lock);
1028 
1029 	ipsec_actvec_free(actp, nact);
1030 	spd_echo(q, mp);
1031 	return;
1032 
1033 fail:
1034 	rw_exit(&iph->iph_lock);
1035 	while ((--rulep) >= &rules[0]) {
1036 		IPPOL_REFRELE(rulep->pol);
1037 	}
1038 	ipsec_actvec_free(actp, nact);
1039 fail2:
1040 	if (itp != NULL) {
1041 		if (empty_itp)
1042 			itp->itp_flags = 0;
1043 		mutex_exit(&itp->itp_lock);
1044 	}
1045 	spdsock_error(q, mp, error, diag);
1046 }
1047 
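/*
 * Handle an SPD_DELETERULE request: delete either by rule index or by
 * selector match, and clear the tunnel polhead's flags if it is now empty.
 */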
1048 void
1049 spdsock_deleterule(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
1050     spd_ext_t **extv, ipsec_tun_pol_t *itp)
1051 {
1052 	ipsec_selkey_t sel;
1053 	struct spd_rule *rule = (struct spd_rule *)extv[SPD_EXT_RULE];
1054 	int err, diag = 0;
1055 
1056 	if (rule == NULL) {
1057 		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NO_RULE_EXT);
1058 		return;
1059 	}
1060 
1061 	/*
1062 	 * Must enter itp_lock first to avoid deadlock.  See tun.c's
1063 	 * set_sec_simple() for the other case of itp_lock and iph_lock.
1064 	 */
1065 	if (itp != NULL)
1066 		mutex_enter(&itp->itp_lock);
1067 
1068 	if (rule->spd_rule_index != 0) {
1069 		if (ipsec_policy_delete_index(iph, rule->spd_rule_index) != 0) {
1070 			err = ESRCH;
1071 			goto fail;
1072 		}
1073 	} else {
1074 		if (!spdsock_ext_to_sel(extv, &sel, &diag)) {
1075 			err = EINVAL;	/* diag already set... */
1076 			goto fail;
1077 		}
1078 
1079 		if ((rule->spd_rule_flags & SPD_RULE_FLAG_INBOUND) &&
1080 		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_INBOUND)) {
1081 			err = ESRCH;
1082 			goto fail;
1083 		}
1084 
1085 		if ((rule->spd_rule_flags & SPD_RULE_FLAG_OUTBOUND) &&
1086 		    !ipsec_policy_delete(iph, &sel, IPSEC_TYPE_OUTBOUND)) {
1087 			err = ESRCH;
1088 			goto fail;
1089 		}
1090 	}
1091 
1092 	if (itp != NULL) {
1093 		ASSERT(iph == itp->itp_policy || iph == itp->itp_inactive);
1094 		rw_enter(&iph->iph_lock, RW_READER);
1095 		if (avl_numnodes(&iph->iph_rulebyid) == 0) {
1096 			if (iph == itp->itp_policy)
1097 				itp->itp_flags &= ~ITPF_PFLAGS;
1098 			else
1099 				itp->itp_flags &= ~ITPF_IFLAGS;
1100 		}
1101 		/* Can exit locks in any order. */
1102 		rw_exit(&iph->iph_lock);
1103 		mutex_exit(&itp->itp_lock);
1104 	}
1105 	spd_echo(q, mp);
1106 	return;
1107 fail:
1108 	if (itp != NULL)
1109 		mutex_exit(&itp->itp_lock);
1110 	spdsock_error(q, mp, err, diag);
1111 }
1112 
1113 /* Do NOT consume a reference to itp. */
1114 /* ARGSUSED */
1115 static void
1116 spdsock_flip_node(ipsec_tun_pol_t *itp, void *ignoreme)
1117 {
1118 	mutex_enter(&itp->itp_lock);
1119 	ITPF_SWAP(itp->itp_flags);
1120 	ipsec_swap_policy(itp->itp_policy, itp->itp_inactive);
1121 	mutex_exit(&itp->itp_lock);
1122 }
1123 
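/*
 * Handle SPD_FLIP: swap the active and inactive polheads, either for the
 * global policy (and every tunnel) or for one named tunnel.
 */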
1124 void
1125 spdsock_flip(queue_t *q, mblk_t *mp, spd_if_t *tunname)
1126 {
1127 	char *tname;
1128 	ipsec_tun_pol_t *itp;
1129 
1130 	if (tunname != NULL) {
1131 		tname = (char *)tunname->spd_if_name;
1132 		if (*tname == '\0') {
1133 			ipsec_swap_global_policy();	/* can't fail */
1134 			itp_walk(spdsock_flip_node, NULL);
1135 		} else {
1136 			itp = get_tunnel_policy(tname);
1137 			if (itp == NULL) {
1138 				/* Better idea for "tunnel not found"? */
1139 				spdsock_error(q, mp, ESRCH, 0);
1140 				return;
1141 			}
1142 			spdsock_flip_node(itp, NULL);
1143 			ITP_REFRELE(itp);
1144 		}
1145 	} else {
1146 		ipsec_swap_global_policy();	/* can't fail */
1147 	}
1148 	spd_echo(q, mp);
1149 }
1150 
1151 /*
1152  * Unimplemented feature
1153  */
1154 /* ARGSUSED */
1155 static void
1156 spdsock_lookup(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp,
1157     spd_ext_t **extv, ipsec_tun_pol_t *itp)
1158 {
1159 	spdsock_error(q, mp, EINVAL, 0);
1160 }
1161 
1162 
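/*
 * Build the SPD_EXT_RULESET record that brackets a dump: it carries the
 * rule count, the polhead generation number, and any terminating errno.
 */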
1163 static mblk_t *
1164 spdsock_dump_ruleset(mblk_t *req, ipsec_policy_head_t *iph,
1165     uint32_t count, uint16_t error)
1166 {
1167 	size_t len = sizeof (spd_ruleset_ext_t) + sizeof (spd_msg_t);
1168 	spd_msg_t *msg;
1169 	spd_ruleset_ext_t *ruleset;
1170 	mblk_t *m = allocb(len, BPRI_HI);
1171 
1172 	ASSERT(RW_READ_HELD(&iph->iph_lock));
1173 
1174 	if (m == NULL) {
1175 		return (NULL);
1176 	}
1177 	msg = (spd_msg_t *)m->b_rptr;
1178 	ruleset = (spd_ruleset_ext_t *)(&msg[1]);
1179 
1180 	m->b_wptr = (uint8_t *)&ruleset[1];
1181 
1182 	*msg = *(spd_msg_t *)(req->b_rptr);
1183 	msg->spd_msg_len = SPD_8TO64(len);
1184 	msg->spd_msg_errno = error;
1185 
1186 	ruleset->spd_ruleset_len = SPD_8TO64(sizeof (*ruleset));
1187 	ruleset->spd_ruleset_type = SPD_EXT_RULESET;
1188 	ruleset->spd_ruleset_count = count;
1189 	ruleset->spd_ruleset_version = iph->iph_gen;
1190 	return (m);
1191 }
1192 
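/*
 * Either advance the dump to the next tunnel polhead (when more polheads
 * remain) or finish it by emitting the final ruleset record and releasing
 * the dump state.
 */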
1193 static mblk_t *
1194 spdsock_dump_finish(spdsock_t *ss, int error)
1195 {
1196 	mblk_t *m;
1197 	ipsec_policy_head_t *iph = ss->spdsock_dump_head;
1198 	mblk_t *req = ss->spdsock_dump_req;
1199 	ipsec_tun_pol_t *itp, dummy;
1200 
1201 	ss->spdsock_dump_remaining_polheads--;
1202 	if (error == 0 && ss->spdsock_dump_remaining_polheads != 0) {
1203 		/* Attempt a respin with a new policy head. */
1204 		rw_enter(&tunnel_policy_lock, RW_READER);
1205 		/* NOTE:  No need for ITP_REF*() macros here. */
1206 		if (tunnel_policy_gen > ss->spdsock_dump_tun_gen) {
1207 			/* Bail with EAGAIN. */
1208 			error = EAGAIN;
1209 		} else if (ss->spdsock_dump_name[0] == '\0') {
1210 			/* Just finished global, find first node. */
1211 			itp = (ipsec_tun_pol_t *)avl_first(&tunnel_policies);
1212 		} else {
1213 			/*
1214 			 * We just finished current-named polhead, find
1215 			 * the next one.
1216 			 */
1217 			(void) strncpy(dummy.itp_name, ss->spdsock_dump_name,
1218 			    LIFNAMSIZ);
1219 			itp = (ipsec_tun_pol_t *)avl_find(&tunnel_policies,
1220 			    &dummy, NULL);
1221 			ASSERT(itp != NULL);
1222 			itp = (ipsec_tun_pol_t *)AVL_NEXT(&tunnel_policies,
1223 			    itp);
1224 			/* remaining_polheads should maintain this assertion. */
1225 			ASSERT(itp != NULL);
1226 		}
1227 		if (error == 0) {
1228 			(void) strncpy(ss->spdsock_dump_name, itp->itp_name,
1229 			    LIFNAMSIZ);
1230 			/* Reset other spdsock_dump thingies. */
1231 			IPPH_REFRELE(ss->spdsock_dump_head);
1232 			if (ss->spdsock_dump_active) {
1233 				ss->spdsock_dump_tunnel =
1234 				    itp->itp_flags & ITPF_P_TUNNEL;
1235 				iph = itp->itp_policy;
1236 			} else {
1237 				ss->spdsock_dump_tunnel =
1238 				    itp->itp_flags & ITPF_I_TUNNEL;
1239 				iph = itp->itp_inactive;
1240 			}
1241 			IPPH_REFHOLD(iph);
1242 			rw_enter(&iph->iph_lock, RW_READER);
1243 			ss->spdsock_dump_head = iph;
1244 			ss->spdsock_dump_gen = iph->iph_gen;
1245 			ss->spdsock_dump_cur_type = 0;
1246 			ss->spdsock_dump_cur_af = IPSEC_AF_V4;
1247 			ss->spdsock_dump_cur_rule = NULL;
1248 			ss->spdsock_dump_count = 0;
1249 			ss->spdsock_dump_cur_chain = 0;
1250 			rw_exit(&iph->iph_lock);
1251 			rw_exit(&tunnel_policy_lock);
1252 			/* And start again. */
1253 			return (spdsock_dump_next_record(ss));
1254 		}
1255 		rw_exit(&tunnel_policy_lock);
1256 	}
1257 
1258 	rw_enter(&iph->iph_lock, RW_READER);
1259 	m = spdsock_dump_ruleset(req, iph, ss->spdsock_dump_count, error);
1260 	rw_exit(&iph->iph_lock);
1261 	IPPH_REFRELE(iph);
1262 	ss->spdsock_dump_req = NULL;
1263 	freemsg(req);
1264 
1265 	return (m);
1266 }
1267 
1268 /*
1269  * Rule encoding functions.
1270  * We do a two-pass encode.
1271  * If base != NULL, fill in encoded rule part starting at base+offset.
1272  * Always return "offset" plus length of to-be-encoded data.
1273  */
1274 static uint_t
1275 spdsock_encode_typecode(uint8_t *base, uint_t offset, uint8_t type,
1276     uint8_t type_end, uint8_t code, uint8_t code_end)
1277 {
1278 	struct spd_typecode *tcp;
1279 
1280 	ASSERT(ALIGNED64(offset));
1281 
1282 	if (base != NULL) {
1283 		tcp = (struct spd_typecode *)(base + offset);
1284 		tcp->spd_typecode_len = SPD_8TO64(sizeof (*tcp));
1285 		tcp->spd_typecode_exttype = SPD_EXT_ICMP_TYPECODE;
1286 		tcp->spd_typecode_code = code;
1287 		tcp->spd_typecode_type = type;
1288 		tcp->spd_typecode_type_end = type_end;
1289 		tcp->spd_typecode_code_end = code_end;
1290 	}
1291 	offset += sizeof (*tcp);
1292 
1293 	ASSERT(ALIGNED64(offset));
1294 
1295 	return (offset);
1296 }
1297 
1298 static uint_t
1299 spdsock_encode_proto(uint8_t *base, uint_t offset, uint8_t proto)
1300 {
1301 	struct spd_proto *spp;
1302 
1303 	ASSERT(ALIGNED64(offset));
1304 
1305 	if (base != NULL) {
1306 		spp = (struct spd_proto *)(base + offset);
1307 		spp->spd_proto_len = SPD_8TO64(sizeof (*spp));
1308 		spp->spd_proto_exttype = SPD_EXT_PROTO;
1309 		spp->spd_proto_number = proto;
1310 		spp->spd_proto_reserved1 = 0;
1311 		spp->spd_proto_reserved2 = 0;
1312 	}
1313 	offset += sizeof (*spp);
1314 
1315 	ASSERT(ALIGNED64(offset));
1316 
1317 	return (offset);
1318 }
1319 
1320 static uint_t
1321 spdsock_encode_port(uint8_t *base, uint_t offset, uint16_t ext, uint16_t port)
1322 {
1323 	struct spd_portrange *spp;
1324 
1325 	ASSERT(ALIGNED64(offset));
1326 
1327 	if (base != NULL) {
1328 		spp = (struct spd_portrange *)(base + offset);
1329 		spp->spd_ports_len = SPD_8TO64(sizeof (*spp));
1330 		spp->spd_ports_exttype = ext;
1331 		spp->spd_ports_minport = port;
1332 		spp->spd_ports_maxport = port;
1333 	}
1334 	offset += sizeof (*spp);
1335 
1336 	ASSERT(ALIGNED64(offset));
1337 
1338 	return (offset);
1339 }
1340 
1341 static uint_t
1342 spdsock_encode_addr(uint8_t *base, uint_t offset, uint16_t ext,
1343     const ipsec_selkey_t *sel, const ipsec_addr_t *addr, uint_t pfxlen)
1344 {
1345 	struct spd_address *sae;
1346 	ipsec_addr_t *spdaddr;
1347 	uint_t start = offset;
1348 	uint_t addrlen;
1349 	uint_t af;
1350 
1351 	if (sel->ipsl_valid & IPSL_IPV4) {
1352 		af = AF_INET;
1353 		addrlen = IP_ADDR_LEN;
1354 	} else {
1355 		af = AF_INET6;
1356 		addrlen = IPV6_ADDR_LEN;
1357 	}
1358 
1359 	ASSERT(ALIGNED64(offset));
1360 
1361 	if (base != NULL) {
1362 		sae = (struct spd_address *)(base + offset);
1363 		sae->spd_address_exttype = ext;
1364 		sae->spd_address_af = af;
1365 		sae->spd_address_prefixlen = pfxlen;
1366 		sae->spd_address_reserved2 = 0;
1367 
1368 		spdaddr = (ipsec_addr_t *)(&sae[1]);
1369 		bcopy(addr, spdaddr, addrlen);
1370 	}
1371 	offset += sizeof (*sae);
1372 	addrlen = roundup(addrlen, sizeof (uint64_t));
1373 	offset += addrlen;
1374 
1375 	ASSERT(ALIGNED64(offset));
1376 
1377 	if (base != NULL)
1378 		sae->spd_address_len = SPD_8TO64(offset - start);
1379 	return (offset);
1380 }
1381 
1382 static uint_t
1383 spdsock_encode_sel(uint8_t *base, uint_t offset, const ipsec_sel_t *sel)
1384 {
1385 	const ipsec_selkey_t *selkey = &sel->ipsl_key;
1386 
1387 	if (selkey->ipsl_valid & IPSL_PROTOCOL)
1388 		offset = spdsock_encode_proto(base, offset, selkey->ipsl_proto);
1389 	if (selkey->ipsl_valid & IPSL_LOCAL_PORT)
1390 		offset = spdsock_encode_port(base, offset, SPD_EXT_LCLPORT,
1391 		    selkey->ipsl_lport);
1392 	if (selkey->ipsl_valid & IPSL_REMOTE_PORT)
1393 		offset = spdsock_encode_port(base, offset, SPD_EXT_REMPORT,
1394 		    selkey->ipsl_rport);
1395 	if (selkey->ipsl_valid & IPSL_REMOTE_ADDR)
1396 		offset = spdsock_encode_addr(base, offset, SPD_EXT_REMADDR,
1397 		    selkey, &selkey->ipsl_remote, selkey->ipsl_remote_pfxlen);
1398 	if (selkey->ipsl_valid & IPSL_LOCAL_ADDR)
1399 		offset = spdsock_encode_addr(base, offset, SPD_EXT_LCLADDR,
1400 		    selkey, &selkey->ipsl_local, selkey->ipsl_local_pfxlen);
1401 	if (selkey->ipsl_valid & IPSL_ICMP_TYPE) {
1402 		offset = spdsock_encode_typecode(base, offset,
1403 		    selkey->ipsl_icmp_type, selkey->ipsl_icmp_type_end,
1404 		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1405 			selkey->ipsl_icmp_code : 255,
1406 		    (selkey->ipsl_valid & IPSL_ICMP_CODE) ?
1407 			selkey->ipsl_icmp_code_end : 255);
1408 	}
1409 	return (offset);
1410 }
1411 
1412 static uint_t
1413 spdsock_encode_actattr(uint8_t *base, uint_t offset, uint32_t tag,
1414     uint32_t value)
1415 {
1416 	struct spd_attribute *attr;
1417 
1418 	ASSERT(ALIGNED64(offset));
1419 
1420 	if (base != NULL) {
1421 		attr = (struct spd_attribute *)(base + offset);
1422 		attr->spd_attr_tag = tag;
1423 		attr->spd_attr_value = value;
1424 	}
1425 	offset += sizeof (struct spd_attribute);
1426 
1427 	ASSERT(ALIGNED64(offset));
1428 
1429 	return (offset);
1430 }
1431 
1432 
1433 #define	EMIT(t, v) offset = spdsock_encode_actattr(base, offset, (t), (v))
1434 
1435 static uint_t
1436 spdsock_encode_action(uint8_t *base, uint_t offset, const ipsec_action_t *ap)
1437 {
1438 	const struct ipsec_act *act = &(ap->ipa_act);
1439 	uint_t flags;
1440 
1441 	EMIT(SPD_ATTR_EMPTY, 0);
1442 	switch (act->ipa_type) {
1443 	case IPSEC_ACT_DISCARD:
1444 	case IPSEC_ACT_REJECT:
1445 		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_DROP);
1446 		break;
1447 	case IPSEC_ACT_BYPASS:
1448 	case IPSEC_ACT_CLEAR:
1449 		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_PASS);
1450 		break;
1451 
1452 	case IPSEC_ACT_APPLY:
1453 		EMIT(SPD_ATTR_TYPE, SPD_ACTTYPE_IPSEC);
1454 		flags = 0;
1455 		if (act->ipa_apply.ipp_use_ah)
1456 			flags |= SPD_APPLY_AH;
1457 		if (act->ipa_apply.ipp_use_esp)
1458 			flags |= SPD_APPLY_ESP;
1459 		if (act->ipa_apply.ipp_use_espa)
1460 			flags |= SPD_APPLY_ESPA;
1461 		if (act->ipa_apply.ipp_use_se)
1462 			flags |= SPD_APPLY_SE;
1463 		if (act->ipa_apply.ipp_use_unique)
1464 			flags |= SPD_APPLY_UNIQUE;
1465 		EMIT(SPD_ATTR_FLAGS, flags);
1466 		if (flags & SPD_APPLY_AH) {
1467 			EMIT(SPD_ATTR_AH_AUTH, act->ipa_apply.ipp_auth_alg);
1468 			EMIT(SPD_ATTR_AH_MINBITS,
1469 			    act->ipa_apply.ipp_ah_minbits);
1470 			EMIT(SPD_ATTR_AH_MAXBITS,
1471 			    act->ipa_apply.ipp_ah_maxbits);
1472 		}
1473 		if (flags & SPD_APPLY_ESP) {
1474 			EMIT(SPD_ATTR_ESP_ENCR, act->ipa_apply.ipp_encr_alg);
1475 			EMIT(SPD_ATTR_ENCR_MINBITS,
1476 			    act->ipa_apply.ipp_espe_minbits);
1477 			EMIT(SPD_ATTR_ENCR_MAXBITS,
1478 			    act->ipa_apply.ipp_espe_maxbits);
1479 			if (flags & SPD_APPLY_ESPA) {
1480 				EMIT(SPD_ATTR_ESP_AUTH,
1481 				    act->ipa_apply.ipp_esp_auth_alg);
1482 				EMIT(SPD_ATTR_ESPA_MINBITS,
1483 				    act->ipa_apply.ipp_espa_minbits);
1484 				EMIT(SPD_ATTR_ESPA_MAXBITS,
1485 				    act->ipa_apply.ipp_espa_maxbits);
1486 			}
1487 		}
1488 		if (act->ipa_apply.ipp_km_proto != 0)
1489 			EMIT(SPD_ATTR_KM_PROTO, act->ipa_apply.ipp_km_proto);
1490 		if (act->ipa_apply.ipp_km_cookie != 0)
1491 			EMIT(SPD_ATTR_KM_COOKIE, act->ipa_apply.ipp_km_cookie);
1492 		if (act->ipa_apply.ipp_replay_depth != 0)
1493 			EMIT(SPD_ATTR_REPLAY_DEPTH,
1494 			    act->ipa_apply.ipp_replay_depth);
1495 		/* Add more here */
1496 		break;
1497 	}
1498 
1499 	return (offset);
1500 }
1501 
1502 static uint_t
1503 spdsock_encode_action_list(uint8_t *base, uint_t offset,
1504     const ipsec_action_t *ap)
1505 {
1506 	struct spd_ext_actions *act;
1507 	uint_t nact = 0;
1508 	uint_t start = offset;
1509 
1510 	ASSERT(ALIGNED64(offset));
1511 
1512 	if (base != NULL) {
1513 		act = (struct spd_ext_actions *)(base + offset);
1514 		act->spd_actions_len = 0;
1515 		act->spd_actions_exttype = SPD_EXT_ACTION;
1516 		act->spd_actions_count = 0;
1517 		act->spd_actions_reserved = 0;
1518 	}
1519 
1520 	offset += sizeof (*act);
1521 
1522 	ASSERT(ALIGNED64(offset));
1523 
1524 	while (ap != NULL) {
1525 		offset = spdsock_encode_action(base, offset, ap);
1526 		ap = ap->ipa_next;
1527 		nact++;
1528 		if (ap != NULL) {
1529 			EMIT(SPD_ATTR_NEXT, 0);
1530 		}
1531 	}
1532 	EMIT(SPD_ATTR_END, 0);
1533 
1534 	ASSERT(ALIGNED64(offset));
1535 
1536 	if (base != NULL) {
1537 		act->spd_actions_count = nact;
1538 		act->spd_actions_len = SPD_8TO64(offset - start);
1539 	}
1540 
1541 	return (offset);
1542 }
1543 
1544 #undef EMIT
1545 
1546 /* ARGSUSED */
1547 static uint_t
1548 spdsock_rule_flags(uint_t dir, uint_t af)
1549 {
1550 	uint_t flags = 0;
1551 
1552 	if (dir == IPSEC_TYPE_INBOUND)
1553 		flags |= SPD_RULE_FLAG_INBOUND;
1554 	if (dir == IPSEC_TYPE_OUTBOUND)
1555 		flags |= SPD_RULE_FLAG_OUTBOUND;
1556 
1557 	return (flags);
1558 }
1559 
1560 
1561 static uint_t
1562 spdsock_encode_rule_head(uint8_t *base, uint_t offset, spd_msg_t *req,
1563     const ipsec_policy_t *rule, uint_t dir, uint_t af, char *name,
1564     boolean_t tunnel)
1565 {
1566 	struct spd_msg *spmsg;
1567 	struct spd_rule *spr;
1568 	spd_if_t *sid;
1569 
1570 	uint_t start = offset;
1571 
1572 	ASSERT(ALIGNED64(offset));
1573 
1574 	if (base != NULL) {
1575 		spmsg = (struct spd_msg *)(base + offset);
1576 		bzero(spmsg, sizeof (*spmsg));
1577 		spmsg->spd_msg_version = PF_POLICY_V1;
1578 		spmsg->spd_msg_type = SPD_DUMP;
1579 		spmsg->spd_msg_seq = req->spd_msg_seq;
1580 		spmsg->spd_msg_pid = req->spd_msg_pid;
1581 	}
1582 	offset += sizeof (struct spd_msg);
1583 
1584 	ASSERT(ALIGNED64(offset));
1585 
1586 	if (base != NULL) {
1587 		spr = (struct spd_rule *)(base + offset);
1588 		spr->spd_rule_type = SPD_EXT_RULE;
1589 		spr->spd_rule_priority = rule->ipsp_prio;
1590 		spr->spd_rule_flags = spdsock_rule_flags(dir, af);
1591 		if (tunnel)
1592 			spr->spd_rule_flags |= SPD_RULE_FLAG_TUNNEL;
1593 		spr->spd_rule_unused = 0;
1594 		spr->spd_rule_len = SPD_8TO64(sizeof (*spr));
1595 		spr->spd_rule_index = rule->ipsp_index;
1596 	}
1597 	offset += sizeof (struct spd_rule);
1598 
1599 	/*
1600 	 * If we have an interface name (i.e. if this policy head came from
1601 	 * a tunnel), add the SPD_EXT_TUN_NAME extension.
1602 	 */
1603 	if (name[0] != '\0') {
1604 
1605 		ASSERT(ALIGNED64(offset));
1606 
1607 		if (base != NULL) {
1608 			sid = (spd_if_t *)(base + offset);
1609 			sid->spd_if_exttype = SPD_EXT_TUN_NAME;
1610 			sid->spd_if_len = SPD_8TO64(sizeof (spd_if_t) +
1611 			    roundup((strlen(name) - 4), 8));
1612 			(void) strlcpy((char *)sid->spd_if_name, name,
1613 			    LIFNAMSIZ);
1614 		}
1615 
1616 		offset += sizeof (spd_if_t) + roundup((strlen(name) - 4), 8);
1617 	}
1618 
1619 	offset = spdsock_encode_sel(base, offset, rule->ipsp_sel);
1620 	offset = spdsock_encode_action_list(base, offset, rule->ipsp_act);
1621 
1622 	ASSERT(ALIGNED64(offset));
1623 
1624 	if (base != NULL) {
1625 		spmsg->spd_msg_len = SPD_8TO64(offset - start);
1626 	}
1627 	return (offset);
1628 }
1629 
1630 /* ARGSUSED */
1631 static mblk_t *
1632 spdsock_encode_rule(mblk_t *req, const ipsec_policy_t *rule,
1633     uint_t dir, uint_t af, char *name, boolean_t tunnel)
1634 {
1635 	mblk_t *m;
1636 	uint_t len;
1637 	spd_msg_t *mreq = (spd_msg_t *)req->b_rptr;
1638 
1639 	/*
1640 	 * Figure out how much space we'll need.
1641 	 */
1642 	len = spdsock_encode_rule_head(NULL, 0, mreq, rule, dir, af, name,
1643 	    tunnel);
1644 
1645 	/*
1646 	 * Allocate mblk.
1647 	 */
1648 	m = allocb(len, BPRI_HI);
1649 	if (m == NULL)
1650 		return (NULL);
1651 
1652 	/*
1653 	 * Fill it in..
1654 	 */
1655 	m->b_wptr = m->b_rptr + len;
1656 	bzero(m->b_rptr, len);
1657 	(void) spdsock_encode_rule_head(m->b_rptr, 0, mreq, rule, dir, af,
1658 	    name, tunnel);
1659 	return (m);
1660 }
1661 
1662 static ipsec_policy_t *
1663 spdsock_dump_next_in_chain(spdsock_t *ss, ipsec_policy_head_t *iph,
1664     ipsec_policy_t *cur)
1665 {
1666 	ASSERT(RW_READ_HELD(&iph->iph_lock));
1667 
1668 	ss->spdsock_dump_count++;
1669 	ss->spdsock_dump_cur_rule = cur->ipsp_hash.hash_next;
1670 	return (cur);
1671 }
1672 
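/*
 * Resume the dump walk: first finish the current hash chain, then the
 * remaining chains, then the per-AF non-hashed lists, and then move on
 * to the next rule type.  Returns NULL once the whole polhead has been
 * walked.
 */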
1673 static ipsec_policy_t *
1674 spdsock_dump_next_rule(spdsock_t *ss, ipsec_policy_head_t *iph)
1675 {
1676 	ipsec_policy_t *cur;
1677 	ipsec_policy_root_t *ipr;
1678 	int chain, nchains, type, af;
1679 
1680 	ASSERT(RW_READ_HELD(&iph->iph_lock));
1681 
1682 	cur = ss->spdsock_dump_cur_rule;
1683 
1684 	if (cur != NULL)
1685 		return (spdsock_dump_next_in_chain(ss, iph, cur));
1686 
1687 	type = ss->spdsock_dump_cur_type;
1688 
1689 next:
1690 	chain = ss->spdsock_dump_cur_chain;
1691 	ipr = &iph->iph_root[type];
1692 	nchains = ipr->ipr_nchains;
1693 
1694 	while (chain < nchains) {
1695 		cur = ipr->ipr_hash[chain].hash_head;
1696 		chain++;
1697 		if (cur != NULL) {
1698 			ss->spdsock_dump_cur_chain = chain;
1699 			return (spdsock_dump_next_in_chain(ss, iph, cur));
1700 		}
1701 	}
1702 	ss->spdsock_dump_cur_chain = nchains;
1703 
1704 	af = ss->spdsock_dump_cur_af;
1705 	while (af < IPSEC_NAF) {
1706 		cur = ipr->ipr_nonhash[af];
1707 		af++;
1708 		if (cur != NULL) {
1709 			ss->spdsock_dump_cur_af = af;
1710 			return (spdsock_dump_next_in_chain(ss, iph, cur));
1711 		}
1712 	}
1713 
1714 	type++;
1715 	if (type >= IPSEC_NTYPES)
1716 		return (NULL);
1717 
1718 	ss->spdsock_dump_cur_chain = 0;
1719 	ss->spdsock_dump_cur_type = type;
1720 	ss->spdsock_dump_cur_af = IPSEC_AF_V4;
1721 	goto next;
1722 
1723 }
1724 
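/*
 * Encode the next rule of the current dump into an mblk, or finish the
 * dump (EAGAIN if the polhead changed underneath us, ENOMEM on allocation
 * failure, 0 when the walk is complete).
 */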
1725 static mblk_t *
1726 spdsock_dump_next_record(spdsock_t *ss)
1727 {
1728 	ipsec_policy_head_t *iph;
1729 	ipsec_policy_t *rule;
1730 	mblk_t *m;
1731 	mblk_t *req = ss->spdsock_dump_req;
1732 
1733 	iph = ss->spdsock_dump_head;
1734 
1735 	ASSERT(iph != NULL);
1736 
1737 	rw_enter(&iph->iph_lock, RW_READER);
1738 
1739 	if (iph->iph_gen != ss->spdsock_dump_gen) {
1740 		rw_exit(&iph->iph_lock);
1741 		return (spdsock_dump_finish(ss, EAGAIN));
1742 	}
1743 
1744 	rule = spdsock_dump_next_rule(ss, iph);
1745 
1746 	if (!rule) {
1747 		rw_exit(&iph->iph_lock);
1748 		return (spdsock_dump_finish(ss, 0));
1749 	}
1750 
1751 	m = spdsock_encode_rule(req, rule, ss->spdsock_dump_cur_type,
1752 	    ss->spdsock_dump_cur_af, ss->spdsock_dump_name,
1753 	    ss->spdsock_dump_tunnel);
1754 	rw_exit(&iph->iph_lock);
1755 
1756 	if (m == NULL)
1757 		return (spdsock_dump_finish(ss, ENOMEM));
1758 	return (m);
1759 }
1760 
1761 /*
1762  * Dump records until we run into flow-control back-pressure.
1763  */
1764 static void
1765 spdsock_dump_some(queue_t *q, spdsock_t *ss)
1766 {
1767 	mblk_t *m, *dataind;
1768 
1769 	while ((ss->spdsock_dump_req != NULL) && canputnext(q)) {
1770 		m = spdsock_dump_next_record(ss);
1771 		if (m == NULL)
1772 			return;
1773 		dataind = allocb(sizeof (struct T_data_ind), BPRI_HI);
1774 		if (dataind == NULL) {
1775 			freemsg(m);
1776 			return;
1777 		}
1778 		dataind->b_cont = m;
1779 		dataind->b_wptr += sizeof (struct T_data_ind);
1780 		((struct T_data_ind *)dataind->b_rptr)->PRIM_type = T_DATA_IND;
1781 		((struct T_data_ind *)dataind->b_rptr)->MORE_flag = 0;
1782 		dataind->b_datap->db_type = M_PROTO;
1783 		putnext(q, dataind);
1784 	}
1785 }
1786 
1787 /*
1788  * Start dumping.
1789  * Format a start-of-dump record, and set up the stream and kick the rsrv
1790  * procedure to continue the job..
1791  */
1792 /* ARGSUSED */
1793 static void
1794 spdsock_dump(queue_t *q, ipsec_policy_head_t *iph, mblk_t *mp)
1795 {
1796 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
1797 	mblk_t *mr;
1798 
1799 	/* spdsock_parse() already NULL-terminated spdsock_dump_name. */
1800 	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
1801 		rw_enter(&tunnel_policy_lock, RW_READER);
1802 		ss->spdsock_dump_remaining_polheads = 1 +
1803 		    avl_numnodes(&tunnel_policies);
1804 		ss->spdsock_dump_tun_gen = tunnel_policy_gen;
1805 		rw_exit(&tunnel_policy_lock);
1806 		if (iph == ALL_ACTIVE_POLHEADS) {
1807 			iph = ipsec_system_policy();
1808 			ss->spdsock_dump_active = B_TRUE;
1809 		} else {
1810 			iph = ipsec_inactive_policy();
1811 			ss->spdsock_dump_active = B_FALSE;
1812 		}
1813 		ASSERT(ss->spdsock_dump_name[0] == '\0');
1814 	} else {
1815 		ss->spdsock_dump_remaining_polheads = 1;
1816 	}
1817 
1818 	rw_enter(&iph->iph_lock, RW_READER);
1819 
1820 	mr = spdsock_dump_ruleset(mp, iph, 0, 0);
1821 
1822 	if (!mr) {
1823 		rw_exit(&iph->iph_lock);
1824 		spdsock_error(q, mp, ENOMEM, 0);
1825 		return;
1826 	}
1827 
1828 	ss->spdsock_dump_req = mp;
1829 	ss->spdsock_dump_head = iph;
1830 	ss->spdsock_dump_gen = iph->iph_gen;
1831 	ss->spdsock_dump_cur_type = 0;
1832 	ss->spdsock_dump_cur_af = IPSEC_AF_V4;
1833 	ss->spdsock_dump_cur_rule = NULL;
1834 	ss->spdsock_dump_count = 0;
1835 	ss->spdsock_dump_cur_chain = 0;
1836 	rw_exit(&iph->iph_lock);
1837 
1838 	qreply(q, mr);
1839 	qenable(OTHERQ(q));
1840 }
1841 
1842 /* Do NOT consume a reference to ITP. */
1843 void
1844 spdsock_clone_node(ipsec_tun_pol_t *itp, void *ep)
1845 {
1846 	int *errptr = (int *)ep;
1847 
1848 	if (*errptr != 0)
1849 		return;	/* We've failed already for some reason. */
1850 	mutex_enter(&itp->itp_lock);
1851 	ITPF_CLONE(itp->itp_flags);
1852 	*errptr = ipsec_copy_polhead(itp->itp_policy, itp->itp_inactive);
1853 	mutex_exit(&itp->itp_lock);
1854 }
1855 
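/*
 * Handle SPD_CLONE: copy the active polhead(s) onto the inactive side,
 * either globally (and for every tunnel) or for one named tunnel.
 */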
1856 void
1857 spdsock_clone(queue_t *q, mblk_t *mp, spd_if_t *tunname)
1858 {
1859 	int error;
1860 	char *tname;
1861 	ipsec_tun_pol_t *itp;
1862 
1863 	if (tunname != NULL) {
1864 		tname = (char *)tunname->spd_if_name;
1865 		if (*tname == '\0') {
1866 			error = ipsec_clone_system_policy();
1867 			if (error == 0)
1868 				itp_walk(spdsock_clone_node, &error);
1869 		} else {
1870 			itp = get_tunnel_policy(tname);
1871 			if (itp == NULL) {
1872 				spdsock_error(q, mp, ENOENT, 0);
1873 				return;
1874 			}
1875 			spdsock_clone_node(itp, &error);
1876 			ITP_REFRELE(itp);
1877 		}
1878 	} else {
1879 		error = ipsec_clone_system_policy();
1880 	}
1881 
1882 	if (error != 0)
1883 		spdsock_error(q, mp, error, 0);
1884 	else
1885 		spd_echo(q, mp);
1886 }
1887 
1888 /*
1889  * Process a SPD_ALGLIST request. The caller expects separate alg entries
1890  * for AH authentication, ESP authentication, and ESP encryption.
1891  * The same distinction is then used when setting the min and max key
1892  * sizes when defining policies.
1893  */
1894 
1895 #define	SPDSOCK_AH_AUTH		0
1896 #define	SPDSOCK_ESP_AUTH	1
1897 #define	SPDSOCK_ESP_ENCR	2
1898 #define	SPDSOCK_NTYPES		3
1899 
1900 static const uint_t algattr[SPDSOCK_NTYPES] = {
1901 	SPD_ATTR_AH_AUTH,
1902 	SPD_ATTR_ESP_AUTH,
1903 	SPD_ATTR_ESP_ENCR
1904 };
1905 static const uint_t minbitsattr[SPDSOCK_NTYPES] = {
1906 	SPD_ATTR_AH_MINBITS,
1907 	SPD_ATTR_ESPA_MINBITS,
1908 	SPD_ATTR_ENCR_MINBITS
1909 };
1910 static const uint_t maxbitsattr[SPDSOCK_NTYPES] = {
1911 	SPD_ATTR_AH_MAXBITS,
1912 	SPD_ATTR_ESPA_MAXBITS,
1913 	SPD_ATTR_ENCR_MAXBITS
1914 };
1915 static const uint_t defbitsattr[SPDSOCK_NTYPES] = {
1916 	SPD_ATTR_AH_DEFBITS,
1917 	SPD_ATTR_ESPA_DEFBITS,
1918 	SPD_ATTR_ENCR_DEFBITS
1919 };
1920 static const uint_t incrbitsattr[SPDSOCK_NTYPES] = {
1921 	SPD_ATTR_AH_INCRBITS,
1922 	SPD_ATTR_ESPA_INCRBITS,
1923 	SPD_ATTR_ENCR_INCRBITS
1924 };
1925 
1926 #define	ATTRPERALG	6	/* fixed attributes per algorithm */
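/*
 * For illustration: each entry in the SPD_ALGLIST reply is exactly
 * ATTRPERALG attributes long.  An AH authentication entry, for example,
 * is emitted by EMITALGATTRS() below as:
 *
 *	1. { SPD_ATTR_AH_AUTH,		algid   }
 *	2. { SPD_ATTR_AH_MINBITS,	minbits }
 *	3. { SPD_ATTR_AH_MAXBITS,	maxbits }
 *	4. { SPD_ATTR_AH_DEFBITS,	defbits }
 *	5. { SPD_ATTR_AH_INCRBITS,	incr    }
 *	6. { SPD_ATTR_NEXT,		0       }
 */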
1927 
1928 void
1929 spdsock_alglist(queue_t *q, mblk_t *mp)
1930 {
1931 	uint_t algtype;
1932 	uint_t algidx;
1933 	uint_t algcount;
1934 	uint_t size;
1935 	mblk_t *m;
1936 	uint8_t *cur;
1937 	spd_msg_t *msg;
1938 	struct spd_ext_actions *act;
1939 	struct spd_attribute *attr;
1940 
1941 	mutex_enter(&alg_lock);
1942 
1943 	/*
1944 	 * The SPD client expects to receive separate entries for
1945 	 * AH authentication and ESP authentication supported algorithms.
1946 	 *
1947 	 * Don't return the "any" algorithms, if defined, as no
1948 	 * kernel policies can be set for these algorithms.
1949 	 */
1950 	algcount = 2 * ipsec_nalgs[IPSEC_ALG_AUTH] +
1951 	    ipsec_nalgs[IPSEC_ALG_ENCR];
1952 
1953 	if (ipsec_alglists[IPSEC_ALG_AUTH][SADB_AALG_NONE] != NULL)
1954 		algcount--;
1955 	if (ipsec_alglists[IPSEC_ALG_ENCR][SADB_EALG_NONE] != NULL)
1956 		algcount--;
1957 
1958 	/*
1959 	 * For each algorithm, we encode:
1960 	 * ALG / MINBITS / MAXBITS / DEFBITS / INCRBITS / {END, NEXT}
1961 	 */
1962 
1963 	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions) +
1964 	    ATTRPERALG * sizeof (struct spd_attribute) * algcount;
1965 
1966 	ASSERT(ALIGNED64(size));
1967 
1968 	m = allocb(size, BPRI_HI);
1969 	if (m == NULL) {
1970 		mutex_exit(&alg_lock);
1971 		spdsock_error(q, mp, ENOMEM, 0);
1972 		return;
1973 	}
1974 
1975 	m->b_wptr = m->b_rptr + size;
1976 	cur = m->b_rptr;
1977 
1978 	msg = (spd_msg_t *)cur;
1979 	bcopy(mp->b_rptr, cur, sizeof (*msg));
1980 
1981 	msg->spd_msg_len = SPD_8TO64(size);
1982 	msg->spd_msg_errno = 0;
1983 	msg->spd_msg_diagnostic = 0;
1984 
1985 	cur += sizeof (*msg);
1986 
1987 	act = (struct spd_ext_actions *)cur;
1988 	cur += sizeof (*act);
1989 
1990 	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
1991 	act->spd_actions_exttype = SPD_EXT_ACTION;
1992 	act->spd_actions_count = algcount;
1993 	act->spd_actions_reserved = 0;
1994 
1995 	attr = (struct spd_attribute *)cur;
1996 
1997 #define	EMIT(tag, value) {					\
1998 		attr->spd_attr_tag = (tag); 			\
1999 		attr->spd_attr_value = (value); 		\
2000 		attr++;			  			\
2001 	}
2002 
2003 	/*
2004 	 * If you change the number of EMIT's here, change
2005 	 * ATTRPERALG above to match
2006 	 */
2007 #define	EMITALGATTRS(_type) {					\
2008 		EMIT(algattr[_type], algid); 		/* 1 */	\
2009 		EMIT(minbitsattr[_type], minbits);	/* 2 */	\
2010 		EMIT(maxbitsattr[_type], maxbits);	/* 3 */	\
2011 		EMIT(defbitsattr[_type], defbits);	/* 4 */	\
2012 		EMIT(incrbitsattr[_type], incr);	/* 5 */	\
2013 		EMIT(SPD_ATTR_NEXT, 0);			/* 6 */	\
2014 	}
2015 
2016 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2017 		for (algidx = 0; algidx < ipsec_nalgs[algtype]; algidx++) {
2018 			int algid = ipsec_sortlist[algtype][algidx];
2019 			ipsec_alginfo_t *alg = ipsec_alglists[algtype][algid];
2020 			uint_t minbits = alg->alg_minbits;
2021 			uint_t maxbits = alg->alg_maxbits;
2022 			uint_t defbits = alg->alg_default_bits;
2023 			uint_t incr = alg->alg_increment;
2024 
2025 			if (algtype == IPSEC_ALG_AUTH) {
2026 				if (algid == SADB_AALG_NONE)
2027 					continue;
2028 				EMITALGATTRS(SPDSOCK_AH_AUTH);
2029 				EMITALGATTRS(SPDSOCK_ESP_AUTH);
2030 			} else {
2031 				if (algid == SADB_EALG_NONE)
2032 					continue;
2033 				ASSERT(algtype == IPSEC_ALG_ENCR);
2034 				EMITALGATTRS(SPDSOCK_ESP_ENCR);
2035 			}
2036 		}
2037 	}
2038 
2039 	mutex_exit(&alg_lock);
2040 
2041 #undef EMITALGATTRS
2042 #undef EMIT
2043 #undef ATTRPERALG
2044 
2045 	attr--;
2046 	attr->spd_attr_tag = SPD_ATTR_END;
2047 
2048 	freemsg(mp);
2049 	qreply(q, m);
2050 }
2051 
2052 /*
2053  * Process an SPD_DUMPALGS request.
2054  */
2055 
2056 #define	ATTRPERALG	7	/* fixed attributes per algorithm */
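/*
 * For illustration: each algorithm in the SPD_DUMPALGS reply consists of
 * the ATTRPERALG fixed attributes (ALG_ID, ALG_PROTO, ALG_INCRBITS,
 * ALG_NKEYSIZES, ALG_NBLOCKSIZES, ALG_MECHNAME and the trailing
 * SPD_ATTR_NEXT), plus one ALG_KEYSIZE attribute per key size, one
 * ALG_BLOCKSIZE attribute per block size, and CRYPTO_MAX_MECH_NAME bytes
 * of mechanism name following ALG_MECHNAME.
 */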
2057 
2058 void
2059 spdsock_dumpalgs(queue_t *q, mblk_t *mp)
2060 {
2061 	uint_t algtype;
2062 	uint_t algidx;
2063 	uint_t size;
2064 	mblk_t *m;
2065 	uint8_t *cur;
2066 	spd_msg_t *msg;
2067 	struct spd_ext_actions *act;
2068 	struct spd_attribute *attr;
2069 	ipsec_alginfo_t *alg;
2070 	uint_t algid;
2071 	uint_t i;
2072 	uint_t alg_size;
2073 
2074 	mutex_enter(&alg_lock);
2075 
2076 	/*
2077 	 * For each algorithm, we encode:
2080 	 * ALG_ID / ALG_PROTO / ALG_INCRBITS / ALG_NKEYSIZES / ALG_KEYSIZE*
2081 	 * ALG_NBLOCKSIZES / ALG_BLOCKSIZE* / ALG_MECHNAME / {END, NEXT}
2082 	 */
2083 
2084 	/*
2085 	 * Compute the size of the SPD message.
2086 	 */
2087 	size = sizeof (spd_msg_t) + sizeof (struct spd_ext_actions);
2088 
2089 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2090 		for (algidx = 0; algidx < ipsec_nalgs[algtype]; algidx++) {
2091 			algid = ipsec_sortlist[algtype][algidx];
2092 			alg = ipsec_alglists[algtype][algid];
2093 			alg_size = sizeof (struct spd_attribute) *
2094 			    (ATTRPERALG + alg->alg_nkey_sizes +
2095 			    alg->alg_nblock_sizes) + CRYPTO_MAX_MECH_NAME;
2096 			size += alg_size;
2097 		}
2098 	}
2099 
2100 	ASSERT(ALIGNED64(size));
2101 
2102 	m = allocb(size, BPRI_HI);
2103 	if (m == NULL) {
2104 		mutex_exit(&alg_lock);
2105 		spdsock_error(q, mp, ENOMEM, 0);
2106 		return;
2107 	}
2108 
2109 	m->b_wptr = m->b_rptr + size;
2110 	cur = m->b_rptr;
2111 
2112 	msg = (spd_msg_t *)cur;
2113 	bcopy(mp->b_rptr, cur, sizeof (*msg));
2114 
2115 	msg->spd_msg_len = SPD_8TO64(size);
2116 	msg->spd_msg_errno = 0;
2117 	msg->spd_msg_diagnostic = 0;
2118 
2119 	cur += sizeof (*msg);
2120 
2121 	act = (struct spd_ext_actions *)cur;
2122 	cur += sizeof (*act);
2123 
2124 	act->spd_actions_len = SPD_8TO64(size - sizeof (spd_msg_t));
2125 	act->spd_actions_exttype = SPD_EXT_ACTION;
2126 	act->spd_actions_count = ipsec_nalgs[IPSEC_ALG_AUTH] +
2127 	    ipsec_nalgs[IPSEC_ALG_ENCR];
2128 	act->spd_actions_reserved = 0;
2129 
2130 	attr = (struct spd_attribute *)cur;
2131 
2132 #define	EMIT(tag, value) {					\
2133 		attr->spd_attr_tag = (tag); 			\
2134 		attr->spd_attr_value = (value); 		\
2135 		attr++;			  			\
2136 	}
2137 
2138 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
2139 		for (algidx = 0; algidx < ipsec_nalgs[algtype]; algidx++) {
2140 
2141 			algid = ipsec_sortlist[algtype][algidx];
2142 			alg = ipsec_alglists[algtype][algid];
2143 
2144 			/*
2145 			 * If you change the number of EMIT's here, change
2146 			 * ATTRPERALG above to match
2147 			 */
2148 			EMIT(SPD_ATTR_ALG_ID, algid);
2149 			EMIT(SPD_ATTR_ALG_PROTO, algproto[algtype]);
2150 			EMIT(SPD_ATTR_ALG_INCRBITS, alg->alg_increment);
2151 
2152 			EMIT(SPD_ATTR_ALG_NKEYSIZES, alg->alg_nkey_sizes);
2153 			for (i = 0; i < alg->alg_nkey_sizes; i++)
2154 				EMIT(SPD_ATTR_ALG_KEYSIZE,
2155 				    alg->alg_key_sizes[i]);
2156 
2157 			EMIT(SPD_ATTR_ALG_NBLOCKSIZES, alg->alg_nblock_sizes);
2158 			for (i = 0; i < alg->alg_nblock_sizes; i++)
2159 				EMIT(SPD_ATTR_ALG_BLOCKSIZE,
2160 				    alg->alg_block_sizes[i]);
2161 
2162 			EMIT(SPD_ATTR_ALG_MECHNAME, CRYPTO_MAX_MECH_NAME);
2163 			bcopy(alg->alg_mech_name, attr, CRYPTO_MAX_MECH_NAME);
2164 			attr = (struct spd_attribute *)((char *)attr +
2165 			    CRYPTO_MAX_MECH_NAME);
2166 
2167 			EMIT(SPD_ATTR_NEXT, 0);
2168 		}
2169 	}
2170 
2171 	mutex_exit(&alg_lock);
2172 
2173 #undef EMITALGATTRS
2174 #undef EMIT
2175 #undef ATTRPERALG
2176 
2177 	attr--;
2178 	attr->spd_attr_tag = SPD_ATTR_END;
2179 
2180 	freemsg(mp);
2181 	qreply(q, m);
2182 }
2183 
2184 /*
2185  * Do the actual work of processing an SPD_UPDATEALGS request.  Can be
2186  * invoked either for a cached request once IPsec finishes loading, or
2187  * directly when a request arrives while IPsec is already loaded.
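 *
 * The request carries a single SPD_EXT_ACTION extension whose attribute
 * stream describes each algorithm (ALG_ID, ALG_PROTO, ALG_INCRBITS, the
 * key- and block-size lists, and ALG_MECHNAME), each description
 * terminated by SPD_ATTR_NEXT; PROTO_ID/PROTO_EXEC_MODE pairs select the
 * per-protocol execution modes.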
2188  */
2189 static void
2190 spdsock_do_updatealg(spd_ext_t *extv[], int *diag)
2191 {
2192 	struct spd_ext_actions *actp;
2193 	struct spd_attribute *attr, *endattr;
2194 	uint64_t *start, *end;
2195 	ipsec_alginfo_t *alg = NULL;
2196 	ipsec_algtype_t alg_type = 0;
2197 	boolean_t skip_alg = B_TRUE, doing_proto = B_FALSE;
2198 	uint_t i, cur_key, cur_block, algid;
2199 
2200 	*diag = -1;
2201 	ASSERT(MUTEX_HELD(&spdsock_alg_lock));
2202 
2203 	/* parse the message, building the list of algorithms */
2204 
2205 	actp = (struct spd_ext_actions *)extv[SPD_EXT_ACTION];
2206 	if (actp == NULL) {
2207 		*diag = SPD_DIAGNOSTIC_NO_ACTION_EXT;
2208 		return;
2209 	}
2210 
2211 	start = (uint64_t *)actp;
2212 	end = (start + actp->spd_actions_len);
2213 	endattr = (struct spd_attribute *)end;
2214 	attr = (struct spd_attribute *)&actp[1];
2215 
2216 	bzero(spdsock_algs, IPSEC_NALGTYPES * IPSEC_MAX_ALGS *
2217 	    sizeof (ipsec_alginfo_t *));
2218 
2219 	alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
2220 
2221 #define	ALG_KEY_SIZES(a)   (((a)->alg_nkey_sizes + 1) * sizeof (uint16_t))
2222 #define	ALG_BLOCK_SIZES(a) (((a)->alg_nblock_sizes + 1) * sizeof (uint16_t))
2223 
2224 	while (attr < endattr) {
2225 		switch (attr->spd_attr_tag) {
2226 		case SPD_ATTR_NOP:
2227 		case SPD_ATTR_EMPTY:
2228 			break;
2229 		case SPD_ATTR_END:
2230 			attr = endattr;
2231 			/* FALLTHRU */
2232 		case SPD_ATTR_NEXT:
2233 			if (doing_proto) {
2234 				doing_proto = B_FALSE;
2235 				break;
2236 			}
2237 			if (skip_alg) {
2238 				ipsec_alg_free(alg);
2239 			} else {
2240 				ipsec_alg_free(
2241 				    spdsock_algs[alg_type][alg->alg_id]);
2242 				spdsock_algs[alg_type][alg->alg_id] = alg;
2243 			}
2244 			alg = kmem_zalloc(sizeof (*alg), KM_SLEEP);
2245 			break;
2246 
2247 		case SPD_ATTR_ALG_ID:
2248 			if (attr->spd_attr_value >= IPSEC_MAX_ALGS) {
2249 				ss1dbg(("spdsock_do_updatealg: "
2250 				    "invalid alg id %d\n",
2251 				    attr->spd_attr_value));
2252 				*diag = SPD_DIAGNOSTIC_ALG_ID_RANGE;
2253 				goto bail;
2254 			}
2255 			alg->alg_id = attr->spd_attr_value;
2256 			break;
2257 
2258 		case SPD_ATTR_ALG_PROTO:
2259 			/* find the alg type */
2260 			for (i = 0; i < NALGPROTOS; i++)
2261 				if (algproto[i] == attr->spd_attr_value)
2262 					break;
2263 			skip_alg = (i == NALGPROTOS);
2264 			if (!skip_alg)
2265 				alg_type = i;
2266 			break;
2267 
2268 		case SPD_ATTR_ALG_INCRBITS:
2269 			alg->alg_increment = attr->spd_attr_value;
2270 			break;
2271 
2272 		case SPD_ATTR_ALG_NKEYSIZES:
2273 			if (alg->alg_key_sizes != NULL) {
2274 				kmem_free(alg->alg_key_sizes,
2275 				    ALG_KEY_SIZES(alg));
2276 			}
2277 			alg->alg_nkey_sizes = attr->spd_attr_value;
2278 			/*
2279 			 * Allocate room for the trailing zero key size
2280 			 * value as well.
2281 			 */
2282 			alg->alg_key_sizes = kmem_zalloc(ALG_KEY_SIZES(alg),
2283 			    KM_SLEEP);
2284 			cur_key = 0;
2285 			break;
2286 
2287 		case SPD_ATTR_ALG_KEYSIZE:
2288 			if (alg->alg_key_sizes == NULL ||
2289 			    cur_key >= alg->alg_nkey_sizes) {
2290 				ss1dbg(("spdsock_do_updatealg: "
2291 					"too many key sizes\n"));
2292 				*diag = SPD_DIAGNOSTIC_ALG_NUM_KEY_SIZES;
2293 				goto bail;
2294 			}
2295 			alg->alg_key_sizes[cur_key++] = attr->spd_attr_value;
2296 			break;
2297 
2298 		case SPD_ATTR_ALG_NBLOCKSIZES:
2299 			if (alg->alg_block_sizes != NULL) {
2300 				kmem_free(alg->alg_block_sizes,
2301 				    ALG_BLOCK_SIZES(alg));
2302 			}
2303 			alg->alg_nblock_sizes = attr->spd_attr_value;
2304 			/*
2305 			 * Allocate room for the trailing zero block size
2306 			 * value as well.
2307 			 */
2308 			alg->alg_block_sizes = kmem_zalloc(ALG_BLOCK_SIZES(alg),
2309 			    KM_SLEEP);
2310 			cur_block = 0;
2311 			break;
2312 
2313 		case SPD_ATTR_ALG_BLOCKSIZE:
2314 			if (alg->alg_block_sizes == NULL ||
2315 			    cur_block >= alg->alg_nblock_sizes) {
2316 				ss1dbg(("spdsock_do_updatealg: "
2317 					"too many block sizes\n"));
2318 				*diag = SPD_DIAGNOSTIC_ALG_NUM_BLOCK_SIZES;
2319 				goto bail;
2320 			}
2321 			alg->alg_block_sizes[cur_block++] =
2322 			    attr->spd_attr_value;
2323 			break;
2324 
2325 		case SPD_ATTR_ALG_MECHNAME: {
2326 			char *mech_name;
2327 
2328 			if (attr->spd_attr_value > CRYPTO_MAX_MECH_NAME) {
2329 				ss1dbg(("spdsock_do_updatealg: "
2330 					"mech name too long\n"));
2331 				*diag = SPD_DIAGNOSTIC_ALG_MECH_NAME_LEN;
2332 				goto bail;
2333 			}
2334 			mech_name = (char *)(attr + 1);
2335 			bcopy(mech_name, alg->alg_mech_name,
2336 			    attr->spd_attr_value);
2337 			alg->alg_mech_name[CRYPTO_MAX_MECH_NAME-1] = '\0';
2338 			attr = (struct spd_attribute *)((char *)attr +
2339 			    attr->spd_attr_value);
2340 			break;
2341 		}
2342 
2343 		case SPD_ATTR_PROTO_ID:
2344 			doing_proto = B_TRUE;
2345 			for (i = 0; i < NALGPROTOS; i++) {
2346 				if (algproto[i] == attr->spd_attr_value) {
2347 					alg_type = i;
2348 					break;
2349 				}
2350 			}
2351 			break;
2352 
2353 		case SPD_ATTR_PROTO_EXEC_MODE:
2354 			if (!doing_proto)
2355 				break;
2356 			for (i = 0; i < NEXECMODES; i++) {
2357 				if (execmodes[i] == attr->spd_attr_value) {
2358 					spdsock_algs_exec_mode[alg_type] = i;
2359 					break;
2360 				}
2361 			}
2362 			break;
2363 		}
2364 		attr++;
2365 	}
2366 
2367 #undef	ALG_KEY_SIZES
2368 #undef	ALG_BLOCK_SIZES
2369 
2370 	/* update the algorithm tables */
2371 	spdsock_merge_algs();
2372 bail:
2373 	/* cleanup */
2374 	ipsec_alg_free(alg);
2375 	for (alg_type = 0; alg_type < IPSEC_NALGTYPES; alg_type++)
2376 		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++)
2377 			if (spdsock_algs[alg_type][algid] != NULL)
2378 				ipsec_alg_free(spdsock_algs[alg_type][algid]);
2379 }
2380 
2381 /*
2382  * Process an SPD_UPDATEALGS request. If IPsec is not loaded, queue
2383  * the request until IPsec loads. If IPsec is loaded, act on it
2384  * immediately.
2385  */
2386 
2387 static void
2388 spdsock_updatealg(queue_t *q, mblk_t *mp, spd_ext_t *extv[])
2389 {
2390 	if (!ipsec_loaded()) {
2391 		/*
2392 		 * IPsec is not loaded, save request and return nicely,
2393 		 * the message will be processed once IPsec loads.
2394 		 */
2395 		mblk_t *new_mp;
2396 
2397 		/* last update message wins */
2398 		if ((new_mp = copymsg(mp)) == NULL) {
2399 			spdsock_error(q, mp, ENOMEM, 0);
2400 			return;
2401 		}
2402 		mutex_enter(&spdsock_alg_lock);
2403 		bcopy(extv, spdsock_extv_algs,
2404 		    sizeof (spd_ext_t *) * (SPD_EXT_MAX + 1));
2405 		if (spdsock_mp_algs != NULL)
2406 			freemsg(spdsock_mp_algs);
2407 		spdsock_mp_algs = mp;
2408 		spdsock_algs_pending = B_TRUE;
2409 		mutex_exit(&spdsock_alg_lock);
2410 
2411 		spd_echo(q, new_mp);
2412 	} else {
2413 		/*
2414 		 * IPsec is loaded, act on the message immediately.
2415 		 */
2416 		int diag;
2417 
2418 		mutex_enter(&spdsock_alg_lock);
2419 		spdsock_do_updatealg(extv, &diag);
2420 		mutex_exit(&spdsock_alg_lock);
2421 		if (diag == -1)
2422 			spd_echo(q, mp);
2423 		else
2424 			spdsock_diag(q, mp, diag);
2425 	}
2426 }
2427 
2428 /*
2429  * With a reference-held ill, dig down and find an instance of "tun",
2430  * assign its tunnel policy pointer (holding a reference on the itp), and
2431  * release the ill's reference when finished.
2432  *
2433  * We'll be messing with q_next, so be VERY careful.
2434  */
2435 static void
2436 find_tun_and_set_itp(ill_t *ill, ipsec_tun_pol_t *itp)
2437 {
2438 	queue_t *q;
2439 	tun_t *tun;
2440 
2441 	/* Don't bother if this ill is going away. */
2442 	if (ill->ill_flags & ILL_CONDEMNED) {
2443 		ill_refrele(ill);
2444 		return;
2445 	}
2446 
2448 	q = ill->ill_wq;
2449 	claimstr(q);	/* Lighter-weight than freezestr(). */
2450 
2451 	do {
2452 		/* Use strcmp() because "tun" is bounded. */
2453 		if (strcmp(q->q_qinfo->qi_minfo->mi_idname, "tun") == 0) {
2454 			/* Aha!  Got it. */
2455 			tun = (tun_t *)q->q_ptr;
2456 			if (tun != NULL) {
2457 				mutex_enter(&tun->tun_lock);
2458 				if (tun->tun_itp != itp) {
2459 					ASSERT(tun->tun_itp == NULL);
2460 					ITP_REFHOLD(itp);
2461 					tun->tun_itp = itp;
2462 				}
2463 				mutex_exit(&tun->tun_lock);
2464 				goto release_and_return;
2465 			}
2466 			/*
2467 			 * Else assume this is some other module named "tun"
2468 			 * and move on, hoping we find one that actually has
2469 			 * something in q_ptr.
2470 			 */
2471 		}
2472 		q = q->q_next;
2473 	} while (q != NULL);
2474 
2475 release_and_return:
2476 	releasestr(ill->ill_wq);
2477 	ill_refrele(ill);
2478 }
2479 
2480 /*
2481  * Sort through the mess of polhead options to retrieve an appropriate one.
2482  * Returns NULL if we send an spdsock error.  Returns a valid pointer if we
2483  * found a valid polhead.  Returns ALL_ACTIVE_POLHEADS (aka. -1) or
2484  * ALL_INACTIVE_POLHEADS (aka. -2) if the message calls for the operation
2485  * to act on ALL policy heads.
2486  */
2487 static ipsec_policy_head_t *
2488 get_appropriate_polhead(queue_t *q, mblk_t *mp, spd_if_t *tunname, int spdid,
2489     int msgtype, ipsec_tun_pol_t **itpp)
2490 {
2491 	ipsec_tun_pol_t *itp;
2492 	ipsec_policy_head_t *iph;
2493 	int errno;
2494 	char *tname;
2495 	boolean_t active;
2496 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
2497 	uint64_t gen;	/* Placeholder */
2498 	ill_t *v4, *v6;
2499 
2500 	active = (spdid == SPD_ACTIVE);
2501 	*itpp = NULL;
2502 	if (!active && spdid != SPD_STANDBY) {
2503 		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_SPDID);
2504 		return (NULL);
2505 	}
2506 
2507 	if (tunname != NULL) {
2508 		/* Acting on a tunnel's SPD. */
2509 		tname = (char *)tunname->spd_if_name;
2510 		if (*tname == '\0') {
2511 			/* Handle all-polhead cases here. */
2512 			if (msgtype != SPD_FLUSH && msgtype != SPD_DUMP) {
2513 				spdsock_diag(q, mp,
2514 				    SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
2515 				return (NULL);
2516 			}
2517 			return (active ? ALL_ACTIVE_POLHEADS :
2518 			    ALL_INACTIVE_POLHEADS);
2519 		}
2520 
2521 		itp = get_tunnel_policy(tname);
2522 		if (itp == NULL) {
2523 			if (msgtype != SPD_ADDRULE) {
2524 				/* "Tunnel not found" */
2525 				spdsock_error(q, mp, ENOENT, 0);
2526 				return (NULL);
2527 			}
2528 
2529 			errno = 0;
2530 			itp = create_tunnel_policy(tname, &errno, &gen);
2531 			if (itp == NULL) {
2532 				/*
2533 				 * Something very bad happened, most likely
2534 				 * ENOMEM.  Return an indicator.
2535 				 */
2536 				spdsock_error(q, mp, errno, 0);
2537 				return (NULL);
2538 			}
2539 		}
2540 		/*
2541 		 * Troll the plumbed tunnels and see if we have a
2542 		 * match.  We need to do this always in case we add
2543 		 * policy AFTER plumbing a tunnel.
2544 		 */
2545 		v4 = ill_lookup_on_name(tname, B_FALSE, B_FALSE, NULL,
2546 		    NULL, NULL, &errno, NULL);
2547 		if (v4 != NULL)
2548 			find_tun_and_set_itp(v4, itp);
2549 		v6 = ill_lookup_on_name(tname, B_FALSE, B_TRUE, NULL,
2550 		    NULL, NULL, &errno, NULL);
2551 		if (v6 != NULL)
2552 			find_tun_and_set_itp(v6, itp);
2553 		ASSERT(itp != NULL);
2554 		*itpp = itp;
2555 		/* For spdsock dump state, set the polhead's name. */
2556 		if (msgtype == SPD_DUMP) {
2557 			(void) strncpy(ss->spdsock_dump_name, tname, LIFNAMSIZ);
2558 			ss->spdsock_dump_tunnel = itp->itp_flags &
2559 			    (active ? ITPF_P_TUNNEL : ITPF_I_TUNNEL);
2560 		}
2561 	} else {
2562 		itp = NULL;
2563 		/* For spdsock dump state, indicate it's global policy. */
2564 		if (msgtype == SPD_DUMP)
2565 			ss->spdsock_dump_name[0] = '\0';
2566 	}
2567 
2568 	if (active)
2569 		iph = (itp == NULL) ? ipsec_system_policy() : itp->itp_policy;
2570 	else
2571 		iph = (itp == NULL) ? ipsec_inactive_policy() :
2572 		    itp->itp_inactive;
2573 
2574 	ASSERT(iph != NULL);
2575 	if (itp != NULL) {
2576 		IPPH_REFHOLD(iph);
2577 	}
2578 
2579 	return (iph);
2580 }
2581 
2582 static void
2583 spdsock_parse(queue_t *q, mblk_t *mp)
2584 {
2585 	spd_msg_t *spmsg;
2586 	spd_ext_t *extv[SPD_EXT_MAX + 1];
2587 	uint_t msgsize;
2588 	ipsec_policy_head_t *iph;
2589 	ipsec_tun_pol_t *itp;
2590 	spd_if_t *tunname;
2591 
2592 	/* Make sure nothing's below me. */
2593 	ASSERT(WR(q)->q_next == NULL);
2594 
2595 	spmsg = (spd_msg_t *)mp->b_rptr;
2596 
2597 	msgsize = SPD_64TO8(spmsg->spd_msg_len);
2598 
2599 	if (msgdsize(mp) != msgsize) {
2600 		/*
2601 		 * Message len incorrect w.r.t. actual size.  Send an error
2602 		 * (EMSGSIZE).	It may be necessary to massage things a
2603 		 * bit.	 For example, if the spd_msg_type is hosed,
2604 		 * I need to set it to SPD_RESERVED to get delivery to
2605 		 * do the right thing.	Then again, maybe just letting
2606 		 * the error delivery do the right thing is enough.
2607 		 */
2608 		ss2dbg(("mblk (%lu) and base (%d) message sizes don't jibe.\n",
2609 		    msgdsize(mp), msgsize));
2610 		spdsock_error(q, mp, EMSGSIZE, SPD_DIAGNOSTIC_NONE);
2611 		return;
2612 	}
2613 
2614 	if (msgsize > (uint_t)(mp->b_wptr - mp->b_rptr)) {
2615 		/* Get all message into one mblk. */
2616 		if (pullupmsg(mp, -1) == 0) {
2617 			/*
2618 			 * Something screwy happened.
2619 			 */
2620 			ss3dbg(("spdsock_parse: pullupmsg() failed.\n"));
2621 			return;
2622 		} else {
2623 			spmsg = (spd_msg_t *)mp->b_rptr;
2624 		}
2625 	}
2626 
2627 	switch (spdsock_get_ext(extv, spmsg, msgsize)) {
2628 	case KGE_DUP:
2629 		/* Handle duplicate extension. */
2630 		ss1dbg(("Got duplicate extension of type %d.\n",
2631 		    extv[0]->spd_ext_type));
2632 		spdsock_diag(q, mp, dup_ext_diag[extv[0]->spd_ext_type]);
2633 		return;
2634 	case KGE_UNK:
2635 		/* Handle unknown extension. */
2636 		ss1dbg(("Got unknown extension of type %d.\n",
2637 		    extv[0]->spd_ext_type));
2638 		spdsock_diag(q, mp, SPD_DIAGNOSTIC_UNKNOWN_EXT);
2639 		return;
2640 	case KGE_LEN:
2641 		/* Length error. */
2642 		ss1dbg(("Length %d on extension type %d overrun or 0.\n",
2643 		    extv[0]->spd_ext_len, extv[0]->spd_ext_type));
2644 		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_EXTLEN);
2645 		return;
2646 	case KGE_CHK:
2647 		/* Reality check failed. */
2648 		ss1dbg(("Reality check failed on extension type %d.\n",
2649 		    extv[0]->spd_ext_type));
2650 		spdsock_diag(q, mp, bad_ext_diag[extv[0]->spd_ext_type]);
2651 		return;
2652 	default:
2653 		/* Default case is no errors. */
2654 		break;
2655 	}
2656 
2657 	/*
2658 	 * Special-case SPD_UPDATEALGS so as not to load IPsec.
2659 	 */
2660 	if (!ipsec_loaded() && spmsg->spd_msg_type != SPD_UPDATEALGS) {
2661 		spdsock_t *ss = (spdsock_t *)q->q_ptr;
2662 
2663 		ASSERT(ss != NULL);
2664 		ipsec_loader_loadnow();
2665 		ss->spdsock_timeout_arg = mp;
2666 		ss->spdsock_timeout = qtimeout(q, spdsock_loadcheck,
2667 		    q, LOADCHECK_INTERVAL);
2668 		return;
2669 	}
2670 
2671 	/* First check for messages that need no polheads at all. */
2672 	switch (spmsg->spd_msg_type) {
2673 	case SPD_UPDATEALGS:
2674 		spdsock_updatealg(q, mp, extv);
2675 		return;
2676 	case SPD_ALGLIST:
2677 		spdsock_alglist(q, mp);
2678 		return;
2679 	case SPD_DUMPALGS:
2680 		spdsock_dumpalgs(q, mp);
2681 		return;
2682 	}
2683 
2684 	/*
2685 	 * Then check for ones that need both primary/secondary polheads,
2686 	 * finding the appropriate tunnel policy if need be.
2687 	 */
2688 	tunname = (spd_if_t *)extv[SPD_EXT_TUN_NAME];
2689 	switch (spmsg->spd_msg_type) {
2690 	case SPD_FLIP:
2691 		spdsock_flip(q, mp, tunname);
2692 		return;
2693 	case SPD_CLONE:
2694 		spdsock_clone(q, mp, tunname);
2695 		return;
2696 	}
2697 
2698 	/*
2699 	 * Finally, find ones that operate on exactly one polhead, or
2700 	 * "all polheads" of a given type (active/inactive).
2701 	 */
2702 	iph = get_appropriate_polhead(q, mp, tunname, spmsg->spd_msg_spdid,
2703 	    spmsg->spd_msg_type, &itp);
2704 	if (iph == NULL)
2705 		return;
2706 
2707 	/* All-polheads-ready operations. */
2708 	switch (spmsg->spd_msg_type) {
2709 	case SPD_FLUSH:
2710 		if (itp != NULL) {
2711 			mutex_enter(&itp->itp_lock);
2712 			if (spmsg->spd_msg_spdid == SPD_ACTIVE)
2713 				itp->itp_flags &= ~ITPF_PFLAGS;
2714 			else
2715 				itp->itp_flags &= ~ITPF_IFLAGS;
2716 			mutex_exit(&itp->itp_lock);
2717 			ITP_REFRELE(itp);
2718 		}
2719 		spdsock_flush(q, iph, mp);
2720 		return;
2721 	case SPD_DUMP:
2722 		if (itp != NULL)
2723 			ITP_REFRELE(itp);
2724 		spdsock_dump(q, iph, mp);
2725 		return;
2726 	}
2727 
2728 	if (iph == ALL_ACTIVE_POLHEADS || iph == ALL_INACTIVE_POLHEADS) {
2729 		spdsock_diag(q, mp, SPD_DIAGNOSTIC_NOT_GLOBAL_OP);
2730 		return;
2731 	}
2732 
2733 	/* Single-polhead-only operations. */
2734 	switch (spmsg->spd_msg_type) {
2735 	case SPD_ADDRULE:
2736 		spdsock_addrule(q, iph, mp, extv, itp);
2737 		break;
2738 	case SPD_DELETERULE:
2739 		spdsock_deleterule(q, iph, mp, extv, itp);
2740 		break;
2741 	case SPD_LOOKUP:
2742 		spdsock_lookup(q, iph, mp, extv, itp);
2743 		break;
2744 	default:
2745 		spdsock_diag(q, mp, SPD_DIAGNOSTIC_BAD_MSG_TYPE);
2746 		break;
2747 	}
2748 
2749 	IPPH_REFRELE(iph);
2750 	if (itp != NULL)
2751 		ITP_REFRELE(itp);
2752 }
2753 
2754 /*
2755  * If an algorithm mapping was received before IPsec was loaded, process it.
2756  * Called from the IPsec loader.
2757  */
2758 void
2759 spdsock_update_pending_algs(void)
2760 {
2761 	mutex_enter(&spdsock_alg_lock);
2762 	if (spdsock_algs_pending) {
2763 		int diag;
2764 		spdsock_do_updatealg(spdsock_extv_algs, &diag);
2765 		spdsock_algs_pending = B_FALSE;
2766 	}
2767 	mutex_exit(&spdsock_alg_lock);
2768 }
2769 
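/*
 * qtimeout() callback armed by spdsock_parse() while waiting for IPsec to
 * finish loading.  Re-parses the deferred request, or fails it with
 * EPROTONOSUPPORT if the load failed.
 */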
2770 static void
2771 spdsock_loadcheck(void *arg)
2772 {
2773 	queue_t *q = (queue_t *)arg;
2774 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
2775 	mblk_t *mp;
2776 
2777 	ASSERT(ss != NULL);
2778 
2779 	ss->spdsock_timeout = 0;
2780 	mp = ss->spdsock_timeout_arg;
2781 	ASSERT(mp != NULL);
2782 	ss->spdsock_timeout_arg = NULL;
2783 	if (ipsec_failed())
2784 		spdsock_error(q, mp, EPROTONOSUPPORT, 0);
2785 	else
2786 		spdsock_parse(q, mp);
2787 }
2788 
2789 /*
2790  * Copy relevant state bits.
2791  */
2792 static void
2793 spdsock_copy_info(struct T_info_ack *tap, spdsock_t *ss)
2794 {
2795 	*tap = spdsock_g_t_info_ack;
2796 	tap->CURRENT_state = ss->spdsock_state;
2797 	tap->OPT_size = spdsock_max_optsize;
2798 }
2799 
2800 /*
2801  * This routine responds to T_CAPABILITY_REQ messages.  It is called by
2802  * spdsock_wput_other.  Much of the T_CAPABILITY_ACK information is copied
2803  * from spdsock_g_t_info_ack.  The current state of the stream is copied
2804  * from spdsock_state.
2805  */
2806 static void
2807 spdsock_capability_req(queue_t *q, mblk_t *mp)
2808 {
2809 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
2810 	t_uscalar_t cap_bits1;
2811 	struct T_capability_ack	*tcap;
2812 
2813 	cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
2814 
2815 	mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
2816 	    mp->b_datap->db_type, T_CAPABILITY_ACK);
2817 	if (mp == NULL)
2818 		return;
2819 
2820 	tcap = (struct T_capability_ack *)mp->b_rptr;
2821 	tcap->CAP_bits1 = 0;
2822 
2823 	if (cap_bits1 & TC1_INFO) {
2824 		spdsock_copy_info(&tcap->INFO_ack, ss);
2825 		tcap->CAP_bits1 |= TC1_INFO;
2826 	}
2827 
2828 	qreply(q, mp);
2829 }
2830 
2831 /*
2832  * This routine responds to T_INFO_REQ messages. It is called by
2833  * spdsock_wput_other.
2834  * Most of the T_INFO_ACK information is copied from spdsock_g_t_info_ack.
2835  * The current state of the stream is copied from spdsock_state.
2836  */
2837 static void
2838 spdsock_info_req(queue_t *q, mblk_t *mp)
2841 {
2842 	mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
2843 	    T_INFO_ACK);
2844 	if (mp == NULL)
2845 		return;
2846 	spdsock_copy_info((struct T_info_ack *)mp->b_rptr,
2847 	    (spdsock_t *)q->q_ptr);
2848 	qreply(q, mp);
2849 }
2850 
2851 /*
2852  * spdsock_err_ack. This routine creates a
2853  * T_ERROR_ACK message and passes it
2854  * upstream.
2855  */
2856 static void
2857 spdsock_err_ack(queue_t *q, mblk_t *mp, int t_error, int sys_error)
2862 {
2863 	if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
2864 		qreply(q, mp);
2865 }
2866 
2867 /*
2868  * This routine retrieves the current status of socket options.
2869  * It returns the size of the option retrieved.
2870  */
2871 /* ARGSUSED */
2872 int
2873 spdsock_opt_get(queue_t *q, int level, int name, uchar_t *ptr)
2874 {
2875 	int *i1 = (int *)ptr;
2876 
2877 	switch (level) {
2878 	case SOL_SOCKET:
2879 		switch (name) {
2880 		case SO_TYPE:
2881 			*i1 = SOCK_RAW;
2882 			break;
2883 		/*
2884 		 * The following two items can be manipulated,
2885 		 * but changing them should do nothing.
2886 		 */
2887 		case SO_SNDBUF:
2888 			*i1 = (int)q->q_hiwat;
2889 			break;
2890 		case SO_RCVBUF:
2891 			*i1 = (int)(RD(q)->q_hiwat);
2892 			break;
2893 		}
2894 		break;
2895 	default:
2896 		return (0);
2897 	}
2898 	return (sizeof (int));
2899 }
2900 
2901 /*
2902  * This routine sets socket options.
2903  */
2904 /* ARGSUSED */
2905 int
2906 spdsock_opt_set(queue_t *q, uint_t mgmt_flags, int level, int name,
2907     uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
2908     void *thisdg_attrs, cred_t *cr, mblk_t *mblk)
2909 {
2910 	int *i1 = (int *)invalp;
2911 
2912 	switch (level) {
2913 	case SOL_SOCKET:
2914 		switch (name) {
2915 		case SO_SNDBUF:
2916 			if (*i1 > spdsock_max_buf)
2917 				return (ENOBUFS);
2918 			q->q_hiwat = *i1;
2919 			break;
2920 		case SO_RCVBUF:
2921 			if (*i1 > spdsock_max_buf)
2922 				return (ENOBUFS);
2923 			RD(q)->q_hiwat = *i1;
2924 			(void) mi_set_sth_hiwat(RD(q), *i1);
2925 			break;
2926 		}
2927 		break;
2928 	}
2929 	return (0);
2930 }
2931 
2932 
2933 /*
2934  * Handle STREAMS messages.
2935  */
2936 static void
2937 spdsock_wput_other(queue_t *q, mblk_t *mp)
2938 {
2939 	struct iocblk *iocp;
2940 	int error;
2941 
2942 	switch (mp->b_datap->db_type) {
2943 	case M_PROTO:
2944 	case M_PCPROTO:
2945 		if ((mp->b_wptr - mp->b_rptr) < sizeof (long)) {
2946 			ss3dbg((
2947 			    "spdsock_wput_other: Not big enough M_PROTO\n"));
2948 			freemsg(mp);
2949 			return;
2950 		}
2951 		switch (((union T_primitives *)mp->b_rptr)->type) {
2952 		case T_CAPABILITY_REQ:
2953 			spdsock_capability_req(q, mp);
2954 			return;
2955 		case T_INFO_REQ:
2956 			spdsock_info_req(q, mp);
2957 			return;
2958 		case T_SVR4_OPTMGMT_REQ:
2959 			(void) svr4_optcom_req(q, mp, DB_CREDDEF(mp, kcred),
2960 			    &spdsock_opt_obj);
2961 			return;
2962 		case T_OPTMGMT_REQ:
2963 			(void) tpi_optcom_req(q, mp, DB_CREDDEF(mp, kcred),
2964 			    &spdsock_opt_obj);
2965 			return;
2966 		case T_DATA_REQ:
2967 		case T_EXDATA_REQ:
2968 		case T_ORDREL_REQ:
2969 			/* Illegal for spdsock. */
2970 			freemsg(mp);
2971 			(void) putnextctl1(RD(q), M_ERROR, EPROTO);
2972 			return;
2973 		default:
2974 			/* Not supported by spdsock. */
2975 			spdsock_err_ack(q, mp, TNOTSUPPORT, 0);
2976 			return;
2977 		}
2978 	case M_IOCTL:
2979 		iocp = (struct iocblk *)mp->b_rptr;
2980 		error = EINVAL;
2981 
2982 		switch (iocp->ioc_cmd) {
2983 		case ND_SET:
2984 		case ND_GET:
2985 			if (nd_getset(q, spdsock_g_nd, mp)) {
2986 				qreply(q, mp);
2987 				return;
2988 			} else
2989 				error = ENOENT;
2990 			/* FALLTHRU */
2991 		default:
2992 			miocnak(q, mp, 0, error);
2993 			return;
2994 		}
2995 	case M_FLUSH:
2996 		if (*mp->b_rptr & FLUSHW) {
2997 			flushq(q, FLUSHALL);
2998 			*mp->b_rptr &= ~FLUSHW;
2999 		}
3000 		if (*mp->b_rptr & FLUSHR) {
3001 			qreply(q, mp);
3002 			return;
3003 		}
3004 		/* Else FALLTHRU */
3005 	}
3006 
3007 	/* If we fell through, just black-hole the message. */
3008 	freemsg(mp);
3009 }
3010 
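/*
 * Write-side put procedure.  Queues everything while a dump is in
 * progress, discards raw M_DATA, strips a leading T_DATA_REQ from
 * M_(PC)PROTO messages before parsing, and hands all other STREAMS
 * traffic to spdsock_wput_other().
 */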
3011 static void
3012 spdsock_wput(queue_t *q, mblk_t *mp)
3013 {
3014 	uint8_t *rptr = mp->b_rptr;
3015 	mblk_t *mp1;
3016 	spdsock_t *ss = (spdsock_t *)q->q_ptr;
3017 
3018 	/*
3019 	 * If we're dumping, defer processing other messages until the
3020 	 * dump completes.
3021 	 */
3022 	if (ss->spdsock_dump_req != NULL) {
3023 		if (!putq(q, mp))
3024 			freemsg(mp);
3025 		return;
3026 	}
3027 
3028 	switch (mp->b_datap->db_type) {
3029 	case M_DATA:
3030 		/*
3031 		 * Silently discard.
3032 		 */
3033 		ss2dbg(("raw M_DATA in spdsock.\n"));
3034 		freemsg(mp);
3035 		return;
3036 	case M_PROTO:
3037 	case M_PCPROTO:
3038 		if ((mp->b_wptr - rptr) >= sizeof (struct T_data_req)) {
3039 			if (((union T_primitives *)rptr)->type == T_DATA_REQ) {
3040 				if ((mp1 = mp->b_cont) == NULL) {
3041 					/* No data after T_DATA_REQ. */
3042 					ss2dbg(("No data after DATA_REQ.\n"));
3043 					freemsg(mp);
3044 					return;
3045 				}
3046 				freeb(mp);
3047 				mp = mp1;
3048 				ss2dbg(("T_DATA_REQ\n"));
3049 				break;	/* Out of switch. */
3050 			}
3051 		}
3052 		/* FALLTHRU */
3053 	default:
3054 		ss3dbg(("In default wput case (%d %d).\n",
3055 		    mp->b_datap->db_type, ((union T_primitives *)rptr)->type));
3056 		spdsock_wput_other(q, mp);
3057 		return;
3058 	}
3059 
3060 	/* I now have a PF_POLICY message in an M_DATA block. */
3061 	spdsock_parse(q, mp);
3062 }
3063 
3064 /*
3065  * Device open procedure, called when new queue pair created.
3066  * We are passed the read-side queue.
3067  */
3068 /* ARGSUSED */
3069 static int
3070 spdsock_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
3071 {
3072 	spdsock_t *ss;
3073 	queue_t *oq = OTHERQ(q);
3074 	minor_t ssminor;
3075 
3076 	if (secpolicy_net_config(credp, B_FALSE) != 0)
3077 		return (EPERM);
3078 
3079 	if (q->q_ptr != NULL)
3080 		return (0);  /* Re-open of an already open instance. */
3081 
3082 	if (sflag & MODOPEN)
3083 		return (EINVAL);
3084 
3085 	ss2dbg(("Made it into PF_POLICY socket open.\n"));
3086 
3087 	ssminor = (minor_t)(uintptr_t)vmem_alloc(spdsock_vmem, 1, VM_NOSLEEP);
3088 	if (ssminor == 0)
3089 		return (ENOMEM);
3090 
3091 	ss = kmem_zalloc(sizeof (spdsock_t), KM_NOSLEEP);
3092 	if (ss == NULL) {
3093 		vmem_free(spdsock_vmem, (void *)(uintptr_t)ssminor, 1);
3094 		return (ENOMEM);
3095 	}
3096 
3097 	ss->spdsock_minor = ssminor;
3098 	ss->spdsock_state = TS_UNBND;
3099 	ss->spdsock_dump_req = NULL;
3100 
3101 	q->q_ptr = ss;
3102 	oq->q_ptr = ss;
3103 
3104 	q->q_hiwat = spdsock_recv_hiwat;
3105 
3106 	oq->q_hiwat = spdsock_xmit_hiwat;
3107 	oq->q_lowat = spdsock_xmit_lowat;
3108 
3109 	qprocson(q);
3110 	(void) mi_set_sth_hiwat(q, spdsock_recv_hiwat);
3111 
3112 	*devp = makedevice(getmajor(*devp), ss->spdsock_minor);
3113 	return (0);
3114 }
3115 
3116 /*
3117  * Read-side service procedure, invoked when we get back-enabled
3118  * when buffer space becomes available.
3119  *
3120  * Dump another chunk if we were dumping before; when we finish, kick
3121  * the write-side queue in case it's waiting for read queue space.
3122  */
3123 void
3124 spdsock_rsrv(queue_t *q)
3125 {
3126 	spdsock_t *ss = q->q_ptr;
3127 
3128 	if (ss->spdsock_dump_req != NULL)
3129 		spdsock_dump_some(q, ss);
3130 
3131 	if (ss->spdsock_dump_req == NULL)
3132 		qenable(OTHERQ(q));
3133 }
3134 
3135 /*
3136  * Write-side service procedure, invoked to process messages that were
3137  * deferred because they arrived while a dump was in progress.
3138  */
3139 void
3140 spdsock_wsrv(queue_t *q)
3141 {
3142 	spdsock_t *ss = q->q_ptr;
3143 	mblk_t *mp;
3144 
3145 	if (ss->spdsock_dump_req != NULL) {
3146 		qenable(OTHERQ(q));
3147 		return;
3148 	}
3149 
3150 	while ((mp = getq(q)) != NULL) {
3151 		if (ipsec_loaded()) {
3152 			spdsock_wput(q, mp);
3153 			if (ss->spdsock_dump_req != NULL)
3154 				return;
3155 		} else if (!ipsec_failed()) {
3156 			(void) putq(q, mp);
3157 		} else {
3158 			spdsock_error(q, mp, EPFNOSUPPORT, 0);
3159 		}
3160 	}
3161 }
3162 
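/*
 * Device close: cancel any outstanding load-check timeout, then free the
 * instance's minor number and per-instance state.
 */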
3163 static int
3164 spdsock_close(queue_t *q)
3165 {
3166 	spdsock_t *ss = q->q_ptr;
3167 
3168 	qprocsoff(q);
3169 
3170 	/* Safe assumption. */
3171 	ASSERT(ss != NULL);
3172 
3173 	if (ss->spdsock_timeout != 0)
3174 		(void) quntimeout(q, ss->spdsock_timeout);
3175 
3176 	ss3dbg(("Driver close, PF_POLICY socket is going away.\n"));
3177 
3178 	vmem_free(spdsock_vmem, (void *)(uintptr_t)ss->spdsock_minor, 1);
3179 
3180 	kmem_free(ss, sizeof (spdsock_t));
3181 	return (0);
3182 }
3183 
3184 /*
3185  * Merge the IPsec algorithms tables with the received algorithm information.
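 *
 * Three passes: resolve each new algorithm's KCF mechanism type (done
 * without holding alg_lock), unregister currently-defined algorithms that
 * are absent from the new tables, then register or swap in the new
 * entries and update the per-protocol execution modes.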
3186  */
3187 void
3188 spdsock_merge_algs(void)
3189 {
3190 	ipsec_alginfo_t *alg, *oalg;
3191 	ipsec_algtype_t algtype;
3192 	uint_t algidx, algid, nalgs;
3193 	crypto_mech_name_t *mechs;
3194 	uint_t mech_count, mech_idx;
3195 
3196 	ASSERT(MUTEX_HELD(&spdsock_alg_lock));
3197 
3198 	/*
3199 	 * Get the list of supported mechanisms from the crypto framework.
3200 	 * If a mechanism is supported by KCF, resolve its mechanism
3201 	 * id and mark it as being valid. This operation must be done
3202 	 * without holding alg_lock, since it can cause a provider
3203 	 * module to be loaded and the provider notification callback to
3204 	 * be invoked.
3205 	 */
3206 	mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
3207 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3208 		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
3209 			int algflags = 0;
3210 			crypto_mech_type_t mt = CRYPTO_MECHANISM_INVALID;
3211 
3212 			if ((alg = spdsock_algs[algtype][algid]) == NULL)
3213 				continue;
3214 
3215 			/*
3216 			 * The NULL encryption algorithm is a special
3217 			 * case because there are no mechanisms, yet
3218 			 * the algorithm is still valid.
3219 			 */
3220 			if (alg->alg_id == SADB_EALG_NULL) {
3221 				alg->alg_mech_type = CRYPTO_MECHANISM_INVALID;
3222 				alg->alg_flags = ALG_FLAG_VALID;
3223 				continue;
3224 			}
3225 
3226 			for (mech_idx = 0; mech_idx < mech_count; mech_idx++) {
3227 				if (strncmp(alg->alg_mech_name, mechs[mech_idx],
3228 				    CRYPTO_MAX_MECH_NAME) == 0) {
3229 					mt = crypto_mech2id(alg->alg_mech_name);
3230 					ASSERT(mt != CRYPTO_MECHANISM_INVALID);
3231 					algflags = ALG_FLAG_VALID;
3232 					break;
3233 				}
3234 			}
3235 			alg->alg_mech_type = mt;
3236 			alg->alg_flags = algflags;
3237 		}
3238 	}
3239 
3240 	mutex_enter(&alg_lock);
3241 
3242 	/*
3243 	 * For each algorithm currently defined, check if it is
3244 	 * present in the new tables created from the SPD_UPDATEALGS
3245 	 * message received from user-space.
3246 	 * Delete the algorithm entries that are currently defined
3247 	 * but not part of the new tables.
3248 	 */
3249 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3250 		nalgs = ipsec_nalgs[algtype];
3251 		for (algidx = 0; algidx < nalgs; algidx++) {
3252 			algid = ipsec_sortlist[algtype][algidx];
3253 			if (spdsock_algs[algtype][algid] == NULL)
3254 				ipsec_alg_unreg(algtype, algid);
3255 		}
3256 	}
3257 
3258 	/*
3259 	 * For each algorithm we just received, check if it is
3260 	 * present in the currently defined tables. If it is, swap
3261 	 * the entry with the one we just allocated.
3262 	 * If the new algorithm is not in the current tables,
3263 	 * add it.
3264 	 */
3265 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
3266 		for (algid = 0; algid < IPSEC_MAX_ALGS; algid++) {
3267 			if ((alg = spdsock_algs[algtype][algid]) == NULL)
3268 				continue;
3269 
3270 			if ((oalg = ipsec_alglists[algtype][algid]) == NULL) {
3271 				/*
3272 				 * New algorithm, add it to the algorithm
3273 				 * table.
3274 				 */
3275 				ipsec_alg_reg(algtype, alg);
3276 			} else {
3277 				/*
3278 				 * Algorithm is already in the table. Swap
3279 				 * the existing entry with the new one.
3280 				 */
3281 				ipsec_alg_fix_min_max(alg, algtype);
3282 				ipsec_alglists[algtype][algid] = alg;
3283 				ipsec_alg_free(oalg);
3284 			}
3285 			spdsock_algs[algtype][algid] = NULL;
3286 		}
3287 	}
3288 
3289 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++)
3290 		ipsec_algs_exec_mode[algtype] = spdsock_algs_exec_mode[algtype];
3291 
3292 	mutex_exit(&alg_lock);
3293 
3294 	crypto_free_mech_list(mechs, mech_count);
3295 
3296 	ipsecah_algs_changed();
3297 	ipsecesp_algs_changed();
3298 }
3299