xref: /illumos-gate/usr/src/uts/common/inet/ip/spd.c (revision fc256490629fe68815f7e0f23cf9b3545720cfac)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * IPsec Security Policy Database.
28  *
29  * This module maintains the SPD and provides routines used by ip and ip6
30  * to apply IPsec policy to inbound and outbound datagrams.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/stream.h>
35 #include <sys/stropts.h>
36 #include <sys/sysmacros.h>
37 #include <sys/strsubr.h>
38 #include <sys/strsun.h>
39 #include <sys/strlog.h>
41 #include <sys/cmn_err.h>
42 #include <sys/zone.h>
43 
44 #include <sys/systm.h>
45 #include <sys/param.h>
46 #include <sys/kmem.h>
47 #include <sys/ddi.h>
48 
49 #include <sys/crypto/api.h>
50 
51 #include <inet/common.h>
52 #include <inet/mi.h>
53 
54 #include <netinet/ip6.h>
55 #include <netinet/icmp6.h>
56 #include <netinet/udp.h>
57 
58 #include <inet/ip.h>
59 #include <inet/ip6.h>
60 
61 #include <net/pfkeyv2.h>
62 #include <net/pfpolicy.h>
63 #include <inet/sadb.h>
64 #include <inet/ipsec_impl.h>
65 
66 #include <inet/ip_impl.h>	/* For IP_MOD_ID */
67 
68 #include <inet/ipsecah.h>
69 #include <inet/ipsecesp.h>
70 #include <inet/ipdrop.h>
71 #include <inet/ipclassifier.h>
72 #include <inet/iptun.h>
73 #include <inet/iptun/iptun_impl.h>
74 
75 static void ipsec_update_present_flags(ipsec_stack_t *);
76 static ipsec_act_t *ipsec_act_wildcard_expand(ipsec_act_t *, uint_t *,
77     netstack_t *);
78 static mblk_t *ipsec_check_ipsecin_policy(mblk_t *, ipsec_policy_t *,
79     ipha_t *, ip6_t *, uint64_t, ip_recv_attr_t *, netstack_t *);
80 static void ipsec_action_free_table(ipsec_action_t *);
81 static void ipsec_action_reclaim(void *);
82 static void ipsec_action_reclaim_stack(netstack_t *);
83 static void ipsid_init(netstack_t *);
84 static void ipsid_fini(netstack_t *);
85 
86 /* sel_flags values for ipsec_init_inbound_sel(). */
87 #define	SEL_NONE	0x0000
88 #define	SEL_PORT_POLICY	0x0001
89 #define	SEL_IS_ICMP	0x0002
90 #define	SEL_TUNNEL_MODE	0x0004
91 #define	SEL_POST_FRAG	0x0008
92 
93 /* Return values for ipsec_init_inbound_sel(). */
94 typedef enum { SELRET_NOMEM, SELRET_BADPKT, SELRET_SUCCESS, SELRET_TUNFRAG}
95     selret_t;
96 
97 static selret_t ipsec_init_inbound_sel(ipsec_selector_t *, mblk_t *,
98     ipha_t *, ip6_t *, uint8_t);
99 
100 static boolean_t ipsec_check_ipsecin_action(ip_recv_attr_t *, mblk_t *,
101     struct ipsec_action_s *, ipha_t *ipha, ip6_t *ip6h, const char **,
102     kstat_named_t **, netstack_t *);
103 static void ipsec_unregister_prov_update(void);
104 static void ipsec_prov_update_callback_stack(uint32_t, void *, netstack_t *);
105 static boolean_t ipsec_compare_action(ipsec_policy_t *, ipsec_policy_t *);
106 static uint32_t selector_hash(ipsec_selector_t *, ipsec_policy_root_t *);
107 static boolean_t ipsec_kstat_init(ipsec_stack_t *);
108 static void ipsec_kstat_destroy(ipsec_stack_t *);
109 static int ipsec_free_tables(ipsec_stack_t *);
110 static int tunnel_compare(const void *, const void *);
111 static void ipsec_freemsg_chain(mblk_t *);
112 static void ip_drop_packet_chain(mblk_t *, boolean_t, ill_t *,
113     struct kstat_named *, ipdropper_t *);
119 
120 /*
121  * Selector hash table is statically sized at module load time.
122  * we default to 251 buckets, which is the largest prime number under 255
123  */
124 
125 #define	IPSEC_SPDHASH_DEFAULT 251
126 
127 /* SPD hash-size tunable per tunnel. */
128 #define	TUN_SPDHASH_DEFAULT 5
129 
130 uint32_t ipsec_spd_hashsize;
131 uint32_t tun_spd_hashsize;
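
/*
 * Both tunables may be overridden at boot, e.g. from /etc/system (this
 * file is built into the ip module, so the entries would look roughly
 * like the following; the values here are only illustrative):
 *
 *	set ip:ipsec_spd_hashsize = 499
 *	set ip:tun_spd_hashsize = 11
 *
 * A value of zero means "use the compiled-in default"; see
 * ipsec_stack_init().
 */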
132 
133 #define	IPSEC_SEL_NOHASH ((uint32_t)(~0))
134 
135 /*
136  * Handle global across all stack instances
137  */
138 static crypto_notify_handle_t prov_update_handle = NULL;
139 
140 static kmem_cache_t *ipsec_action_cache;
141 static kmem_cache_t *ipsec_sel_cache;
142 static kmem_cache_t *ipsec_pol_cache;
143 
144 /* Frag cache prototypes */
145 static void ipsec_fragcache_clean(ipsec_fragcache_t *, ipsec_stack_t *);
146 static ipsec_fragcache_entry_t *fragcache_delentry(int,
147     ipsec_fragcache_entry_t *, ipsec_fragcache_t *, ipsec_stack_t *);
148 boolean_t ipsec_fragcache_init(ipsec_fragcache_t *);
149 void ipsec_fragcache_uninit(ipsec_fragcache_t *, ipsec_stack_t *ipss);
150 mblk_t *ipsec_fragcache_add(ipsec_fragcache_t *, mblk_t *, mblk_t *,
151     int, ipsec_stack_t *);
152 
153 int ipsec_hdr_pullup_needed = 0;
154 int ipsec_weird_null_inbound_policy = 0;
155 
156 #define	ALGBITS_ROUND_DOWN(x, align)	(((x)/(align))*(align))
157 #define	ALGBITS_ROUND_UP(x, align)	ALGBITS_ROUND_DOWN((x)+(align)-1, align)
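
/*
 * For example, rounding key-size bit counts to a multiple of 8:
 *
 *	ALGBITS_ROUND_DOWN(129, 8) == 128
 *	ALGBITS_ROUND_UP(129, 8) == 136
 *
 * Both macros evaluate 'align' more than once, so they should only be
 * used with side-effect-free arguments.
 */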
158 
159 /*
160  * Inbound traffic should have matching identities for both SAs.
161  */
162 
163 #define	SA_IDS_MATCH(sa1, sa2) 						\
164 	(((sa1) == NULL) || ((sa2) == NULL) ||				\
165 	(((sa1)->ipsa_src_cid == (sa2)->ipsa_src_cid) &&		\
166 	    (((sa1)->ipsa_dst_cid == (sa2)->ipsa_dst_cid))))
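
/*
 * For example, for an inbound packet protected by both AH and ESP,
 * SA_IDS_MATCH(ah_sa, esp_sa) is B_TRUE when either SA pointer is NULL
 * or when both SAs carry the same source and destination identities
 * (ipsa_src_cid/ipsa_dst_cid).
 */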
167 
168 /*
169  * IPv6 Fragments
170  */
171 #define	IS_V6_FRAGMENT(ipp)	(ipp.ipp_fields & IPPF_FRAGHDR)
172 
173 /*
174  * Policy failure messages.
175  */
176 static char *ipsec_policy_failure_msgs[] = {
177 
178 	/* IPSEC_POLICY_NOT_NEEDED */
179 	"%s: Dropping the datagram because the incoming packet "
180 	"is %s, but the recipient expects clear; Source %s, "
181 	"Destination %s.\n",
182 
183 	/* IPSEC_POLICY_MISMATCH */
184 	"%s: Policy Failure for the incoming packet (%s); Source %s, "
185 	"Destination %s.\n",
186 
187 	/* IPSEC_POLICY_AUTH_NOT_NEEDED	*/
188 	"%s: Authentication present while not expected in the "
189 	"incoming %s packet; Source %s, Destination %s.\n",
190 
191 	/* IPSEC_POLICY_ENCR_NOT_NEEDED */
192 	"%s: Encryption present while not expected in the "
193 	"incoming %s packet; Source %s, Destination %s.\n",
194 
195 	/* IPSEC_POLICY_SE_NOT_NEEDED */
196 	"%s: Self-Encapsulation present while not expected in the "
197 	"incoming %s packet; Source %s, Destination %s.\n",
198 };
199 
200 /*
201  * General overviews:
202  *
203  * Locking:
204  *
205  *	All of the system policy structures are protected by a single
206  *	rwlock.  These structures are threaded in a
207  *	fairly complex fashion and are not expected to change on a
208  *	regular basis, so this should not cause scaling/contention
209  *	problems.  As a result, policy checks should (hopefully) be MT-hot.
210  *
211  * Allocation policy:
212  *
213  *	We use custom kmem cache types for the various
214  *	bits & pieces of the policy data structures.  All allocations
215  *	use KM_NOSLEEP instead of KM_SLEEP for policy allocation.  The
216  *	policy table is of potentially unbounded size, so we don't
217  *	want to provide a way to hog all system memory with policy
218  *	entries..
219  */
220 
221 /* Convenient functions for freeing or dropping a b_next linked mblk chain */
222 
223 /* Free all messages in an mblk chain */
224 static void
225 ipsec_freemsg_chain(mblk_t *mp)
226 {
227 	mblk_t *mpnext;
228 	while (mp != NULL) {
229 		ASSERT(mp->b_prev == NULL);
230 		mpnext = mp->b_next;
231 		mp->b_next = NULL;
232 		freemsg(mp);
233 		mp = mpnext;
234 	}
235 }
236 
237 /*
238  * ip_drop all messages in an mblk chain
239  * Can handle a b_next chain of ip_recv_attr_t mblks, or just a b_next chain
240  * of data.
241  */
242 static void
243 ip_drop_packet_chain(mblk_t *mp, boolean_t inbound, ill_t *ill,
244     struct kstat_named *counter, ipdropper_t *who_called)
245 {
246 	mblk_t *mpnext;
247 	while (mp != NULL) {
248 		ASSERT(mp->b_prev == NULL);
249 		mpnext = mp->b_next;
250 		mp->b_next = NULL;
251 		if (ip_recv_attr_is_mblk(mp))
252 			mp = ip_recv_attr_free_mblk(mp);
253 		ip_drop_packet(mp, inbound, ill, counter, who_called);
254 		mp = mpnext;
255 	}
256 }
257 
258 /*
259  * AVL tree comparison function.
260  * The in-kernel AVL code assumes unique keys for all objects.
261  * Since sometimes policy will duplicate rules, we may insert
262  * multiple rules with the same rule id, so we need a tie-breaker.
263  */
264 static int
265 ipsec_policy_cmpbyid(const void *a, const void *b)
266 {
267 	const ipsec_policy_t *ipa, *ipb;
268 	uint64_t idxa, idxb;
269 
270 	ipa = (const ipsec_policy_t *)a;
271 	ipb = (const ipsec_policy_t *)b;
272 	idxa = ipa->ipsp_index;
273 	idxb = ipb->ipsp_index;
274 
275 	if (idxa < idxb)
276 		return (-1);
277 	if (idxa > idxb)
278 		return (1);
279 	/*
280 	 * Tie-breaker #1: All installed policy rules have a non-NULL
281 	 * ipsp_sel (selector set), so an entry with a NULL ipsp_sel is not
282 	 * actually in-tree but rather a template node being used in
283 	 * an avl_find query; see ipsec_policy_delete().  This gives us
284 	 * a placeholder in the ordering just before the first entry with
285 	 * a key >= the one we're looking for, so we can walk forward from
286 	 * that point to get the remaining entries with the same id.
287 	 */
288 	if ((ipa->ipsp_sel == NULL) && (ipb->ipsp_sel != NULL))
289 		return (-1);
290 	if ((ipb->ipsp_sel == NULL) && (ipa->ipsp_sel != NULL))
291 		return (1);
292 	/*
293 	 * At most one of the arguments to the comparison should have a
294 	 * NULL selector pointer; if not, the tree is broken.
295 	 */
296 	ASSERT(ipa->ipsp_sel != NULL);
297 	ASSERT(ipb->ipsp_sel != NULL);
298 	/*
299 	 * Tie-breaker #2: use the virtual address of the policy node
300 	 * to arbitrarily break ties.  Since we use the new tree node in
301 	 * the avl_find() in ipsec_insert_always, the new node will be
302 	 * inserted into the tree in the right place in the sequence.
303 	 */
304 	if (ipa < ipb)
305 		return (-1);
306 	if (ipa > ipb)
307 		return (1);
308 	return (0);
309 }
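
/*
 * A minimal sketch of how a caller can use the NULL-selector template
 * described above to visit every rule with a given index (see
 * ipsec_policy_delete(); the variable names below are only
 * illustrative):
 *
 *	ipsec_policy_t tmpl, *p;
 *	avl_index_t where;
 *
 *	tmpl.ipsp_index = index;
 *	tmpl.ipsp_sel = NULL;
 *	(void) avl_find(&iph->iph_rulebyid, &tmpl, &where);
 *	for (p = avl_nearest(&iph->iph_rulebyid, where, AVL_AFTER);
 *	    p != NULL && p->ipsp_index == index;
 *	    p = AVL_NEXT(&iph->iph_rulebyid, p))
 *		... process p ...
 */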
310 
311 /*
312  * Free what ipsec_alloc_table allocated.
313  */
314 void
315 ipsec_polhead_free_table(ipsec_policy_head_t *iph)
316 {
317 	int dir;
318 	int i;
319 
320 	for (dir = 0; dir < IPSEC_NTYPES; dir++) {
321 		ipsec_policy_root_t *ipr = &iph->iph_root[dir];
322 
323 		if (ipr->ipr_hash == NULL)
324 			continue;
325 
326 		for (i = 0; i < ipr->ipr_nchains; i++) {
327 			ASSERT(ipr->ipr_hash[i].hash_head == NULL);
328 		}
329 		kmem_free(ipr->ipr_hash, ipr->ipr_nchains *
330 		    sizeof (ipsec_policy_hash_t));
331 		ipr->ipr_hash = NULL;
332 	}
333 }
334 
335 void
336 ipsec_polhead_destroy(ipsec_policy_head_t *iph)
337 {
338 	int dir;
339 
340 	avl_destroy(&iph->iph_rulebyid);
341 	rw_destroy(&iph->iph_lock);
342 
343 	for (dir = 0; dir < IPSEC_NTYPES; dir++) {
344 		ipsec_policy_root_t *ipr = &iph->iph_root[dir];
345 		int chain;
346 
347 		for (chain = 0; chain < ipr->ipr_nchains; chain++)
348 			mutex_destroy(&(ipr->ipr_hash[chain].hash_lock));
349 
350 	}
351 	ipsec_polhead_free_table(iph);
352 }
353 
354 /*
355  * Free the IPsec stack instance.
356  */
357 /* ARGSUSED */
358 static void
359 ipsec_stack_fini(netstackid_t stackid, void *arg)
360 {
361 	ipsec_stack_t	*ipss = (ipsec_stack_t *)arg;
362 	void *cookie;
363 	ipsec_tun_pol_t *node;
364 	netstack_t	*ns = ipss->ipsec_netstack;
365 	int		i;
366 	ipsec_algtype_t	algtype;
367 
368 	ipsec_loader_destroy(ipss);
369 
370 	rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_WRITER);
371 	/*
372 	 * It's possible we can just ASSERT() the tree is empty.  After all,
373 	 * we aren't called until IP is ready to unload (and presumably all
374  * tunnels have been unplumbed).  But we'll play it safe for now; the
375 	 * loop will just exit immediately if it's empty.
376 	 */
377 	cookie = NULL;
378 	while ((node = (ipsec_tun_pol_t *)
379 	    avl_destroy_nodes(&ipss->ipsec_tunnel_policies,
380 	    &cookie)) != NULL) {
381 		ITP_REFRELE(node, ns);
382 	}
383 	avl_destroy(&ipss->ipsec_tunnel_policies);
384 	rw_exit(&ipss->ipsec_tunnel_policy_lock);
385 	rw_destroy(&ipss->ipsec_tunnel_policy_lock);
386 
387 	ipsec_config_flush(ns);
388 
389 	ipsec_kstat_destroy(ipss);
390 
391 	ip_drop_unregister(&ipss->ipsec_dropper);
392 
393 	ip_drop_unregister(&ipss->ipsec_spd_dropper);
394 	ip_drop_destroy(ipss);
395 	/*
396 	 * Globals start with ref == 1 to prevent IPPH_REFRELE() from
397 	 * attempting to free them, hence they should have 1 now.
398 	 */
399 	ipsec_polhead_destroy(&ipss->ipsec_system_policy);
400 	ASSERT(ipss->ipsec_system_policy.iph_refs == 1);
401 	ipsec_polhead_destroy(&ipss->ipsec_inactive_policy);
402 	ASSERT(ipss->ipsec_inactive_policy.iph_refs == 1);
403 
404 	for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++) {
405 		ipsec_action_free_table(ipss->ipsec_action_hash[i].hash_head);
406 		ipss->ipsec_action_hash[i].hash_head = NULL;
407 		mutex_destroy(&(ipss->ipsec_action_hash[i].hash_lock));
408 	}
409 
410 	for (i = 0; i < ipss->ipsec_spd_hashsize; i++) {
411 		ASSERT(ipss->ipsec_sel_hash[i].hash_head == NULL);
412 		mutex_destroy(&(ipss->ipsec_sel_hash[i].hash_lock));
413 	}
414 
415 	mutex_enter(&ipss->ipsec_alg_lock);
416 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype ++) {
417 		int nalgs = ipss->ipsec_nalgs[algtype];
418 
419 		for (i = 0; i < nalgs; i++) {
420 			if (ipss->ipsec_alglists[algtype][i] != NULL)
421 				ipsec_alg_unreg(algtype, i, ns);
422 		}
423 	}
424 	mutex_exit(&ipss->ipsec_alg_lock);
425 	mutex_destroy(&ipss->ipsec_alg_lock);
426 
427 	ipsid_gc(ns);
428 	ipsid_fini(ns);
429 
430 	(void) ipsec_free_tables(ipss);
431 	kmem_free(ipss, sizeof (*ipss));
432 }
433 
434 void
435 ipsec_policy_g_destroy(void)
436 {
437 	kmem_cache_destroy(ipsec_action_cache);
438 	kmem_cache_destroy(ipsec_sel_cache);
439 	kmem_cache_destroy(ipsec_pol_cache);
440 
441 	ipsec_unregister_prov_update();
442 
443 	netstack_unregister(NS_IPSEC);
444 }
445 
446 
447 /*
448  * Free what ipsec_alloc_tables allocated.
449  * Called when table allocation fails to free the table.
450  */
451 static int
452 ipsec_free_tables(ipsec_stack_t *ipss)
453 {
454 	int i;
455 
456 	if (ipss->ipsec_sel_hash != NULL) {
457 		for (i = 0; i < ipss->ipsec_spd_hashsize; i++) {
458 			ASSERT(ipss->ipsec_sel_hash[i].hash_head == NULL);
459 		}
460 		kmem_free(ipss->ipsec_sel_hash, ipss->ipsec_spd_hashsize *
461 		    sizeof (*ipss->ipsec_sel_hash));
462 		ipss->ipsec_sel_hash = NULL;
463 		ipss->ipsec_spd_hashsize = 0;
464 	}
465 	ipsec_polhead_free_table(&ipss->ipsec_system_policy);
466 	ipsec_polhead_free_table(&ipss->ipsec_inactive_policy);
467 
468 	return (ENOMEM);
469 }
470 
471 /*
472  * Attempt to allocate the tables in a single policy head.
473  * Return nonzero on failure after cleaning up any work in progress.
474  */
475 int
476 ipsec_alloc_table(ipsec_policy_head_t *iph, int nchains, int kmflag,
477     boolean_t global_cleanup, netstack_t *ns)
478 {
479 	int dir;
480 
481 	for (dir = 0; dir < IPSEC_NTYPES; dir++) {
482 		ipsec_policy_root_t *ipr = &iph->iph_root[dir];
483 
484 		ipr->ipr_nchains = nchains;
485 		ipr->ipr_hash = kmem_zalloc(nchains *
486 		    sizeof (ipsec_policy_hash_t), kmflag);
487 		if (ipr->ipr_hash == NULL)
488 			return (global_cleanup ?
489 			    ipsec_free_tables(ns->netstack_ipsec) :
490 			    ENOMEM);
491 	}
492 	return (0);
493 }
494 
495 /*
496  * Attempt to allocate the various tables.  Return nonzero on failure
497  * after cleaning up any work in progress.
498  */
499 static int
500 ipsec_alloc_tables(int kmflag, netstack_t *ns)
501 {
502 	int error;
503 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
504 
505 	error = ipsec_alloc_table(&ipss->ipsec_system_policy,
506 	    ipss->ipsec_spd_hashsize, kmflag, B_TRUE, ns);
507 	if (error != 0)
508 		return (error);
509 
510 	error = ipsec_alloc_table(&ipss->ipsec_inactive_policy,
511 	    ipss->ipsec_spd_hashsize, kmflag, B_TRUE, ns);
512 	if (error != 0)
513 		return (error);
514 
515 	ipss->ipsec_sel_hash = kmem_zalloc(ipss->ipsec_spd_hashsize *
516 	    sizeof (*ipss->ipsec_sel_hash), kmflag);
517 
518 	if (ipss->ipsec_sel_hash == NULL)
519 		return (ipsec_free_tables(ipss));
520 
521 	return (0);
522 }
523 
524 /*
525  * After table allocation, initialize a policy head.
526  */
527 void
528 ipsec_polhead_init(ipsec_policy_head_t *iph, int nchains)
529 {
530 	int dir, chain;
531 
532 	rw_init(&iph->iph_lock, NULL, RW_DEFAULT, NULL);
533 	avl_create(&iph->iph_rulebyid, ipsec_policy_cmpbyid,
534 	    sizeof (ipsec_policy_t), offsetof(ipsec_policy_t, ipsp_byid));
535 
536 	for (dir = 0; dir < IPSEC_NTYPES; dir++) {
537 		ipsec_policy_root_t *ipr = &iph->iph_root[dir];
538 		ipr->ipr_nchains = nchains;
539 
540 		for (chain = 0; chain < nchains; chain++) {
541 			mutex_init(&(ipr->ipr_hash[chain].hash_lock),
542 			    NULL, MUTEX_DEFAULT, NULL);
543 		}
544 	}
545 }
546 
547 static boolean_t
548 ipsec_kstat_init(ipsec_stack_t *ipss)
549 {
550 	ipss->ipsec_ksp = kstat_create_netstack("ip", 0, "ipsec_stat", "net",
551 	    KSTAT_TYPE_NAMED, sizeof (ipsec_kstats_t) / sizeof (kstat_named_t),
552 	    KSTAT_FLAG_PERSISTENT, ipss->ipsec_netstack->netstack_stackid);
553 
554 	if (ipss->ipsec_ksp == NULL || ipss->ipsec_ksp->ks_data == NULL)
555 		return (B_FALSE);
556 
557 	ipss->ipsec_kstats = ipss->ipsec_ksp->ks_data;
558 
559 #define	KI(x) kstat_named_init(&ipss->ipsec_kstats->x, #x, KSTAT_DATA_UINT64)
560 	KI(esp_stat_in_requests);
561 	KI(esp_stat_in_discards);
562 	KI(esp_stat_lookup_failure);
563 	KI(ah_stat_in_requests);
564 	KI(ah_stat_in_discards);
565 	KI(ah_stat_lookup_failure);
566 	KI(sadb_acquire_maxpackets);
567 	KI(sadb_acquire_qhiwater);
568 #undef KI
569 
570 	kstat_install(ipss->ipsec_ksp);
571 	return (B_TRUE);
572 }
573 
574 static void
575 ipsec_kstat_destroy(ipsec_stack_t *ipss)
576 {
577 	kstat_delete_netstack(ipss->ipsec_ksp,
578 	    ipss->ipsec_netstack->netstack_stackid);
579 	ipss->ipsec_kstats = NULL;
580 
581 }
582 
583 /*
584  * Initialize the IPsec stack instance.
585  */
586 /* ARGSUSED */
587 static void *
588 ipsec_stack_init(netstackid_t stackid, netstack_t *ns)
589 {
590 	ipsec_stack_t	*ipss;
591 	int i;
592 
593 	ipss = (ipsec_stack_t *)kmem_zalloc(sizeof (*ipss), KM_SLEEP);
594 	ipss->ipsec_netstack = ns;
595 
596 	/*
597 	 * FIXME: netstack_ipsec is used by some of the routines we call
598 	 * below, but it isn't set until this routine returns.
599 	 * Either we introduce optional xxx_stack_alloc() functions
600 	 * that will be called by the netstack framework before xxx_stack_init,
601 	 * or we switch spd.c and sadb.c to operate on ipsec_stack_t
602 	 * (latter has some include file order issues for sadb.h, but makes
603 	 * sense if we merge some of the ipsec related stack_t's together.
604 	 */
605 	ns->netstack_ipsec = ipss;
606 
607 	/*
608 	 * Make two attempts to allocate policy hash tables; try it at
609 	 * the "preferred" size (may be set in /etc/system) first,
610 	 * then fall back to the default size.
611 	 */
612 	ipss->ipsec_spd_hashsize = (ipsec_spd_hashsize == 0) ?
613 	    IPSEC_SPDHASH_DEFAULT : ipsec_spd_hashsize;
614 
615 	if (ipsec_alloc_tables(KM_NOSLEEP, ns) != 0) {
616 		cmn_err(CE_WARN,
617 		    "Unable to allocate %d entry IPsec policy hash table",
618 		    ipss->ipsec_spd_hashsize);
619 		ipss->ipsec_spd_hashsize = IPSEC_SPDHASH_DEFAULT;
620 		cmn_err(CE_WARN, "Falling back to %d entries",
621 		    ipss->ipsec_spd_hashsize);
622 		(void) ipsec_alloc_tables(KM_SLEEP, ns);
623 	}
624 
625 	/* Just set a default for tunnels. */
626 	ipss->ipsec_tun_spd_hashsize = (tun_spd_hashsize == 0) ?
627 	    TUN_SPDHASH_DEFAULT : tun_spd_hashsize;
628 
629 	ipsid_init(ns);
630 	/*
631 	 * Globals need ref == 1 to prevent IPPH_REFRELE() from attempting
632 	 * to free them.
633 	 */
634 	ipss->ipsec_system_policy.iph_refs = 1;
635 	ipss->ipsec_inactive_policy.iph_refs = 1;
636 	ipsec_polhead_init(&ipss->ipsec_system_policy,
637 	    ipss->ipsec_spd_hashsize);
638 	ipsec_polhead_init(&ipss->ipsec_inactive_policy,
639 	    ipss->ipsec_spd_hashsize);
640 	rw_init(&ipss->ipsec_tunnel_policy_lock, NULL, RW_DEFAULT, NULL);
641 	avl_create(&ipss->ipsec_tunnel_policies, tunnel_compare,
642 	    sizeof (ipsec_tun_pol_t), 0);
643 
644 	ipss->ipsec_next_policy_index = 1;
645 
646 	rw_init(&ipss->ipsec_system_policy.iph_lock, NULL, RW_DEFAULT, NULL);
647 	rw_init(&ipss->ipsec_inactive_policy.iph_lock, NULL, RW_DEFAULT, NULL);
648 
649 	for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++)
650 		mutex_init(&(ipss->ipsec_action_hash[i].hash_lock),
651 		    NULL, MUTEX_DEFAULT, NULL);
652 
653 	for (i = 0; i < ipss->ipsec_spd_hashsize; i++)
654 		mutex_init(&(ipss->ipsec_sel_hash[i].hash_lock),
655 		    NULL, MUTEX_DEFAULT, NULL);
656 
657 	mutex_init(&ipss->ipsec_alg_lock, NULL, MUTEX_DEFAULT, NULL);
658 	for (i = 0; i < IPSEC_NALGTYPES; i++) {
659 		ipss->ipsec_nalgs[i] = 0;
660 	}
661 
662 	ip_drop_init(ipss);
663 	ip_drop_register(&ipss->ipsec_spd_dropper, "IPsec SPD");
664 
665 	/* IP's IPsec code calls the packet dropper */
666 	ip_drop_register(&ipss->ipsec_dropper, "IP IPsec processing");
667 
668 	(void) ipsec_kstat_init(ipss);
669 
670 	ipsec_loader_init(ipss);
671 	ipsec_loader_start(ipss);
672 
673 	return (ipss);
674 }
675 
676 /* Global across all stack instances */
677 void
678 ipsec_policy_g_init(void)
679 {
680 	ipsec_action_cache = kmem_cache_create("ipsec_actions",
681 	    sizeof (ipsec_action_t), _POINTER_ALIGNMENT, NULL, NULL,
682 	    ipsec_action_reclaim, NULL, NULL, 0);
683 	ipsec_sel_cache = kmem_cache_create("ipsec_selectors",
684 	    sizeof (ipsec_sel_t), _POINTER_ALIGNMENT, NULL, NULL,
685 	    NULL, NULL, NULL, 0);
686 	ipsec_pol_cache = kmem_cache_create("ipsec_policy",
687 	    sizeof (ipsec_policy_t), _POINTER_ALIGNMENT, NULL, NULL,
688 	    NULL, NULL, NULL, 0);
689 
690 	/*
691 	 * We want to be informed each time a stack is created or
692 	 * destroyed in the kernel, so we can maintain the
693 	 * set of ipsec_stack_t's.
694 	 */
695 	netstack_register(NS_IPSEC, ipsec_stack_init, NULL, ipsec_stack_fini);
696 }
697 
698 /*
699  * Sort algorithm lists.
700  *
701  * I may need to split this based on
702  * authentication/encryption, and I may wish to have an administrator
703  * configure this list.  Hold on to some NDD variables...
704  *
705  * XXX For now, sort on minimum key size (GAG!).  While minimum key size is
706  * not the ideal metric, it's the only quantifiable measure available.
707  * We need a better metric for sorting algorithms by preference.
708  */
709 static void
710 alg_insert_sortlist(enum ipsec_algtype at, uint8_t algid, netstack_t *ns)
711 {
712 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
713 	ipsec_alginfo_t *ai = ipss->ipsec_alglists[at][algid];
714 	uint8_t holder, swap;
715 	uint_t i;
716 	uint_t count = ipss->ipsec_nalgs[at];
717 	ASSERT(ai != NULL);
718 	ASSERT(algid == ai->alg_id);
719 
720 	ASSERT(MUTEX_HELD(&ipss->ipsec_alg_lock));
721 
722 	holder = algid;
723 
724 	for (i = 0; i < count - 1; i++) {
725 		ipsec_alginfo_t *alt;
726 
727 		alt = ipss->ipsec_alglists[at][ipss->ipsec_sortlist[at][i]];
728 		/*
729 		 * If you want to give precedence to newly added algs,
730 		 * add the = in the > comparison.
731 		 */
732 		if ((holder != algid) || (ai->alg_minbits > alt->alg_minbits)) {
733 			/* Swap sortlist[i] and holder. */
734 			swap = ipss->ipsec_sortlist[at][i];
735 			ipss->ipsec_sortlist[at][i] = holder;
736 			holder = swap;
737 			ai = alt;
738 		} /* Else just continue. */
739 	}
740 
741 	/* Store holder in last slot. */
742 	ipss->ipsec_sortlist[at][i] = holder;
743 }
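
/*
 * For example, if the authentication sortlist already holds algorithms
 * whose minimum key sizes are { 160, 128, 96 } bits, registering one
 * with a 256-bit minimum yields { 256, 160, 128, 96 }: the list stays
 * ordered by decreasing alg_minbits, so earlier entries appear earlier
 * in the action list produced by ipsec_act_wildcard_expand() for a
 * wildcarded algorithm.
 */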
744 
745 /*
746  * Remove an algorithm from a sorted algorithm list.
747  * This should be considerably easier, even with complex sorting.
748  */
749 static void
750 alg_remove_sortlist(enum ipsec_algtype at, uint8_t algid, netstack_t *ns)
751 {
752 	boolean_t copyback = B_FALSE;
753 	int i;
754 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
755 	int newcount = ipss->ipsec_nalgs[at];
756 
757 	ASSERT(MUTEX_HELD(&ipss->ipsec_alg_lock));
758 
759 	for (i = 0; i <= newcount; i++) {
760 		if (copyback) {
761 			ipss->ipsec_sortlist[at][i-1] =
762 			    ipss->ipsec_sortlist[at][i];
763 		} else if (ipss->ipsec_sortlist[at][i] == algid) {
764 			copyback = B_TRUE;
765 		}
766 	}
767 }
768 
769 /*
770  * Add the specified algorithm to the algorithm tables.
771  * Must be called while holding the algorithm table writer lock.
772  */
773 void
774 ipsec_alg_reg(ipsec_algtype_t algtype, ipsec_alginfo_t *alg, netstack_t *ns)
775 {
776 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
777 
778 	ASSERT(MUTEX_HELD(&ipss->ipsec_alg_lock));
779 
780 	ASSERT(ipss->ipsec_alglists[algtype][alg->alg_id] == NULL);
781 	ipsec_alg_fix_min_max(alg, algtype, ns);
782 	ipss->ipsec_alglists[algtype][alg->alg_id] = alg;
783 
784 	ipss->ipsec_nalgs[algtype]++;
785 	alg_insert_sortlist(algtype, alg->alg_id, ns);
786 }
787 
788 /*
789  * Remove the specified algorithm from the algorithm tables.
790  * Must be called while holding the algorithm table writer lock.
791  */
792 void
793 ipsec_alg_unreg(ipsec_algtype_t algtype, uint8_t algid, netstack_t *ns)
794 {
795 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
796 
797 	ASSERT(MUTEX_HELD(&ipss->ipsec_alg_lock));
798 
799 	ASSERT(ipss->ipsec_alglists[algtype][algid] != NULL);
800 	ipsec_alg_free(ipss->ipsec_alglists[algtype][algid]);
801 	ipss->ipsec_alglists[algtype][algid] = NULL;
802 
803 	ipss->ipsec_nalgs[algtype]--;
804 	alg_remove_sortlist(algtype, algid, ns);
805 }
806 
807 /*
808  * Hooks for spdsock to get a grip on system policy.
809  */
810 
811 ipsec_policy_head_t *
812 ipsec_system_policy(netstack_t *ns)
813 {
814 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
815 	ipsec_policy_head_t *h = &ipss->ipsec_system_policy;
816 
817 	IPPH_REFHOLD(h);
818 	return (h);
819 }
820 
821 ipsec_policy_head_t *
822 ipsec_inactive_policy(netstack_t *ns)
823 {
824 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
825 	ipsec_policy_head_t *h = &ipss->ipsec_inactive_policy;
826 
827 	IPPH_REFHOLD(h);
828 	return (h);
829 }
830 
831 /*
832  * Lock inactive policy, then active policy, then exchange policy root
833  * pointers.
834  */
835 void
836 ipsec_swap_policy(ipsec_policy_head_t *active, ipsec_policy_head_t *inactive,
837     netstack_t *ns)
838 {
839 	int af, dir;
840 	avl_tree_t r1, r2;
841 
842 	rw_enter(&inactive->iph_lock, RW_WRITER);
843 	rw_enter(&active->iph_lock, RW_WRITER);
844 
845 	r1 = active->iph_rulebyid;
846 	r2 = inactive->iph_rulebyid;
847 	active->iph_rulebyid = r2;
848 	inactive->iph_rulebyid = r1;
849 
850 	for (dir = 0; dir < IPSEC_NTYPES; dir++) {
851 		ipsec_policy_hash_t *h1, *h2;
852 
853 		h1 = active->iph_root[dir].ipr_hash;
854 		h2 = inactive->iph_root[dir].ipr_hash;
855 		active->iph_root[dir].ipr_hash = h2;
856 		inactive->iph_root[dir].ipr_hash = h1;
857 
858 		for (af = 0; af < IPSEC_NAF; af++) {
859 			ipsec_policy_t *t1, *t2;
860 
861 			t1 = active->iph_root[dir].ipr_nonhash[af];
862 			t2 = inactive->iph_root[dir].ipr_nonhash[af];
863 			active->iph_root[dir].ipr_nonhash[af] = t2;
864 			inactive->iph_root[dir].ipr_nonhash[af] = t1;
865 			if (t1 != NULL) {
866 				t1->ipsp_hash.hash_pp =
867 				    &(inactive->iph_root[dir].ipr_nonhash[af]);
868 			}
869 			if (t2 != NULL) {
870 				t2->ipsp_hash.hash_pp =
871 				    &(active->iph_root[dir].ipr_nonhash[af]);
872 			}
873 
874 		}
875 	}
876 	active->iph_gen++;
877 	inactive->iph_gen++;
878 	ipsec_update_present_flags(ns->netstack_ipsec);
879 	rw_exit(&active->iph_lock);
880 	rw_exit(&inactive->iph_lock);
881 }
882 
883 /*
884  * Swap global policy primary/secondary.
885  */
886 void
887 ipsec_swap_global_policy(netstack_t *ns)
888 {
889 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
890 
891 	ipsec_swap_policy(&ipss->ipsec_system_policy,
892 	    &ipss->ipsec_inactive_policy, ns);
893 }
894 
895 /*
896  * Clone one policy rule..
897  */
898 static ipsec_policy_t *
899 ipsec_copy_policy(const ipsec_policy_t *src)
900 {
901 	ipsec_policy_t *dst = kmem_cache_alloc(ipsec_pol_cache, KM_NOSLEEP);
902 
903 	if (dst == NULL)
904 		return (NULL);
905 
906 	/*
907 	 * Adjust refcounts of cloned state.
908 	 */
909 	IPACT_REFHOLD(src->ipsp_act);
910 	src->ipsp_sel->ipsl_refs++;
911 
912 	HASH_NULL(dst, ipsp_hash);
913 	dst->ipsp_netstack = src->ipsp_netstack;
914 	dst->ipsp_refs = 1;
915 	dst->ipsp_sel = src->ipsp_sel;
916 	dst->ipsp_act = src->ipsp_act;
917 	dst->ipsp_prio = src->ipsp_prio;
918 	dst->ipsp_index = src->ipsp_index;
919 
920 	return (dst);
921 }
922 
923 void
924 ipsec_insert_always(avl_tree_t *tree, void *new_node)
925 {
926 	void *node;
927 	avl_index_t where;
928 
929 	node = avl_find(tree, new_node, &where);
930 	ASSERT(node == NULL);
931 	avl_insert(tree, new_node, where);
932 }
933 
934 
935 static int
936 ipsec_copy_chain(ipsec_policy_head_t *dph, ipsec_policy_t *src,
937     ipsec_policy_t **dstp)
938 {
939 	for (; src != NULL; src = src->ipsp_hash.hash_next) {
940 		ipsec_policy_t *dst = ipsec_copy_policy(src);
941 		if (dst == NULL)
942 			return (ENOMEM);
943 
944 		HASHLIST_INSERT(dst, ipsp_hash, *dstp);
945 		ipsec_insert_always(&dph->iph_rulebyid, dst);
946 	}
947 	return (0);
948 }
949 
950 
951 
952 /*
953  * Make one policy head look exactly like another.
954  *
955  * As with ipsec_swap_policy, we lock the destination policy head first, then
956  * the source policy head. Note that we only need to read-lock the source
957  * policy head as we are not changing it.
958  */
959 int
960 ipsec_copy_polhead(ipsec_policy_head_t *sph, ipsec_policy_head_t *dph,
961     netstack_t *ns)
962 {
963 	int af, dir, chain, nchains;
964 
965 	rw_enter(&dph->iph_lock, RW_WRITER);
966 
967 	ipsec_polhead_flush(dph, ns);
968 
969 	rw_enter(&sph->iph_lock, RW_READER);
970 
971 	for (dir = 0; dir < IPSEC_NTYPES; dir++) {
972 		ipsec_policy_root_t *dpr = &dph->iph_root[dir];
973 		ipsec_policy_root_t *spr = &sph->iph_root[dir];
974 		nchains = dpr->ipr_nchains;
975 
976 		ASSERT(dpr->ipr_nchains == spr->ipr_nchains);
977 
978 		for (af = 0; af < IPSEC_NAF; af++) {
979 			if (ipsec_copy_chain(dph, spr->ipr_nonhash[af],
980 			    &dpr->ipr_nonhash[af]))
981 				goto abort_copy;
982 		}
983 
984 		for (chain = 0; chain < nchains; chain++) {
985 			if (ipsec_copy_chain(dph,
986 			    spr->ipr_hash[chain].hash_head,
987 			    &dpr->ipr_hash[chain].hash_head))
988 				goto abort_copy;
989 		}
990 	}
991 
992 	dph->iph_gen++;
993 
994 	rw_exit(&sph->iph_lock);
995 	rw_exit(&dph->iph_lock);
996 	return (0);
997 
998 abort_copy:
999 	ipsec_polhead_flush(dph, ns);
1000 	rw_exit(&sph->iph_lock);
1001 	rw_exit(&dph->iph_lock);
1002 	return (ENOMEM);
1003 }
1004 
1005 /*
1006  * Clone currently active policy to the inactive policy list.
1007  */
1008 int
1009 ipsec_clone_system_policy(netstack_t *ns)
1010 {
1011 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
1012 
1013 	return (ipsec_copy_polhead(&ipss->ipsec_system_policy,
1014 	    &ipss->ipsec_inactive_policy, ns));
1015 }
1016 
1017 /*
1018  * Extract the string from ipsec_policy_failure_msgs[type] and
1019  * log it.
1020  *
1021  */
1022 void
1023 ipsec_log_policy_failure(int type, char *func_name, ipha_t *ipha, ip6_t *ip6h,
1024     boolean_t secure, netstack_t *ns)
1025 {
1026 	char	sbuf[INET6_ADDRSTRLEN];
1027 	char	dbuf[INET6_ADDRSTRLEN];
1028 	char	*s;
1029 	char	*d;
1030 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
1031 
1032 	ASSERT((ipha == NULL && ip6h != NULL) ||
1033 	    (ip6h == NULL && ipha != NULL));
1034 
1035 	if (ipha != NULL) {
1036 		s = inet_ntop(AF_INET, &ipha->ipha_src, sbuf, sizeof (sbuf));
1037 		d = inet_ntop(AF_INET, &ipha->ipha_dst, dbuf, sizeof (dbuf));
1038 	} else {
1039 		s = inet_ntop(AF_INET6, &ip6h->ip6_src, sbuf, sizeof (sbuf));
1040 		d = inet_ntop(AF_INET6, &ip6h->ip6_dst, dbuf, sizeof (dbuf));
1041 
1042 	}
1043 
1044 	/* Always bump the policy failure counter. */
1045 	ipss->ipsec_policy_failure_count[type]++;
1046 
1047 	ipsec_rl_strlog(ns, IP_MOD_ID, 0, 0, SL_ERROR|SL_WARN|SL_CONSOLE,
1048 	    ipsec_policy_failure_msgs[type], func_name,
1049 	    (secure ? "secure" : "not secure"), s, d);
1050 }
1051 
1052 /*
1053  * Rate-limiting front-end to strlog() for AH and ESP.	Uses the ndd variables
1054  * in /dev/ip and the same rate-limiting clock so that there's a single
1055  * knob to turn to throttle the rate of messages.
1056  */
1057 void
1058 ipsec_rl_strlog(netstack_t *ns, short mid, short sid, char level, ushort_t sl,
1059     char *fmt, ...)
1060 {
1061 	va_list adx;
1062 	hrtime_t current = gethrtime();
1063 	ip_stack_t	*ipst = ns->netstack_ip;
1064 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
1065 
1066 	sl |= SL_CONSOLE;
1067 	/*
1068 	 * Throttle logging to stop syslog from being swamped. If variable
1069 	 * 'ipsec_policy_log_interval' is zero, don't log any messages at
1070 	 * all; otherwise log only one message every 'ipsec_policy_log_interval'
1071 	 * msec. Convert interval (in msec) to hrtime (in nsec).
1072 	 */
1073 
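	/*
	 * For example, with ips_ipsec_policy_log_interval set to 1000
	 * (msec), the window below is 1000 * 1000000 == 10^9 nsec, so
	 * at most one policy-failure message is logged per second.
	 */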
1074 	if (ipst->ips_ipsec_policy_log_interval) {
1075 		if (ipss->ipsec_policy_failure_last +
1076 		    ((hrtime_t)ipst->ips_ipsec_policy_log_interval *
1077 		    (hrtime_t)1000000) <= current) {
1078 			va_start(adx, fmt);
1079 			(void) vstrlog(mid, sid, level, sl, fmt, adx);
1080 			va_end(adx);
1081 			ipss->ipsec_policy_failure_last = current;
1082 		}
1083 	}
1084 }
1085 
1086 void
1087 ipsec_config_flush(netstack_t *ns)
1088 {
1089 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
1090 
1091 	rw_enter(&ipss->ipsec_system_policy.iph_lock, RW_WRITER);
1092 	ipsec_polhead_flush(&ipss->ipsec_system_policy, ns);
1093 	ipss->ipsec_next_policy_index = 1;
1094 	rw_exit(&ipss->ipsec_system_policy.iph_lock);
1095 	ipsec_action_reclaim_stack(ns);
1096 }
1097 
1098 /*
1099  * Clip a policy's min/max keybits vs. the capabilities of the
1100  * algorithm.
1101  */
1102 static void
1103 act_alg_adjust(uint_t algtype, uint_t algid,
1104     uint16_t *minbits, uint16_t *maxbits, netstack_t *ns)
1105 {
1106 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
1107 	ipsec_alginfo_t *algp = ipss->ipsec_alglists[algtype][algid];
1108 
1109 	if (algp != NULL) {
1110 		/*
1111 		 * If passed-in minbits is zero, we assume the caller trusts
1112 		 * us with setting the minimum key size.  We pick the
1113 		 * algorithms DEFAULT key size for the minimum in this case.
1114 		 */
1115 		if (*minbits == 0) {
1116 			*minbits = algp->alg_default_bits;
1117 			ASSERT(*minbits >= algp->alg_minbits);
1118 		} else {
1119 			*minbits = MAX(MIN(*minbits, algp->alg_maxbits),
1120 			    algp->alg_minbits);
1121 		}
1122 		if (*maxbits == 0)
1123 			*maxbits = algp->alg_maxbits;
1124 		else
1125 			*maxbits = MIN(MAX(*maxbits, algp->alg_minbits),
1126 			    algp->alg_maxbits);
1127 		ASSERT(*minbits <= *maxbits);
1128 	} else {
1129 		*minbits = 0;
1130 		*maxbits = 0;
1131 	}
1132 }
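
/*
 * For example, if an algorithm is loaded with alg_minbits == 128,
 * alg_maxbits == 256 and alg_default_bits == 128, a request of
 * minbits == 0, maxbits == 512 is adjusted to the range [128, 256],
 * while a request of minbits == 256, maxbits == 192 ends up as
 * [256, 192]; ipsec_check_action() later flags that inverted range
 * with an SPD_DIAGNOSTIC_UNSUPP_*_KEYSIZE diagnostic.
 */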
1133 
1134 /*
1135  * Check an action's requested algorithms against the algorithms currently
1136  * loaded in the system.
1137  */
1138 boolean_t
1139 ipsec_check_action(ipsec_act_t *act, int *diag, netstack_t *ns)
1140 {
1141 	ipsec_prot_t *ipp;
1142 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
1143 
1144 	ipp = &act->ipa_apply;
1145 
1146 	if (ipp->ipp_use_ah &&
1147 	    ipss->ipsec_alglists[IPSEC_ALG_AUTH][ipp->ipp_auth_alg] == NULL) {
1148 		*diag = SPD_DIAGNOSTIC_UNSUPP_AH_ALG;
1149 		return (B_FALSE);
1150 	}
1151 	if (ipp->ipp_use_espa &&
1152 	    ipss->ipsec_alglists[IPSEC_ALG_AUTH][ipp->ipp_esp_auth_alg] ==
1153 	    NULL) {
1154 		*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_ALG;
1155 		return (B_FALSE);
1156 	}
1157 	if (ipp->ipp_use_esp &&
1158 	    ipss->ipsec_alglists[IPSEC_ALG_ENCR][ipp->ipp_encr_alg] == NULL) {
1159 		*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_ALG;
1160 		return (B_FALSE);
1161 	}
1162 
1163 	act_alg_adjust(IPSEC_ALG_AUTH, ipp->ipp_auth_alg,
1164 	    &ipp->ipp_ah_minbits, &ipp->ipp_ah_maxbits, ns);
1165 	act_alg_adjust(IPSEC_ALG_AUTH, ipp->ipp_esp_auth_alg,
1166 	    &ipp->ipp_espa_minbits, &ipp->ipp_espa_maxbits, ns);
1167 	act_alg_adjust(IPSEC_ALG_ENCR, ipp->ipp_encr_alg,
1168 	    &ipp->ipp_espe_minbits, &ipp->ipp_espe_maxbits, ns);
1169 
1170 	if (ipp->ipp_ah_minbits > ipp->ipp_ah_maxbits) {
1171 		*diag = SPD_DIAGNOSTIC_UNSUPP_AH_KEYSIZE;
1172 		return (B_FALSE);
1173 	}
1174 	if (ipp->ipp_espa_minbits > ipp->ipp_espa_maxbits) {
1175 		*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_AUTH_KEYSIZE;
1176 		return (B_FALSE);
1177 	}
1178 	if (ipp->ipp_espe_minbits > ipp->ipp_espe_maxbits) {
1179 		*diag = SPD_DIAGNOSTIC_UNSUPP_ESP_ENCR_KEYSIZE;
1180 		return (B_FALSE);
1181 	}
1182 	/* TODO: sanity check lifetimes */
1183 	return (B_TRUE);
1184 }
1185 
1186 /*
1187  * Set up a single action during wildcard expansion..
1188  */
1189 static void
1190 ipsec_setup_act(ipsec_act_t *outact, ipsec_act_t *act,
1191     uint_t auth_alg, uint_t encr_alg, uint_t eauth_alg, netstack_t *ns)
1192 {
1193 	ipsec_prot_t *ipp;
1194 
1195 	*outact = *act;
1196 	ipp = &outact->ipa_apply;
1197 	ipp->ipp_auth_alg = (uint8_t)auth_alg;
1198 	ipp->ipp_encr_alg = (uint8_t)encr_alg;
1199 	ipp->ipp_esp_auth_alg = (uint8_t)eauth_alg;
1200 
1201 	act_alg_adjust(IPSEC_ALG_AUTH, auth_alg,
1202 	    &ipp->ipp_ah_minbits, &ipp->ipp_ah_maxbits, ns);
1203 	act_alg_adjust(IPSEC_ALG_AUTH, eauth_alg,
1204 	    &ipp->ipp_espa_minbits, &ipp->ipp_espa_maxbits, ns);
1205 	act_alg_adjust(IPSEC_ALG_ENCR, encr_alg,
1206 	    &ipp->ipp_espe_minbits, &ipp->ipp_espe_maxbits, ns);
1207 }
1208 
1209 /*
1210  * combinatoric expansion time: expand a wildcarded action into an
1211  * array of wildcarded actions; we return the exploded action list,
1212  * and return a count in *nact (output only).
1213  */
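/*
 * For example, an APPLY action that requests ESP with both the
 * encryption and the ESP authentication algorithms left wildcarded,
 * on a system with three encryption and two authentication algorithms
 * loaded (not counting the "none" placeholders), expands into
 * 3 * 2 == 6 concrete actions.
 */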
1214 static ipsec_act_t *
1215 ipsec_act_wildcard_expand(ipsec_act_t *act, uint_t *nact, netstack_t *ns)
1216 {
1217 	boolean_t use_ah, use_esp, use_espa;
1218 	boolean_t wild_auth, wild_encr, wild_eauth;
1219 	uint_t	auth_alg, auth_idx, auth_min, auth_max;
1220 	uint_t	eauth_alg, eauth_idx, eauth_min, eauth_max;
1221 	uint_t  encr_alg, encr_idx, encr_min, encr_max;
1222 	uint_t	action_count, ai;
1223 	ipsec_act_t *outact;
1224 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
1225 
1226 	if (act->ipa_type != IPSEC_ACT_APPLY) {
1227 		outact = kmem_alloc(sizeof (*act), KM_NOSLEEP);
1228 		*nact = 1;
1229 		if (outact != NULL)
1230 			bcopy(act, outact, sizeof (*act));
1231 		return (outact);
1232 	}
1233 	/*
1234 	 * compute the combinatoric explosion..
1235 	 *
1236 	 * we assume a request for encr if esp_req is PREF_REQUIRED
1237 	 * we assume a request for ah auth if ah_req is PREF_REQUIRED.
1238 	 * we assume a request for esp auth if !ah and esp_req is PREF_REQUIRED
1239 	 */
1240 
1241 	use_ah = act->ipa_apply.ipp_use_ah;
1242 	use_esp = act->ipa_apply.ipp_use_esp;
1243 	use_espa = act->ipa_apply.ipp_use_espa;
1244 	auth_alg = act->ipa_apply.ipp_auth_alg;
1245 	eauth_alg = act->ipa_apply.ipp_esp_auth_alg;
1246 	encr_alg = act->ipa_apply.ipp_encr_alg;
1247 
1248 	wild_auth = use_ah && (auth_alg == 0);
1249 	wild_eauth = use_espa && (eauth_alg == 0);
1250 	wild_encr = use_esp && (encr_alg == 0);
1251 
1252 	action_count = 1;
1253 	auth_min = auth_max = auth_alg;
1254 	eauth_min = eauth_max = eauth_alg;
1255 	encr_min = encr_max = encr_alg;
1256 
1257 	/*
1258 	 * set up for explosion.. for each dimension, expand output
1259 	 * size by the explosion factor.
1260 	 *
1261 	 * Don't include the "any" algorithms, if defined, as no
1262 	 * kernel policies should be set for these algorithms.
1263 	 */
1264 
1265 #define	SET_EXP_MINMAX(type, wild, alg, min, max, ipss)		\
1266 	if (wild) {						\
1267 		int nalgs = ipss->ipsec_nalgs[type];		\
1268 		if (ipss->ipsec_alglists[type][alg] != NULL)	\
1269 			nalgs--;				\
1270 		action_count *= nalgs;				\
1271 		min = 0;					\
1272 		max = ipss->ipsec_nalgs[type] - 1;		\
1273 	}
1274 
1275 	SET_EXP_MINMAX(IPSEC_ALG_AUTH, wild_auth, SADB_AALG_NONE,
1276 	    auth_min, auth_max, ipss);
1277 	SET_EXP_MINMAX(IPSEC_ALG_AUTH, wild_eauth, SADB_AALG_NONE,
1278 	    eauth_min, eauth_max, ipss);
1279 	SET_EXP_MINMAX(IPSEC_ALG_ENCR, wild_encr, SADB_EALG_NONE,
1280 	    encr_min, encr_max, ipss);
1281 
1282 #undef	SET_EXP_MINMAX
1283 
1284 	/*
1285 	 * ok, allocate the whole mess..
1286 	 */
1287 
1288 	outact = kmem_alloc(sizeof (*outact) * action_count, KM_NOSLEEP);
1289 	if (outact == NULL)
1290 		return (NULL);
1291 
1292 	/*
1293 	 * Now compute all combinations.  Note that non-wildcarded
1294 	 * dimensions just get a single value from auth_min, while
1295 	 * wildcarded dimensions indirect through the sortlist.
1296 	 *
1297 	 * We do encryption outermost since, at this time, there's
1298 	 * greater difference in security and performance between
1299 	 * encryption algorithms vs. authentication algorithms.
1300 	 */
1301 
1302 	ai = 0;
1303 
1304 #define	WHICH_ALG(type, wild, idx, ipss) \
1305 	((wild)?(ipss->ipsec_sortlist[type][idx]):(idx))
1306 
1307 	for (encr_idx = encr_min; encr_idx <= encr_max; encr_idx++) {
1308 		encr_alg = WHICH_ALG(IPSEC_ALG_ENCR, wild_encr, encr_idx, ipss);
1309 		if (wild_encr && encr_alg == SADB_EALG_NONE)
1310 			continue;
1311 		for (auth_idx = auth_min; auth_idx <= auth_max; auth_idx++) {
1312 			auth_alg = WHICH_ALG(IPSEC_ALG_AUTH, wild_auth,
1313 			    auth_idx, ipss);
1314 			if (wild_auth && auth_alg == SADB_AALG_NONE)
1315 				continue;
1316 			for (eauth_idx = eauth_min; eauth_idx <= eauth_max;
1317 			    eauth_idx++) {
1318 				eauth_alg = WHICH_ALG(IPSEC_ALG_AUTH,
1319 				    wild_eauth, eauth_idx, ipss);
1320 				if (wild_eauth && eauth_alg == SADB_AALG_NONE)
1321 					continue;
1322 
1323 				ipsec_setup_act(&outact[ai], act,
1324 				    auth_alg, encr_alg, eauth_alg, ns);
1325 				ai++;
1326 			}
1327 		}
1328 	}
1329 
1330 #undef WHICH_ALG
1331 
1332 	ASSERT(ai == action_count);
1333 	*nact = action_count;
1334 	return (outact);
1335 }
1336 
1337 /*
1338  * Extract the parts of an ipsec_prot_t from an old-style ipsec_req_t.
1339  */
1340 static void
1341 ipsec_prot_from_req(const ipsec_req_t *req, ipsec_prot_t *ipp)
1342 {
1343 	bzero(ipp, sizeof (*ipp));
1344 	/*
1345 	 * ipp_use_* are bitfields.  Look at "!!" in the following as a
1346 	 * "boolean canonicalization" operator.
1347 	 */
1348 	ipp->ipp_use_ah = !!(req->ipsr_ah_req & IPSEC_PREF_REQUIRED);
1349 	ipp->ipp_use_esp = !!(req->ipsr_esp_req & IPSEC_PREF_REQUIRED);
1350 	ipp->ipp_use_espa = !!(req->ipsr_esp_auth_alg);
1351 	ipp->ipp_use_se = !!(req->ipsr_self_encap_req & IPSEC_PREF_REQUIRED);
1352 	ipp->ipp_use_unique = !!((req->ipsr_ah_req|req->ipsr_esp_req) &
1353 	    IPSEC_PREF_UNIQUE);
1354 	ipp->ipp_encr_alg = req->ipsr_esp_alg;
1355 	/*
1356 	 * SADB_AALG_ANY is a placeholder to distinguish "any" from
1357 	 * "none" above.  If auth is required, as determined above,
1358 	 * SADB_AALG_ANY becomes 0, which is the representation
1359 	 * of "any" and "none" in PF_KEY v2.
1360 	 */
1361 	ipp->ipp_auth_alg = (req->ipsr_auth_alg != SADB_AALG_ANY) ?
1362 	    req->ipsr_auth_alg : 0;
1363 	ipp->ipp_esp_auth_alg = (req->ipsr_esp_auth_alg != SADB_AALG_ANY) ?
1364 	    req->ipsr_esp_auth_alg : 0;
1365 }
1366 
1367 /*
1368  * Extract a new-style action from a request.
1369  */
1370 void
1371 ipsec_actvec_from_req(const ipsec_req_t *req, ipsec_act_t **actp, uint_t *nactp,
1372     netstack_t *ns)
1373 {
1374 	struct ipsec_act act;
1375 
1376 	bzero(&act, sizeof (act));
1377 	if ((req->ipsr_ah_req & IPSEC_PREF_NEVER) &&
1378 	    (req->ipsr_esp_req & IPSEC_PREF_NEVER)) {
1379 		act.ipa_type = IPSEC_ACT_BYPASS;
1380 	} else {
1381 		act.ipa_type = IPSEC_ACT_APPLY;
1382 		ipsec_prot_from_req(req, &act.ipa_apply);
1383 	}
1384 	*actp = ipsec_act_wildcard_expand(&act, nactp, ns);
1385 }
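
/*
 * Note that ipsec_act_wildcard_expand() allocates with KM_NOSLEEP and
 * can hand back NULL, so callers must check *actp before use and
 * eventually release the vector with ipsec_actvec_free(*actp, *nactp).
 */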
1386 
1387 /*
1388  * Convert a new-style "prot" back to an ipsec_req_t (more backwards compat).
1389  * We assume caller has already zero'ed *req for us.
1390  */
1391 static int
1392 ipsec_req_from_prot(ipsec_prot_t *ipp, ipsec_req_t *req)
1393 {
1394 	req->ipsr_esp_alg = ipp->ipp_encr_alg;
1395 	req->ipsr_auth_alg = ipp->ipp_auth_alg;
1396 	req->ipsr_esp_auth_alg = ipp->ipp_esp_auth_alg;
1397 
1398 	if (ipp->ipp_use_unique) {
1399 		req->ipsr_ah_req |= IPSEC_PREF_UNIQUE;
1400 		req->ipsr_esp_req |= IPSEC_PREF_UNIQUE;
1401 	}
1402 	if (ipp->ipp_use_se)
1403 		req->ipsr_self_encap_req |= IPSEC_PREF_REQUIRED;
1404 	if (ipp->ipp_use_ah)
1405 		req->ipsr_ah_req |= IPSEC_PREF_REQUIRED;
1406 	if (ipp->ipp_use_esp)
1407 		req->ipsr_esp_req |= IPSEC_PREF_REQUIRED;
1408 	return (sizeof (*req));
1409 }
1410 
1411 /*
1412  * Convert a new-style action back to an ipsec_req_t (more backwards compat).
1413  * We assume caller has already zero'ed *req for us.
1414  */
1415 static int
1416 ipsec_req_from_act(ipsec_action_t *ap, ipsec_req_t *req)
1417 {
1418 	switch (ap->ipa_act.ipa_type) {
1419 	case IPSEC_ACT_BYPASS:
1420 		req->ipsr_ah_req = IPSEC_PREF_NEVER;
1421 		req->ipsr_esp_req = IPSEC_PREF_NEVER;
1422 		return (sizeof (*req));
1423 	case IPSEC_ACT_APPLY:
1424 		return (ipsec_req_from_prot(&ap->ipa_act.ipa_apply, req));
1425 	}
1426 	return (sizeof (*req));
1427 }
1428 
1429 /*
1430  * Convert a new-style action back to an ipsec_req_t (more backwards compat).
1431  * We assume caller has already zero'ed *req for us.
1432  */
1433 int
1434 ipsec_req_from_head(ipsec_policy_head_t *ph, ipsec_req_t *req, int af)
1435 {
1436 	ipsec_policy_t *p;
1437 
1438 	/*
1439 	 * FULL-PERSOCK: consult hash table, too?
1440 	 */
1441 	for (p = ph->iph_root[IPSEC_INBOUND].ipr_nonhash[af];
1442 	    p != NULL;
1443 	    p = p->ipsp_hash.hash_next) {
1444 		if ((p->ipsp_sel->ipsl_key.ipsl_valid & IPSL_WILDCARD) == 0)
1445 			return (ipsec_req_from_act(p->ipsp_act, req));
1446 	}
1447 	return (sizeof (*req));
1448 }
1449 
1450 /*
1451  * Based on per-socket or latched policy, convert to an appropriate
1452  * IP_SEC_OPT ipsec_req_t for the socket option; return size so we can
1453  * be tail-called from ip.
1454  */
1455 int
1456 ipsec_req_from_conn(conn_t *connp, ipsec_req_t *req, int af)
1457 {
1458 	ipsec_latch_t *ipl;
1459 	int rv = sizeof (ipsec_req_t);
1460 
1461 	bzero(req, sizeof (*req));
1462 
1463 	ASSERT(MUTEX_HELD(&connp->conn_lock));
1464 	ipl = connp->conn_latch;
1465 
1466 	/*
1467 	 * Find appropriate policy.  First choice is latched action;
1468 	 * failing that, see latched policy; failing that,
1469 	 * look at configured policy.
1470 	 */
1471 	if (ipl != NULL) {
1472 		if (connp->conn_latch_in_action != NULL) {
1473 			rv = ipsec_req_from_act(connp->conn_latch_in_action,
1474 			    req);
1475 			goto done;
1476 		}
1477 		if (connp->conn_latch_in_policy != NULL) {
1478 			rv = ipsec_req_from_act(
1479 			    connp->conn_latch_in_policy->ipsp_act, req);
1480 			goto done;
1481 		}
1482 	}
1483 	if (connp->conn_policy != NULL)
1484 		rv = ipsec_req_from_head(connp->conn_policy, req, af);
1485 done:
1486 	return (rv);
1487 }
1488 
1489 void
1490 ipsec_actvec_free(ipsec_act_t *act, uint_t nact)
1491 {
1492 	kmem_free(act, nact * sizeof (*act));
1493 }
1494 
1495 /*
1496  * Consumes a reference to ipsp.
1497  */
1498 static mblk_t *
1499 ipsec_check_loopback_policy(mblk_t *data_mp, ip_recv_attr_t *ira,
1500     ipsec_policy_t *ipsp)
1501 {
1502 	if (!(ira->ira_flags & IRAF_IPSEC_SECURE))
1503 		return (data_mp);
1504 
1505 	ASSERT(ira->ira_flags & IRAF_LOOPBACK);
1506 
1507 	IPPOL_REFRELE(ipsp);
1508 
1509 	/*
1510 	 * We should do an actual policy check here.  Revisit this
1511 	 * when we revisit the IPsec API.  (And pass a conn_t in when we
1512 	 * get there.)
1513 	 */
1514 
1515 	return (data_mp);
1516 }
1517 
1518 /*
1519  * Check that packet's inbound ports & proto match the selectors
1520  * expected by the SAs it traversed on the way in.
1521  */
1522 static boolean_t
1523 ipsec_check_ipsecin_unique(ip_recv_attr_t *ira, const char **reason,
1524     kstat_named_t **counter, uint64_t pkt_unique, netstack_t *ns)
1525 {
1526 	uint64_t ah_mask, esp_mask;
1527 	ipsa_t *ah_assoc;
1528 	ipsa_t *esp_assoc;
1529 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
1530 
1531 	ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1532 	ASSERT(!(ira->ira_flags & IRAF_LOOPBACK));
1533 
1534 	ah_assoc = ira->ira_ipsec_ah_sa;
1535 	esp_assoc = ira->ira_ipsec_esp_sa;
1536 	ASSERT((ah_assoc != NULL) || (esp_assoc != NULL));
1537 
1538 	ah_mask = (ah_assoc != NULL) ? ah_assoc->ipsa_unique_mask : 0;
1539 	esp_mask = (esp_assoc != NULL) ? esp_assoc->ipsa_unique_mask : 0;
1540 
1541 	if ((ah_mask == 0) && (esp_mask == 0))
1542 		return (B_TRUE);
1543 
1544 	/*
1545 	 * The pkt_unique check will also check for tunnel mode on the SA
1546 	 * vs. the tunneled_packet boolean.  "Be liberal in what you receive"
1547 	 * should not apply in this case.  ;)
1548 	 */
1549 
1550 	if (ah_mask != 0 &&
1551 	    ah_assoc->ipsa_unique_id != (pkt_unique & ah_mask)) {
1552 		*reason = "AH inner header mismatch";
1553 		*counter = DROPPER(ipss, ipds_spd_ah_innermismatch);
1554 		return (B_FALSE);
1555 	}
1556 	if (esp_mask != 0 &&
1557 	    esp_assoc->ipsa_unique_id != (pkt_unique & esp_mask)) {
1558 		*reason = "ESP inner header mismatch";
1559 		*counter = DROPPER(ipss, ipds_spd_esp_innermismatch);
1560 		return (B_FALSE);
1561 	}
1562 	return (B_TRUE);
1563 }
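
/*
 * For example, an ESP SA that was negotiated on behalf of a unique
 * policy covering a single TCP port carries a nonzero ipsa_unique_mask
 * for the port and protocol fields; a packet whose pkt_unique value
 * (as packed by SA_UNIQUE_ID()) disagrees under that mask is rejected
 * above with "ESP inner header mismatch".
 */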
1564 
1565 static boolean_t
1566 ipsec_check_ipsecin_action(ip_recv_attr_t *ira, mblk_t *mp, ipsec_action_t *ap,
1567     ipha_t *ipha, ip6_t *ip6h, const char **reason, kstat_named_t **counter,
1568     netstack_t *ns)
1569 {
1570 	boolean_t ret = B_TRUE;
1571 	ipsec_prot_t *ipp;
1572 	ipsa_t *ah_assoc;
1573 	ipsa_t *esp_assoc;
1574 	boolean_t decaps;
1575 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
1576 
1577 	ASSERT((ipha == NULL && ip6h != NULL) ||
1578 	    (ip6h == NULL && ipha != NULL));
1579 
1580 	if (ira->ira_flags & IRAF_LOOPBACK) {
1581 		/*
1582 		 * Besides accepting pointer-equivalent actions, we also
1583 		 * accept any ICMP errors we generated for ourselves,
1584 		 * regardless of policy.  If we do not wish to make this
1585 		 * assumption in the future, check here, and where
1586 		 * IXAF_TRUSTED_ICMP is initialized in ip.c and ip6.c.
1587 		 */
1588 		if (ap == ira->ira_ipsec_action ||
1589 		    (ira->ira_flags & IRAF_TRUSTED_ICMP))
1590 			return (B_TRUE);
1591 
1592 		/* Deep compare necessary here?? */
1593 		*counter = DROPPER(ipss, ipds_spd_loopback_mismatch);
1594 		*reason = "loopback policy mismatch";
1595 		return (B_FALSE);
1596 	}
1597 	ASSERT(!(ira->ira_flags & IRAF_TRUSTED_ICMP));
1598 	ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1599 
1600 	ah_assoc = ira->ira_ipsec_ah_sa;
1601 	esp_assoc = ira->ira_ipsec_esp_sa;
1602 
1603 	decaps = (ira->ira_flags & IRAF_IPSEC_DECAPS);
1604 
1605 	switch (ap->ipa_act.ipa_type) {
1606 	case IPSEC_ACT_DISCARD:
1607 	case IPSEC_ACT_REJECT:
1608 		/* Should "fail hard" */
1609 		*counter = DROPPER(ipss, ipds_spd_explicit);
1610 		*reason = "blocked by policy";
1611 		return (B_FALSE);
1612 
1613 	case IPSEC_ACT_BYPASS:
1614 	case IPSEC_ACT_CLEAR:
1615 		*counter = DROPPER(ipss, ipds_spd_got_secure);
1616 		*reason = "expected clear, got protected";
1617 		return (B_FALSE);
1618 
1619 	case IPSEC_ACT_APPLY:
1620 		ipp = &ap->ipa_act.ipa_apply;
1621 		/*
1622 		 * For now we only do simple checks of whether the
1623 		 * datagram satisfies the required IPsec protocol
1624 		 * constraints.  We might add more in the future, such
1625 		 * as sensitivity levels, key bits, etc.  If it fails
1626 		 * the constraints, check whether we would have
1627 		 * accepted this packet had it come in clear.
1628 		 */
1629 		if (ipp->ipp_use_ah) {
1630 			if (ah_assoc == NULL) {
1631 				ret = ipsec_inbound_accept_clear(mp, ipha,
1632 				    ip6h);
1633 				*counter = DROPPER(ipss, ipds_spd_got_clear);
1634 				*reason = "unprotected not accepted";
1635 				break;
1636 			}
1637 			ASSERT(ah_assoc != NULL);
1638 			ASSERT(ipp->ipp_auth_alg != 0);
1639 
1640 			if (ah_assoc->ipsa_auth_alg !=
1641 			    ipp->ipp_auth_alg) {
1642 				*counter = DROPPER(ipss, ipds_spd_bad_ahalg);
1643 				*reason = "unacceptable ah alg";
1644 				ret = B_FALSE;
1645 				break;
1646 			}
1647 		} else if (ah_assoc != NULL) {
1648 			/*
1649 			 * Don't allow this. Check IPSEC NOTE above
1650 			 * ip_fanout_proto().
1651 			 */
1652 			*counter = DROPPER(ipss, ipds_spd_got_ah);
1653 			*reason = "unexpected AH";
1654 			ret = B_FALSE;
1655 			break;
1656 		}
1657 		if (ipp->ipp_use_esp) {
1658 			if (esp_assoc == NULL) {
1659 				ret = ipsec_inbound_accept_clear(mp, ipha,
1660 				    ip6h);
1661 				*counter = DROPPER(ipss, ipds_spd_got_clear);
1662 				*reason = "unprotected not accepted";
1663 				break;
1664 			}
1665 			ASSERT(esp_assoc != NULL);
1666 			ASSERT(ipp->ipp_encr_alg != 0);
1667 
1668 			if (esp_assoc->ipsa_encr_alg !=
1669 			    ipp->ipp_encr_alg) {
1670 				*counter = DROPPER(ipss, ipds_spd_bad_espealg);
1671 				*reason = "unacceptable esp alg";
1672 				ret = B_FALSE;
1673 				break;
1674 			}
1675 			/*
1676 			 * If the client does not need authentication,
1677 			 * we don't verify the alogrithm.
1678 			 */
1679 			if (ipp->ipp_use_espa) {
1680 				if (esp_assoc->ipsa_auth_alg !=
1681 				    ipp->ipp_esp_auth_alg) {
1682 					*counter = DROPPER(ipss,
1683 					    ipds_spd_bad_espaalg);
1684 					*reason = "unacceptable esp auth alg";
1685 					ret = B_FALSE;
1686 					break;
1687 				}
1688 			}
1689 		} else if (esp_assoc != NULL) {
1690 			/*
1691 			 * Don't allow this. Check IPSEC NOTE above
1692 			 * ip_fanout_proto().
1693 			 */
1694 			*counter = DROPPER(ipss, ipds_spd_got_esp);
1695 			*reason = "unexpected ESP";
1696 			ret = B_FALSE;
1697 			break;
1698 		}
1699 		if (ipp->ipp_use_se) {
1700 			if (!decaps) {
1701 				ret = ipsec_inbound_accept_clear(mp, ipha,
1702 				    ip6h);
1703 				if (!ret) {
1704 					/* XXX mutant? */
1705 					*counter = DROPPER(ipss,
1706 					    ipds_spd_bad_selfencap);
1707 					*reason = "self encap not found";
1708 					break;
1709 				}
1710 			}
1711 		} else if (decaps) {
1712 			/*
1713 			 * XXX If the packet comes in tunneled and the
1714 			 * recipient does not expect it to be tunneled, it
1715 			 * is okay. But we drop to be consistent with the
1716 			 * other cases.
1717 			 */
1718 			*counter = DROPPER(ipss, ipds_spd_got_selfencap);
1719 			*reason = "unexpected self encap";
1720 			ret = B_FALSE;
1721 			break;
1722 		}
1723 		if (ira->ira_ipsec_action != NULL) {
1724 			/*
1725 			 * This can happen if we do a double policy-check on
1726 			 * a packet
1727 			 * XXX XXX should fix this case!
1728 			 */
1729 			IPACT_REFRELE(ira->ira_ipsec_action);
1730 		}
1731 		ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1732 		ASSERT(ira->ira_ipsec_action == NULL);
1733 		IPACT_REFHOLD(ap);
1734 		ira->ira_ipsec_action = ap;
1735 		break;	/* from switch */
1736 	}
1737 	return (ret);
1738 }
1739 
1740 static boolean_t
1741 spd_match_inbound_ids(ipsec_latch_t *ipl, ipsa_t *sa)
1742 {
1743 	ASSERT(ipl->ipl_ids_latched == B_TRUE);
1744 	return ipsid_equal(ipl->ipl_remote_cid, sa->ipsa_src_cid) &&
1745 	    ipsid_equal(ipl->ipl_local_cid, sa->ipsa_dst_cid);
1746 }
1747 
1748 /*
1749  * Takes a latched conn and an inbound packet and returns a unique_id suitable
1750  * for SA comparisons.  Most of the time we will copy from the conn_t, but
1751  * there are cases when the conn_t is latched but it has wildcard selectors,
1752  * and then we need to fallback to scooping them out of the packet.
1753  *
1754  * Assume we'll never have 0 with a conn_t present, so use 0 as a failure.  We
1755  * can get away with this because we only have non-zero ports/proto for
1756  * latched conn_ts.
1757  *
1758  * Ideal candidate for an "inline" keyword, as we're JUST convoluted enough
1759  * to not be a nice macro.
1760  */
1761 static uint64_t
1762 conn_to_unique(conn_t *connp, mblk_t *data_mp, ipha_t *ipha, ip6_t *ip6h)
1763 {
1764 	ipsec_selector_t sel;
1765 	uint8_t ulp = connp->conn_proto;
1766 
1767 	ASSERT(connp->conn_latch_in_policy != NULL);
1768 
1769 	if ((ulp == IPPROTO_TCP || ulp == IPPROTO_UDP || ulp == IPPROTO_SCTP) &&
1770 	    (connp->conn_fport == 0 || connp->conn_lport == 0)) {
1771 		/* Slow path - we gotta grab from the packet. */
1772 		if (ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h,
1773 		    SEL_NONE) != SELRET_SUCCESS) {
1774 			/* Failure -> have caller free packet with ENOMEM. */
1775 			return (0);
1776 		}
1777 		return (SA_UNIQUE_ID(sel.ips_remote_port, sel.ips_local_port,
1778 		    sel.ips_protocol, 0));
1779 	}
1780 
1781 #ifdef DEBUG_NOT_UNTIL_6478464
1782 	if (ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h, SEL_NONE) ==
1783 	    SELRET_SUCCESS) {
1784 		ASSERT(sel.ips_local_port == connp->conn_lport);
1785 		ASSERT(sel.ips_remote_port == connp->conn_fport);
1786 		ASSERT(sel.ips_protocol == connp->conn_proto);
1787 	}
1788 	ASSERT(connp->conn_proto != 0);
1789 #endif
1790 
1791 	return (SA_UNIQUE_ID(connp->conn_fport, connp->conn_lport, ulp, 0));
1792 }
1793 
1794 /*
1795  * Called to check policy on a latched connection.
1796  * Note that we don't dereference conn_latch or conn_ihere since the conn might
1797  * be closing. The caller passes a held ipsec_latch_t instead.
1798  */
1799 static boolean_t
1800 ipsec_check_ipsecin_latch(ip_recv_attr_t *ira, mblk_t *mp, ipsec_latch_t *ipl,
1801     ipsec_action_t *ap, ipha_t *ipha, ip6_t *ip6h, const char **reason,
1802     kstat_named_t **counter, conn_t *connp, netstack_t *ns)
1803 {
1804 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
1805 
1806 	ASSERT(ipl->ipl_ids_latched == B_TRUE);
1807 	ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1808 
1809 	if (!(ira->ira_flags & IRAF_LOOPBACK)) {
1810 		/*
1811 		 * Over loopback, there aren't real security associations,
1812 		 * so there are neither identities nor "unique" values
1813 		 * for us to check the packet against.
1814 		 */
1815 		if (ira->ira_ipsec_ah_sa != NULL) {
1816 			if (!spd_match_inbound_ids(ipl,
1817 			    ira->ira_ipsec_ah_sa)) {
1818 				*counter = DROPPER(ipss, ipds_spd_ah_badid);
1819 				*reason = "AH identity mismatch";
1820 				return (B_FALSE);
1821 			}
1822 		}
1823 
1824 		if (ira->ira_ipsec_esp_sa != NULL) {
1825 			if (!spd_match_inbound_ids(ipl,
1826 			    ira->ira_ipsec_esp_sa)) {
1827 				*counter = DROPPER(ipss, ipds_spd_esp_badid);
1828 				*reason = "ESP identity mismatch";
1829 				return (B_FALSE);
1830 			}
1831 		}
1832 
1833 		/*
1834 		 * Can fudge pkt_unique from connp because we're latched.
1835 		 * In DEBUG kernels (see conn_to_unique()'s implementation),
1836 		 * verify this even if it REALLY slows things down.
1837 		 */
1838 		if (!ipsec_check_ipsecin_unique(ira, reason, counter,
1839 		    conn_to_unique(connp, mp, ipha, ip6h), ns)) {
1840 			return (B_FALSE);
1841 		}
1842 	}
1843 	return (ipsec_check_ipsecin_action(ira, mp, ap, ipha, ip6h, reason,
1844 	    counter, ns));
1845 }
1846 
1847 /*
1848  * Check to see whether this secured datagram meets the policy
1849  * constraints specified in ipsp.
1850  *
1851  * Called from ipsec_check_global_policy, and ipsec_check_inbound_policy.
1852  *
1853  * Consumes a reference to ipsp.
1854  * Returns the mblk if ok.
1855  */
1856 static mblk_t *
1857 ipsec_check_ipsecin_policy(mblk_t *data_mp, ipsec_policy_t *ipsp,
1858     ipha_t *ipha, ip6_t *ip6h, uint64_t pkt_unique, ip_recv_attr_t *ira,
1859     netstack_t *ns)
1860 {
1861 	ipsec_action_t *ap;
1862 	const char *reason = "no policy actions found";
1863 	ip_stack_t	*ipst = ns->netstack_ip;
1864 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
1865 	kstat_named_t *counter;
1866 
1867 	counter = DROPPER(ipss, ipds_spd_got_secure);
1868 
1869 	ASSERT(ipsp != NULL);
1870 
1871 	ASSERT((ipha == NULL && ip6h != NULL) ||
1872 	    (ip6h == NULL && ipha != NULL));
1873 
1874 	if (ira->ira_flags & IRAF_LOOPBACK)
1875 		return (ipsec_check_loopback_policy(data_mp, ira, ipsp));
1876 
1877 	ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
1878 
1879 	if (ira->ira_ipsec_action != NULL) {
1880 		/*
1881 		 * This can happen if we do a double policy-check on a packet.
1882 		 * It would be nice to be able to delete this test.
1883 		 */
1884 		IPACT_REFRELE(ira->ira_ipsec_action);
1885 	}
1886 	ASSERT(ira->ira_ipsec_action == NULL);
1887 
1888 	if (!SA_IDS_MATCH(ira->ira_ipsec_ah_sa, ira->ira_ipsec_esp_sa)) {
1889 		reason = "inbound AH and ESP identities differ";
1890 		counter = DROPPER(ipss, ipds_spd_ahesp_diffid);
1891 		goto drop;
1892 	}
1893 
1894 	if (!ipsec_check_ipsecin_unique(ira, &reason, &counter, pkt_unique,
1895 	    ns))
1896 		goto drop;
1897 
1898 	/*
1899 	 * Ok, now loop through the possible actions and see if any
1900 	 * of them work for us.
1901 	 */
1902 
1903 	for (ap = ipsp->ipsp_act; ap != NULL; ap = ap->ipa_next) {
1904 		if (ipsec_check_ipsecin_action(ira, data_mp, ap,
1905 		    ipha, ip6h, &reason, &counter, ns)) {
1906 			BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
1907 			IPPOL_REFRELE(ipsp);
1908 			return (data_mp);
1909 		}
1910 	}
1911 drop:
1912 	ipsec_rl_strlog(ns, IP_MOD_ID, 0, 0, SL_ERROR|SL_WARN|SL_CONSOLE,
1913 	    "ipsec inbound policy mismatch: %s, packet dropped\n",
1914 	    reason);
1915 	IPPOL_REFRELE(ipsp);
1916 	ASSERT(ira->ira_ipsec_action == NULL);
1917 	BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
1918 	ip_drop_packet(data_mp, B_TRUE, NULL, counter,
1919 	    &ipss->ipsec_spd_dropper);
1920 	return (NULL);
1921 }
1922 
1923 /*
1924  * Sleazy prefix-length-based compare.
1925  * Another inlining candidate..
1926  */
1927 boolean_t
1928 ip_addr_match(uint8_t *addr1, int pfxlen, in6_addr_t *addr2p)
1929 {
1930 	int offset = pfxlen>>3;
1931 	int bitsleft = pfxlen & 7;
1932 	uint8_t *addr2 = (uint8_t *)addr2p;
1933 
1934 	/*
1935 	 * and there was much evil..
1936 	 * XXX should inline-expand the bcmp here and do this 32 bits
1937 	 * or 64 bits at a time..
1938 	 */
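	/*
	 * Example: pfxlen == 20 gives offset == 2 and bitsleft == 4, so we
	 * bcmp() the first two bytes and then compare only the top four bits
	 * of the third byte (mask 0xff << 4 == 0xf0).
	 */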
1939 	return ((bcmp(addr1, addr2, offset) == 0) &&
1940 	    ((bitsleft == 0) ||
1941 	    (((addr1[offset] ^ addr2[offset]) & (0xff<<(8-bitsleft))) == 0)));
1942 }
1943 
1944 static ipsec_policy_t *
1945 ipsec_find_policy_chain(ipsec_policy_t *best, ipsec_policy_t *chain,
1946     ipsec_selector_t *sel, boolean_t is_icmp_inv_acq)
1947 {
1948 	ipsec_selkey_t *isel;
1949 	ipsec_policy_t *p;
1950 	int bpri = best ? best->ipsp_prio : 0;
1951 
1952 	for (p = chain; p != NULL; p = p->ipsp_hash.hash_next) {
1953 		uint32_t valid;
1954 
1955 		if (p->ipsp_prio <= bpri)
1956 			continue;
1957 		isel = &p->ipsp_sel->ipsl_key;
1958 		valid = isel->ipsl_valid;
1959 
1960 		if ((valid & IPSL_PROTOCOL) &&
1961 		    (isel->ipsl_proto != sel->ips_protocol))
1962 			continue;
1963 
1964 		if ((valid & IPSL_REMOTE_ADDR) &&
1965 		    !ip_addr_match((uint8_t *)&isel->ipsl_remote,
1966 		    isel->ipsl_remote_pfxlen, &sel->ips_remote_addr_v6))
1967 			continue;
1968 
1969 		if ((valid & IPSL_LOCAL_ADDR) &&
1970 		    !ip_addr_match((uint8_t *)&isel->ipsl_local,
1971 		    isel->ipsl_local_pfxlen, &sel->ips_local_addr_v6))
1972 			continue;
1973 
1974 		if ((valid & IPSL_REMOTE_PORT) &&
1975 		    isel->ipsl_rport != sel->ips_remote_port)
1976 			continue;
1977 
1978 		if ((valid & IPSL_LOCAL_PORT) &&
1979 		    isel->ipsl_lport != sel->ips_local_port)
1980 			continue;
1981 
1982 		if (!is_icmp_inv_acq) {
1983 			if ((valid & IPSL_ICMP_TYPE) &&
1984 			    (isel->ipsl_icmp_type > sel->ips_icmp_type ||
1985 			    isel->ipsl_icmp_type_end < sel->ips_icmp_type)) {
1986 				continue;
1987 			}
1988 
1989 			if ((valid & IPSL_ICMP_CODE) &&
1990 			    (isel->ipsl_icmp_code > sel->ips_icmp_code ||
1991 			    isel->ipsl_icmp_code_end <
1992 			    sel->ips_icmp_code)) {
1993 				continue;
1994 			}
1995 		} else {
1996 			/*
1997 			 * Special case for ICMP inverse acquire:
1998 			 * we only want policies that aren't drop/pass.
1999 			 */
2000 			if (p->ipsp_act->ipa_act.ipa_type != IPSEC_ACT_APPLY)
2001 				continue;
2002 		}
2003 
2004 		/* we matched all the packet-port-field selectors! */
2005 		best = p;
2006 		bpri = p->ipsp_prio;
2007 	}
2008 
2009 	return (best);
2010 }
2011 
2012 /*
2013  * Try to find and return the best policy entry under a given policy
2014  * root for a given set of selectors; the first parameter "best" is
2015  * the current best policy so far.  If "best" is non-null, we have a
2016  * reference to it.  We return a reference to a policy; if that policy
2017  * is not the original "best", we need to release that reference
2018  * before returning.
2019  */
2020 ipsec_policy_t *
2021 ipsec_find_policy_head(ipsec_policy_t *best, ipsec_policy_head_t *head,
2022     int direction, ipsec_selector_t *sel)
2023 {
2024 	ipsec_policy_t *curbest;
2025 	ipsec_policy_root_t *root;
2026 	uint8_t is_icmp_inv_acq = sel->ips_is_icmp_inv_acq;
2027 	int af = sel->ips_isv4 ? IPSEC_AF_V4 : IPSEC_AF_V6;
2028 
2029 	curbest = best;
2030 	root = &head->iph_root[direction];
2031 
2032 #ifdef DEBUG
2033 	if (is_icmp_inv_acq) {
2034 		if (sel->ips_isv4) {
2035 			if (sel->ips_protocol != IPPROTO_ICMP) {
2036 				cmn_err(CE_WARN, "ipsec_find_policy_head:"
2037 				    " expecting icmp, got %d",
2038 				    sel->ips_protocol);
2039 			}
2040 		} else {
2041 			if (sel->ips_protocol != IPPROTO_ICMPV6) {
2042 				cmn_err(CE_WARN, "ipsec_find_policy_head:"
2043 				    " expecting icmpv6, got %d",
2044 				    sel->ips_protocol);
2045 			}
2046 		}
2047 	}
2048 #endif
2049 
2050 	rw_enter(&head->iph_lock, RW_READER);
2051 
2052 	if (root->ipr_nchains > 0) {
2053 		curbest = ipsec_find_policy_chain(curbest,
2054 		    root->ipr_hash[selector_hash(sel, root)].hash_head, sel,
2055 		    is_icmp_inv_acq);
2056 	}
2057 	curbest = ipsec_find_policy_chain(curbest, root->ipr_nonhash[af], sel,
2058 	    is_icmp_inv_acq);
2059 
2060 	/*
2061 	 * Adjust reference counts if we found anything new.
2062 	 */
2063 	if (curbest != best) {
2064 		ASSERT(curbest != NULL);
2065 		IPPOL_REFHOLD(curbest);
2066 
2067 		if (best != NULL) {
2068 			IPPOL_REFRELE(best);
2069 		}
2070 	}
2071 
2072 	rw_exit(&head->iph_lock);
2073 
2074 	return (curbest);
2075 }
2076 
2077 /*
2078  * Find the best system policy (either global or per-interface) which
2079  * applies to the given selector; look in all the relevant policy roots
2080  * to figure out which policy wins.
2081  *
2082  * Returns a reference to a policy; caller must release this
2083  * reference when done.
2084  */
2085 ipsec_policy_t *
2086 ipsec_find_policy(int direction, const conn_t *connp, ipsec_selector_t *sel,
2087     netstack_t *ns)
2088 {
2089 	ipsec_policy_t *p;
2090 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
2091 
2092 	p = ipsec_find_policy_head(NULL, &ipss->ipsec_system_policy,
2093 	    direction, sel);
2094 	if ((connp != NULL) && (connp->conn_policy != NULL)) {
2095 		p = ipsec_find_policy_head(p, connp->conn_policy,
2096 		    direction, sel);
2097 	}
2098 
2099 	return (p);
2100 }
2101 
2102 /*
2103  * Check with global policy and see whether this inbound
2104  * packet meets the policy constraints.
2105  *
2106  * Locate appropriate policy from global policy, supplemented by the
2107  * conn's configured and/or cached policy if the conn is supplied.
2108  *
2109  * Dispatch to ipsec_check_ipsecin_policy if we have policy and an
2110  * encrypted packet to see if they match.
2111  *
2112  * Otherwise, see if the policy allows cleartext; if not, drop it on the
2113  * floor.
2114  */
2115 mblk_t *
2116 ipsec_check_global_policy(mblk_t *data_mp, conn_t *connp,
2117     ipha_t *ipha, ip6_t *ip6h, ip_recv_attr_t *ira, netstack_t *ns)
2118 {
2119 	ipsec_policy_t *p;
2120 	ipsec_selector_t sel;
2121 	boolean_t policy_present;
2122 	kstat_named_t *counter;
2123 	uint64_t pkt_unique;
2124 	ip_stack_t	*ipst = ns->netstack_ip;
2125 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
2126 
2127 	sel.ips_is_icmp_inv_acq = 0;
2128 
2129 	ASSERT((ipha == NULL && ip6h != NULL) ||
2130 	    (ip6h == NULL && ipha != NULL));
2131 
2132 	if (ipha != NULL)
2133 		policy_present = ipss->ipsec_inbound_v4_policy_present;
2134 	else
2135 		policy_present = ipss->ipsec_inbound_v6_policy_present;
2136 
2137 	if (!policy_present && connp == NULL) {
2138 		/*
2139 		 * No global policy and no per-socket policy;
2140 		 * just pass it back (but we shouldn't get here in that case)
2141 		 */
2142 		return (data_mp);
2143 	}
2144 
2145 	/*
2146 	 * If we have cached policy, use it.
2147 	 * Otherwise consult system policy.
2148 	 */
2149 	if ((connp != NULL) && (connp->conn_latch != NULL)) {
2150 		p = connp->conn_latch_in_policy;
2151 		if (p != NULL) {
2152 			IPPOL_REFHOLD(p);
2153 		}
2154 		/*
2155 		 * Fudge sel for UNIQUE_ID setting below.
2156 		 */
2157 		pkt_unique = conn_to_unique(connp, data_mp, ipha, ip6h);
2158 	} else {
2159 		/* Initialize the ports in the selector */
2160 		if (ipsec_init_inbound_sel(&sel, data_mp, ipha, ip6h,
2161 		    SEL_NONE) == SELRET_NOMEM) {
2162 			/*
2163 			 * Technically not a policy mismatch, but it is
2164 			 * an internal failure.
2165 			 */
2166 			ipsec_log_policy_failure(IPSEC_POLICY_MISMATCH,
2167 			    "ipsec_init_inbound_sel", ipha, ip6h, B_TRUE, ns);
2168 			counter = DROPPER(ipss, ipds_spd_nomem);
2169 			goto fail;
2170 		}
2171 
2172 		/*
2173 		 * Find the policy which best applies.
2174 		 *
2175 		 * If we find global policy, we should look at both
2176 		 * local policy and global policy and see which is
2177 		 * stronger and match accordingly.
2178 		 *
2179 		 * If we don't find a global policy, check with
2180 		 * local policy alone.
2181 		 */
2182 
2183 		p = ipsec_find_policy(IPSEC_TYPE_INBOUND, connp, &sel, ns);
2184 		pkt_unique = SA_UNIQUE_ID(sel.ips_remote_port,
2185 		    sel.ips_local_port, sel.ips_protocol, 0);
2186 	}
2187 
2188 	if (p == NULL) {
2189 		if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
2190 			/*
2191 			 * We have no policy; default to succeeding.
2192 			 * XXX paranoid system design doesn't do this.
2193 			 */
2194 			BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2195 			return (data_mp);
2196 		} else {
2197 			counter = DROPPER(ipss, ipds_spd_got_secure);
2198 			ipsec_log_policy_failure(IPSEC_POLICY_NOT_NEEDED,
2199 			    "ipsec_check_global_policy", ipha, ip6h, B_TRUE,
2200 			    ns);
2201 			goto fail;
2202 		}
2203 	}
2204 	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2205 		return (ipsec_check_ipsecin_policy(data_mp, p, ipha, ip6h,
2206 		    pkt_unique, ira, ns));
2207 	}
2208 	if (p->ipsp_act->ipa_allow_clear) {
2209 		BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2210 		IPPOL_REFRELE(p);
2211 		return (data_mp);
2212 	}
2213 	IPPOL_REFRELE(p);
2214 	/*
2215 	 * If we reach here, we will drop the packet because it failed the
2216 	 * global policy check: the packet was cleartext, and it
2217 	 * should not have been.
2218 	 */
2219 	ipsec_log_policy_failure(IPSEC_POLICY_MISMATCH,
2220 	    "ipsec_check_global_policy", ipha, ip6h, B_FALSE, ns);
2221 	counter = DROPPER(ipss, ipds_spd_got_clear);
2222 
2223 fail:
2224 	ip_drop_packet(data_mp, B_TRUE, NULL, counter,
2225 	    &ipss->ipsec_spd_dropper);
2226 	BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2227 	return (NULL);
2228 }
2229 
2230 /*
2231  * We check whether an inbound datagram is one we are willing
2232  * to accept in the clear. If it is secure, it is the job
2233  * of IPsec to log information appropriately if it
2234  * suspects that it may not be the real one.
2235  *
2236  * It is called only while fanning out to a ULP that accepts
2237  * only secure data, when the incoming datagram is clear.
2238  * Usually we never accept clear datagrams in such cases;
2239  * ICMP is the only exception.
2240  *
2241  * NOTE: We don't call this function if the client (ULP)
2242  * is willing to accept things in the clear.
2243  */
2244 boolean_t
2245 ipsec_inbound_accept_clear(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h)
2246 {
2247 	ushort_t iph_hdr_length;
2248 	icmph_t *icmph;
2249 	icmp6_t *icmp6;
2250 	uint8_t *nexthdrp;
2251 
2252 	ASSERT((ipha != NULL && ip6h == NULL) ||
2253 	    (ipha == NULL && ip6h != NULL));
2254 
2255 	if (ip6h != NULL) {
2256 		iph_hdr_length = ip_hdr_length_v6(mp, ip6h);
2257 		if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length,
2258 		    &nexthdrp)) {
2259 			return (B_FALSE);
2260 		}
2261 		if (*nexthdrp != IPPROTO_ICMPV6)
2262 			return (B_FALSE);
2263 		icmp6 = (icmp6_t *)(&mp->b_rptr[iph_hdr_length]);
2264 		/* Match IPv6 ICMP policy to IPv4 as closely as possible. */
2265 		switch (icmp6->icmp6_type) {
2266 		case ICMP6_PARAM_PROB:
2267 			/* Corresponds to port/proto unreach in IPv4. */
2268 		case ICMP6_ECHO_REQUEST:
2269 			/* Just like IPv4. */
2270 			return (B_FALSE);
2271 
2272 		case MLD_LISTENER_QUERY:
2273 		case MLD_LISTENER_REPORT:
2274 		case MLD_LISTENER_REDUCTION:
2275 			/*
2276 			 * XXX Separate NDD in IPv4; what about here?
2277 			 * Plus, mcast is important to ND.
2278 			 */
2279 		case ICMP6_DST_UNREACH:
2280 			/* Corresponds to HOST/NET unreachable in IPv4. */
2281 		case ICMP6_PACKET_TOO_BIG:
2282 		case ICMP6_ECHO_REPLY:
2283 			/* These are trusted in IPv4. */
2284 		case ND_ROUTER_SOLICIT:
2285 		case ND_ROUTER_ADVERT:
2286 		case ND_NEIGHBOR_SOLICIT:
2287 		case ND_NEIGHBOR_ADVERT:
2288 		case ND_REDIRECT:
2289 			/* Trust ND messages for now. */
2290 		case ICMP6_TIME_EXCEEDED:
2291 		default:
2292 			return (B_TRUE);
2293 		}
2294 	} else {
2295 		/*
2296 		 * If it is not ICMP, fail this request.
2297 		 */
2298 		if (ipha->ipha_protocol != IPPROTO_ICMP) {
2299 #ifdef FRAGCACHE_DEBUG
2300 			cmn_err(CE_WARN, "Dropping - ipha_proto = %d\n",
2301 			    ipha->ipha_protocol);
2302 #endif
2303 			return (B_FALSE);
2304 		}
2305 		iph_hdr_length = IPH_HDR_LENGTH(ipha);
2306 		icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];
2307 		/*
2308 		 * It is an insecure icmp message. Check to see whether we are
2309 		 * willing to accept this one.
2310 		 */
2311 
2312 		switch (icmph->icmph_type) {
2313 		case ICMP_ECHO_REPLY:
2314 		case ICMP_TIME_STAMP_REPLY:
2315 		case ICMP_INFO_REPLY:
2316 		case ICMP_ROUTER_ADVERTISEMENT:
2317 			/*
2318 			 * We should not encourage clear replies if this
2319 			 * client expects secure traffic. If somebody replies
2320 			 * in the clear, a malicious user watching both the
2321 			 * request and reply can mount chosen-plaintext attacks.
2322 			 * With global policy we might be just expecting secure
2323 			 * but sending out clear. We don't know what the right
2324 			 * thing is. We can't do much here as we can't control
2325 			 * the sender. Till we are sure of what to do,
2326 			 * accept them.
2327 			 */
2328 			return (B_TRUE);
2329 		case ICMP_ECHO_REQUEST:
2330 		case ICMP_TIME_STAMP_REQUEST:
2331 		case ICMP_INFO_REQUEST:
2332 		case ICMP_ADDRESS_MASK_REQUEST:
2333 		case ICMP_ROUTER_SOLICITATION:
2334 		case ICMP_ADDRESS_MASK_REPLY:
2335 			/*
2336 			 * Don't accept this, as somebody could be sending
2337 			 * us plaintext to get encrypted data. If we reply,
2338 			 * it will lead to a chosen-plaintext attack.
2339 			 */
2340 			return (B_FALSE);
2341 		case ICMP_DEST_UNREACHABLE:
2342 			switch (icmph->icmph_code) {
2343 			case ICMP_FRAGMENTATION_NEEDED:
2344 				/*
2345 				 * Be in sync with icmp_inbound, where we have
2346 				 * already set dce_pmtu
2347 				 */
2348 #ifdef FRAGCACHE_DEBUG
2349 			cmn_err(CE_WARN, "ICMP frag needed\n");
2350 #endif
2351 				return (B_TRUE);
2352 			case ICMP_HOST_UNREACHABLE:
2353 			case ICMP_NET_UNREACHABLE:
2354 				/*
2355 				 * By accepting, we could reset a connection.
2356 				 * How do we solve the problem of some
2357 				 * intermediate router sending insecure ICMP
2358 				 * messages?
2359 				 */
2360 				return (B_TRUE);
2361 			case ICMP_PORT_UNREACHABLE:
2362 			case ICMP_PROTOCOL_UNREACHABLE:
2363 			default:
2364 				return (B_FALSE);
2365 			}
2366 		case ICMP_SOURCE_QUENCH:
2367 			/*
2368 			 * If this is an attack, TCP will slow start
2369 			 * because of this. Is it very harmful?
2370 			 */
2371 			return (B_TRUE);
2372 		case ICMP_PARAM_PROBLEM:
2373 			return (B_FALSE);
2374 		case ICMP_TIME_EXCEEDED:
2375 			return (B_TRUE);
2376 		case ICMP_REDIRECT:
2377 			return (B_FALSE);
2378 		default:
2379 			return (B_FALSE);
2380 		}
2381 	}
2382 }
2383 
2384 void
2385 ipsec_latch_ids(ipsec_latch_t *ipl, ipsid_t *local, ipsid_t *remote)
2386 {
2387 	mutex_enter(&ipl->ipl_lock);
2388 
2389 	if (ipl->ipl_ids_latched) {
2390 		/* I lost, someone else got here before me */
2391 		mutex_exit(&ipl->ipl_lock);
2392 		return;
2393 	}
2394 
2395 	if (local != NULL)
2396 		IPSID_REFHOLD(local);
2397 	if (remote != NULL)
2398 		IPSID_REFHOLD(remote);
2399 
2400 	ipl->ipl_local_cid = local;
2401 	ipl->ipl_remote_cid = remote;
2402 	ipl->ipl_ids_latched = B_TRUE;
2403 	mutex_exit(&ipl->ipl_lock);
2404 }
2405 
2406 void
2407 ipsec_latch_inbound(conn_t *connp, ip_recv_attr_t *ira)
2408 {
2409 	ipsa_t *sa;
2410 	ipsec_latch_t *ipl = connp->conn_latch;
2411 
2412 	if (!ipl->ipl_ids_latched) {
2413 		ipsid_t *local = NULL;
2414 		ipsid_t *remote = NULL;
2415 
2416 		if (!(ira->ira_flags & IRAF_LOOPBACK)) {
2417 			ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
2418 			if (ira->ira_ipsec_esp_sa != NULL)
2419 				sa = ira->ira_ipsec_esp_sa;
2420 			else
2421 				sa = ira->ira_ipsec_ah_sa;
2422 			ASSERT(sa != NULL);
2423 			local = sa->ipsa_dst_cid;
2424 			remote = sa->ipsa_src_cid;
2425 		}
2426 		ipsec_latch_ids(ipl, local, remote);
2427 	}
2428 	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
2429 		if (connp->conn_latch_in_action != NULL) {
2430 			/*
2431 			 * Previously cached action.  This is probably
2432 			 * harmless, but in DEBUG kernels, check for
2433 			 * action equality.
2434 			 *
2435 			 * Preserve the existing action to preserve latch
2436 			 * invariance.
2437 			 */
2438 			ASSERT(connp->conn_latch_in_action ==
2439 			    ira->ira_ipsec_action);
2440 			return;
2441 		}
2442 		connp->conn_latch_in_action = ira->ira_ipsec_action;
2443 		IPACT_REFHOLD(connp->conn_latch_in_action);
2444 	}
2445 }
2446 
2447 /*
2448  * Check whether the policy constraints are met for an
2449  * inbound datagram; called from IP in numerous places.
2450  *
2451  * Note that this is not a chokepoint for inbound policy checks;
2452  * see also ipsec_check_ipsecin_latch() and ipsec_check_global_policy().
2453  */
2454 mblk_t *
2455 ipsec_check_inbound_policy(mblk_t *mp, conn_t *connp,
2456     ipha_t *ipha, ip6_t *ip6h, ip_recv_attr_t *ira)
2457 {
2458 	boolean_t	ret;
2459 	ipsec_latch_t	*ipl;
2460 	ipsec_action_t	*ap;
2461 	uint64_t	unique_id;
2462 	ipsec_stack_t	*ipss;
2463 	ip_stack_t	*ipst;
2464 	netstack_t	*ns;
2465 	ipsec_policy_head_t *policy_head;
2466 	ipsec_policy_t	*p = NULL;
2467 
2468 	ASSERT(connp != NULL);
2469 	ns = connp->conn_netstack;
2470 	ipss = ns->netstack_ipsec;
2471 	ipst = ns->netstack_ip;
2472 
2473 	if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
2474 		/*
2475 		 * This is the case where the incoming datagram is
2476 		 * cleartext and we need to see whether this client
2477 		 * would like to receive such untrustworthy things from
2478 		 * the wire.
2479 		 */
2480 		ASSERT(mp != NULL);
2481 
2482 		mutex_enter(&connp->conn_lock);
2483 		if (connp->conn_state_flags & CONN_CONDEMNED) {
2484 			mutex_exit(&connp->conn_lock);
2485 			ip_drop_packet(mp, B_TRUE, NULL,
2486 			    DROPPER(ipss, ipds_spd_got_clear),
2487 			    &ipss->ipsec_spd_dropper);
2488 			BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2489 			return (NULL);
2490 		}
2491 		if (connp->conn_latch != NULL) {
2492 			/* Hold a reference in case the conn is closing */
2493 			p = connp->conn_latch_in_policy;
2494 			if (p != NULL)
2495 				IPPOL_REFHOLD(p);
2496 			mutex_exit(&connp->conn_lock);
2497 			/*
2498 			 * Policy is cached in the conn.
2499 			 */
2500 			if (p != NULL && !p->ipsp_act->ipa_allow_clear) {
2501 				ret = ipsec_inbound_accept_clear(mp,
2502 				    ipha, ip6h);
2503 				if (ret) {
2504 					BUMP_MIB(&ipst->ips_ip_mib,
2505 					    ipsecInSucceeded);
2506 					IPPOL_REFRELE(p);
2507 					return (mp);
2508 				} else {
2509 					ipsec_log_policy_failure(
2510 					    IPSEC_POLICY_MISMATCH,
2511 					    "ipsec_check_inbound_policy", ipha,
2512 					    ip6h, B_FALSE, ns);
2513 					ip_drop_packet(mp, B_TRUE, NULL,
2514 					    DROPPER(ipss, ipds_spd_got_clear),
2515 					    &ipss->ipsec_spd_dropper);
2516 					BUMP_MIB(&ipst->ips_ip_mib,
2517 					    ipsecInFailed);
2518 					IPPOL_REFRELE(p);
2519 					return (NULL);
2520 				}
2521 			} else {
2522 				BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2523 				if (p != NULL)
2524 					IPPOL_REFRELE(p);
2525 				return (mp);
2526 			}
2527 		} else {
2528 			policy_head = connp->conn_policy;
2529 
2530 			/* Hold a reference in case the conn is closing */
2531 			if (policy_head != NULL)
2532 				IPPH_REFHOLD(policy_head);
2533 			mutex_exit(&connp->conn_lock);
2534 			/*
2535 			 * As this is a non-hardbound connection we need
2536 			 * to look at both per-socket policy and global
2537 			 * policy.
2538 			 */
2539 			mp = ipsec_check_global_policy(mp, connp,
2540 			    ipha, ip6h, ira, ns);
2541 			if (policy_head != NULL)
2542 				IPPH_REFRELE(policy_head, ns);
2543 			return (mp);
2544 		}
2545 	}
2546 
2547 	mutex_enter(&connp->conn_lock);
2548 	/* Connection is closing */
2549 	if (connp->conn_state_flags & CONN_CONDEMNED) {
2550 		mutex_exit(&connp->conn_lock);
2551 		ip_drop_packet(mp, B_TRUE, NULL,
2552 		    DROPPER(ipss, ipds_spd_got_clear),
2553 		    &ipss->ipsec_spd_dropper);
2554 		BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2555 		return (NULL);
2556 	}
2557 
2558 	/*
2559 	 * Once a connection is latched it remains so for life; the conn_latch
2560 	 * pointer on the conn has not changed. We simply initialize ipl here,
2561 	 * as the earlier initialization was done only in the cleartext case.
2562 	 */
2563 	if ((ipl = connp->conn_latch) == NULL) {
2564 		mblk_t *retmp;
2565 		policy_head = connp->conn_policy;
2566 
2567 		/* Hold a reference in case the conn is closing */
2568 		if (policy_head != NULL)
2569 			IPPH_REFHOLD(policy_head);
2570 		mutex_exit(&connp->conn_lock);
2571 		/*
2572 		 * We don't have policies cached in the conn
2573 		 * for this stream. So, look at the global
2574 		 * policy. It will check against conn or global
2575 		 * depending on whichever is stronger.
2576 		 */
2577 		retmp = ipsec_check_global_policy(mp, connp,
2578 		    ipha, ip6h, ira, ns);
2579 		if (policy_head != NULL)
2580 			IPPH_REFRELE(policy_head, ns);
2581 		return (retmp);
2582 	}
2583 
2584 	IPLATCH_REFHOLD(ipl);
2585 	/* Hold reference on conn_latch_in_action in case conn is closing */
2586 	ap = connp->conn_latch_in_action;
2587 	if (ap != NULL)
2588 		IPACT_REFHOLD(ap);
2589 	mutex_exit(&connp->conn_lock);
2590 
2591 	if (ap != NULL) {
2592 		/* Policy is cached & latched; fast(er) path */
2593 		const char *reason;
2594 		kstat_named_t *counter;
2595 
2596 		if (ipsec_check_ipsecin_latch(ira, mp, ipl, ap,
2597 		    ipha, ip6h, &reason, &counter, connp, ns)) {
2598 			BUMP_MIB(&ipst->ips_ip_mib, ipsecInSucceeded);
2599 			IPLATCH_REFRELE(ipl);
2600 			IPACT_REFRELE(ap);
2601 			return (mp);
2602 		}
2603 		ipsec_rl_strlog(ns, IP_MOD_ID, 0, 0,
2604 		    SL_ERROR|SL_WARN|SL_CONSOLE,
2605 		    "ipsec inbound policy mismatch: %s, packet dropped\n",
2606 		    reason);
2607 		ip_drop_packet(mp, B_TRUE, NULL, counter,
2608 		    &ipss->ipsec_spd_dropper);
2609 		BUMP_MIB(&ipst->ips_ip_mib, ipsecInFailed);
2610 		IPLATCH_REFRELE(ipl);
2611 		IPACT_REFRELE(ap);
2612 		return (NULL);
2613 	}
2614 	if ((p = connp->conn_latch_in_policy) == NULL) {
2615 		ipsec_weird_null_inbound_policy++;
2616 		IPLATCH_REFRELE(ipl);
2617 		return (mp);
2618 	}
2619 
2620 	unique_id = conn_to_unique(connp, mp, ipha, ip6h);
2621 	IPPOL_REFHOLD(p);
2622 	mp = ipsec_check_ipsecin_policy(mp, p, ipha, ip6h, unique_id, ira, ns);
2623 	/*
2624 	 * NOTE: ipsecIn{Failed,Succeeded} bumped by
2625 	 * ipsec_check_ipsecin_policy().
2626 	 */
2627 	if (mp != NULL)
2628 		ipsec_latch_inbound(connp, ira);
2629 	IPLATCH_REFRELE(ipl);
2630 	return (mp);
2631 }
2632 
2633 /*
2634  * Count the bytes preceding "hptr" in "mp"; handles tunnel-mode and ICMP.
2635  */
2636 static int
2637 prepended_length(mblk_t *mp, uintptr_t hptr)
2638 {
2639 	int rc = 0;
2640 
2641 	while (mp != NULL) {
2642 		if (hptr >= (uintptr_t)mp->b_rptr && hptr <
2643 		    (uintptr_t)mp->b_wptr) {
2644 			rc += (int)(hptr - (uintptr_t)mp->b_rptr);
2645 			break;	/* out of while loop */
2646 		}
2647 		rc += (int)MBLKL(mp);
2648 		mp = mp->b_cont;
2649 	}
2650 
2651 	if (mp == NULL) {
2652 		/*
2653 		 * IF (big IF) we make it here by naturally exiting the loop,
2654 		 * then ip6h isn't in the mblk chain "mp" at all.
2655 		 *
2656 		 * The only case where this happens is with a reversed IP
2657 		 * header that gets passed up by inbound ICMP processing.
2658 		 * This unfortunately triggers longstanding bug 6478464.  For
2659 		 * now, just pass up 0 for the answer.
2660 		 */
2661 #ifdef DEBUG_NOT_UNTIL_6478464
2662 		ASSERT(mp != NULL);
2663 #endif
2664 		rc = 0;
2665 	}
2666 
2667 	return (rc);
2668 }
2669 
2670 /*
2671  * Returns:
2672  *
2673  * SELRET_NOMEM --> msgpullup() needed to gather things failed.
2674  * SELRET_BADPKT --> If we're being called after tunnel-mode fragment
2675  *		     gathering, the initial fragment is too short for
2676  *		     useful data.  Only returned if SEL_TUNNEL_FIRSTFRAG is
2677  *		     set.
2678  * SELRET_SUCCESS --> "sel" now has initialized IPsec selector data.
2679  * SELRET_TUNFRAG --> This is a fragment in a tunnel-mode packet.  Caller
2680  *		      should put this packet in a fragment-gathering queue.
2681  *		      Only returned if SEL_TUNNEL_MODE and SEL_PORT_POLICY
2682  *		      are set.
2683  *
2684  * Note that ipha/ip6h can be in a different mblk (mp->b_cont) in the case
2685  * of tunneled packets.
2686  * Also, mp->b_rptr can be an ICMP error where ipha/ip6h is the packet in
2687  * error past the ICMP error.
2688  */
2689 static selret_t
2690 ipsec_init_inbound_sel(ipsec_selector_t *sel, mblk_t *mp, ipha_t *ipha,
2691     ip6_t *ip6h, uint8_t sel_flags)
2692 {
2693 	uint16_t *ports;
2694 	int outer_hdr_len = 0;	/* For ICMP or tunnel-mode cases... */
2695 	ushort_t hdr_len;
2696 	mblk_t *spare_mp = NULL;
2697 	uint8_t *nexthdrp, *transportp;
2698 	uint8_t nexthdr;
2699 	uint8_t icmp_proto;
2700 	ip_pkt_t ipp;
2701 	boolean_t port_policy_present = (sel_flags & SEL_PORT_POLICY);
2702 	boolean_t is_icmp = (sel_flags & SEL_IS_ICMP);
2703 	boolean_t tunnel_mode = (sel_flags & SEL_TUNNEL_MODE);
2704 	boolean_t post_frag = (sel_flags & SEL_POST_FRAG);
2705 
2706 	ASSERT((ipha == NULL && ip6h != NULL) ||
2707 	    (ipha != NULL && ip6h == NULL));
2708 
2709 	if (ip6h != NULL) {
2710 		outer_hdr_len = prepended_length(mp, (uintptr_t)ip6h);
2711 		nexthdr = ip6h->ip6_nxt;
2712 		icmp_proto = IPPROTO_ICMPV6;
2713 		sel->ips_isv4 = B_FALSE;
2714 		sel->ips_local_addr_v6 = ip6h->ip6_dst;
2715 		sel->ips_remote_addr_v6 = ip6h->ip6_src;
2716 
2717 		bzero(&ipp, sizeof (ipp));
2718 
2719 		switch (nexthdr) {
2720 		case IPPROTO_HOPOPTS:
2721 		case IPPROTO_ROUTING:
2722 		case IPPROTO_DSTOPTS:
2723 		case IPPROTO_FRAGMENT:
2724 			/*
2725 			 * Use ip_hdr_length_nexthdr_v6(), and have a spare
2726 			 * contiguous mblk to feed it.
2727 			 */
2728 			if ((spare_mp = msgpullup(mp, -1)) == NULL)
2729 				return (SELRET_NOMEM);
2730 			if (!ip_hdr_length_nexthdr_v6(spare_mp,
2731 			    (ip6_t *)(spare_mp->b_rptr + outer_hdr_len),
2732 			    &hdr_len, &nexthdrp)) {
2733 				/* Malformed packet - caller frees. */
2734 				ipsec_freemsg_chain(spare_mp);
2735 				return (SELRET_BADPKT);
2736 			}
2737 			/* Repopulate now that we have the whole packet */
2738 			ip6h = (ip6_t *)(spare_mp->b_rptr + outer_hdr_len);
2739 			(void) ip_find_hdr_v6(spare_mp, ip6h, B_FALSE, &ipp,
2740 			    NULL);
2741 			nexthdr = *nexthdrp;
2742 			/* We can just extract based on hdr_len now. */
2743 			break;
2744 		default:
2745 			(void) ip_find_hdr_v6(mp, ip6h, B_FALSE, &ipp, NULL);
2746 			hdr_len = IPV6_HDR_LEN;
2747 			break;
2748 		}
2749 		if (port_policy_present && IS_V6_FRAGMENT(ipp) && !is_icmp) {
2750 			/* IPv6 Fragment */
2751 			ipsec_freemsg_chain(spare_mp);
2752 			return (SELRET_TUNFRAG);
2753 		}
2754 		transportp = (uint8_t *)ip6h + hdr_len;
2755 	} else {
2756 		outer_hdr_len = prepended_length(mp, (uintptr_t)ipha);
2757 		icmp_proto = IPPROTO_ICMP;
2758 		sel->ips_isv4 = B_TRUE;
2759 		sel->ips_local_addr_v4 = ipha->ipha_dst;
2760 		sel->ips_remote_addr_v4 = ipha->ipha_src;
2761 		nexthdr = ipha->ipha_protocol;
2762 		hdr_len = IPH_HDR_LENGTH(ipha);
2763 
2764 		if (port_policy_present &&
2765 		    IS_V4_FRAGMENT(ipha->ipha_fragment_offset_and_flags) &&
2766 		    !is_icmp) {
2767 			/* IPv4 Fragment */
2768 			ipsec_freemsg_chain(spare_mp);
2769 			return (SELRET_TUNFRAG);
2770 		}
2771 		transportp = (uint8_t *)ipha + hdr_len;
2772 	}
2773 	sel->ips_protocol = nexthdr;
2774 
2775 	if ((nexthdr != IPPROTO_TCP && nexthdr != IPPROTO_UDP &&
2776 	    nexthdr != IPPROTO_SCTP && nexthdr != icmp_proto) ||
2777 	    (!port_policy_present && !post_frag && tunnel_mode)) {
2778 		sel->ips_remote_port = sel->ips_local_port = 0;
2779 		ipsec_freemsg_chain(spare_mp);
2780 		return (SELRET_SUCCESS);
2781 	}
2782 
2783 	if (transportp + 4 > mp->b_wptr) {
2784 		/* If we didn't pullup a copy already, do so now. */
2785 		/*
2786 		 * XXX performance, will upper-layers frequently split TCP/UDP
2787 		 * apart from IP or options?  If so, perhaps we should revisit
2788 		 * the spare_mp strategy.
2789 		 */
2790 		ipsec_hdr_pullup_needed++;
2791 		if (spare_mp == NULL &&
2792 		    (spare_mp = msgpullup(mp, -1)) == NULL) {
2793 			return (SELRET_NOMEM);
2794 		}
2795 		transportp = &spare_mp->b_rptr[hdr_len + outer_hdr_len];
2796 	}
2797 
2798 	if (nexthdr == icmp_proto) {
2799 		sel->ips_icmp_type = *transportp++;
2800 		sel->ips_icmp_code = *transportp;
2801 		sel->ips_remote_port = sel->ips_local_port = 0;
2802 	} else {
2803 		ports = (uint16_t *)transportp;
2804 		sel->ips_remote_port = *ports++;
2805 		sel->ips_local_port = *ports;
2806 	}
2807 	ipsec_freemsg_chain(spare_mp);
2808 	return (SELRET_SUCCESS);
2809 }
2810 
2811 /*
2812  * This is called with a b_next chain of messages from the fragcache code,
2813  * hence it needs to discard a chain on error.
2814  */
2815 static boolean_t
2816 ipsec_init_outbound_ports(ipsec_selector_t *sel, mblk_t *mp, ipha_t *ipha,
2817     ip6_t *ip6h, int outer_hdr_len, ipsec_stack_t *ipss)
2818 {
2819 	/*
2820 	 * XXX cut&paste shared with ipsec_init_inbound_sel
2821 	 */
2822 	uint16_t *ports;
2823 	ushort_t hdr_len;
2824 	mblk_t *spare_mp = NULL;
2825 	uint8_t *nexthdrp;
2826 	uint8_t nexthdr;
2827 	uint8_t *typecode;
2828 	uint8_t check_proto;
2829 
2830 	ASSERT((ipha == NULL && ip6h != NULL) ||
2831 	    (ipha != NULL && ip6h == NULL));
2832 
2833 	if (ip6h != NULL) {
2834 		check_proto = IPPROTO_ICMPV6;
2835 		nexthdr = ip6h->ip6_nxt;
2836 		switch (nexthdr) {
2837 		case IPPROTO_HOPOPTS:
2838 		case IPPROTO_ROUTING:
2839 		case IPPROTO_DSTOPTS:
2840 		case IPPROTO_FRAGMENT:
2841 			/*
2842 			 * Use ip_hdr_length_nexthdr_v6(), and have a spare
2843 			 * contiguous mblk to feed it.
2844 			 */
2845 			spare_mp = msgpullup(mp, -1);
2846 			if (spare_mp == NULL ||
2847 			    !ip_hdr_length_nexthdr_v6(spare_mp,
2848 			    (ip6_t *)(spare_mp->b_rptr + outer_hdr_len),
2849 			    &hdr_len, &nexthdrp)) {
2850 				/* Always works, even if NULL. */
2851 				ipsec_freemsg_chain(spare_mp);
2852 				ip_drop_packet_chain(mp, B_FALSE, NULL,
2853 				    DROPPER(ipss, ipds_spd_nomem),
2854 				    &ipss->ipsec_spd_dropper);
2855 				return (B_FALSE);
2856 			} else {
2857 				nexthdr = *nexthdrp;
2858 				/* We can just extract based on hdr_len now. */
2859 			}
2860 			break;
2861 		default:
2862 			hdr_len = IPV6_HDR_LEN;
2863 			break;
2864 		}
2865 	} else {
2866 		check_proto = IPPROTO_ICMP;
2867 		hdr_len = IPH_HDR_LENGTH(ipha);
2868 		nexthdr = ipha->ipha_protocol;
2869 	}
2870 
2871 	sel->ips_protocol = nexthdr;
2872 	if (nexthdr != IPPROTO_TCP && nexthdr != IPPROTO_UDP &&
2873 	    nexthdr != IPPROTO_SCTP && nexthdr != check_proto) {
2874 		sel->ips_local_port = sel->ips_remote_port = 0;
2875 		ipsec_freemsg_chain(spare_mp); /* Always works, even if NULL */
2876 		return (B_TRUE);
2877 	}
2878 
2879 	if (&mp->b_rptr[hdr_len] + 4 + outer_hdr_len > mp->b_wptr) {
2880 		/* If we didn't pullup a copy already, do so now. */
2881 		/*
2882 		 * XXX performance, will upper-layers frequently split TCP/UDP
2883 		 * apart from IP or options?  If so, perhaps we should revisit
2884 		 * the spare_mp strategy.
2885 		 *
2886 		 * XXX should this be msgpullup(mp, hdr_len+4) ???
2887 		 */
2888 		if (spare_mp == NULL &&
2889 		    (spare_mp = msgpullup(mp, -1)) == NULL) {
2890 			ip_drop_packet_chain(mp, B_FALSE, NULL,
2891 			    DROPPER(ipss, ipds_spd_nomem),
2892 			    &ipss->ipsec_spd_dropper);
2893 			return (B_FALSE);
2894 		}
2895 		ports = (uint16_t *)&spare_mp->b_rptr[hdr_len + outer_hdr_len];
2896 	} else {
2897 		ports = (uint16_t *)&mp->b_rptr[hdr_len + outer_hdr_len];
2898 	}
2899 
2900 	if (nexthdr == check_proto) {
2901 		typecode = (uint8_t *)ports;
2902 		sel->ips_icmp_type = *typecode++;
2903 		sel->ips_icmp_code = *typecode;
2904 		sel->ips_remote_port = sel->ips_local_port = 0;
2905 	} else {
2906 		sel->ips_local_port = *ports++;
2907 		sel->ips_remote_port = *ports;
2908 	}
2909 	ipsec_freemsg_chain(spare_mp);	/* Always works, even if NULL */
2910 	return (B_TRUE);
2911 }
2912 
2913 /*
2914  * Prepend an mblk containing an ipsec_crypto_t to the message chain.
2915  * Frees the argument and returns NULL should the allocation fail.
2916  * Returns the new mblk and sets *icp to point at the crypto data.
2917  */
2918 mblk_t *
2919 ipsec_add_crypto_data(mblk_t *data_mp, ipsec_crypto_t **icp)
2920 {
2921 	mblk_t	*mp;
2922 
2923 	mp = allocb(sizeof (ipsec_crypto_t), BPRI_MED);
2924 	if (mp == NULL) {
2925 		freemsg(data_mp);
2926 		return (NULL);
2927 	}
2928 	bzero(mp->b_rptr, sizeof (ipsec_crypto_t));
2929 	mp->b_wptr += sizeof (ipsec_crypto_t);
2930 	mp->b_cont = data_mp;
2931 	mp->b_datap->db_type = M_EVENT;	/* For ASSERT */
2932 	*icp = (ipsec_crypto_t *)mp->b_rptr;
2933 	return (mp);
2934 }
2935 
2936 /*
2937  * Remove what was prepended above. Return b_cont and a pointer to the
2938  * crypto data.
2939  * The caller must call ipsec_free_crypto_data for mblk once it is done
2940  * with the crypto data.
2941  */
2942 mblk_t *
2943 ipsec_remove_crypto_data(mblk_t *crypto_mp, ipsec_crypto_t **icp)
2944 {
2945 	ASSERT(crypto_mp->b_datap->db_type == M_EVENT);
2946 	ASSERT(MBLKL(crypto_mp) == sizeof (ipsec_crypto_t));
2947 
2948 	*icp = (ipsec_crypto_t *)crypto_mp->b_rptr;
2949 	return (crypto_mp->b_cont);
2950 }
2951 
2952 /*
2953  * Free what was prepended above. Return b_cont.
2954  */
2955 mblk_t *
2956 ipsec_free_crypto_data(mblk_t *crypto_mp)
2957 {
2958 	mblk_t	*mp;
2959 
2960 	ASSERT(crypto_mp->b_datap->db_type == M_EVENT);
2961 	ASSERT(MBLKL(crypto_mp) == sizeof (ipsec_crypto_t));
2962 
2963 	mp = crypto_mp->b_cont;
2964 	freeb(crypto_mp);
2965 	return (mp);
2966 }
2967 
2968 /*
2969  * Create an ipsec_action_t based on the way an inbound packet was protected.
2970  * Used to reflect traffic back to a sender.
2971  *
2972  * We don't bother interning the action into the hash table.
2973  */
2974 ipsec_action_t *
2975 ipsec_in_to_out_action(ip_recv_attr_t *ira)
2976 {
2977 	ipsa_t *ah_assoc, *esp_assoc;
2978 	uint_t auth_alg = 0, encr_alg = 0, espa_alg = 0;
2979 	ipsec_action_t *ap;
2980 	boolean_t unique;
2981 
2982 	ap = kmem_cache_alloc(ipsec_action_cache, KM_NOSLEEP);
2983 
2984 	if (ap == NULL)
2985 		return (NULL);
2986 
2987 	bzero(ap, sizeof (*ap));
2988 	HASH_NULL(ap, ipa_hash);
2989 	ap->ipa_next = NULL;
2990 	ap->ipa_refs = 1;
2991 
2992 	/*
2993 	 * Get the algorithms that were used for this packet.
2994 	 */
2995 	ap->ipa_act.ipa_type = IPSEC_ACT_APPLY;
2996 	ap->ipa_act.ipa_log = 0;
2997 	ASSERT(ira->ira_flags & IRAF_IPSEC_SECURE);
2998 
2999 	ah_assoc = ira->ira_ipsec_ah_sa;
3000 	ap->ipa_act.ipa_apply.ipp_use_ah = (ah_assoc != NULL);
3001 
3002 	esp_assoc = ira->ira_ipsec_esp_sa;
3003 	ap->ipa_act.ipa_apply.ipp_use_esp = (esp_assoc != NULL);
3004 
3005 	if (esp_assoc != NULL) {
3006 		encr_alg = esp_assoc->ipsa_encr_alg;
3007 		espa_alg = esp_assoc->ipsa_auth_alg;
3008 		ap->ipa_act.ipa_apply.ipp_use_espa = (espa_alg != 0);
3009 	}
3010 	if (ah_assoc != NULL)
3011 		auth_alg = ah_assoc->ipsa_auth_alg;
3012 
3013 	ap->ipa_act.ipa_apply.ipp_encr_alg = (uint8_t)encr_alg;
3014 	ap->ipa_act.ipa_apply.ipp_auth_alg = (uint8_t)auth_alg;
3015 	ap->ipa_act.ipa_apply.ipp_esp_auth_alg = (uint8_t)espa_alg;
3016 	ap->ipa_act.ipa_apply.ipp_use_se =
3017 	    !!(ira->ira_flags & IRAF_IPSEC_DECAPS);
3018 	unique = B_FALSE;
3019 
3020 	if (esp_assoc != NULL) {
3021 		ap->ipa_act.ipa_apply.ipp_espa_minbits =
3022 		    esp_assoc->ipsa_authkeybits;
3023 		ap->ipa_act.ipa_apply.ipp_espa_maxbits =
3024 		    esp_assoc->ipsa_authkeybits;
3025 		ap->ipa_act.ipa_apply.ipp_espe_minbits =
3026 		    esp_assoc->ipsa_encrkeybits;
3027 		ap->ipa_act.ipa_apply.ipp_espe_maxbits =
3028 		    esp_assoc->ipsa_encrkeybits;
3029 		ap->ipa_act.ipa_apply.ipp_km_proto = esp_assoc->ipsa_kmp;
3030 		ap->ipa_act.ipa_apply.ipp_km_cookie = esp_assoc->ipsa_kmc;
3031 		if (esp_assoc->ipsa_flags & IPSA_F_UNIQUE)
3032 			unique = B_TRUE;
3033 	}
3034 	if (ah_assoc != NULL) {
3035 		ap->ipa_act.ipa_apply.ipp_ah_minbits =
3036 		    ah_assoc->ipsa_authkeybits;
3037 		ap->ipa_act.ipa_apply.ipp_ah_maxbits =
3038 		    ah_assoc->ipsa_authkeybits;
3039 		ap->ipa_act.ipa_apply.ipp_km_proto = ah_assoc->ipsa_kmp;
3040 		ap->ipa_act.ipa_apply.ipp_km_cookie = ah_assoc->ipsa_kmc;
3041 		if (ah_assoc->ipsa_flags & IPSA_F_UNIQUE)
3042 			unique = B_TRUE;
3043 	}
3044 	ap->ipa_act.ipa_apply.ipp_use_unique = unique;
3045 	ap->ipa_want_unique = unique;
3046 	ap->ipa_allow_clear = B_FALSE;
3047 	ap->ipa_want_se = !!(ira->ira_flags & IRAF_IPSEC_DECAPS);
3048 	ap->ipa_want_ah = (ah_assoc != NULL);
3049 	ap->ipa_want_esp = (esp_assoc != NULL);
3050 
3051 	ap->ipa_ovhd = ipsec_act_ovhd(&ap->ipa_act);
3052 
3053 	ap->ipa_act.ipa_apply.ipp_replay_depth = 0; /* don't care */
3054 
3055 	return (ap);
3056 }
3057 
3058 
3059 /*
3060  * Compute the worst-case amount of extra space required by an action.
3061  * Note that, because of the ESP considerations listed below, this is
3062  * actually not the same as the best-case reduction in the MTU; in the
3063  * future, we should pass additional information to this function to
3064  * allow the actual MTU impact to be computed.
3065  *
3066  * AH: Revisit this if we implement algorithms with
3067  * a verifier size of more than 12 bytes.
3068  *
3069  * ESP: A more exact but more messy computation would take into
3070  * account the interaction between the cipher block size and the
3071  * effective MTU, yielding the inner payload size which reflects a
3072  * packet with *minimum* ESP padding..
3073  */
3074 int32_t
3075 ipsec_act_ovhd(const ipsec_act_t *act)
3076 {
3077 	int32_t overhead = 0;
3078 
3079 	if (act->ipa_type == IPSEC_ACT_APPLY) {
3080 		const ipsec_prot_t *ipp = &act->ipa_apply;
3081 
3082 		if (ipp->ipp_use_ah)
3083 			overhead += IPSEC_MAX_AH_HDR_SIZE;
3084 		if (ipp->ipp_use_esp) {
3085 			overhead += IPSEC_MAX_ESP_HDR_SIZE;
3086 			overhead += sizeof (struct udphdr);
3087 		}
3088 		if (ipp->ipp_use_se)
3089 			overhead += IP_SIMPLE_HDR_LENGTH;
3090 	}
3091 	return (overhead);
3092 }
3093 
3094 /*
3095  * This hash function is used only when creating policies and thus is not
3096  * performance-critical for packet flows.
3097  *
3098  * Future work: canonicalize the structures hashed with this (i.e.,
3099  * zeroize padding) so the hash works correctly.
3100  */
3101 /* ARGSUSED */
3102 static uint32_t
3103 policy_hash(int size, const void *start, const void *end)
3104 {
3105 	return (0);
3106 }
3107 
3108 
3109 /*
3110  * Hash function macros for each address type.
3111  *
3112  * The IPv6 hash function assumes that the low-order 32 bits of the
3113  * address (typically containing the low-order 24 bits of the MAC
3114  * address) are reasonably well-distributed.  Revisit this if we run
3115  * into trouble from lots of collisions on ::1 addresses and the like
3116  * (seems unlikely).
3117  */
3118 #define	IPSEC_IPV4_HASH(a, n) ((a) % (n))
3119 #define	IPSEC_IPV6_HASH(a, n) (((a).s6_addr32[3]) % (n))
3120 
3121 /*
3122  * These two hash functions should produce coordinated values
3123  * but have slightly different roles.
3124  */
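/*
 * selkey_hash() runs at policy-insertion time on a selector key and hashes
 * only host (full-prefix-length) remote addresses, returning IPSEC_SEL_NOHASH
 * otherwise; selector_hash() runs per packet on the concrete remote address
 * and always yields a chain index for the given policy root.
 */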
3125 static uint32_t
3126 selkey_hash(const ipsec_selkey_t *selkey, netstack_t *ns)
3127 {
3128 	uint32_t valid = selkey->ipsl_valid;
3129 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
3130 
3131 	if (!(valid & IPSL_REMOTE_ADDR))
3132 		return (IPSEC_SEL_NOHASH);
3133 
3134 	if (valid & IPSL_IPV4) {
3135 		if (selkey->ipsl_remote_pfxlen == 32) {
3136 			return (IPSEC_IPV4_HASH(selkey->ipsl_remote.ipsad_v4,
3137 			    ipss->ipsec_spd_hashsize));
3138 		}
3139 	}
3140 	if (valid & IPSL_IPV6) {
3141 		if (selkey->ipsl_remote_pfxlen == 128) {
3142 			return (IPSEC_IPV6_HASH(selkey->ipsl_remote.ipsad_v6,
3143 			    ipss->ipsec_spd_hashsize));
3144 		}
3145 	}
3146 	return (IPSEC_SEL_NOHASH);
3147 }
3148 
3149 static uint32_t
3150 selector_hash(ipsec_selector_t *sel, ipsec_policy_root_t *root)
3151 {
3152 	if (sel->ips_isv4) {
3153 		return (IPSEC_IPV4_HASH(sel->ips_remote_addr_v4,
3154 		    root->ipr_nchains));
3155 	}
3156 	return (IPSEC_IPV6_HASH(sel->ips_remote_addr_v6, root->ipr_nchains));
3157 }
3158 
3159 /*
3160  * Intern actions into the action hash table.
3161  */
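/*
 * The array a[0..n-1] is interned back to front, so each interned node's
 * ipa_next points at the node interned on the previous iteration, i.e. the
 * action that follows it in the original array.
 */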
3162 ipsec_action_t *
3163 ipsec_act_find(const ipsec_act_t *a, int n, netstack_t *ns)
3164 {
3165 	int i;
3166 	uint32_t hval;
3167 	ipsec_action_t *ap;
3168 	ipsec_action_t *prev = NULL;
3169 	int32_t overhead, maxovhd = 0;
3170 	boolean_t allow_clear = B_FALSE;
3171 	boolean_t want_ah = B_FALSE;
3172 	boolean_t want_esp = B_FALSE;
3173 	boolean_t want_se = B_FALSE;
3174 	boolean_t want_unique = B_FALSE;
3175 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
3176 
3177 	/*
3178 	 * TODO: should canonicalize a[] (i.e., zeroize any padding)
3179 	 * so we can use a non-trivial policy_hash function.
3180 	 */
3181 	for (i = n-1; i >= 0; i--) {
3182 		hval = policy_hash(IPSEC_ACTION_HASH_SIZE, &a[i], &a[n]);
3183 
3184 		HASH_LOCK(ipss->ipsec_action_hash, hval);
3185 
3186 		for (HASH_ITERATE(ap, ipa_hash,
3187 		    ipss->ipsec_action_hash, hval)) {
3188 			if (bcmp(&ap->ipa_act, &a[i], sizeof (*a)) != 0)
3189 				continue;
3190 			if (ap->ipa_next != prev)
3191 				continue;
3192 			break;
3193 		}
3194 		if (ap != NULL) {
3195 			HASH_UNLOCK(ipss->ipsec_action_hash, hval);
3196 			prev = ap;
3197 			continue;
3198 		}
3199 		/*
3200 		 * Need to allocate a new one.
3201 		 */
3202 		ap = kmem_cache_alloc(ipsec_action_cache, KM_NOSLEEP);
3203 		if (ap == NULL) {
3204 			HASH_UNLOCK(ipss->ipsec_action_hash, hval);
3205 			if (prev != NULL)
3206 				ipsec_action_free(prev);
3207 			return (NULL);
3208 		}
3209 		HASH_INSERT(ap, ipa_hash, ipss->ipsec_action_hash, hval);
3210 
3211 		ap->ipa_next = prev;
3212 		ap->ipa_act = a[i];
3213 
3214 		overhead = ipsec_act_ovhd(&a[i]);
3215 		if (maxovhd < overhead)
3216 			maxovhd = overhead;
3217 
3218 		if ((a[i].ipa_type == IPSEC_ACT_BYPASS) ||
3219 		    (a[i].ipa_type == IPSEC_ACT_CLEAR))
3220 			allow_clear = B_TRUE;
3221 		if (a[i].ipa_type == IPSEC_ACT_APPLY) {
3222 			const ipsec_prot_t *ipp = &a[i].ipa_apply;
3223 
3224 			ASSERT(ipp->ipp_use_ah || ipp->ipp_use_esp);
3225 			want_ah |= ipp->ipp_use_ah;
3226 			want_esp |= ipp->ipp_use_esp;
3227 			want_se |= ipp->ipp_use_se;
3228 			want_unique |= ipp->ipp_use_unique;
3229 		}
3230 		ap->ipa_allow_clear = allow_clear;
3231 		ap->ipa_want_ah = want_ah;
3232 		ap->ipa_want_esp = want_esp;
3233 		ap->ipa_want_se = want_se;
3234 		ap->ipa_want_unique = want_unique;
3235 		ap->ipa_refs = 1; /* from the hash table */
3236 		ap->ipa_ovhd = maxovhd;
3237 		if (prev)
3238 			prev->ipa_refs++;
3239 		prev = ap;
3240 		HASH_UNLOCK(ipss->ipsec_action_hash, hval);
3241 	}
3242 
3243 	ap->ipa_refs++;		/* caller's reference */
3244 
3245 	return (ap);
3246 }
3247 
3248 /*
3249  * Called when refcount goes to 0, indicating that all references to this
3250  * node are gone.
3251  *
3252  * This does not unchain the action from the hash table.
3253  */
3254 void
3255 ipsec_action_free(ipsec_action_t *ap)
3256 {
3257 	for (;;) {
3258 		ipsec_action_t *np = ap->ipa_next;
3259 		ASSERT(ap->ipa_refs == 0);
3260 		ASSERT(ap->ipa_hash.hash_pp == NULL);
3261 		kmem_cache_free(ipsec_action_cache, ap);
3262 		ap = np;
3263 		/* Inlined IPACT_REFRELE -- avoid recursion */
3264 		if (ap == NULL)
3265 			break;
3266 		membar_exit();
3267 		if (atomic_add_32_nv(&(ap)->ipa_refs, -1) != 0)
3268 			break;
3269 		/* End inlined IPACT_REFRELE */
3270 	}
3271 }
3272 
3273 /*
3274  * Called when the action hash table goes away.
3275  *
3276  * The actions can be queued on an mblk with ipsec_in or
3277  * ipsec_out, hence the actions might still be around.
3278  * But we decrement ipa_refs here since we no longer have
3279  * a reference to the action from the hash table.
3280  */
3281 static void
3282 ipsec_action_free_table(ipsec_action_t *ap)
3283 {
3284 	while (ap != NULL) {
3285 		ipsec_action_t *np = ap->ipa_next;
3286 
3287 		/* FIXME: remove? */
3288 		(void) printf("ipsec_action_free_table(%p) ref %d\n",
3289 		    (void *)ap, ap->ipa_refs);
3290 		ASSERT(ap->ipa_refs > 0);
3291 		IPACT_REFRELE(ap);
3292 		ap = np;
3293 	}
3294 }
3295 
3296 /*
3297  * Need to walk all stack instances since the reclaim function
3298  * is global for all instances.
3299  */
3300 /* ARGSUSED */
3301 static void
3302 ipsec_action_reclaim(void *arg)
3303 {
3304 	netstack_handle_t nh;
3305 	netstack_t *ns;
3306 
3307 	netstack_next_init(&nh);
3308 	while ((ns = netstack_next(&nh)) != NULL) {
3309 		ipsec_action_reclaim_stack(ns);
3310 		netstack_rele(ns);
3311 	}
3312 	netstack_next_fini(&nh);
3313 }
3314 
3315 /*
3316  * Periodically sweep action hash table for actions with refcount==1, and
3317  * nuke them.  We cannot do this "on demand" (i.e., from IPACT_REFRELE)
3318  * because we can't close the race against another thread finding the action
3319  * in the hash table without holding the bucket lock during IPACT_REFRELE.
3320  * Instead, we run this function sporadically to clean up after ourselves;
3321  * we also set it as the "reclaim" function for the action kmem_cache.
3322  *
3323  * Note that it may take several passes of ipsec_action_gc() to free all
3324  * "stale" actions.
3325  */
3326 static void
3327 ipsec_action_reclaim_stack(netstack_t *ns)
3328 {
3329 	int i;
3330 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
3331 
3332 	for (i = 0; i < IPSEC_ACTION_HASH_SIZE; i++) {
3333 		ipsec_action_t *ap, *np;
3334 
3335 		/* skip the lock if nobody home */
3336 		if (ipss->ipsec_action_hash[i].hash_head == NULL)
3337 			continue;
3338 
3339 		HASH_LOCK(ipss->ipsec_action_hash, i);
3340 		for (ap = ipss->ipsec_action_hash[i].hash_head;
3341 		    ap != NULL; ap = np) {
3342 			ASSERT(ap->ipa_refs > 0);
3343 			np = ap->ipa_hash.hash_next;
3344 			if (ap->ipa_refs > 1)
3345 				continue;
3346 			HASH_UNCHAIN(ap, ipa_hash,
3347 			    ipss->ipsec_action_hash, i);
3348 			IPACT_REFRELE(ap);
3349 		}
3350 		HASH_UNLOCK(ipss->ipsec_action_hash, i);
3351 	}
3352 }
3353 
3354 /*
3355  * Intern a selector set into the selector set hash table.
3356  * This is simpler than the actions case..
3357  */
3358 static ipsec_sel_t *
3359 ipsec_find_sel(ipsec_selkey_t *selkey, netstack_t *ns)
3360 {
3361 	ipsec_sel_t *sp;
3362 	uint32_t hval, bucket;
3363 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
3364 
3365 	/*
3366 	 * Exactly one AF bit should be set in selkey.
3367 	 */
3368 	ASSERT(!(selkey->ipsl_valid & IPSL_IPV4) ^
3369 	    !(selkey->ipsl_valid & IPSL_IPV6));
3370 
3371 	hval = selkey_hash(selkey, ns);
3372 	/* Set pol_hval to uninitialized until we put it in a polhead. */
3373 	selkey->ipsl_sel_hval = hval;
3374 
3375 	bucket = (hval == IPSEC_SEL_NOHASH) ? 0 : hval;
3376 
3377 	ASSERT(!HASH_LOCKED(ipss->ipsec_sel_hash, bucket));
3378 	HASH_LOCK(ipss->ipsec_sel_hash, bucket);
3379 
3380 	for (HASH_ITERATE(sp, ipsl_hash, ipss->ipsec_sel_hash, bucket)) {
3381 		if (bcmp(&sp->ipsl_key, selkey,
3382 		    offsetof(ipsec_selkey_t, ipsl_pol_hval)) == 0)
3383 			break;
3384 	}
3385 	if (sp != NULL) {
3386 		sp->ipsl_refs++;
3387 
3388 		HASH_UNLOCK(ipss->ipsec_sel_hash, bucket);
3389 		return (sp);
3390 	}
3391 
3392 	sp = kmem_cache_alloc(ipsec_sel_cache, KM_NOSLEEP);
3393 	if (sp == NULL) {
3394 		HASH_UNLOCK(ipss->ipsec_sel_hash, bucket);
3395 		return (NULL);
3396 	}
3397 
3398 	HASH_INSERT(sp, ipsl_hash, ipss->ipsec_sel_hash, bucket);
3399 	sp->ipsl_refs = 2;	/* one for hash table, one for caller */
3400 	sp->ipsl_key = *selkey;
3401 	/* Set to uninitialized and have insertion into polhead fix things. */
3402 	if (selkey->ipsl_sel_hval != IPSEC_SEL_NOHASH)
3403 		sp->ipsl_key.ipsl_pol_hval = 0;
3404 	else
3405 		sp->ipsl_key.ipsl_pol_hval = IPSEC_SEL_NOHASH;
3406 
3407 	HASH_UNLOCK(ipss->ipsec_sel_hash, bucket);
3408 
3409 	return (sp);
3410 }
3411 
3412 static void
3413 ipsec_sel_rel(ipsec_sel_t **spp, netstack_t *ns)
3414 {
3415 	ipsec_sel_t *sp = *spp;
3416 	int hval = sp->ipsl_key.ipsl_sel_hval;
3417 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
3418 
3419 	*spp = NULL;
3420 
3421 	if (hval == IPSEC_SEL_NOHASH)
3422 		hval = 0;
3423 
3424 	ASSERT(!HASH_LOCKED(ipss->ipsec_sel_hash, hval));
3425 	HASH_LOCK(ipss->ipsec_sel_hash, hval);
3426 	if (--sp->ipsl_refs == 1) {
3427 		HASH_UNCHAIN(sp, ipsl_hash, ipss->ipsec_sel_hash, hval);
3428 		sp->ipsl_refs--;
3429 		HASH_UNLOCK(ipss->ipsec_sel_hash, hval);
3430 		ASSERT(sp->ipsl_refs == 0);
3431 		kmem_cache_free(ipsec_sel_cache, sp);
3432 		/* Caller unlocks */
3433 		return;
3434 	}
3435 
3436 	HASH_UNLOCK(ipss->ipsec_sel_hash, hval);
3437 }
3438 
3439 /*
3440  * Free a policy rule which we know is no longer being referenced.
3441  */
3442 void
3443 ipsec_policy_free(ipsec_policy_t *ipp)
3444 {
3445 	ASSERT(ipp->ipsp_refs == 0);
3446 	ASSERT(ipp->ipsp_sel != NULL);
3447 	ASSERT(ipp->ipsp_act != NULL);
3448 	ASSERT(ipp->ipsp_netstack != NULL);
3449 
3450 	ipsec_sel_rel(&ipp->ipsp_sel, ipp->ipsp_netstack);
3451 	IPACT_REFRELE(ipp->ipsp_act);
3452 	kmem_cache_free(ipsec_pol_cache, ipp);
3453 }
3454 
3455 /*
3456  * Construction of new policy rules; construct a policy, and add it to
3457  * the appropriate tables.
3458  */
3459 ipsec_policy_t *
3460 ipsec_policy_create(ipsec_selkey_t *keys, const ipsec_act_t *a,
3461     int nacts, int prio, uint64_t *index_ptr, netstack_t *ns)
3462 {
3463 	ipsec_action_t *ap;
3464 	ipsec_sel_t *sp;
3465 	ipsec_policy_t *ipp;
3466 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
3467 
3468 	if (index_ptr == NULL)
3469 		index_ptr = &ipss->ipsec_next_policy_index;
3470 
3471 	ipp = kmem_cache_alloc(ipsec_pol_cache, KM_NOSLEEP);
3472 	ap = ipsec_act_find(a, nacts, ns);
3473 	sp = ipsec_find_sel(keys, ns);
3474 
3475 	if ((ap == NULL) || (sp == NULL) || (ipp == NULL)) {
3476 		if (ap != NULL) {
3477 			IPACT_REFRELE(ap);
3478 		}
3479 		if (sp != NULL)
3480 			ipsec_sel_rel(&sp, ns);
3481 		if (ipp != NULL)
3482 			kmem_cache_free(ipsec_pol_cache, ipp);
3483 		return (NULL);
3484 	}
3485 
3486 	HASH_NULL(ipp, ipsp_hash);
3487 
3488 	ipp->ipsp_netstack = ns;	/* Needed for ipsec_policy_free */
3489 	ipp->ipsp_refs = 1;	/* caller's reference */
3490 	ipp->ipsp_sel = sp;
3491 	ipp->ipsp_act = ap;
3492 	ipp->ipsp_prio = prio;	/* rule priority */
3493 	ipp->ipsp_index = *index_ptr;
3494 	(*index_ptr)++;
3495 
3496 	return (ipp);
3497 }
3498 
3499 static void
3500 ipsec_update_present_flags(ipsec_stack_t *ipss)
3501 {
3502 	boolean_t hashpol;
3503 
3504 	hashpol = (avl_numnodes(&ipss->ipsec_system_policy.iph_rulebyid) > 0);
3505 
3506 	if (hashpol) {
3507 		ipss->ipsec_outbound_v4_policy_present = B_TRUE;
3508 		ipss->ipsec_outbound_v6_policy_present = B_TRUE;
3509 		ipss->ipsec_inbound_v4_policy_present = B_TRUE;
3510 		ipss->ipsec_inbound_v6_policy_present = B_TRUE;
3511 		return;
3512 	}
3513 
3514 	ipss->ipsec_outbound_v4_policy_present = (NULL !=
3515 	    ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_OUTBOUND].
3516 	    ipr_nonhash[IPSEC_AF_V4]);
3517 	ipss->ipsec_outbound_v6_policy_present = (NULL !=
3518 	    ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_OUTBOUND].
3519 	    ipr_nonhash[IPSEC_AF_V6]);
3520 	ipss->ipsec_inbound_v4_policy_present = (NULL !=
3521 	    ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_INBOUND].
3522 	    ipr_nonhash[IPSEC_AF_V4]);
3523 	ipss->ipsec_inbound_v6_policy_present = (NULL !=
3524 	    ipss->ipsec_system_policy.iph_root[IPSEC_TYPE_INBOUND].
3525 	    ipr_nonhash[IPSEC_AF_V6]);
3526 }
3527 
3528 boolean_t
3529 ipsec_policy_delete(ipsec_policy_head_t *php, ipsec_selkey_t *keys, int dir,
3530 	netstack_t *ns)
3531 {
3532 	ipsec_sel_t *sp;
3533 	ipsec_policy_t *ip, *nip, *head;
3534 	int af;
3535 	ipsec_policy_root_t *pr = &php->iph_root[dir];
3536 
3537 	sp = ipsec_find_sel(keys, ns);
3538 
3539 	if (sp == NULL)
3540 		return (B_FALSE);
3541 
3542 	af = (sp->ipsl_key.ipsl_valid & IPSL_IPV4) ? IPSEC_AF_V4 : IPSEC_AF_V6;
3543 
3544 	rw_enter(&php->iph_lock, RW_WRITER);
3545 
3546 	if (sp->ipsl_key.ipsl_pol_hval == IPSEC_SEL_NOHASH) {
3547 		head = pr->ipr_nonhash[af];
3548 	} else {
3549 		head = pr->ipr_hash[sp->ipsl_key.ipsl_pol_hval].hash_head;
3550 	}
3551 
3552 	for (ip = head; ip != NULL; ip = nip) {
3553 		nip = ip->ipsp_hash.hash_next;
3554 		if (ip->ipsp_sel != sp) {
3555 			continue;
3556 		}
3557 
3558 		IPPOL_UNCHAIN(php, ip);
3559 
3560 		php->iph_gen++;
3561 		ipsec_update_present_flags(ns->netstack_ipsec);
3562 
3563 		rw_exit(&php->iph_lock);
3564 
3565 		ipsec_sel_rel(&sp, ns);
3566 
3567 		return (B_TRUE);
3568 	}
3569 
3570 	rw_exit(&php->iph_lock);
3571 	ipsec_sel_rel(&sp, ns);
3572 	return (B_FALSE);
3573 }
3574 
3575 int
3576 ipsec_policy_delete_index(ipsec_policy_head_t *php, uint64_t policy_index,
3577     netstack_t *ns)
3578 {
3579 	boolean_t found = B_FALSE;
3580 	ipsec_policy_t ipkey;
3581 	ipsec_policy_t *ip;
3582 	avl_index_t where;
3583 
3584 	bzero(&ipkey, sizeof (ipkey));
3585 	ipkey.ipsp_index = policy_index;
3586 
3587 	rw_enter(&php->iph_lock, RW_WRITER);
3588 
3589 	/*
3590 	 * We could be cleverer here about the walk, but (k+1)*log(N) will
3591 	 * do for now (k == number of matches, N == number of table
3592 	 * entries).
3593 	 */
3594 	for (;;) {
3595 		ip = (ipsec_policy_t *)avl_find(&php->iph_rulebyid,
3596 		    (void *)&ipkey, &where);
3597 		ASSERT(ip == NULL);
3598 
3599 		ip = avl_nearest(&php->iph_rulebyid, where, AVL_AFTER);
3600 
3601 		if (ip == NULL)
3602 			break;
3603 
3604 		if (ip->ipsp_index != policy_index) {
3605 			ASSERT(ip->ipsp_index > policy_index);
3606 			break;
3607 		}
3608 
3609 		IPPOL_UNCHAIN(php, ip);
3610 		found = B_TRUE;
3611 	}
3612 
3613 	if (found) {
3614 		php->iph_gen++;
3615 		ipsec_update_present_flags(ns->netstack_ipsec);
3616 	}
3617 
3618 	rw_exit(&php->iph_lock);
3619 
3620 	return (found ? 0 : ENOENT);
3621 }
3622 
3623 /*
3624  * Given a constructed ipsec_policy_t policy rule, see if it can be entered
3625  * into the correct policy ruleset.  As a side-effect, it sets the
3626  * policy-hash value (ipsl_pol_hval) on "ipp"'s selector key.
3627  *
3628  * Returns B_TRUE if it can be entered, B_FALSE if it can't be (because a
3629  * duplicate policy exists with exactly the same selectors, or because an
3630  * icmp rule exists with a different encryption/authentication action).
3631  */
3632 boolean_t
3633 ipsec_check_policy(ipsec_policy_head_t *php, ipsec_policy_t *ipp, int direction)
3634 {
3635 	ipsec_policy_root_t *pr = &php->iph_root[direction];
3636 	int af = -1;
3637 	ipsec_policy_t *p2, *head;
3638 	uint8_t check_proto;
3639 	ipsec_selkey_t *selkey = &ipp->ipsp_sel->ipsl_key;
3640 	uint32_t	valid = selkey->ipsl_valid;
3641 
3642 	if (valid & IPSL_IPV6) {
3643 		ASSERT(!(valid & IPSL_IPV4));
3644 		af = IPSEC_AF_V6;
3645 		check_proto = IPPROTO_ICMPV6;
3646 	} else {
3647 		ASSERT(valid & IPSL_IPV4);
3648 		af = IPSEC_AF_V4;
3649 		check_proto = IPPROTO_ICMP;
3650 	}
3651 
3652 	ASSERT(RW_WRITE_HELD(&php->iph_lock));
3653 
3654 	/*
3655 	 * Double-check that we don't have any duplicate selectors here.
3656 	 * Because selectors are interned below, we need only compare pointers
3657 	 * for equality.
3658 	 */
3659 	if (selkey->ipsl_sel_hval == IPSEC_SEL_NOHASH) {
3660 		head = pr->ipr_nonhash[af];
3661 	} else {
3662 		selkey->ipsl_pol_hval =
3663 		    (selkey->ipsl_valid & IPSL_IPV4) ?
3664 		    IPSEC_IPV4_HASH(selkey->ipsl_remote.ipsad_v4,
3665 		    pr->ipr_nchains) :
3666 		    IPSEC_IPV6_HASH(selkey->ipsl_remote.ipsad_v6,
3667 		    pr->ipr_nchains);
3668 
3669 		head = pr->ipr_hash[selkey->ipsl_pol_hval].hash_head;
3670 	}
3671 
3672 	for (p2 = head; p2 != NULL; p2 = p2->ipsp_hash.hash_next) {
3673 		if (p2->ipsp_sel == ipp->ipsp_sel)
3674 			return (B_FALSE);
3675 	}
3676 
3677 	/*
3678 	 * If it's ICMP and not a drop or pass rule, run through the ICMP
3679 	 * rules and make sure the action is either new or the same as any
3680 	 * other actions.  We don't have to check the full chain because
3681 	 * discard and bypass will override all other actions.
3682 	 */
3683 
3684 	if (valid & IPSL_PROTOCOL &&
3685 	    selkey->ipsl_proto == check_proto &&
3686 	    (ipp->ipsp_act->ipa_act.ipa_type == IPSEC_ACT_APPLY)) {
3687 
3688 		for (p2 = head; p2 != NULL; p2 = p2->ipsp_hash.hash_next) {
3689 
3690 			if (p2->ipsp_sel->ipsl_key.ipsl_valid & IPSL_PROTOCOL &&
3691 			    p2->ipsp_sel->ipsl_key.ipsl_proto == check_proto &&
3692 			    (p2->ipsp_act->ipa_act.ipa_type ==
3693 			    IPSEC_ACT_APPLY)) {
3694 				return (ipsec_compare_action(p2, ipp));
3695 			}
3696 		}
3697 	}
3698 
3699 	return (B_TRUE);
3700 }
3701 
3702 /*
3703  * Compare the action chains of two policies for equality.
3704  * Returns B_TRUE on effective equality.
3705  */
3706 
3707 static boolean_t
3708 ipsec_compare_action(ipsec_policy_t *p1, ipsec_policy_t *p2)
3709 {
3710 
3711 	ipsec_action_t *act1, *act2;
3712 
3713 	/* We have a valid rule. Let's compare the actions */
3714 	if (p1->ipsp_act == p2->ipsp_act) {
3715 		/* same action. We are good */
3716 		return (B_TRUE);
3717 	}
3718 
3719 	/* we have to walk the chain */
3720 
3721 	act1 = p1->ipsp_act;
3722 	act2 = p2->ipsp_act;
3723 
3724 	while (act1 != NULL && act2 != NULL) {
3725 
3726 		/* Otherwise, are we close enough? */
3727 		if (act1->ipa_allow_clear != act2->ipa_allow_clear ||
3728 		    act1->ipa_want_ah != act2->ipa_want_ah ||
3729 		    act1->ipa_want_esp != act2->ipa_want_esp ||
3730 		    act1->ipa_want_se != act2->ipa_want_se) {
3731 			/* Nope, we aren't */
3732 			return (B_FALSE);
3733 		}
3734 
3735 		if (act1->ipa_want_ah) {
3736 			if (act1->ipa_act.ipa_apply.ipp_auth_alg !=
3737 			    act2->ipa_act.ipa_apply.ipp_auth_alg) {
3738 				return (B_FALSE);
3739 			}
3740 
3741 			if (act1->ipa_act.ipa_apply.ipp_ah_minbits !=
3742 			    act2->ipa_act.ipa_apply.ipp_ah_minbits ||
3743 			    act1->ipa_act.ipa_apply.ipp_ah_maxbits !=
3744 			    act2->ipa_act.ipa_apply.ipp_ah_maxbits) {
3745 				return (B_FALSE);
3746 			}
3747 		}
3748 
3749 		if (act1->ipa_want_esp) {
3750 			if (act1->ipa_act.ipa_apply.ipp_use_esp !=
3751 			    act2->ipa_act.ipa_apply.ipp_use_esp ||
3752 			    act1->ipa_act.ipa_apply.ipp_use_espa !=
3753 			    act2->ipa_act.ipa_apply.ipp_use_espa) {
3754 				return (B_FALSE);
3755 			}
3756 
3757 			if (act1->ipa_act.ipa_apply.ipp_use_esp) {
3758 				if (act1->ipa_act.ipa_apply.ipp_encr_alg !=
3759 				    act2->ipa_act.ipa_apply.ipp_encr_alg) {
3760 					return (B_FALSE);
3761 				}
3762 
3763 				if (act1->ipa_act.ipa_apply.ipp_espe_minbits !=
3764 				    act2->ipa_act.ipa_apply.ipp_espe_minbits ||
3765 				    act1->ipa_act.ipa_apply.ipp_espe_maxbits !=
3766 				    act2->ipa_act.ipa_apply.ipp_espe_maxbits) {
3767 					return (B_FALSE);
3768 				}
3769 			}
3770 
3771 			if (act1->ipa_act.ipa_apply.ipp_use_espa) {
3772 				if (act1->ipa_act.ipa_apply.ipp_esp_auth_alg !=
3773 				    act2->ipa_act.ipa_apply.ipp_esp_auth_alg) {
3774 					return (B_FALSE);
3775 				}
3776 
3777 				if (act1->ipa_act.ipa_apply.ipp_espa_minbits !=
3778 				    act2->ipa_act.ipa_apply.ipp_espa_minbits ||
3779 				    act1->ipa_act.ipa_apply.ipp_espa_maxbits !=
3780 				    act2->ipa_act.ipa_apply.ipp_espa_maxbits) {
3781 					return (B_FALSE);
3782 				}
3783 			}
3784 
3785 		}
3786 
3787 		act1 = act1->ipa_next;
3788 		act2 = act2->ipa_next;
3789 	}
3790 
3791 	if (act1 != NULL || act2 != NULL) {
3792 		return (B_FALSE);
3793 	}
3794 
3795 	return (B_TRUE);
3796 }
3797 
3798 
3799 /*
3800  * Given a constructed ipsec_policy_t policy rule, enter it into
3801  * the correct policy ruleset.
3802  *
3803  * ipsec_check_policy() is assumed to have succeeded first (to check for
3804  * duplicates).
3805  */
3806 void
3807 ipsec_enter_policy(ipsec_policy_head_t *php, ipsec_policy_t *ipp, int direction,
3808     netstack_t *ns)
3809 {
3810 	ipsec_policy_root_t *pr = &php->iph_root[direction];
3811 	ipsec_selkey_t *selkey = &ipp->ipsp_sel->ipsl_key;
3812 	uint32_t valid = selkey->ipsl_valid;
3813 	uint32_t hval = selkey->ipsl_pol_hval;
3814 	int af = -1;
3815 
3816 	ASSERT(RW_WRITE_HELD(&php->iph_lock));
3817 
3818 	if (valid & IPSL_IPV6) {
3819 		ASSERT(!(valid & IPSL_IPV4));
3820 		af = IPSEC_AF_V6;
3821 	} else {
3822 		ASSERT(valid & IPSL_IPV4);
3823 		af = IPSEC_AF_V4;
3824 	}
3825 
3826 	php->iph_gen++;
3827 
3828 	if (hval == IPSEC_SEL_NOHASH) {
3829 		HASHLIST_INSERT(ipp, ipsp_hash, pr->ipr_nonhash[af]);
3830 	} else {
3831 		HASH_LOCK(pr->ipr_hash, hval);
3832 		HASH_INSERT(ipp, ipsp_hash, pr->ipr_hash, hval);
3833 		HASH_UNLOCK(pr->ipr_hash, hval);
3834 	}
3835 
3836 	ipsec_insert_always(&php->iph_rulebyid, ipp);
3837 
3838 	ipsec_update_present_flags(ns->netstack_ipsec);
3839 }
3840 
3841 static void
3842 ipsec_ipr_flush(ipsec_policy_head_t *php, ipsec_policy_root_t *ipr)
3843 {
3844 	ipsec_policy_t *ip, *nip;
3845 	int af, chain, nchain;
3846 
3847 	for (af = 0; af < IPSEC_NAF; af++) {
3848 		for (ip = ipr->ipr_nonhash[af]; ip != NULL; ip = nip) {
3849 			nip = ip->ipsp_hash.hash_next;
3850 			IPPOL_UNCHAIN(php, ip);
3851 		}
3852 		ipr->ipr_nonhash[af] = NULL;
3853 	}
3854 	nchain = ipr->ipr_nchains;
3855 
3856 	for (chain = 0; chain < nchain; chain++) {
3857 		for (ip = ipr->ipr_hash[chain].hash_head; ip != NULL;
3858 		    ip = nip) {
3859 			nip = ip->ipsp_hash.hash_next;
3860 			IPPOL_UNCHAIN(php, ip);
3861 		}
3862 		ipr->ipr_hash[chain].hash_head = NULL;
3863 	}
3864 }
3865 
3866 /*
3867  * Create and insert inbound or outbound policy associated with actp for the
3868  * address family fam into the policy head ph.  Returns B_TRUE if policy was
3869  * inserted, and B_FALSE otherwise.
3870  */
3871 boolean_t
3872 ipsec_polhead_insert(ipsec_policy_head_t *ph, ipsec_act_t *actp, uint_t nact,
3873     int fam, int ptype, netstack_t *ns)
3874 {
3875 	ipsec_selkey_t		sel;
3876 	ipsec_policy_t		*pol;
3877 	ipsec_policy_root_t	*pr;
3878 
3879 	bzero(&sel, sizeof (sel));
3880 	sel.ipsl_valid = (fam == IPSEC_AF_V4 ? IPSL_IPV4 : IPSL_IPV6);
3881 	if ((pol = ipsec_policy_create(&sel, actp, nact, IPSEC_PRIO_SOCKET,
3882 	    NULL, ns)) != NULL) {
3883 		pr = &ph->iph_root[ptype];
3884 		HASHLIST_INSERT(pol, ipsp_hash, pr->ipr_nonhash[fam]);
3885 		ipsec_insert_always(&ph->iph_rulebyid, pol);
3886 	}
3887 	return (pol != NULL);
3888 }
3889 
3890 void
3891 ipsec_polhead_flush(ipsec_policy_head_t *php, netstack_t *ns)
3892 {
3893 	int dir;
3894 
3895 	ASSERT(RW_WRITE_HELD(&php->iph_lock));
3896 
3897 	for (dir = 0; dir < IPSEC_NTYPES; dir++)
3898 		ipsec_ipr_flush(php, &php->iph_root[dir]);
3899 
3900 	php->iph_gen++;
3901 	ipsec_update_present_flags(ns->netstack_ipsec);
3902 }
3903 
3904 void
3905 ipsec_polhead_free(ipsec_policy_head_t *php, netstack_t *ns)
3906 {
3907 	int dir;
3908 
3909 	ASSERT(php->iph_refs == 0);
3910 
3911 	rw_enter(&php->iph_lock, RW_WRITER);
3912 	ipsec_polhead_flush(php, ns);
3913 	rw_exit(&php->iph_lock);
3914 	rw_destroy(&php->iph_lock);
3915 	for (dir = 0; dir < IPSEC_NTYPES; dir++) {
3916 		ipsec_policy_root_t *ipr = &php->iph_root[dir];
3917 		int chain;
3918 
3919 		for (chain = 0; chain < ipr->ipr_nchains; chain++)
3920 			mutex_destroy(&(ipr->ipr_hash[chain].hash_lock));
3921 
3922 	}
3923 	ipsec_polhead_free_table(php);
3924 	kmem_free(php, sizeof (*php));
3925 }
3926 
3927 static void
3928 ipsec_ipr_init(ipsec_policy_root_t *ipr)
3929 {
3930 	int af;
3931 
3932 	ipr->ipr_nchains = 0;
3933 	ipr->ipr_hash = NULL;
3934 
3935 	for (af = 0; af < IPSEC_NAF; af++) {
3936 		ipr->ipr_nonhash[af] = NULL;
3937 	}
3938 }
3939 
3940 ipsec_policy_head_t *
3941 ipsec_polhead_create(void)
3942 {
3943 	ipsec_policy_head_t *php;
3944 
3945 	php = kmem_alloc(sizeof (*php), KM_NOSLEEP);
3946 	if (php == NULL)
3947 		return (php);
3948 
3949 	rw_init(&php->iph_lock, NULL, RW_DEFAULT, NULL);
3950 	php->iph_refs = 1;
3951 	php->iph_gen = 0;
3952 
3953 	ipsec_ipr_init(&php->iph_root[IPSEC_TYPE_INBOUND]);
3954 	ipsec_ipr_init(&php->iph_root[IPSEC_TYPE_OUTBOUND]);
3955 
3956 	avl_create(&php->iph_rulebyid, ipsec_policy_cmpbyid,
3957 	    sizeof (ipsec_policy_t), offsetof(ipsec_policy_t, ipsp_byid));
3958 
3959 	return (php);
3960 }
3961 
3962 /*
3963  * Clone the policy head into a new polhead; release one reference to the
3964  * old one and return the only reference to the new one.
3965  * If the old one had a refcount of 1, just return it.
3966  */
3967 ipsec_policy_head_t *
3968 ipsec_polhead_split(ipsec_policy_head_t *php, netstack_t *ns)
3969 {
3970 	ipsec_policy_head_t *nphp;
3971 
3972 	if (php == NULL)
3973 		return (ipsec_polhead_create());
3974 	else if (php->iph_refs == 1)
3975 		return (php);
3976 
3977 	nphp = ipsec_polhead_create();
3978 	if (nphp == NULL)
3979 		return (NULL);
3980 
3981 	if (ipsec_copy_polhead(php, nphp, ns) != 0) {
3982 		ipsec_polhead_free(nphp, ns);
3983 		return (NULL);
3984 	}
3985 	IPPH_REFRELE(php, ns);
3986 	return (nphp);
3987 }
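
/*
 * Hedged usage sketch (placeholder caller): code that is about to modify a
 * per-socket policy head that might be shared first "splits" it so that
 * the change stays private, e.g.
 *
 *	ph = ipsec_polhead_split(connp->conn_policy, ns);
 *	if (ph == NULL)
 *		return (ENOMEM);
 *	connp->conn_policy = ph;
 *
 * i.e. copy-on-write semantics for shared policy heads.
 */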
3988 
3989 /*
3990  * When sending a response to an ICMP request or generating a RST
3991  * in the TCP case, the outbound packets need to go out at the same
3992  * level of protection as the incoming ones, i.e., we associate our
3993  * outbound policy with how the packet came in.  We call this after we
3994  * have accepted the incoming packet, which may or may not have been in
3995  * the clear, and hence we send the reply back with the policy matching
3996  * the incoming datagram's policy.
3997  *
3998  * NOTE: This technique serves two purposes:
3999  *
4000  * 1) If we have multiple outbound policies, we send out a reply
4001  *    matching how it came in rather than matching the outbound
4002  *    policy.
4003  *
4004  * 2) For asymmetric policies, we want to make sure that incoming
4005  *    and outgoing traffic have the same level of protection.
4006  *    Asymmetric policies exist only with global policy, where we may
4007  *    not have both outbound and inbound policy at the same time.
4008  *
4009  * NOTE2:	This function is called by cleartext cases, so it needs to be
4010  *		in IP proper.
4011  *
4012  * Note: the caller has moved other parts of ira into ixa already.
4013  */
4014 boolean_t
4015 ipsec_in_to_out(ip_recv_attr_t *ira, ip_xmit_attr_t *ixa, mblk_t *data_mp,
4016     ipha_t *ipha, ip6_t *ip6h)
4017 {
4018 	ipsec_selector_t sel;
4019 	ipsec_action_t	*reflect_action = NULL;
4020 	netstack_t	*ns = ixa->ixa_ipst->ips_netstack;
4021 
4022 	bzero((void*)&sel, sizeof (sel));
4023 
4024 	if (ira->ira_ipsec_action != NULL) {
4025 		/* transfer reference.. */
4026 		reflect_action = ira->ira_ipsec_action;
4027 		ira->ira_ipsec_action = NULL;
4028 	} else if (!(ira->ira_flags & IRAF_LOOPBACK))
4029 		reflect_action = ipsec_in_to_out_action(ira);
4030 
4031 	/*
4032 	 * The caller is going to send the datagram out, which might
4033 	 * go out on the wire or be delivered locally through ire_send_local.
4034 	 *
4035 	 * 1) If it goes out on the wire, new associations will be
4036 	 *    obtained.
4037 	 * 2) If it is delivered locally, ire_send_local will convert
4038 	 *    this ip_xmit_attr_t back to an ip_recv_attr_t, looking at the
4039 	 *    requests.
4040 	 */
4041 	ixa->ixa_ipsec_action = reflect_action;
4042 
4043 	if (!ipsec_init_outbound_ports(&sel, data_mp, ipha, ip6h, 0,
4044 	    ns->netstack_ipsec)) {
4045 		/* Note: data_mp already consumed and ip_drop_packet done */
4046 		return (B_FALSE);
4047 	}
4048 	ixa->ixa_ipsec_src_port = sel.ips_local_port;
4049 	ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4050 	ixa->ixa_ipsec_proto = sel.ips_protocol;
4051 	ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4052 	ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4053 
4054 	/*
4055 	 * Don't use global policy for this, as we want
4056 	 * to use the same protection that was applied to the inbound packet.
4057 	 * Thus we set IXAF_NO_IPSEC if it arrived in the clear so that
4058 	 * it is sent in the clear.
4059 	 */
4060 	if (ira->ira_flags & IRAF_IPSEC_SECURE)
4061 		ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4062 	else
4063 		ixa->ixa_flags |= IXAF_NO_IPSEC;
4064 
4065 	return (B_TRUE);
4066 }
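
/*
 * Hedged sketch of a reflecting caller (the send routine named here is
 * illustrative, not a contract of this file): e.g. when generating a TCP
 * RST or an ICMP error from an inbound packet,
 *
 *	if (!ipsec_in_to_out(ira, ixa, mp, ipha, ip6h))
 *		return;				(mp was already dropped)
 *	(void) ip_output_simple(mp, ixa);
 *
 * so the reply inherits the protection level of the packet it answers.
 */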
4067 
4068 void
4069 ipsec_out_release_refs(ip_xmit_attr_t *ixa)
4070 {
4071 	if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE))
4072 		return;
4073 
4074 	if (ixa->ixa_ipsec_ah_sa != NULL) {
4075 		IPSA_REFRELE(ixa->ixa_ipsec_ah_sa);
4076 		ixa->ixa_ipsec_ah_sa = NULL;
4077 	}
4078 	if (ixa->ixa_ipsec_esp_sa != NULL) {
4079 		IPSA_REFRELE(ixa->ixa_ipsec_esp_sa);
4080 		ixa->ixa_ipsec_esp_sa = NULL;
4081 	}
4082 	if (ixa->ixa_ipsec_policy != NULL) {
4083 		IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4084 		ixa->ixa_ipsec_policy = NULL;
4085 	}
4086 	if (ixa->ixa_ipsec_action != NULL) {
4087 		IPACT_REFRELE(ixa->ixa_ipsec_action);
4088 		ixa->ixa_ipsec_action = NULL;
4089 	}
4090 	if (ixa->ixa_ipsec_latch) {
4091 		IPLATCH_REFRELE(ixa->ixa_ipsec_latch);
4092 		ixa->ixa_ipsec_latch = NULL;
4093 	}
4094 	/* Clear the soft references to the SAs */
4095 	ixa->ixa_ipsec_ref[0].ipsr_sa = NULL;
4096 	ixa->ixa_ipsec_ref[0].ipsr_bucket = NULL;
4097 	ixa->ixa_ipsec_ref[0].ipsr_gen = 0;
4098 	ixa->ixa_ipsec_ref[1].ipsr_sa = NULL;
4099 	ixa->ixa_ipsec_ref[1].ipsr_bucket = NULL;
4100 	ixa->ixa_ipsec_ref[1].ipsr_gen = 0;
4101 	ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4102 }
4103 
4104 void
4105 ipsec_in_release_refs(ip_recv_attr_t *ira)
4106 {
4107 	if (!(ira->ira_flags & IRAF_IPSEC_SECURE))
4108 		return;
4109 
4110 	if (ira->ira_ipsec_ah_sa != NULL) {
4111 		IPSA_REFRELE(ira->ira_ipsec_ah_sa);
4112 		ira->ira_ipsec_ah_sa = NULL;
4113 	}
4114 	if (ira->ira_ipsec_esp_sa != NULL) {
4115 		IPSA_REFRELE(ira->ira_ipsec_esp_sa);
4116 		ira->ira_ipsec_esp_sa = NULL;
4117 	}
4118 	ira->ira_flags &= ~IRAF_IPSEC_SECURE;
4119 }
4120 
4121 /*
4122  * This is called from ire_send_local when a packet
4123  * is looped back.  We set up the ip_recv_attr_t, "borrowing" the references
4124  * held by the callers.
4125  * Note that we don't do any IPsec but we carry the actions and IPSEC flags
4126  * across so that the fanout policy checks see that IPsec was applied.
4127  *
4128  * The caller should do ipsec_in_release_refs() on the ira by calling
4129  * ira_cleanup().
4130  */
4131 void
4132 ipsec_out_to_in(ip_xmit_attr_t *ixa, ill_t *ill, ip_recv_attr_t *ira)
4133 {
4134 	ipsec_policy_t *pol;
4135 	ipsec_action_t *act;
4136 
4137 	/* Non-IPsec operations */
4138 	ira->ira_free_flags = 0;
4139 	ira->ira_zoneid = ixa->ixa_zoneid;
4140 	ira->ira_cred = ixa->ixa_cred;
4141 	ira->ira_cpid = ixa->ixa_cpid;
4142 	ira->ira_tsl = ixa->ixa_tsl;
4143 	ira->ira_ill = ira->ira_rill = ill;
4144 	ira->ira_flags = ixa->ixa_flags & IAF_MASK;
4145 	ira->ira_no_loop_zoneid = ixa->ixa_no_loop_zoneid;
4146 	ira->ira_pktlen = ixa->ixa_pktlen;
4147 	ira->ira_ip_hdr_length = ixa->ixa_ip_hdr_length;
4148 	ira->ira_protocol = ixa->ixa_protocol;
4149 	ira->ira_mhip = NULL;
4150 
4151 	ira->ira_flags |= IRAF_LOOPBACK | IRAF_L2SRC_LOOPBACK;
4152 
4153 	ira->ira_sqp = ixa->ixa_sqp;
4154 	ira->ira_ring = NULL;
4155 
4156 	ira->ira_ruifindex = ill->ill_phyint->phyint_ifindex;
4157 	ira->ira_rifindex = ira->ira_ruifindex;
4158 
4159 	if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE))
4160 		return;
4161 
4162 	ira->ira_flags |= IRAF_IPSEC_SECURE;
4163 
4164 	ira->ira_ipsec_ah_sa = NULL;
4165 	ira->ira_ipsec_esp_sa = NULL;
4166 
4167 	act = ixa->ixa_ipsec_action;
4168 	if (act == NULL) {
4169 		pol = ixa->ixa_ipsec_policy;
4170 		if (pol != NULL) {
4171 			act = pol->ipsp_act;
4172 			IPACT_REFHOLD(act);
4173 		}
4174 	}
4175 	ixa->ixa_ipsec_action = NULL;
4176 	ira->ira_ipsec_action = act;
4177 }
4178 
4179 /*
4180  * Consults global policy and per-socket policy to see whether this datagram
4181  * should go out secure. If so it updates the ip_xmit_attr_t
4182  * should go out secure.  If so, it updates the ip_xmit_attr_t.
4183  *
4184  * If connp is NULL we just look at the global policy.
4185  *
4186  * Returns NULL if the packet was dropped, in which case the MIB has
4187  * been incremented and ip_drop_packet done.
4188  */
4189 mblk_t *
4190 ip_output_attach_policy(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h,
4191     const conn_t *connp, ip_xmit_attr_t *ixa)
4192 {
4193 	ipsec_selector_t sel;
4194 	boolean_t	policy_present;
4195 	ip_stack_t	*ipst = ixa->ixa_ipst;
4196 	netstack_t	*ns = ipst->ips_netstack;
4197 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
4198 	ipsec_policy_t	*p;
4199 
4200 	ixa->ixa_ipsec_policy_gen = ipss->ipsec_system_policy.iph_gen;
4201 	ASSERT((ipha != NULL && ip6h == NULL) ||
4202 	    (ip6h != NULL && ipha == NULL));
4203 
4204 	if (ipha != NULL)
4205 		policy_present = ipss->ipsec_outbound_v4_policy_present;
4206 	else
4207 		policy_present = ipss->ipsec_outbound_v6_policy_present;
4208 
4209 	if (!policy_present && (connp == NULL || connp->conn_policy == NULL))
4210 		return (mp);
4211 
4212 	bzero((void*)&sel, sizeof (sel));
4213 
4214 	if (ipha != NULL) {
4215 		sel.ips_local_addr_v4 = ipha->ipha_src;
4216 		sel.ips_remote_addr_v4 = ip_get_dst(ipha);
4217 		sel.ips_isv4 = B_TRUE;
4218 	} else {
4219 		sel.ips_isv4 = B_FALSE;
4220 		sel.ips_local_addr_v6 = ip6h->ip6_src;
4221 		sel.ips_remote_addr_v6 = ip_get_dst_v6(ip6h, mp, NULL);
4222 	}
4223 	sel.ips_protocol = ixa->ixa_protocol;
4224 
4225 	if (!ipsec_init_outbound_ports(&sel, mp, ipha, ip6h, 0, ipss)) {
4226 		if (ipha != NULL) {
4227 			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
4228 		} else {
4229 			BUMP_MIB(&ipst->ips_ip6_mib, ipIfStatsOutDiscards);
4230 		}
4231 		/* Note: mp already consumed and ip_drop_packet done */
4232 		return (NULL);
4233 	}
4234 
4235 	ASSERT(ixa->ixa_ipsec_policy == NULL);
4236 	p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, &sel, ns);
4237 	ixa->ixa_ipsec_policy = p;
4238 	if (p != NULL) {
4239 		ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4240 		if (connp == NULL || connp->conn_policy == NULL)
4241 			ixa->ixa_flags |= IXAF_IPSEC_GLOBAL_POLICY;
4242 	} else {
4243 		ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4244 	}
4245 
4246 	/*
4247 	 * Copy the right port information.
4248 	 */
4249 	ixa->ixa_ipsec_src_port = sel.ips_local_port;
4250 	ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4251 	ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4252 	ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4253 	ixa->ixa_ipsec_proto = sel.ips_protocol;
4254 	return (mp);
4255 }
4256 
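
/*
 * Hedged usage sketch (caller shape is illustrative): a transmit path
 * consulting global/per-socket policy typically does
 *
 *	mp = ip_output_attach_policy(mp, ipha, ip6h, connp, ixa);
 *	if (mp == NULL)
 *		return;			(packet consumed, MIB already bumped)
 *
 * and then continues with the normal, possibly IPsec-protected, send.
 */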
4257 /*
4258  * When appropriate, this function caches inbound and outbound policy
4259  * for this connection. The outbound policy is stored in conn_ixa.
4260  * Note that it can not be used for SCTP since conn_faddr isn't set for SCTP.
4261  *
4262  * XXX need to work out more details about per-interface policy and
4263  * caching here!
4264  *
4265  * XXX may want to split inbound and outbound caching for ill..
4266  */
4267 int
4268 ipsec_conn_cache_policy(conn_t *connp, boolean_t isv4)
4269 {
4270 	boolean_t global_policy_present;
4271 	netstack_t	*ns = connp->conn_netstack;
4272 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
4273 
4274 	connp->conn_ixa->ixa_ipsec_policy_gen =
4275 	    ipss->ipsec_system_policy.iph_gen;
4276 	/*
4277 	 * There is no policy latching for ICMP sockets because we can't
4278 	 * decide on which policy to use until we see the packet and get
4279 	 * type/code selectors.
4280 	 */
4281 	if (connp->conn_proto == IPPROTO_ICMP ||
4282 	    connp->conn_proto == IPPROTO_ICMPV6) {
4283 		connp->conn_in_enforce_policy =
4284 		    connp->conn_out_enforce_policy = B_TRUE;
4285 		if (connp->conn_latch != NULL) {
4286 			IPLATCH_REFRELE(connp->conn_latch);
4287 			connp->conn_latch = NULL;
4288 		}
4289 		if (connp->conn_latch_in_policy != NULL) {
4290 			IPPOL_REFRELE(connp->conn_latch_in_policy);
4291 			connp->conn_latch_in_policy = NULL;
4292 		}
4293 		if (connp->conn_latch_in_action != NULL) {
4294 			IPACT_REFRELE(connp->conn_latch_in_action);
4295 			connp->conn_latch_in_action = NULL;
4296 		}
4297 		if (connp->conn_ixa->ixa_ipsec_policy != NULL) {
4298 			IPPOL_REFRELE(connp->conn_ixa->ixa_ipsec_policy);
4299 			connp->conn_ixa->ixa_ipsec_policy = NULL;
4300 		}
4301 		if (connp->conn_ixa->ixa_ipsec_action != NULL) {
4302 			IPACT_REFRELE(connp->conn_ixa->ixa_ipsec_action);
4303 			connp->conn_ixa->ixa_ipsec_action = NULL;
4304 		}
4305 		connp->conn_ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4306 		return (0);
4307 	}
4308 
4309 	global_policy_present = isv4 ?
4310 	    (ipss->ipsec_outbound_v4_policy_present ||
4311 	    ipss->ipsec_inbound_v4_policy_present) :
4312 	    (ipss->ipsec_outbound_v6_policy_present ||
4313 	    ipss->ipsec_inbound_v6_policy_present);
4314 
4315 	if ((connp->conn_policy != NULL) || global_policy_present) {
4316 		ipsec_selector_t sel;
4317 		ipsec_policy_t	*p;
4318 
4319 		if (connp->conn_latch == NULL &&
4320 		    (connp->conn_latch = iplatch_create()) == NULL) {
4321 			return (ENOMEM);
4322 		}
4323 
4324 		bzero((void*)&sel, sizeof (sel));
4325 
4326 		sel.ips_protocol = connp->conn_proto;
4327 		sel.ips_local_port = connp->conn_lport;
4328 		sel.ips_remote_port = connp->conn_fport;
4329 		sel.ips_is_icmp_inv_acq = 0;
4330 		sel.ips_isv4 = isv4;
4331 		if (isv4) {
4332 			sel.ips_local_addr_v4 = connp->conn_laddr_v4;
4333 			sel.ips_remote_addr_v4 = connp->conn_faddr_v4;
4334 		} else {
4335 			sel.ips_local_addr_v6 = connp->conn_laddr_v6;
4336 			sel.ips_remote_addr_v6 = connp->conn_faddr_v6;
4337 		}
4338 
4339 		p = ipsec_find_policy(IPSEC_TYPE_INBOUND, connp, &sel, ns);
4340 		if (connp->conn_latch_in_policy != NULL)
4341 			IPPOL_REFRELE(connp->conn_latch_in_policy);
4342 		connp->conn_latch_in_policy = p;
4343 		connp->conn_in_enforce_policy = (p != NULL);
4344 
4345 		p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, &sel, ns);
4346 		if (connp->conn_ixa->ixa_ipsec_policy != NULL)
4347 			IPPOL_REFRELE(connp->conn_ixa->ixa_ipsec_policy);
4348 		connp->conn_ixa->ixa_ipsec_policy = p;
4349 		connp->conn_out_enforce_policy = (p != NULL);
4350 		if (p != NULL) {
4351 			connp->conn_ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4352 			if (connp->conn_policy == NULL) {
4353 				connp->conn_ixa->ixa_flags |=
4354 				    IXAF_IPSEC_GLOBAL_POLICY;
4355 			}
4356 		} else {
4357 			connp->conn_ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4358 		}
4359 		/* Clear the latched actions too, in case we're recaching. */
4360 		if (connp->conn_ixa->ixa_ipsec_action != NULL) {
4361 			IPACT_REFRELE(connp->conn_ixa->ixa_ipsec_action);
4362 			connp->conn_ixa->ixa_ipsec_action = NULL;
4363 		}
4364 		if (connp->conn_latch_in_action != NULL) {
4365 			IPACT_REFRELE(connp->conn_latch_in_action);
4366 			connp->conn_latch_in_action = NULL;
4367 		}
4368 		connp->conn_ixa->ixa_ipsec_src_port = sel.ips_local_port;
4369 		connp->conn_ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4370 		connp->conn_ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4371 		connp->conn_ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4372 		connp->conn_ixa->ixa_ipsec_proto = sel.ips_protocol;
4373 	} else {
4374 		connp->conn_ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4375 	}
4376 
4377 	/*
4378 	 * We may or may not have policy for this endpoint.  We still set
4379 	 * conn_policy_cached so that inbound datagrams don't have to look
4380 	 * at global policy as policy is considered latched for these
4381 	 * endpoints.  We should not set conn_policy_cached until the conn
4382 	 * reflects the actual policy. If we *set* this before inheriting
4383 	 * the policy, there is a window where the check
4384 	 * CONN_INBOUND_POLICY_PRESENT will neither check with the policy
4385 	 * on the conn (because we have not yet copied the policy on to
4386 	 * conn and hence not set conn_in_enforce_policy) nor with the
4387 	 * global policy (because conn_policy_cached is already set).
4388 	 */
4389 	connp->conn_policy_cached = B_TRUE;
4390 	return (0);
4391 }
4392 
4393 /*
4394  * When appropriate, this function caches outbound policy for faddr/fport.
4395  * It is used when we are not connected i.e., when we can not latch the
4396  * It is used when we are not connected, i.e., when we cannot latch the
4397  */
4398 void
4399 ipsec_cache_outbound_policy(const conn_t *connp, const in6_addr_t *v6src,
4400     const in6_addr_t *v6dst, in_port_t dstport, ip_xmit_attr_t *ixa)
4401 {
4402 	boolean_t	isv4 = (ixa->ixa_flags & IXAF_IS_IPV4) != 0;
4403 	boolean_t	global_policy_present;
4404 	netstack_t	*ns = connp->conn_netstack;
4405 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
4406 
4407 	ixa->ixa_ipsec_policy_gen = ipss->ipsec_system_policy.iph_gen;
4408 
4409 	/*
4410 	 * There is no policy caching for ICMP sockets because we can't
4411 	 * decide on which policy to use until we see the packet and get
4412 	 * type/code selectors.
4413 	 */
4414 	if (connp->conn_proto == IPPROTO_ICMP ||
4415 	    connp->conn_proto == IPPROTO_ICMPV6) {
4416 		ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4417 		if (ixa->ixa_ipsec_policy != NULL) {
4418 			IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4419 			ixa->ixa_ipsec_policy = NULL;
4420 		}
4421 		if (ixa->ixa_ipsec_action != NULL) {
4422 			IPACT_REFRELE(ixa->ixa_ipsec_action);
4423 			ixa->ixa_ipsec_action = NULL;
4424 		}
4425 		return;
4426 	}
4427 
4428 	global_policy_present = isv4 ?
4429 	    (ipss->ipsec_outbound_v4_policy_present ||
4430 	    ipss->ipsec_inbound_v4_policy_present) :
4431 	    (ipss->ipsec_outbound_v6_policy_present ||
4432 	    ipss->ipsec_inbound_v6_policy_present);
4433 
4434 	if ((connp->conn_policy != NULL) || global_policy_present) {
4435 		ipsec_selector_t sel;
4436 		ipsec_policy_t	*p;
4437 
4438 		bzero((void*)&sel, sizeof (sel));
4439 
4440 		sel.ips_protocol = connp->conn_proto;
4441 		sel.ips_local_port = connp->conn_lport;
4442 		sel.ips_remote_port = dstport;
4443 		sel.ips_is_icmp_inv_acq = 0;
4444 		sel.ips_isv4 = isv4;
4445 		if (isv4) {
4446 			IN6_V4MAPPED_TO_IPADDR(v6src, sel.ips_local_addr_v4);
4447 			IN6_V4MAPPED_TO_IPADDR(v6dst, sel.ips_remote_addr_v4);
4448 		} else {
4449 			sel.ips_local_addr_v6 = *v6src;
4450 			sel.ips_remote_addr_v6 = *v6dst;
4451 		}
4452 
4453 		p = ipsec_find_policy(IPSEC_TYPE_OUTBOUND, connp, &sel, ns);
4454 		if (ixa->ixa_ipsec_policy != NULL)
4455 			IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4456 		ixa->ixa_ipsec_policy = p;
4457 		if (p != NULL) {
4458 			ixa->ixa_flags |= IXAF_IPSEC_SECURE;
4459 			if (connp->conn_policy == NULL)
4460 				ixa->ixa_flags |= IXAF_IPSEC_GLOBAL_POLICY;
4461 		} else {
4462 			ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4463 		}
4464 		/* Clear the latched actions too, in case we're recaching. */
4465 		if (ixa->ixa_ipsec_action != NULL) {
4466 			IPACT_REFRELE(ixa->ixa_ipsec_action);
4467 			ixa->ixa_ipsec_action = NULL;
4468 		}
4469 
4470 		ixa->ixa_ipsec_src_port = sel.ips_local_port;
4471 		ixa->ixa_ipsec_dst_port = sel.ips_remote_port;
4472 		ixa->ixa_ipsec_icmp_type = sel.ips_icmp_type;
4473 		ixa->ixa_ipsec_icmp_code = sel.ips_icmp_code;
4474 		ixa->ixa_ipsec_proto = sel.ips_protocol;
4475 	} else {
4476 		ixa->ixa_flags &= ~IXAF_IPSEC_SECURE;
4477 		if (ixa->ixa_ipsec_policy != NULL) {
4478 			IPPOL_REFRELE(ixa->ixa_ipsec_policy);
4479 			ixa->ixa_ipsec_policy = NULL;
4480 		}
4481 		if (ixa->ixa_ipsec_action != NULL) {
4482 			IPACT_REFRELE(ixa->ixa_ipsec_action);
4483 			ixa->ixa_ipsec_action = NULL;
4484 		}
4485 	}
4486 }
4487 
4488 /*
4489  * Returns B_FALSE if the policy has gone stale.
4490  */
4491 boolean_t
4492 ipsec_outbound_policy_current(ip_xmit_attr_t *ixa)
4493 {
4494 	ipsec_stack_t	*ipss = ixa->ixa_ipst->ips_netstack->netstack_ipsec;
4495 
4496 	if (!(ixa->ixa_flags & IXAF_IPSEC_GLOBAL_POLICY))
4497 		return (B_TRUE);
4498 
4499 	return (ixa->ixa_ipsec_policy_gen == ipss->ipsec_system_policy.iph_gen);
4500 }
4501 
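
/*
 * Hedged sketch: a sender that cached policy in an ip_xmit_attr_t can use
 * the generation check above before reusing it, e.g.
 *
 *	if (!ipsec_outbound_policy_current(ixa)) {
 *		ipsec_out_release_refs(ixa);
 *		mp = ip_output_attach_policy(mp, ipha, ip6h, connp, ixa);
 *	}
 *
 * so that a stale snapshot of global policy is refreshed before transmit.
 */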
4502 void
4503 iplatch_free(ipsec_latch_t *ipl)
4504 {
4505 	if (ipl->ipl_local_cid != NULL)
4506 		IPSID_REFRELE(ipl->ipl_local_cid);
4507 	if (ipl->ipl_remote_cid != NULL)
4508 		IPSID_REFRELE(ipl->ipl_remote_cid);
4509 	mutex_destroy(&ipl->ipl_lock);
4510 	kmem_free(ipl, sizeof (*ipl));
4511 }
4512 
4513 ipsec_latch_t *
4514 iplatch_create()
4515 {
4516 	ipsec_latch_t *ipl = kmem_alloc(sizeof (*ipl), KM_NOSLEEP);
4517 	if (ipl == NULL)
4518 		return (ipl);
4519 	bzero(ipl, sizeof (*ipl));
4520 	mutex_init(&ipl->ipl_lock, NULL, MUTEX_DEFAULT, NULL);
4521 	ipl->ipl_refcnt = 1;
4522 	return (ipl);
4523 }
4524 
4525 /*
4526  * Hash function for ID hash table.
4527  */
4528 static uint32_t
4529 ipsid_hash(int idtype, char *idstring)
4530 {
4531 	uint32_t hval = idtype;
4532 	unsigned char c;
4533 
4534 	while ((c = *idstring++) != 0) {
4535 		hval = (hval << 4) | (hval >> 28);
4536 		hval ^= c;
4537 	}
4538 	hval = hval ^ (hval >> 16);
4539 	return (hval & (IPSID_HASHSIZE-1));
4540 }
4541 
4542 /*
4543  * Look up identity string in hash table.  Return identity object
4544  * corresponding to the name -- either preexisting, or newly allocated.
4545  *
4546  * Return NULL if we need to allocate a new one and can't get memory.
4547  */
4548 ipsid_t *
4549 ipsid_lookup(int idtype, char *idstring, netstack_t *ns)
4550 {
4551 	ipsid_t *retval;
4552 	char *nstr;
4553 	int idlen = strlen(idstring) + 1;
4554 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
4555 	ipsif_t *bucket;
4556 
4557 	bucket = &ipss->ipsec_ipsid_buckets[ipsid_hash(idtype, idstring)];
4558 
4559 	mutex_enter(&bucket->ipsif_lock);
4560 
4561 	for (retval = bucket->ipsif_head; retval != NULL;
4562 	    retval = retval->ipsid_next) {
4563 		if (idtype != retval->ipsid_type)
4564 			continue;
4565 		if (bcmp(idstring, retval->ipsid_cid, idlen) != 0)
4566 			continue;
4567 
4568 		IPSID_REFHOLD(retval);
4569 		mutex_exit(&bucket->ipsif_lock);
4570 		return (retval);
4571 	}
4572 
4573 	retval = kmem_alloc(sizeof (*retval), KM_NOSLEEP);
4574 	if (!retval) {
4575 		mutex_exit(&bucket->ipsif_lock);
4576 		return (NULL);
4577 	}
4578 
4579 	nstr = kmem_alloc(idlen, KM_NOSLEEP);
4580 	if (!nstr) {
4581 		mutex_exit(&bucket->ipsif_lock);
4582 		kmem_free(retval, sizeof (*retval));
4583 		return (NULL);
4584 	}
4585 
4586 	retval->ipsid_refcnt = 1;
4587 	retval->ipsid_next = bucket->ipsif_head;
4588 	if (retval->ipsid_next != NULL)
4589 		retval->ipsid_next->ipsid_ptpn = &retval->ipsid_next;
4590 	retval->ipsid_ptpn = &bucket->ipsif_head;
4591 	retval->ipsid_type = idtype;
4592 	retval->ipsid_cid = nstr;
4593 	bucket->ipsif_head = retval;
4594 	bcopy(idstring, nstr, idlen);
4595 	mutex_exit(&bucket->ipsif_lock);
4596 
4597 	return (retval);
4598 }
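
/*
 * Hedged usage sketch (identity type and string are illustrative): a
 * consumer that needs to latch a peer identity might do
 *
 *	ipsid_t *cid = ipsid_lookup(SADB_IDENTTYPE_FQDN, "gw.example.com", ns);
 *	if (cid == NULL)
 *		return (ENOMEM);
 *	...use cid, e.g. store it in an ipsec_latch_t...
 *	IPSID_REFRELE(cid);
 *
 * Entries whose refcount has dropped to zero are reclaimed by the next
 * ipsid_gc() pass below.
 */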
4599 
4600 /*
4601  * Garbage collect the identity hash table.
4602  */
4603 void
4604 ipsid_gc(netstack_t *ns)
4605 {
4606 	int i, len;
4607 	ipsid_t *id, *nid;
4608 	ipsif_t *bucket;
4609 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
4610 
4611 	for (i = 0; i < IPSID_HASHSIZE; i++) {
4612 		bucket = &ipss->ipsec_ipsid_buckets[i];
4613 		mutex_enter(&bucket->ipsif_lock);
4614 		for (id = bucket->ipsif_head; id != NULL; id = nid) {
4615 			nid = id->ipsid_next;
4616 			if (id->ipsid_refcnt == 0) {
4617 				*id->ipsid_ptpn = nid;
4618 				if (nid != NULL)
4619 					nid->ipsid_ptpn = id->ipsid_ptpn;
4620 				len = strlen(id->ipsid_cid) + 1;
4621 				kmem_free(id->ipsid_cid, len);
4622 				kmem_free(id, sizeof (*id));
4623 			}
4624 		}
4625 		mutex_exit(&bucket->ipsif_lock);
4626 	}
4627 }
4628 
4629 /*
4630  * Return true if two identities are the same.
4631  */
4632 boolean_t
4633 ipsid_equal(ipsid_t *id1, ipsid_t *id2)
4634 {
4635 	if (id1 == id2)
4636 		return (B_TRUE);
4637 #ifdef DEBUG
4638 	if ((id1 == NULL) || (id2 == NULL))
4639 		return (B_FALSE);
4640 	/*
4641 	 * Test that we're interning ids correctly.
4642 	 */
4643 	ASSERT((strcmp(id1->ipsid_cid, id2->ipsid_cid) != 0) ||
4644 	    (id1->ipsid_type != id2->ipsid_type));
4645 #endif
4646 	return (B_FALSE);
4647 }
4648 
4649 /*
4650  * Initialize identity table; called during module initialization.
4651  */
4652 static void
4653 ipsid_init(netstack_t *ns)
4654 {
4655 	ipsif_t *bucket;
4656 	int i;
4657 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
4658 
4659 	for (i = 0; i < IPSID_HASHSIZE; i++) {
4660 		bucket = &ipss->ipsec_ipsid_buckets[i];
4661 		mutex_init(&bucket->ipsif_lock, NULL, MUTEX_DEFAULT, NULL);
4662 	}
4663 }
4664 
4665 /*
4666  * Free identity table (preparatory to module unload)
4667  */
4668 static void
4669 ipsid_fini(netstack_t *ns)
4670 {
4671 	ipsif_t *bucket;
4672 	int i;
4673 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
4674 
4675 	for (i = 0; i < IPSID_HASHSIZE; i++) {
4676 		bucket = &ipss->ipsec_ipsid_buckets[i];
4677 		ASSERT(bucket->ipsif_head == NULL);
4678 		mutex_destroy(&bucket->ipsif_lock);
4679 	}
4680 }
4681 
4682 /*
4683  * Update the minimum and maximum supported key sizes for the
4684  * specified algorithm. Must be called while holding the algorithms lock.
4685  */
4686 void
4687 ipsec_alg_fix_min_max(ipsec_alginfo_t *alg, ipsec_algtype_t alg_type,
4688     netstack_t *ns)
4689 {
4690 	size_t crypto_min = (size_t)-1, crypto_max = 0;
4691 	size_t cur_crypto_min, cur_crypto_max;
4692 	boolean_t is_valid;
4693 	crypto_mechanism_info_t *mech_infos;
4694 	uint_t nmech_infos;
4695 	int crypto_rc, i;
4696 	crypto_mech_usage_t mask;
4697 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
4698 
4699 	ASSERT(MUTEX_HELD(&ipss->ipsec_alg_lock));
4700 
4701 	/*
4702 	 * Compute the min, max, and default key sizes (in number of
4703 	 * increments to the default key size in bits) as defined
4704 	 * by the algorithm mappings. This range of key sizes is used
4705 	 * for policy related operations. The effective key sizes
4706 	 * supported by the framework could be more limited than
4707 	 * those defined for an algorithm.
4708 	 */
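	/*
	 * Worked example (values are illustrative only, e.g. a
	 * Blowfish-like mapping): with alg_key_sizes = { 128, 32, 448 }
	 * and alg_increment = 8, the default key size is 128 bits and the
	 * policy-visible range is 32-448 bits in 8-bit steps; with
	 * alg_increment == 0, the sizes listed in alg_key_sizes[] are an
	 * explicit enumeration instead.
	 */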
4709 	alg->alg_default_bits = alg->alg_key_sizes[0];
4710 	alg->alg_default = 0;
4711 	if (alg->alg_increment != 0) {
4712 		/* key sizes are defined by range & increment */
4713 		alg->alg_minbits = alg->alg_key_sizes[1];
4714 		alg->alg_maxbits = alg->alg_key_sizes[2];
4715 	} else if (alg->alg_nkey_sizes == 0) {
4716 		/* no specified key size for algorithm */
4717 		alg->alg_minbits = alg->alg_maxbits = 0;
4718 	} else {
4719 		/* key sizes are defined by enumeration */
4720 		alg->alg_minbits = (uint16_t)-1;
4721 		alg->alg_maxbits = 0;
4722 
4723 		for (i = 0; i < alg->alg_nkey_sizes; i++) {
4724 			if (alg->alg_key_sizes[i] < alg->alg_minbits)
4725 				alg->alg_minbits = alg->alg_key_sizes[i];
4726 			if (alg->alg_key_sizes[i] > alg->alg_maxbits)
4727 				alg->alg_maxbits = alg->alg_key_sizes[i];
4728 		}
4729 	}
4730 
4731 	if (!(alg->alg_flags & ALG_FLAG_VALID))
4732 		return;
4733 
4734 	/*
4735 	 * Mechanisms do not apply to the NULL encryption
4736 	 * algorithm, so simply return for this case.
4737 	 */
4738 	if (alg->alg_id == SADB_EALG_NULL)
4739 		return;
4740 
4741 	/*
4742 	 * Find the min and max key sizes supported by the cryptographic
4743 	 * framework providers.
4744 	 */
4745 
4746 	/* get the key sizes supported by the framework */
4747 	crypto_rc = crypto_get_all_mech_info(alg->alg_mech_type,
4748 	    &mech_infos, &nmech_infos, KM_SLEEP);
4749 	if (crypto_rc != CRYPTO_SUCCESS || nmech_infos == 0) {
4750 		alg->alg_flags &= ~ALG_FLAG_VALID;
4751 		return;
4752 	}
4753 
4754 	/* min and max key sizes supported by framework */
4755 	for (i = 0, is_valid = B_FALSE; i < nmech_infos; i++) {
4756 		int unit_bits;
4757 
4758 		/*
4759 		 * Ignore entries that do not support the operations
4760 		 * needed for the algorithm type.
4761 		 */
4762 		if (alg_type == IPSEC_ALG_AUTH) {
4763 			mask = CRYPTO_MECH_USAGE_MAC;
4764 		} else {
4765 			mask = CRYPTO_MECH_USAGE_ENCRYPT |
4766 			    CRYPTO_MECH_USAGE_DECRYPT;
4767 		}
4768 		if ((mech_infos[i].mi_usage & mask) != mask)
4769 			continue;
4770 
4771 		unit_bits = (mech_infos[i].mi_keysize_unit ==
4772 		    CRYPTO_KEYSIZE_UNIT_IN_BYTES)  ? 8 : 1;
4773 		/* adjust min/max supported by framework */
4774 		cur_crypto_min = mech_infos[i].mi_min_key_size * unit_bits;
4775 		cur_crypto_max = mech_infos[i].mi_max_key_size * unit_bits;
4776 
4777 		if (cur_crypto_min < crypto_min)
4778 			crypto_min = cur_crypto_min;
4779 
4780 		/*
4781 		 * CRYPTO_EFFECTIVELY_INFINITE is a special value of
4782 		 * the crypto framework which means "no upper limit".
4783 		 */
4784 		if (mech_infos[i].mi_max_key_size ==
4785 		    CRYPTO_EFFECTIVELY_INFINITE) {
4786 			crypto_max = (size_t)-1;
4787 		} else if (cur_crypto_max > crypto_max) {
4788 			crypto_max = cur_crypto_max;
4789 		}
4790 
4791 		is_valid = B_TRUE;
4792 	}
4793 
4794 	kmem_free(mech_infos, sizeof (crypto_mechanism_info_t) *
4795 	    nmech_infos);
4796 
4797 	if (!is_valid) {
4798 		/* no key sizes supported by framework */
4799 		alg->alg_flags &= ~ALG_FLAG_VALID;
4800 		return;
4801 	}
4802 
4803 	/*
4804 	 * Determine min and max key sizes from the alg_key_sizes[]
4805 	 * defined for the algorithm entry.  Adjust key sizes based on
4806 	 * those supported by the framework.
4807 	 */
4808 	alg->alg_ef_default_bits = alg->alg_key_sizes[0];
4809 
4810 	/*
4811 	 * For backwards compatibility, assume that the IV length
4812 	 * is the same as the data length.
4813 	 */
4814 	alg->alg_ivlen = alg->alg_datalen;
4815 
4816 	/*
4817 	 * Copy any algorithm parameters (if provided) into dedicated
4818 	 * elements in the ipsec_alginfo_t structure.
4819 	 * There may be a better place to put this code.
4820 	 */
4821 	for (i = 0; i < alg->alg_nparams; i++) {
4822 		switch (i) {
4823 		case 0:
4824 			/* Initialisation Vector length (bytes) */
4825 			alg->alg_ivlen =  alg->alg_params[0];
4826 			break;
4827 		case 1:
4828 			/* Integrity Check Vector length (bytes) */
4829 			alg->alg_icvlen = alg->alg_params[1];
4830 			break;
4831 		case 2:
4832 			/* Salt length (bytes) */
4833 			alg->alg_saltlen = (uint8_t)alg->alg_params[2];
4834 			break;
4835 		default:
4836 			break;
4837 		}
4838 	}
4839 
4840 	/* Default if the IV length is not specified. */
4841 	if (alg_type == IPSEC_ALG_ENCR && alg->alg_ivlen == 0)
4842 		alg->alg_ivlen = alg->alg_datalen;
4843 
4844 	alg_flag_check(alg);
4845 
4846 	if (alg->alg_increment != 0) {
4847 		/* supported key sizes are defined by range  & increment */
4848 		crypto_min = ALGBITS_ROUND_UP(crypto_min, alg->alg_increment);
4849 		crypto_max = ALGBITS_ROUND_DOWN(crypto_max, alg->alg_increment);
4850 
4851 		alg->alg_ef_minbits = MAX(alg->alg_minbits,
4852 		    (uint16_t)crypto_min);
4853 		alg->alg_ef_maxbits = MIN(alg->alg_maxbits,
4854 		    (uint16_t)crypto_max);
4855 
4856 		/*
4857 		 * If the sizes supported by the framework are outside
4858 		 * the range of sizes defined by the algorithm mappings,
4859 		 * the algorithm cannot be used. Check for this
4860 		 * condition here.
4861 		 */
4862 		if (alg->alg_ef_minbits > alg->alg_ef_maxbits) {
4863 			alg->alg_flags &= ~ALG_FLAG_VALID;
4864 			return;
4865 		}
4866 		if (alg->alg_ef_default_bits < alg->alg_ef_minbits)
4867 			alg->alg_ef_default_bits = alg->alg_ef_minbits;
4868 		if (alg->alg_ef_default_bits > alg->alg_ef_maxbits)
4869 			alg->alg_ef_default_bits = alg->alg_ef_maxbits;
4870 	} else if (alg->alg_nkey_sizes == 0) {
4871 		/* no specified key size for algorithm */
4872 		alg->alg_ef_minbits = alg->alg_ef_maxbits = 0;
4873 	} else {
4874 		/* supported key sizes are defined by enumeration */
4875 		alg->alg_ef_minbits = (uint16_t)-1;
4876 		alg->alg_ef_maxbits = 0;
4877 
4878 		for (i = 0, is_valid = B_FALSE; i < alg->alg_nkey_sizes; i++) {
4879 			/*
4880 			 * Ignore the current key size if it is not in the
4881 			 * range of sizes supported by the framework.
4882 			 */
4883 			if (alg->alg_key_sizes[i] < crypto_min ||
4884 			    alg->alg_key_sizes[i] > crypto_max)
4885 				continue;
4886 			if (alg->alg_key_sizes[i] < alg->alg_ef_minbits)
4887 				alg->alg_ef_minbits = alg->alg_key_sizes[i];
4888 			if (alg->alg_key_sizes[i] > alg->alg_ef_maxbits)
4889 				alg->alg_ef_maxbits = alg->alg_key_sizes[i];
4890 			is_valid = B_TRUE;
4891 		}
4892 
4893 		if (!is_valid) {
4894 			alg->alg_flags &= ~ALG_FLAG_VALID;
4895 			return;
4896 		}
4897 		alg->alg_ef_default = 0;
4898 	}
4899 }
4900 
4901 /*
4902  * Sanity check parameters provided by ipsecalgs(1m). Assume that
4903  * the algorithm is marked as valid; there is a check at the top
4904  * of this function. If any of the checks below fail, the algorithm
4905  * entry is invalid.
4906  */
4907 void
4908 alg_flag_check(ipsec_alginfo_t *alg)
4909 {
4910 	alg->alg_flags &= ~ALG_FLAG_VALID;
4911 
4912 	/*
4913 	 * Can't have the algorithm marked as both CCM and GCM.
4914 	 * Check that the ALG_FLAG_COMBINED and ALG_FLAG_COUNTERMODE
4915 	 * flags are set for CCM & GCM.
4916 	 */
4917 	if ((alg->alg_flags & (ALG_FLAG_CCM|ALG_FLAG_GCM)) ==
4918 	    (ALG_FLAG_CCM|ALG_FLAG_GCM))
4919 		return;
4920 	if (alg->alg_flags & (ALG_FLAG_CCM|ALG_FLAG_GCM)) {
4921 		if (!(alg->alg_flags & ALG_FLAG_COUNTERMODE))
4922 			return;
4923 		if (!(alg->alg_flags & ALG_FLAG_COMBINED))
4924 			return;
4925 	}
4926 
4927 	/*
4928 	 * For ALG_FLAG_COUNTERMODE, check the parameters
4929 	 * fit in the ipsec_nonce_t structure.
4930 	 */
4931 	if (alg->alg_flags & ALG_FLAG_COUNTERMODE) {
4932 		if (alg->alg_ivlen != sizeof (((ipsec_nonce_t *)NULL)->iv))
4933 			return;
4934 		if (alg->alg_saltlen > sizeof (((ipsec_nonce_t *)NULL)->salt))
4935 			return;
4936 	}
4937 	if ((alg->alg_flags & ALG_FLAG_COMBINED) &&
4938 	    (alg->alg_icvlen == 0))
4939 		return;
4940 
4941 	/* all is well. */
4942 	alg->alg_flags |= ALG_FLAG_VALID;
4943 }
4944 
4945 /*
4946  * Free the memory used by the specified algorithm.
4947  */
4948 void
4949 ipsec_alg_free(ipsec_alginfo_t *alg)
4950 {
4951 	if (alg == NULL)
4952 		return;
4953 
4954 	if (alg->alg_key_sizes != NULL) {
4955 		kmem_free(alg->alg_key_sizes,
4956 		    (alg->alg_nkey_sizes + 1) * sizeof (uint16_t));
4957 		alg->alg_key_sizes = NULL;
4958 	}
4959 	if (alg->alg_block_sizes != NULL) {
4960 		kmem_free(alg->alg_block_sizes,
4961 		    (alg->alg_nblock_sizes + 1) * sizeof (uint16_t));
4962 		alg->alg_block_sizes = NULL;
4963 	}
4964 	if (alg->alg_params != NULL) {
4965 		kmem_free(alg->alg_params,
4966 		    (alg->alg_nparams + 1) * sizeof (uint16_t));
4967 		alg->alg_params = NULL;
4968 	}
4969 	kmem_free(alg, sizeof (*alg));
4970 }
4971 
4972 /*
4973  * Check the validity of the specified key size for an algorithm.
4974  * Returns B_TRUE if key size is valid, B_FALSE otherwise.
4975  */
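/*
 * For example (illustrative values): with an enumerated mapping of
 * alg_key_sizes = { 128, 192, 256 } and alg_increment == 0, a key size of
 * 192 is accepted while 160 is rejected; when alg_increment != 0, only the
 * [alg_ef_minbits, alg_ef_maxbits] range check applies.
 */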
4976 boolean_t
4977 ipsec_valid_key_size(uint16_t key_size, ipsec_alginfo_t *alg)
4978 {
4979 	if (key_size < alg->alg_ef_minbits || key_size > alg->alg_ef_maxbits)
4980 		return (B_FALSE);
4981 
4982 	if (alg->alg_increment == 0 && alg->alg_nkey_sizes != 0) {
4983 		/*
4984 		 * If the key sizes are defined by enumeration, the new
4985 		 * key size must be equal to one of the supported values.
4986 		 */
4987 		int i;
4988 
4989 		for (i = 0; i < alg->alg_nkey_sizes; i++)
4990 			if (key_size == alg->alg_key_sizes[i])
4991 				break;
4992 		if (i == alg->alg_nkey_sizes)
4993 			return (B_FALSE);
4994 	}
4995 
4996 	return (B_TRUE);
4997 }
4998 
4999 /*
5000  * Callback function invoked by the crypto framework when a provider
5001  * registers or unregisters. This callback updates the algorithms
5002  * tables when a crypto algorithm is no longer available or becomes
5003  * available, and triggers the freeing/creation of context templates
5004  * associated with existing SAs, if needed.
5005  *
5006  * Need to walk all stack instances since the callback is global
5007  * for all instances
5008  */
5009 void
5010 ipsec_prov_update_callback(uint32_t event, void *event_arg)
5011 {
5012 	netstack_handle_t nh;
5013 	netstack_t *ns;
5014 
5015 	netstack_next_init(&nh);
5016 	while ((ns = netstack_next(&nh)) != NULL) {
5017 		ipsec_prov_update_callback_stack(event, event_arg, ns);
5018 		netstack_rele(ns);
5019 	}
5020 	netstack_next_fini(&nh);
5021 }
5022 
5023 static void
5024 ipsec_prov_update_callback_stack(uint32_t event, void *event_arg,
5025     netstack_t *ns)
5026 {
5027 	crypto_notify_event_change_t *prov_change =
5028 	    (crypto_notify_event_change_t *)event_arg;
5029 	uint_t algidx, algid, algtype, mech_count, mech_idx;
5030 	ipsec_alginfo_t *alg;
5031 	ipsec_alginfo_t oalg;
5032 	crypto_mech_name_t *mechs;
5033 	boolean_t alg_changed = B_FALSE;
5034 	ipsec_stack_t	*ipss = ns->netstack_ipsec;
5035 
5036 	/* ignore events for which we didn't register */
5037 	if (event != CRYPTO_EVENT_MECHS_CHANGED) {
5038 		ip1dbg(("ipsec_prov_update_callback: unexpected event 0x%x "
5039 		    " received from crypto framework\n", event));
5040 		    "received from crypto framework\n", event));
5041 	}
5042 
5043 	mechs = crypto_get_mech_list(&mech_count, KM_SLEEP);
5044 	if (mechs == NULL)
5045 		return;
5046 
5047 	/*
5048 	 * Walk the list of currently defined IPsec algorithms.  Update
5049 	 * the algorithm valid flag and trigger an update of the
5050 	 * SAs that depend on that algorithm.
5051 	 */
5052 	mutex_enter(&ipss->ipsec_alg_lock);
5053 	for (algtype = 0; algtype < IPSEC_NALGTYPES; algtype++) {
5054 		for (algidx = 0; algidx < ipss->ipsec_nalgs[algtype];
5055 		    algidx++) {
5056 
5057 			algid = ipss->ipsec_sortlist[algtype][algidx];
5058 			alg = ipss->ipsec_alglists[algtype][algid];
5059 			ASSERT(alg != NULL);
5060 
5061 			/*
5062 			 * Skip the algorithms which do not map to the
5063 			 * crypto framework provider being added or removed.
5064 			 */
5065 			if (strncmp(alg->alg_mech_name,
5066 			    prov_change->ec_mech_name,
5067 			    CRYPTO_MAX_MECH_NAME) != 0)
5068 				continue;
5069 
5070 			/*
5071 			 * Determine if the mechanism is valid. If it
5072 			 * is not, mark the algorithm as being invalid. If
5073 			 * it is, mark the algorithm as being valid.
5074 			 */
5075 			for (mech_idx = 0; mech_idx < mech_count; mech_idx++)
5076 				if (strncmp(alg->alg_mech_name,
5077 				    mechs[mech_idx], CRYPTO_MAX_MECH_NAME) == 0)
5078 					break;
5079 			if (mech_idx == mech_count &&
5080 			    alg->alg_flags & ALG_FLAG_VALID) {
5081 				alg->alg_flags &= ~ALG_FLAG_VALID;
5082 				alg_changed = B_TRUE;
5083 			} else if (mech_idx < mech_count &&
5084 			    !(alg->alg_flags & ALG_FLAG_VALID)) {
5085 				alg->alg_flags |= ALG_FLAG_VALID;
5086 				alg_changed = B_TRUE;
5087 			}
5088 
5089 			/*
5090 			 * Update the supported key sizes, regardless
5091 			 * of whether a crypto provider was added or
5092 			 * removed.
5093 			 */
5094 			oalg = *alg;
5095 			ipsec_alg_fix_min_max(alg, algtype, ns);
5096 			if (!alg_changed &&
5097 			    (alg->alg_ef_minbits != oalg.alg_ef_minbits ||
5098 			    alg->alg_ef_maxbits != oalg.alg_ef_maxbits ||
5099 			    alg->alg_ef_default != oalg.alg_ef_default ||
5100 			    alg->alg_ef_default_bits !=
5101 			    oalg.alg_ef_default_bits))
5102 				alg_changed = B_TRUE;
5103 
5104 			/*
5105 			 * Update the affected SAs if a software provider is
5106 			 * being added or removed.
5107 			 */
5108 			if (prov_change->ec_provider_type ==
5109 			    CRYPTO_SW_PROVIDER)
5110 				sadb_alg_update(algtype, alg->alg_id,
5111 				    prov_change->ec_change ==
5112 				    CRYPTO_MECH_ADDED, ns);
5113 		}
5114 	}
5115 	mutex_exit(&ipss->ipsec_alg_lock);
5116 	crypto_free_mech_list(mechs, mech_count);
5117 
5118 	if (alg_changed) {
5119 		/*
5120 		 * An algorithm has changed, i.e. it became valid or
5121 		 * invalid, or its supported key sizes have changed.
5122 		 * Notify ipsecah and ipsecesp of this change so
5123 		 * that they can send a SADB_REGISTER to their consumers.
5124 		 */
5125 		ipsecah_algs_changed(ns);
5126 		ipsecesp_algs_changed(ns);
5127 	}
5128 }
5129 
5130 /*
5131  * Registers with the crypto framework to be notified of crypto
5132  * providers changes. Used to update the algorithm tables and
5133  * to free or create context templates if needed. Invoked after IPsec
5134  * is loaded successfully.
5135  *
5136  * This is called separately for each IP instance, so we ensure we only
5137  * register once.
5138  */
5139 void
5140 ipsec_register_prov_update(void)
5141 {
5142 	if (prov_update_handle != NULL)
5143 		return;
5144 
5145 	prov_update_handle = crypto_notify_events(
5146 	    ipsec_prov_update_callback, CRYPTO_EVENT_MECHS_CHANGED);
5147 }
5148 
5149 /*
5150  * Unregisters from the framework to be notified of crypto providers
5151  * changes. Called from ipsec_policy_g_destroy().
5152  */
5153 static void
5154 ipsec_unregister_prov_update(void)
5155 {
5156 	if (prov_update_handle != NULL)
5157 		crypto_unnotify_events(prov_update_handle);
5158 }
5159 
5160 /*
5161  * Tunnel-mode support routines.
5162  */
5163 
5164 /*
5165  * Returns an mblk chain suitable for putnext() if policies match and IPsec
5166  * SAs are available.  If there's no per-tunnel policy, or the lookup comes
5167  * back with no match, then still return the packet and have global policy take
5168  * a crack at it in IP.
5169  * This updates the ip_xmit_attr with the IPsec policy.
5170  *
5171  * Remember -> we can be forwarding packets.  Keep that in mind w.r.t.
5172  * inner-packet contents.
5173  */
5174 mblk_t *
5175 ipsec_tun_outbound(mblk_t *mp, iptun_t *iptun, ipha_t *inner_ipv4,
5176     ip6_t *inner_ipv6, ipha_t *outer_ipv4, ip6_t *outer_ipv6, int outer_hdr_len,
5177     ip_xmit_attr_t *ixa)
5178 {
5179 	ipsec_policy_head_t *polhead;
5180 	ipsec_selector_t sel;
5181 	mblk_t *nmp;
5182 	boolean_t is_fragment;
5183 	ipsec_policy_t *pol;
5184 	ipsec_tun_pol_t *itp = iptun->iptun_itp;
5185 	netstack_t *ns = iptun->iptun_ns;
5186 	ipsec_stack_t *ipss = ns->netstack_ipsec;
5187 
5188 	ASSERT((outer_ipv6 != NULL && outer_ipv4 == NULL) ||
5189 	    (outer_ipv4 != NULL && outer_ipv6 == NULL));
5190 	/* We take care of inners in a bit. */
5191 
5192 	/* Are the IPsec fields initialized at all? */
5193 	if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE)) {
5194 		ASSERT(ixa->ixa_ipsec_policy == NULL);
5195 		ASSERT(ixa->ixa_ipsec_latch == NULL);
5196 		ASSERT(ixa->ixa_ipsec_action == NULL);
5197 		ASSERT(ixa->ixa_ipsec_ah_sa == NULL);
5198 		ASSERT(ixa->ixa_ipsec_esp_sa == NULL);
5199 	}
5200 
5201 	ASSERT(itp != NULL && (itp->itp_flags & ITPF_P_ACTIVE));
5202 	polhead = itp->itp_policy;
5203 
5204 	bzero(&sel, sizeof (sel));
5205 	if (inner_ipv4 != NULL) {
5206 		ASSERT(inner_ipv6 == NULL);
5207 		sel.ips_isv4 = B_TRUE;
5208 		sel.ips_local_addr_v4 = inner_ipv4->ipha_src;
5209 		sel.ips_remote_addr_v4 = inner_ipv4->ipha_dst;
5210 		sel.ips_protocol = (uint8_t)inner_ipv4->ipha_protocol;
5211 	} else {
5212 		ASSERT(inner_ipv6 != NULL);
5213 		sel.ips_isv4 = B_FALSE;
5214 		sel.ips_local_addr_v6 = inner_ipv6->ip6_src;
5215 		/*
5216 		 * We don't care about routing-header dests in the
5217 		 * forwarding/tunnel path, so just grab ip6_dst.
5218 		 */
5219 		sel.ips_remote_addr_v6 = inner_ipv6->ip6_dst;
5220 	}
5221 
5222 	if (itp->itp_flags & ITPF_P_PER_PORT_SECURITY) {
5223 		/*
5224 		 * Caller can prepend the outer header, which means
5225 		 * inner_ipv[46] may be stuck in the middle.  Pullup the whole
5226 		 * mess now if need-be, for easier processing later.  Don't
5227 		 * forget to rewire the outer header too.
5228 		 */
5229 		if (mp->b_cont != NULL) {
5230 			nmp = msgpullup(mp, -1);
5231 			if (nmp == NULL) {
5232 				ip_drop_packet(mp, B_FALSE, NULL,
5233 				    DROPPER(ipss, ipds_spd_nomem),
5234 				    &ipss->ipsec_spd_dropper);
5235 				return (NULL);
5236 			}
5237 			freemsg(mp);
5238 			mp = nmp;
5239 			if (outer_ipv4 != NULL)
5240 				outer_ipv4 = (ipha_t *)mp->b_rptr;
5241 			else
5242 				outer_ipv6 = (ip6_t *)mp->b_rptr;
5243 			if (inner_ipv4 != NULL) {
5244 				inner_ipv4 =
5245 				    (ipha_t *)(mp->b_rptr + outer_hdr_len);
5246 			} else {
5247 				inner_ipv6 =
5248 				    (ip6_t *)(mp->b_rptr + outer_hdr_len);
5249 			}
5250 		}
5251 		if (inner_ipv4 != NULL) {
5252 			is_fragment = IS_V4_FRAGMENT(
5253 			    inner_ipv4->ipha_fragment_offset_and_flags);
5254 		} else {
5255 			sel.ips_remote_addr_v6 = ip_get_dst_v6(inner_ipv6, mp,
5256 			    &is_fragment);
5257 		}
5258 
5259 		if (is_fragment) {
5260 			ipha_t *oiph;
5261 			ipha_t *iph = NULL;
5262 			ip6_t *ip6h = NULL;
5263 			int hdr_len;
5264 			uint16_t ip6_hdr_length;
5265 			uint8_t v6_proto;
5266 			uint8_t *v6_proto_p;
5267 
5268 			/*
5269 			 * We have a fragment we need to track!
5270 			 */
5271 			mp = ipsec_fragcache_add(&itp->itp_fragcache, NULL, mp,
5272 			    outer_hdr_len, ipss);
5273 			if (mp == NULL)
5274 				return (NULL);
5275 			ASSERT(mp->b_cont == NULL);
5276 
5277 			/*
5278 			 * If we get here, we have a full fragment chain
5279 			 */
5280 
5281 			oiph = (ipha_t *)mp->b_rptr;
5282 			if (IPH_HDR_VERSION(oiph) == IPV4_VERSION) {
5283 				hdr_len = ((outer_hdr_len != 0) ?
5284 				    IPH_HDR_LENGTH(oiph) : 0);
5285 				iph = (ipha_t *)(mp->b_rptr + hdr_len);
5286 			} else {
5287 				ASSERT(IPH_HDR_VERSION(oiph) == IPV6_VERSION);
5288 				ip6h = (ip6_t *)mp->b_rptr;
5289 				if (!ip_hdr_length_nexthdr_v6(mp, ip6h,
5290 				    &ip6_hdr_length, &v6_proto_p)) {
5291 					ip_drop_packet_chain(mp, B_FALSE, NULL,
5292 					    DROPPER(ipss,
5293 					    ipds_spd_malformed_packet),
5294 					    &ipss->ipsec_spd_dropper);
5295 					return (NULL);
5296 				}
5297 				hdr_len = ip6_hdr_length;
5298 			}
5299 			outer_hdr_len = hdr_len;
5300 
5301 			if (sel.ips_isv4) {
5302 				if (iph == NULL) {
5303 					/* Was v6 outer */
5304 					iph = (ipha_t *)(mp->b_rptr + hdr_len);
5305 				}
5306 				inner_ipv4 = iph;
5307 				sel.ips_local_addr_v4 = inner_ipv4->ipha_src;
5308 				sel.ips_remote_addr_v4 = inner_ipv4->ipha_dst;
5309 				sel.ips_protocol =
5310 				    (uint8_t)inner_ipv4->ipha_protocol;
5311 			} else {
5312 				inner_ipv6 = (ip6_t *)(mp->b_rptr +
5313 				    hdr_len);
5314 				sel.ips_local_addr_v6 = inner_ipv6->ip6_src;
5315 				sel.ips_remote_addr_v6 = inner_ipv6->ip6_dst;
5316 				if (!ip_hdr_length_nexthdr_v6(mp,
5317 				    inner_ipv6, &ip6_hdr_length, &v6_proto_p)) {
5318 					ip_drop_packet_chain(mp, B_FALSE, NULL,
5319 					    DROPPER(ipss,
5320 					    ipds_spd_malformed_frag),
5321 					    &ipss->ipsec_spd_dropper);
5322 					return (NULL);
5323 				}
5324 				v6_proto = *v6_proto_p;
5325 				sel.ips_protocol = v6_proto;
5326 #ifdef FRAGCACHE_DEBUG
5327 				cmn_err(CE_WARN, "v6_sel.ips_protocol = %d\n",
5328 				    sel.ips_protocol);
5329 #endif
5330 			}
5331 			/* Ports are extracted below */
5332 		}
5333 
5334 		/* Get ports... */
5335 		if (!ipsec_init_outbound_ports(&sel, mp,
5336 		    inner_ipv4, inner_ipv6, outer_hdr_len, ipss)) {
5337 			/* callee did ip_drop_packet_chain() on mp. */
5338 			return (NULL);
5339 		}
5340 #ifdef FRAGCACHE_DEBUG
5341 		if (inner_ipv4 != NULL)
5342 			cmn_err(CE_WARN,
5343 			    "(v4) sel.ips_protocol = %d, "
5344 			    "sel.ips_local_port = %d, "
5345 			    "sel.ips_remote_port = %d\n",
5346 			    sel.ips_protocol, ntohs(sel.ips_local_port),
5347 			    ntohs(sel.ips_remote_port));
5348 		if (inner_ipv6 != NULL)
5349 			cmn_err(CE_WARN,
5350 			    "(v6) sel.ips_protocol = %d, "
5351 			    "sel.ips_local_port = %d, "
5352 			    "sel.ips_remote_port = %d\n",
5353 			    sel.ips_protocol, ntohs(sel.ips_local_port),
5354 			    ntohs(sel.ips_remote_port));
5355 #endif
5356 		/* Success so far! */
5357 	}
5358 	rw_enter(&polhead->iph_lock, RW_READER);
5359 	pol = ipsec_find_policy_head(NULL, polhead, IPSEC_TYPE_OUTBOUND, &sel);
5360 	rw_exit(&polhead->iph_lock);
5361 	if (pol == NULL) {
5362 		/*
5363 		 * No matching policy on this tunnel, drop the packet.
5364 		 *
5365 		 * NOTE:  Tunnel-mode tunnels are different from the
5366 		 * IP global transport mode policy head.  For a tunnel-mode
5367 		 * tunnel, we drop the packet instead of passing it
5368 		 * along as accepted, the way a global-policy miss would.
5369 		 *
5370 		 * NOTE2:  "negotiate transport" tunnels should match ALL
5371 		 * inbound packets, but we do not uncomment the ASSERT()
5372 		 * below because if/when we open PF_POLICY, a user can
5373 		 * shoot him/her-self in the foot with a 0 priority.
5374 		 */
5375 
5376 		/* ASSERT(itp->itp_flags & ITPF_P_TUNNEL); */
5377 #ifdef FRAGCACHE_DEBUG
5378 		cmn_err(CE_WARN, "ipsec_tun_outbound(): No matching tunnel "
5379 		    "per-port policy\n");
5380 #endif
5381 		ip_drop_packet_chain(mp, B_FALSE, NULL,
5382 		    DROPPER(ipss, ipds_spd_explicit),
5383 		    &ipss->ipsec_spd_dropper);
5384 		return (NULL);
5385 	}
5386 
5387 #ifdef FRAGCACHE_DEBUG
5388 	cmn_err(CE_WARN, "Having matching tunnel per-port policy\n");
5389 #endif
5390 
5391 	/*
5392 	 * NOTE:  The ixa_cleanup() function will release pol references.
5393 	 */
5394 	ixa->ixa_ipsec_policy = pol;
5395 	/*
5396 	 * NOTE: There is a subtle difference between iptun_zoneid and
5397 	 * iptun_connp->conn_zoneid explained in iptun_conn_create().  When
5398 	 * interacting with the ip module, we must use conn_zoneid.
5399 	 */
5400 	ixa->ixa_zoneid = iptun->iptun_connp->conn_zoneid;
5401 
5402 	ASSERT((outer_ipv4 != NULL) ? (ixa->ixa_flags & IXAF_IS_IPV4) :
5403 	    !(ixa->ixa_flags & IXAF_IS_IPV4));
5404 	ASSERT(ixa->ixa_ipsec_policy != NULL);
5405 	ixa->ixa_flags |= IXAF_IPSEC_SECURE;
5406 
5407 	if (!(itp->itp_flags & ITPF_P_TUNNEL)) {
5408 		/* Set up transport mode for tunnelled packets. */
5409 		ixa->ixa_ipsec_proto = (inner_ipv4 != NULL) ? IPPROTO_ENCAP :
5410 		    IPPROTO_IPV6;
5411 		return (mp);
5412 	}
5413 
5414 	/* Fill in tunnel-mode goodies here. */
5415 	ixa->ixa_flags |= IXAF_IPSEC_TUNNEL;
5416 	/* XXX Do I need to fill in all of the goodies here? */
5417 	if (inner_ipv4) {
5418 		ixa->ixa_ipsec_inaf = AF_INET;
5419 		ixa->ixa_ipsec_insrc[0] =
5420 		    pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v4;
5421 		ixa->ixa_ipsec_indst[0] =
5422 		    pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v4;
5423 	} else {
5424 		ixa->ixa_ipsec_inaf = AF_INET6;
5425 		ixa->ixa_ipsec_insrc[0] =
5426 		    pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[0];
5427 		ixa->ixa_ipsec_insrc[1] =
5428 		    pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[1];
5429 		ixa->ixa_ipsec_insrc[2] =
5430 		    pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[2];
5431 		ixa->ixa_ipsec_insrc[3] =
5432 		    pol->ipsp_sel->ipsl_key.ipsl_local.ipsad_v6.s6_addr32[3];
5433 		ixa->ixa_ipsec_indst[0] =
5434 		    pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[0];
5435 		ixa->ixa_ipsec_indst[1] =
5436 		    pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[1];
5437 		ixa->ixa_ipsec_indst[2] =
5438 		    pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[2];
5439 		ixa->ixa_ipsec_indst[3] =
5440 		    pol->ipsp_sel->ipsl_key.ipsl_remote.ipsad_v6.s6_addr32[3];
5441 	}
5442 	ixa->ixa_ipsec_insrcpfx = pol->ipsp_sel->ipsl_key.ipsl_local_pfxlen;
5443 	ixa->ixa_ipsec_indstpfx = pol->ipsp_sel->ipsl_key.ipsl_remote_pfxlen;
5444 	/* NOTE:  These are used for transport mode too. */
5445 	ixa->ixa_ipsec_src_port = pol->ipsp_sel->ipsl_key.ipsl_lport;
5446 	ixa->ixa_ipsec_dst_port = pol->ipsp_sel->ipsl_key.ipsl_rport;
5447 	ixa->ixa_ipsec_proto = pol->ipsp_sel->ipsl_key.ipsl_proto;
5448 
5449 	return (mp);
5450 }
5451 
5452 /*
5453  * NOTE: The following releases pol's reference and
5454  * calls ip_drop_packet() for me on NULL returns.
5455  */
5456 mblk_t *
5457 ipsec_check_ipsecin_policy_reasm(mblk_t *attr_mp, ipsec_policy_t *pol,
5458     ipha_t *inner_ipv4, ip6_t *inner_ipv6, uint64_t pkt_unique, netstack_t *ns)
5459 {
5460 	/* Assume attr_mp is a chain of b_next-linked ip_recv_attr mblks. */
5461 	mblk_t *data_chain = NULL, *data_tail = NULL;
5462 	mblk_t *next;
5463 	mblk_t *data_mp;
5464 	ip_recv_attr_t	iras;
5465 
5466 	while (attr_mp != NULL) {
5467 		ASSERT(ip_recv_attr_is_mblk(attr_mp));
5468 		next = attr_mp->b_next;
5469 		attr_mp->b_next = NULL;  /* No tripping asserts. */
5470 
5471 		data_mp = attr_mp->b_cont;
5472 		attr_mp->b_cont = NULL;
5473 		if (!ip_recv_attr_from_mblk(attr_mp, &iras)) {
5474 			/* The ill or ip_stack_t disappeared on us */
5475 			freemsg(data_mp);	/* ip_drop_packet?? */
5476 			ira_cleanup(&iras, B_TRUE);
5477 			goto fail;
5478 		}
5479 
5480 		/*
5481 		 * Need IPPOL_REFHOLD(pol) for extras because
5482 		 * ipsecin_policy does the refrele.
5483 		 */
5484 		IPPOL_REFHOLD(pol);
5485 
5486 		data_mp = ipsec_check_ipsecin_policy(data_mp, pol, inner_ipv4,
5487 		    inner_ipv6, pkt_unique, &iras, ns);
5488 		ira_cleanup(&iras, B_TRUE);
5489 
5490 		if (data_mp == NULL)
5491 			goto fail;
5492 
5493 		if (data_tail == NULL) {
5494 			/* First one */
5495 			data_chain = data_tail = data_mp;
5496 		} else {
5497 			data_tail->b_next = data_mp;
5498 			data_tail = data_mp;
5499 		}
5500 		attr_mp = next;
5501 	}
5502 	/*
5503 	 * One last release because either the loop bumped it up, or we never
5504 	 * called ipsec_check_ipsecin_policy().
5505 	 */
5506 	IPPOL_REFRELE(pol);
5507 
5508 	/* data_chain is ready for return to tun module. */
5509 	return (data_chain);
5510 
5511 fail:
5512 	/*
5513 	 * Need to get rid of any extra pol
5514 	 * references, and any remaining bits as well.
5515 	 */
5516 	IPPOL_REFRELE(pol);
5517 	ipsec_freemsg_chain(data_chain);
5518 	ipsec_freemsg_chain(next);	/* ipdrop stats? */
5519 	return (NULL);
5520 }
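/*
 * For illustration, the attr_mp chain consumed by the function above is
 * assumed (from the loop and from ipsec_fragcache_add()) to look like:
 *
 *	attr_mp --b_next--> attr_mp --b_next--> ... --> NULL
 *	   |                   |
 *	 b_cont              b_cont
 *	   v                   v
 *	data_mp (frag 1)    data_mp (frag 2)
 *
 * Each pass detaches one attribute mblk, rebuilds ip_recv_attr_t state from
 * it, runs the data mblk through ipsec_check_ipsecin_policy(), and relinks
 * the surviving data mblks into a plain b_next chain for the caller.
 */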
5521 
5522 /*
5523  * Returns a message if the inbound packet passed an IPsec policy check, or
5524  * NULL if it failed or if it is a fragment needing its friends before a
5525  * policy check can be performed.
5526  *
5527  * Expects a non-NULL data_mp and a non-NULL polhead.
5528  * The returned mblk may be a b_next chain of packets if fragments
5529  * needed to be collected for a proper policy check.
5530  *
5531  * This function calls ip_drop_packet() on data_mp if need be.
5532  *
5533  * NOTE:  outer_hdr_len is signed.  If it's a negative value, the caller
5534  * is inspecting an ICMP packet.
5535  */
5536 mblk_t *
5537 ipsec_tun_inbound(ip_recv_attr_t *ira, mblk_t *data_mp, ipsec_tun_pol_t *itp,
5538     ipha_t *inner_ipv4, ip6_t *inner_ipv6, ipha_t *outer_ipv4,
5539     ip6_t *outer_ipv6, int outer_hdr_len, netstack_t *ns)
5540 {
5541 	ipsec_policy_head_t *polhead;
5542 	ipsec_selector_t sel;
5543 	ipsec_policy_t *pol;
5544 	uint16_t tmpport;
5545 	selret_t rc;
5546 	boolean_t port_policy_present, is_icmp, global_present;
5547 	in6_addr_t tmpaddr;
5548 	ipaddr_t tmp4;
5549 	uint8_t flags, *inner_hdr;
5550 	ipsec_stack_t *ipss = ns->netstack_ipsec;
5551 
5552 	sel.ips_is_icmp_inv_acq = 0;
5553 
5554 	if (outer_ipv4 != NULL) {
5555 		ASSERT(outer_ipv6 == NULL);
5556 		global_present = ipss->ipsec_inbound_v4_policy_present;
5557 	} else {
5558 		ASSERT(outer_ipv6 != NULL);
5559 		global_present = ipss->ipsec_inbound_v6_policy_present;
5560 	}
5561 
5562 	ASSERT(inner_ipv4 != NULL && inner_ipv6 == NULL ||
5563 	    inner_ipv4 == NULL && inner_ipv6 != NULL);
5564 
5565 	if (outer_hdr_len < 0) {
5566 		outer_hdr_len = (-outer_hdr_len);
5567 		is_icmp = B_TRUE;
5568 	} else {
5569 		is_icmp = B_FALSE;
5570 	}
5571 
5572 	if (itp != NULL && (itp->itp_flags & ITPF_P_ACTIVE)) {
5573 		mblk_t *mp = data_mp;
5574 
5575 		polhead = itp->itp_policy;
5576 		/*
5577 		 * We need to perform full Tunnel-Mode enforcement,
5578 		 * and we need to have inner-header data for such enforcement.
5579 		 *
5580 		 * See ipsec_init_inbound_sel() for the 0x80000000 on inbound
5581 		 * and on return.
5582 		 */
5583 
5584 		port_policy_present = ((itp->itp_flags &
5585 		    ITPF_P_PER_PORT_SECURITY) ? B_TRUE : B_FALSE);
5586 		/*
5587 		 * NOTE:  Even if our policy is transport mode, set the
5588 		 * SEL_TUNNEL_MODE flag so ipsec_init_inbound_sel() can
5589 		 * do the right thing w.r.t. outer headers.
5590 		 */
5591 		flags = ((port_policy_present ? SEL_PORT_POLICY : SEL_NONE) |
5592 		    (is_icmp ? SEL_IS_ICMP : SEL_NONE) | SEL_TUNNEL_MODE);
5593 
5594 		rc = ipsec_init_inbound_sel(&sel, data_mp, inner_ipv4,
5595 		    inner_ipv6, flags);
5596 
5597 		switch (rc) {
5598 		case SELRET_NOMEM:
5599 			ip_drop_packet(data_mp, B_TRUE, NULL,
5600 			    DROPPER(ipss, ipds_spd_nomem),
5601 			    &ipss->ipsec_spd_dropper);
5602 			return (NULL);
5603 		case SELRET_TUNFRAG:
5604 			/*
5605 			 * At this point, if we're cleartext, we don't want
5606 			 * to go there.
5607 			 */
5608 			if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
5609 				ip_drop_packet(data_mp, B_TRUE, NULL,
5610 				    DROPPER(ipss, ipds_spd_got_clear),
5611 				    &ipss->ipsec_spd_dropper);
5612 				return (NULL);
5613 			}
5614 
5615 			/*
5616 			 * Inner and outer headers may not be contiguous.
5617 			 * Pullup the data_mp now to satisfy assumptions of
5618 			 * ipsec_fragcache_add()
5619 			 */
5620 			if (data_mp->b_cont != NULL) {
5621 				mblk_t *nmp;
5622 
5623 				nmp = msgpullup(data_mp, -1);
5624 				if (nmp == NULL) {
5625 					ip_drop_packet(data_mp, B_TRUE, NULL,
5626 					    DROPPER(ipss, ipds_spd_nomem),
5627 					    &ipss->ipsec_spd_dropper);
5628 					return (NULL);
5629 				}
5630 				freemsg(data_mp);
5631 				data_mp = nmp;
5632 				if (outer_ipv4 != NULL)
5633 					outer_ipv4 =
5634 					    (ipha_t *)data_mp->b_rptr;
5635 				else
5636 					outer_ipv6 =
5637 					    (ip6_t *)data_mp->b_rptr;
5638 				if (inner_ipv4 != NULL) {
5639 					inner_ipv4 =
5640 					    (ipha_t *)(data_mp->b_rptr +
5641 					    outer_hdr_len);
5642 				} else {
5643 					inner_ipv6 =
5644 					    (ip6_t *)(data_mp->b_rptr +
5645 					    outer_hdr_len);
5646 				}
5647 			}
5648 
5649 			/*
5650 			 * We need to queue the packet.  First we get an
5651 			 * mblk with the attributes.  ipsec_fragcache_add
5652 			 * will prepend that to the queued data and return
5653 			 * a list of b_next messages, each of which starts
5654 			 * with the attribute mblk.
5655 			 */
5656 			mp = ip_recv_attr_to_mblk(ira);
5657 			if (mp == NULL) {
5658 				ip_drop_packet(data_mp, B_TRUE, NULL,
5659 				    DROPPER(ipss, ipds_spd_nomem),
5660 				    &ipss->ipsec_spd_dropper);
5661 				return (NULL);
5662 			}
5663 
5664 			mp = ipsec_fragcache_add(&itp->itp_fragcache,
5665 			    mp, data_mp, outer_hdr_len, ipss);
5666 
5667 			if (mp == NULL) {
5668 				/*
5669 				 * Data is cached, fragment chain is not
5670 				 * complete.
5671 				 */
5672 				return (NULL);
5673 			}
5674 
5675 			/*
5676 			 * If we get here, we have a full fragment chain.
5677 			 * Reacquire headers and selectors from first fragment.
5678 			 */
5679 			ASSERT(ip_recv_attr_is_mblk(mp));
5680 			data_mp = mp->b_cont;
5681 			inner_hdr = data_mp->b_rptr;
5682 			if (outer_ipv4 != NULL) {
5683 				inner_hdr += IPH_HDR_LENGTH(
5684 				    (ipha_t *)data_mp->b_rptr);
5685 			} else {
5686 				inner_hdr += ip_hdr_length_v6(data_mp,
5687 				    (ip6_t *)data_mp->b_rptr);
5688 			}
5689 			ASSERT(inner_hdr <= data_mp->b_wptr);
5690 
5691 			if (inner_ipv4 != NULL) {
5692 				inner_ipv4 = (ipha_t *)inner_hdr;
5693 				inner_ipv6 = NULL;
5694 			} else {
5695 				inner_ipv6 = (ip6_t *)inner_hdr;
5696 				inner_ipv4 = NULL;
5697 			}
5698 
5699 			/*
5700 			 * Use SEL_TUNNEL_MODE to take into account the outer
5701 			 * header.  Use SEL_POST_FRAG so we always get ports.
5702 			 */
5703 			rc = ipsec_init_inbound_sel(&sel, data_mp,
5704 			    inner_ipv4, inner_ipv6,
5705 			    SEL_TUNNEL_MODE | SEL_POST_FRAG);
5706 			switch (rc) {
5707 			case SELRET_SUCCESS:
5708 				/*
5709 				 * Get to same place as first caller's
5710 				 * SELRET_SUCCESS case.
5711 				 */
5712 				break;
5713 			case SELRET_NOMEM:
5714 				ip_drop_packet_chain(mp, B_TRUE, NULL,
5715 				    DROPPER(ipss, ipds_spd_nomem),
5716 				    &ipss->ipsec_spd_dropper);
5717 				return (NULL);
5718 			case SELRET_BADPKT:
5719 				ip_drop_packet_chain(mp, B_TRUE, NULL,
5720 				    DROPPER(ipss, ipds_spd_malformed_frag),
5721 				    &ipss->ipsec_spd_dropper);
5722 				return (NULL);
5723 			case SELRET_TUNFRAG:
5724 				cmn_err(CE_WARN, "(TUNFRAG on 2nd call...)");
5725 				/* FALLTHRU */
5726 			default:
5727 				cmn_err(CE_WARN, "ipsec_init_inbound_sel(mark2)"
5728 				    " returns bizarro 0x%x", rc);
5729 				/* Guaranteed panic! */
5730 				ASSERT(rc == SELRET_NOMEM);
5731 				return (NULL);
5732 			}
5733 			/* FALLTHRU */
5734 		case SELRET_SUCCESS:
5735 			/*
5736 			 * Common case:
5737 			 * No per-port policy or a non-fragment.  Keep going.
5738 			 */
5739 			break;
5740 		case SELRET_BADPKT:
5741 			/*
5742 			 * We may receive ICMP (with IPv6 inner) packets that
5743 			 * trigger this return value.  Send 'em in for
5744 			 * enforcement checking.
5745 			 */
5746 			cmn_err(CE_NOTE, "ipsec_tun_inbound(): "
5747 			    "sending 'bad packet' in for enforcement");
5748 			break;
5749 		default:
5750 			cmn_err(CE_WARN,
5751 			    "ipsec_init_inbound_sel() returns bizarro 0x%x",
5752 			    rc);
5753 			ASSERT(rc == SELRET_NOMEM);	/* Guaranteed panic! */
5754 			return (NULL);
5755 		}
5756 
5757 		if (is_icmp) {
5758 			/*
5759 			 * Swap local/remote because this is an ICMP packet.
5760 			 */
5761 			tmpaddr = sel.ips_local_addr_v6;
5762 			sel.ips_local_addr_v6 = sel.ips_remote_addr_v6;
5763 			sel.ips_remote_addr_v6 = tmpaddr;
5764 			tmpport = sel.ips_local_port;
5765 			sel.ips_local_port = sel.ips_remote_port;
5766 			sel.ips_remote_port = tmpport;
5767 		}
5768 
5769 		/* find_policy_head() */
5770 		rw_enter(&polhead->iph_lock, RW_READER);
5771 		pol = ipsec_find_policy_head(NULL, polhead, IPSEC_TYPE_INBOUND,
5772 		    &sel);
5773 		rw_exit(&polhead->iph_lock);
5774 		if (pol != NULL) {
5775 			uint64_t pkt_unique;
5776 
5777 			if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
5778 				if (!pol->ipsp_act->ipa_allow_clear) {
5779 					/*
5780 					 * XXX should never get here with
5781 					 * tunnel reassembled fragments?
5782 					 */
5783 					ASSERT(mp == data_mp);
5784 					ip_drop_packet(data_mp, B_TRUE, NULL,
5785 					    DROPPER(ipss, ipds_spd_got_clear),
5786 					    &ipss->ipsec_spd_dropper);
5787 					IPPOL_REFRELE(pol);
5788 					return (NULL);
5789 				} else {
5790 					IPPOL_REFRELE(pol);
5791 					return (mp);
5792 				}
5793 			}
5794 			pkt_unique = SA_UNIQUE_ID(sel.ips_remote_port,
5795 			    sel.ips_local_port,
5796 			    (inner_ipv4 == NULL) ? IPPROTO_IPV6 :
5797 			    IPPROTO_ENCAP, sel.ips_protocol);
5798 
5799 			/*
5800 			 * NOTE: The following releases pol's reference and
5801 			 * calls ip_drop_packet() for me on NULL returns.
5802 			 *
5803 			 * "sel" is still good here, so let's use it!
5804 			 */
5805 			if (data_mp == mp) {
5806 				/* A single packet without attributes */
5807 				data_mp = ipsec_check_ipsecin_policy(data_mp,
5808 				    pol, inner_ipv4, inner_ipv6, pkt_unique,
5809 				    ira, ns);
5810 			} else {
5811 				/*
5812 				 * We pass in the b_next chain of attr_mp's
5813 				 * and get back a b_next chain of data_mp's.
5814 				 */
5815 				data_mp = ipsec_check_ipsecin_policy_reasm(mp,
5816 				    pol, inner_ipv4, inner_ipv6, pkt_unique,
5817 				    ns);
5818 			}
5819 			return (data_mp);
5820 		}
5821 
5822 		/*
5823 		 * Else fallthru and check the global policy on the outer
5824 		 * header(s) if this tunnel is an old-style transport-mode
5825 		 * one.  Drop the packet explicitly (no policy entry) for
5826 		 * a new-style tunnel-mode tunnel.
5827 		 */
5828 		if ((itp->itp_flags & ITPF_P_TUNNEL) && !is_icmp) {
5829 			ip_drop_packet_chain(data_mp, B_TRUE, NULL,
5830 			    DROPPER(ipss, ipds_spd_explicit),
5831 			    &ipss->ipsec_spd_dropper);
5832 			return (NULL);
5833 		}
5834 	}
5835 
5836 	/*
5837 	 * NOTE:  If we reach here, we will not have packet chains from
5838 	 * fragcache_add(), because the only way I get chains is on a
5839 	 * tunnel-mode tunnel, which either returns with a pass, or gets
5840 	 * hit by the ip_drop_packet_chain() call right above here.
5841 	 */
5842 	ASSERT(data_mp->b_next == NULL);
5843 
5844 	/* If no per-tunnel security, check global policy now. */
5845 	if ((ira->ira_flags & IRAF_IPSEC_SECURE) && !global_present) {
5846 		if (ira->ira_flags & IRAF_TRUSTED_ICMP) {
5847 			/*
5848 			 * This is an ICMP message that was generated locally.
5849 			 * We should accept it.
5850 			 */
5851 			return (data_mp);
5852 		}
5853 
5854 		ip_drop_packet(data_mp, B_TRUE, NULL,
5855 		    DROPPER(ipss, ipds_spd_got_secure),
5856 		    &ipss->ipsec_spd_dropper);
5857 		return (NULL);
5858 	}
5859 
5860 	if (is_icmp) {
5861 		/*
5862 		 * For ICMP packets, "outer_ipvN" is set to the outer header
5863 		 * that is *INSIDE* the ICMP payload.  For global policy
5864 		 * checking, we need to reverse src/dst on the payload in
5865 		 * order to construct selectors appropriately.  See "ripha"
5866 		 * constructions in ip.c.  To avoid a bug like 6478464 (see
5867 		 * earlier in this file), we will actually exchange src/dst
5868 		 * in the packet, and reverse it after the call to
5869 		 * ipsec_check_global_policy().
5870 		 */
5871 		if (outer_ipv4 != NULL) {
5872 			tmp4 = outer_ipv4->ipha_src;
5873 			outer_ipv4->ipha_src = outer_ipv4->ipha_dst;
5874 			outer_ipv4->ipha_dst = tmp4;
5875 		} else {
5876 			ASSERT(outer_ipv6 != NULL);
5877 			tmpaddr = outer_ipv6->ip6_src;
5878 			outer_ipv6->ip6_src = outer_ipv6->ip6_dst;
5879 			outer_ipv6->ip6_dst = tmpaddr;
5880 		}
5881 	}
5882 
5883 	data_mp = ipsec_check_global_policy(data_mp, NULL, outer_ipv4,
5884 	    outer_ipv6, ira, ns);
5885 	if (data_mp == NULL)
5886 		return (NULL);
5887 
5888 	if (is_icmp) {
5889 		/* Set things back to normal. */
5890 		if (outer_ipv4 != NULL) {
5891 			tmp4 = outer_ipv4->ipha_src;
5892 			outer_ipv4->ipha_src = outer_ipv4->ipha_dst;
5893 			outer_ipv4->ipha_dst = tmp4;
5894 		} else {
5895 			/* No need for ASSERT()s now. */
5896 			tmpaddr = outer_ipv6->ip6_src;
5897 			outer_ipv6->ip6_src = outer_ipv6->ip6_dst;
5898 			outer_ipv6->ip6_dst = tmpaddr;
5899 		}
5900 	}
5901 
5902 	/*
5903 	 * At this point, we pretend it's a cleartext accepted
5904 	 * packet.
5905 	 */
5906 	return (data_mp);
5907 }
5908 
5909 /*
5910  * AVL comparison routine for our list of tunnel polheads.
5911  */
5912 static int
5913 tunnel_compare(const void *arg1, const void *arg2)
5914 {
5915 	ipsec_tun_pol_t *left, *right;
5916 	int rc;
5917 
5918 	left = (ipsec_tun_pol_t *)arg1;
5919 	right = (ipsec_tun_pol_t *)arg2;
5920 
5921 	rc = strncmp(left->itp_name, right->itp_name, LIFNAMSIZ);
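	/*
	 * The AVL framework expects a comparator to return exactly -1, 0,
	 * or +1, hence the normalization of strncmp()'s result below.
	 */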
5922 	return (rc == 0 ? rc : (rc > 0 ? 1 : -1));
5923 }
5924 
5925 /*
5926  * Free a tunnel policy node.
5927  */
5928 void
5929 itp_free(ipsec_tun_pol_t *node, netstack_t *ns)
5930 {
5931 	if (node->itp_policy != NULL) {
5932 		IPPH_REFRELE(node->itp_policy, ns);
5933 		node->itp_policy = NULL;
5934 	}
5935 	if (node->itp_inactive != NULL) {
5936 		IPPH_REFRELE(node->itp_inactive, ns);
5937 		node->itp_inactive = NULL;
5938 	}
5939 	mutex_destroy(&node->itp_lock);
5940 	kmem_free(node, sizeof (*node));
5941 }
5942 
5943 void
5944 itp_unlink(ipsec_tun_pol_t *node, netstack_t *ns)
5945 {
5946 	ipsec_stack_t *ipss = ns->netstack_ipsec;
5947 
5948 	rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_WRITER);
5949 	ipss->ipsec_tunnel_policy_gen++;
5950 	ipsec_fragcache_uninit(&node->itp_fragcache, ipss);
5951 	avl_remove(&ipss->ipsec_tunnel_policies, node);
5952 	rw_exit(&ipss->ipsec_tunnel_policy_lock);
5953 	ITP_REFRELE(node, ns);
5954 }
5955 
5956 /*
5957  * Public interface to look up a tunnel security policy by name.  Used by
5958  * spdsock mostly.  Returns "node" with a bumped refcnt.
5959  */
5960 ipsec_tun_pol_t *
5961 get_tunnel_policy(char *name, netstack_t *ns)
5962 {
5963 	ipsec_tun_pol_t *node, lookup;
5964 	ipsec_stack_t *ipss = ns->netstack_ipsec;
5965 
5966 	(void) strncpy(lookup.itp_name, name, LIFNAMSIZ);
5967 
5968 	rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
5969 	node = (ipsec_tun_pol_t *)avl_find(&ipss->ipsec_tunnel_policies,
5970 	    &lookup, NULL);
5971 	if (node != NULL) {
5972 		ITP_REFHOLD(node);
5973 	}
5974 	rw_exit(&ipss->ipsec_tunnel_policy_lock);
5975 
5976 	return (node);
5977 }
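/*
 * A minimal usage sketch (hypothetical caller code; "tun_name" is an assumed
 * variable).  The returned node carries a reference that the caller must
 * release when done:
 *
 *	ipsec_tun_pol_t *itp = get_tunnel_policy(tun_name, ns);
 *	if (itp != NULL) {
 *		... inspect itp->itp_policy or itp->itp_inactive ...
 *		ITP_REFRELE(itp, ns);
 *	}
 */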
5978 
5979 /*
5980  * Public interface to walk all tunnel security policies.  Useful for spdsock
5981  * DUMP operations.  iterator() will not consume a reference.
5982  */
5983 void
5984 itp_walk(void (*iterator)(ipsec_tun_pol_t *, void *, netstack_t *),
5985     void *arg, netstack_t *ns)
5986 {
5987 	ipsec_tun_pol_t *node;
5988 	ipsec_stack_t *ipss = ns->netstack_ipsec;
5989 
5990 	rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_READER);
5991 	for (node = avl_first(&ipss->ipsec_tunnel_policies); node != NULL;
5992 	    node = AVL_NEXT(&ipss->ipsec_tunnel_policies, node)) {
5993 		iterator(node, arg, ns);
5994 	}
5995 	rw_exit(&ipss->ipsec_tunnel_policy_lock);
5996 }
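/*
 * A hypothetical iterator sketch, e.g. for a DUMP-style caller.  The callback
 * runs with the tunnel policy lock held as a reader and, per the comment
 * above, must not consume a reference:
 *
 *	static void
 *	count_itp(ipsec_tun_pol_t *itp, void *arg, netstack_t *ns)
 *	{
 *		(*(uint_t *)arg)++;
 *	}
 *
 *	uint_t count = 0;
 *	itp_walk(count_itp, &count, ns);
 */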
5997 
5998 /*
5999  * Initialize policy head.  This can only fail if there's a memory problem.
6000  */
6001 static boolean_t
6002 tunnel_polhead_init(ipsec_policy_head_t *iph, netstack_t *ns)
6003 {
6004 	ipsec_stack_t *ipss = ns->netstack_ipsec;
6005 
6006 	rw_init(&iph->iph_lock, NULL, RW_DEFAULT, NULL);
6007 	iph->iph_refs = 1;
6008 	iph->iph_gen = 0;
6009 	if (ipsec_alloc_table(iph, ipss->ipsec_tun_spd_hashsize,
6010 	    KM_SLEEP, B_FALSE, ns) != 0) {
6011 		ipsec_polhead_free_table(iph);
6012 		return (B_FALSE);
6013 	}
6014 	ipsec_polhead_init(iph, ipss->ipsec_tun_spd_hashsize);
6015 	return (B_TRUE);
6016 }
6017 
6018 /*
6019  * Create a tunnel policy node with "name".  Sets *errno to ENOMEM if
6020  * there's a memory problem, and to EEXIST if there's an existing
6021  * node.
6022  */
6023 ipsec_tun_pol_t *
6024 create_tunnel_policy(char *name, int *errno, uint64_t *gen, netstack_t *ns)
6025 {
6026 	ipsec_tun_pol_t *newbie, *existing;
6027 	avl_index_t where;
6028 	ipsec_stack_t *ipss = ns->netstack_ipsec;
6029 
6030 	newbie = kmem_zalloc(sizeof (*newbie), KM_NOSLEEP);
6031 	if (newbie == NULL) {
6032 		*errno = ENOMEM;
6033 		return (NULL);
6034 	}
6035 	if (!ipsec_fragcache_init(&newbie->itp_fragcache)) {
6036 		kmem_free(newbie, sizeof (*newbie));
6037 		*errno = ENOMEM;
6038 		return (NULL);
6039 	}
6040 
6041 	(void) strncpy(newbie->itp_name, name, LIFNAMSIZ);
6042 
6043 	rw_enter(&ipss->ipsec_tunnel_policy_lock, RW_WRITER);
6044 	existing = (ipsec_tun_pol_t *)avl_find(&ipss->ipsec_tunnel_policies,
6045 	    newbie, &where);
6046 	if (existing != NULL) {
6047 		itp_free(newbie, ns);
6048 		*errno = EEXIST;
6049 		rw_exit(&ipss->ipsec_tunnel_policy_lock);
6050 		return (NULL);
6051 	}
6052 	ipss->ipsec_tunnel_policy_gen++;
6053 	*gen = ipss->ipsec_tunnel_policy_gen;
6054 	newbie->itp_refcnt = 2;	/* One for the caller, one for the tree. */
6055 	newbie->itp_next_policy_index = 1;
6056 	avl_insert(&ipss->ipsec_tunnel_policies, newbie, where);
6057 	mutex_init(&newbie->itp_lock, NULL, MUTEX_DEFAULT, NULL);
6058 	newbie->itp_policy = kmem_zalloc(sizeof (ipsec_policy_head_t),
6059 	    KM_NOSLEEP);
6060 	if (newbie->itp_policy == NULL)
6061 		goto nomem;
6062 	newbie->itp_inactive = kmem_zalloc(sizeof (ipsec_policy_head_t),
6063 	    KM_NOSLEEP);
6064 	if (newbie->itp_inactive == NULL) {
6065 		kmem_free(newbie->itp_policy, sizeof (ipsec_policy_head_t));
6066 		goto nomem;
6067 	}
6068 
6069 	if (!tunnel_polhead_init(newbie->itp_policy, ns)) {
6070 		kmem_free(newbie->itp_policy, sizeof (ipsec_policy_head_t));
6071 		kmem_free(newbie->itp_inactive, sizeof (ipsec_policy_head_t));
6072 		goto nomem;
6073 	} else if (!tunnel_polhead_init(newbie->itp_inactive, ns)) {
6074 		IPPH_REFRELE(newbie->itp_policy, ns);
6075 		kmem_free(newbie->itp_inactive, sizeof (ipsec_policy_head_t));
6076 		goto nomem;
6077 	}
6078 	rw_exit(&ipss->ipsec_tunnel_policy_lock);
6079 
6080 	return (newbie);
6081 nomem:
6082 	*errno = ENOMEM;
6083 	kmem_free(newbie, sizeof (*newbie));
6084 	return (NULL);
6085 }
6086 
6087 /*
6088  * Given two addresses, find a tunnel instance's IPsec policy heads.
6089  * Returns NULL on failure.
6090  */
6091 ipsec_tun_pol_t *
6092 itp_get_byaddr(uint32_t *laddr, uint32_t *faddr, int af, ip_stack_t *ipst)
6093 {
6094 	conn_t *connp;
6095 	iptun_t *iptun;
6096 	ipsec_tun_pol_t *itp = NULL;
6097 
6098 	/* The classifier is called with "src" being the foreign address. */
6099 	if (af == AF_INET) {
6100 		connp = ipcl_iptun_classify_v4((ipaddr_t *)faddr,
6101 		    (ipaddr_t *)laddr, ipst);
6102 	} else {
6103 		ASSERT(af == AF_INET6);
6104 		ASSERT(!IN6_IS_ADDR_V4MAPPED((in6_addr_t *)laddr));
6105 		ASSERT(!IN6_IS_ADDR_V4MAPPED((in6_addr_t *)faddr));
6106 		connp = ipcl_iptun_classify_v6((in6_addr_t *)faddr,
6107 		    (in6_addr_t *)laddr, ipst);
6108 	}
6109 
6110 	if (connp == NULL)
6111 		return (NULL);
6112 
6113 	if (IPCL_IS_IPTUN(connp)) {
6114 		iptun = connp->conn_iptun;
6115 		if (iptun != NULL) {
6116 			itp = iptun->iptun_itp;
6117 			if (itp != NULL) {
6118 				/* Braces due to the macro's nature... */
6119 				ITP_REFHOLD(itp);
6120 			}
6121 		}  /* Else itp is already NULL. */
6122 	}
6123 
6124 	CONN_DEC_REF(connp);
6125 	return (itp);
6126 }
6127 
6128 /*
6129  * Frag cache code, based on SunScreen 3.2 source
6130  *	screen/kernel/common/screen_fragcache.c
6131  */
6132 
6133 #define	IPSEC_FRAG_TTL_MAX	5
6134 /*
6135  * Note that the following parameters create 256 hash buckets
6136  * with 1024 free entries to be distributed.  Entries are cleaned
6137  * periodically, and cleaning is also attempted when there is no
6138  * free space, but this system errs on the side of dropping packets
6139  * over exhausting memory.  We may decide to make the hash
6140  * factor a tunable if this proves to be a bad decision.
6141  */
6142 #define	IPSEC_FRAG_HASH_SLOTS	(1<<8)
6143 #define	IPSEC_FRAG_HASH_FACTOR	4
6144 #define	IPSEC_FRAG_HASH_SIZE	(IPSEC_FRAG_HASH_SLOTS * IPSEC_FRAG_HASH_FACTOR)
6145 
6146 #define	IPSEC_FRAG_HASH_MASK		(IPSEC_FRAG_HASH_SLOTS - 1)
6147 #define	IPSEC_FRAG_HASH_FUNC(id)	(((id) & IPSEC_FRAG_HASH_MASK) ^ \
6148 					    (((id) / \
6149 					    (ushort_t)IPSEC_FRAG_HASH_SLOTS) & \
6150 					    IPSEC_FRAG_HASH_MASK))
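/*
 * A worked example of the hash above: with 256 slots the mask is 0xff, so
 * for an IPv4 ident of 0x1234,
 *
 *	(0x1234 & 0xff) ^ ((0x1234 / 256) & 0xff) == 0x34 ^ 0x12 == 0x26
 *
 * i.e. the low byte of the id XORed with the byte above it, spreading
 * sequentially assigned idents across the 256 buckets.
 */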
6151 
6152 /* Maximum fragments per packet.  48 bytes payload x 1366 packets > 64KB */
6153 #define	IPSEC_MAX_FRAGS		1366
6154 
6155 #define	V4_FRAG_OFFSET(ipha) ((ntohs(ipha->ipha_fragment_offset_and_flags) & \
6156 				    IPH_OFFSET) << 3)
6157 #define	V4_MORE_FRAGS(ipha) (ntohs(ipha->ipha_fragment_offset_and_flags) & \
6158 		IPH_MF)
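/*
 * Worked examples for the macros above: the IPv4 fragment-offset field counts
 * 8-byte units, so an offset field of 5 puts the fragment data at
 * 5 << 3 == 40 bytes into the original datagram.  The IPSEC_MAX_FRAGS bound
 * relies on 1366 * 48 == 65568, just over the 65535-byte maximum IP datagram
 * size.
 */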
6159 
6160 /*
6161  * Initialize an ipsec fragcache instance.
6162  * Returns B_FALSE if memory allocation fails.
6163  */
6164 boolean_t
6165 ipsec_fragcache_init(ipsec_fragcache_t *frag)
6166 {
6167 	ipsec_fragcache_entry_t *ftemp;
6168 	int i;
6169 
6170 	mutex_init(&frag->itpf_lock, NULL, MUTEX_DEFAULT, NULL);
6171 	frag->itpf_ptr = (ipsec_fragcache_entry_t **)
6172 	    kmem_zalloc(sizeof (ipsec_fragcache_entry_t *) *
6173 	    IPSEC_FRAG_HASH_SLOTS, KM_NOSLEEP);
6174 	if (frag->itpf_ptr == NULL)
6175 		return (B_FALSE);
6176 
6177 	ftemp = (ipsec_fragcache_entry_t *)
6178 	    kmem_zalloc(sizeof (ipsec_fragcache_entry_t) *
6179 	    IPSEC_FRAG_HASH_SIZE, KM_NOSLEEP);
6180 	if (ftemp == NULL) {
6181 		kmem_free(frag->itpf_ptr, sizeof (ipsec_fragcache_entry_t *) *
6182 		    IPSEC_FRAG_HASH_SLOTS);
6183 		return (B_FALSE);
6184 	}
6185 
6186 	frag->itpf_freelist = NULL;
6187 
6188 	for (i = 0; i < IPSEC_FRAG_HASH_SIZE; i++) {
6189 		ftemp->itpfe_next = frag->itpf_freelist;
6190 		frag->itpf_freelist = ftemp;
6191 		ftemp++;
6192 	}
6193 
6194 	frag->itpf_expire_hint = 0;
6195 
6196 	return (B_TRUE);
6197 }
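/*
 * A minimal lifecycle sketch, mirroring create_tunnel_policy() and
 * itp_unlink() above ("fc" is a hypothetical caller-owned cache):
 *
 *	ipsec_fragcache_t fc;
 *
 *	if (!ipsec_fragcache_init(&fc))
 *		return (ENOMEM);
 *	... pass &fc to ipsec_fragcache_add() as fragments arrive ...
 *	ipsec_fragcache_uninit(&fc, ipss);
 */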
6198 
6199 void
6200 ipsec_fragcache_uninit(ipsec_fragcache_t *frag, ipsec_stack_t *ipss)
6201 {
6202 	ipsec_fragcache_entry_t *fep;
6203 	int i;
6204 
6205 	mutex_enter(&frag->itpf_lock);
6206 	if (frag->itpf_ptr) {
6207 		/* Delete any existing fragcache entry chains */
6208 		for (i = 0; i < IPSEC_FRAG_HASH_SLOTS; i++) {
6209 			fep = (frag->itpf_ptr)[i];
6210 			while (fep != NULL) {
6211 				/* Returned fep is next in chain or NULL */
6212 				fep = fragcache_delentry(i, fep, frag, ipss);
6213 			}
6214 		}
6215 		/*
6216 		 * Chase the pointers back to the beginning
6217 		 * of the memory allocation and then
6218 		 * get rid of the allocated freelist
6219 		 */
6220 		while (frag->itpf_freelist->itpfe_next != NULL)
6221 			frag->itpf_freelist = frag->itpf_freelist->itpfe_next;
6222 		/*
6223 		 * XXX - If we ever dynamically grow the freelist
6224 		 * then we'll have to free entries individually
6225 		 * or determine how many entries or chunks we have
6226 		 * grown since the initial allocation.
6227 		 */
6228 		kmem_free(frag->itpf_freelist,
6229 		    sizeof (ipsec_fragcache_entry_t) *
6230 		    IPSEC_FRAG_HASH_SIZE);
6231 		/* Free the fragcache structure */
6232 		kmem_free(frag->itpf_ptr,
6233 		    sizeof (ipsec_fragcache_entry_t *) *
6234 		    IPSEC_FRAG_HASH_SLOTS);
6235 	}
6236 	mutex_exit(&frag->itpf_lock);
6237 	mutex_destroy(&frag->itpf_lock);
6238 }
6239 
6240 /*
6241  * Add a fragment to the fragment cache.  Consumes mp if NULL is returned.
6242  * Returns mp if a complete packet has been assembled, NULL otherwise.
6243  * The returned mp could be a b_next chain of fragments.
6244  *
6245  * The iramp argument is set on inbound; NULL if outbound.
6246  */
6247 mblk_t *
6248 ipsec_fragcache_add(ipsec_fragcache_t *frag, mblk_t *iramp, mblk_t *mp,
6249     int outer_hdr_len, ipsec_stack_t *ipss)
6250 {
6251 	boolean_t is_v4;
6252 	time_t itpf_time;
6253 	ipha_t *iph;
6254 	ipha_t *oiph;
6255 	ip6_t *ip6h = NULL;
6256 	uint8_t v6_proto;
6257 	uint8_t *v6_proto_p;
6258 	uint16_t ip6_hdr_length;
6259 	ip_pkt_t ipp;
6260 	ip6_frag_t *fraghdr;
6261 	ipsec_fragcache_entry_t *fep;
6262 	int i;
6263 	mblk_t *nmp, *prevmp;
6264 	int firstbyte, lastbyte;
6265 	int offset;
6266 	int last;
6267 	boolean_t inbound = (iramp != NULL);
6268 
6269 #ifdef FRAGCACHE_DEBUG
6270 	cmn_err(CE_WARN, "Fragcache: %s\n", inbound ? "INBOUND" : "OUTBOUND");
6271 #endif
6272 	/*
6273 	 * You're on the slow path, so ensure that every packet in the
6274 	 * cache is a single-mblk one.
6275 	 */
6276 	if (mp->b_cont != NULL) {
6277 		nmp = msgpullup(mp, -1);
6278 		if (nmp == NULL) {
6279 			ip_drop_packet(mp, inbound, NULL,
6280 			    DROPPER(ipss, ipds_spd_nomem),
6281 			    &ipss->ipsec_spd_dropper);
6282 			if (inbound)
6283 				(void) ip_recv_attr_free_mblk(iramp);
6284 			return (NULL);
6285 		}
6286 		freemsg(mp);
6287 		mp = nmp;
6288 	}
6289 
6290 	mutex_enter(&frag->itpf_lock);
6291 
6292 	oiph  = (ipha_t *)mp->b_rptr;
6293 	iph  = (ipha_t *)(mp->b_rptr + outer_hdr_len);
6294 
6295 	if (IPH_HDR_VERSION(iph) == IPV4_VERSION) {
6296 		is_v4 = B_TRUE;
6297 	} else {
6298 		ASSERT(IPH_HDR_VERSION(iph) == IPV6_VERSION);
6299 		ip6h = (ip6_t *)(mp->b_rptr + outer_hdr_len);
6300 
6301 		if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &ip6_hdr_length,
6302 		    &v6_proto_p)) {
6303 			/*
6304 			 * We failed to find the upper-layer protocol
6305 			 * header, so the packet is malformed.
6306 			 */
6307 			mutex_exit(&frag->itpf_lock);
6308 			ip_drop_packet(mp, inbound, NULL,
6309 			    DROPPER(ipss, ipds_spd_malformed_packet),
6310 			    &ipss->ipsec_spd_dropper);
6311 			if (inbound)
6312 				(void) ip_recv_attr_free_mblk(iramp);
6313 			return (NULL);
6314 		} else {
6315 			v6_proto = *v6_proto_p;
6316 		}
6317 
6318 
6319 		bzero(&ipp, sizeof (ipp));
6320 		(void) ip_find_hdr_v6(mp, ip6h, B_FALSE, &ipp, NULL);
6321 		if (!(ipp.ipp_fields & IPPF_FRAGHDR)) {
6322 			/*
6323 			 * We think this is a fragment, but didn't find
6324 			 * a fragment header.  Something is wrong.
6325 			 */
6326 			mutex_exit(&frag->itpf_lock);
6327 			ip_drop_packet(mp, inbound, NULL,
6328 			    DROPPER(ipss, ipds_spd_malformed_frag),
6329 			    &ipss->ipsec_spd_dropper);
6330 			if (inbound)
6331 				(void) ip_recv_attr_free_mblk(iramp);
6332 			return (NULL);
6333 		}
6334 		fraghdr = ipp.ipp_fraghdr;
6335 		is_v4 = B_FALSE;
6336 	}
6337 
6338 	/* Anything to cleanup? */
6339 
6340 	/*
6341 	 * This cleanup call could be put in a timer loop
6342 	 * but it may actually be just as reasonable a decision to
6343 	 * leave it here.  The disadvantage is this only gets called when
6344 	 * frags are added.  The advantage is that it is not
6345 	 * susceptible to race conditions like a time-based cleanup
6346 	 * may be.
6347 	 */
6348 	itpf_time = gethrestime_sec();
6349 	if (itpf_time >= frag->itpf_expire_hint)
6350 		ipsec_fragcache_clean(frag, ipss);
6351 
6352 	/* Lookup to see if there is an existing entry */
6353 
6354 	if (is_v4)
6355 		i = IPSEC_FRAG_HASH_FUNC(iph->ipha_ident);
6356 	else
6357 		i = IPSEC_FRAG_HASH_FUNC(fraghdr->ip6f_ident);
6358 
6359 	for (fep = (frag->itpf_ptr)[i]; fep; fep = fep->itpfe_next) {
6360 		if (is_v4) {
6361 			ASSERT(iph != NULL);
6362 			if ((fep->itpfe_id == iph->ipha_ident) &&
6363 			    (fep->itpfe_src == iph->ipha_src) &&
6364 			    (fep->itpfe_dst == iph->ipha_dst) &&
6365 			    (fep->itpfe_proto == iph->ipha_protocol))
6366 				break;
6367 		} else {
6368 			ASSERT(fraghdr != NULL);
6369 			ASSERT(fep != NULL);
6370 			if ((fep->itpfe_id == fraghdr->ip6f_ident) &&
6371 			    IN6_ARE_ADDR_EQUAL(&fep->itpfe_src6,
6372 			    &ip6h->ip6_src) &&
6373 			    IN6_ARE_ADDR_EQUAL(&fep->itpfe_dst6,
6374 			    &ip6h->ip6_dst) && (fep->itpfe_proto == v6_proto))
6375 				break;
6376 		}
6377 	}
6378 
6379 	if (is_v4) {
6380 		firstbyte = V4_FRAG_OFFSET(iph);
6381 		lastbyte  = firstbyte + ntohs(iph->ipha_length) -
6382 		    IPH_HDR_LENGTH(iph);
6383 		last = (V4_MORE_FRAGS(iph) == 0);
6384 #ifdef FRAGCACHE_DEBUG
6385 		cmn_err(CE_WARN, "V4 fragcache: firstbyte = %d, lastbyte = %d, "
6386 		    "is_last_frag = %d, id = %d, mp = %p\n", firstbyte,
6387 		    lastbyte, last, iph->ipha_ident, mp);
6388 #endif
6389 	} else {
6390 		firstbyte = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
6391 		lastbyte  = firstbyte + ntohs(ip6h->ip6_plen) +
6392 		    sizeof (ip6_t) - ip6_hdr_length;
6393 		last = (fraghdr->ip6f_offlg & IP6F_MORE_FRAG) == 0;
6394 #ifdef FRAGCACHE_DEBUG
6395 		cmn_err(CE_WARN, "V6 fragcache: firstbyte = %d, lastbyte = %d, "
6396 		    "is_last_frag = %d, id = %d, fraghdr = %p, mp = %p\n",
6397 		    firstbyte, lastbyte, last, fraghdr->ip6f_ident, fraghdr,
6398 		    mp);
6399 #endif
6400 	}
6401 
6402 	/* check for bogus fragments and delete the entry */
6403 	if (firstbyte > 0 && firstbyte <= 8) {
6404 		if (fep != NULL)
6405 			(void) fragcache_delentry(i, fep, frag, ipss);
6406 		mutex_exit(&frag->itpf_lock);
6407 		ip_drop_packet(mp, inbound, NULL,
6408 		    DROPPER(ipss, ipds_spd_malformed_frag),
6409 		    &ipss->ipsec_spd_dropper);
6410 		if (inbound)
6411 			(void) ip_recv_attr_free_mblk(iramp);
6412 		return (NULL);
6413 	}
6414 
6415 	/* Not found, allocate a new entry */
6416 	if (fep == NULL) {
6417 		if (frag->itpf_freelist == NULL) {
6418 			/* see if there is some space */
6419 			ipsec_fragcache_clean(frag, ipss);
6420 			if (frag->itpf_freelist == NULL) {
6421 				mutex_exit(&frag->itpf_lock);
6422 				ip_drop_packet(mp, inbound, NULL,
6423 				    DROPPER(ipss, ipds_spd_nomem),
6424 				    &ipss->ipsec_spd_dropper);
6425 				if (inbound)
6426 					(void) ip_recv_attr_free_mblk(iramp);
6427 				return (NULL);
6428 			}
6429 		}
6430 
6431 		fep = frag->itpf_freelist;
6432 		frag->itpf_freelist = fep->itpfe_next;
6433 
6434 		if (is_v4) {
6435 			bcopy((caddr_t)&iph->ipha_src, (caddr_t)&fep->itpfe_src,
6436 			    sizeof (struct in_addr));
6437 			bcopy((caddr_t)&iph->ipha_dst, (caddr_t)&fep->itpfe_dst,
6438 			    sizeof (struct in_addr));
6439 			fep->itpfe_id = iph->ipha_ident;
6440 			fep->itpfe_proto = iph->ipha_protocol;
6441 			i = IPSEC_FRAG_HASH_FUNC(fep->itpfe_id);
6442 		} else {
6443 			bcopy((in6_addr_t *)&ip6h->ip6_src,
6444 			    (in6_addr_t *)&fep->itpfe_src6,
6445 			    sizeof (struct in6_addr));
6446 			bcopy((in6_addr_t *)&ip6h->ip6_dst,
6447 			    (in6_addr_t *)&fep->itpfe_dst6,
6448 			    sizeof (struct in6_addr));
6449 			fep->itpfe_id = fraghdr->ip6f_ident;
6450 			fep->itpfe_proto = v6_proto;
6451 			i = IPSEC_FRAG_HASH_FUNC(fep->itpfe_id);
6452 		}
6453 		itpf_time = gethrestime_sec();
6454 		fep->itpfe_exp = itpf_time + IPSEC_FRAG_TTL_MAX + 1;
6455 		fep->itpfe_last = 0;
6456 		fep->itpfe_fraglist = NULL;
6457 		fep->itpfe_depth = 0;
6458 		fep->itpfe_next = (frag->itpf_ptr)[i];
6459 		(frag->itpf_ptr)[i] = fep;
6460 
6461 		if (frag->itpf_expire_hint > fep->itpfe_exp)
6462 			frag->itpf_expire_hint = fep->itpfe_exp;
6463 
6464 	}
6465 
6466 	/* Insert it in the frag list */
6467 	/* List is in order by starting offset of fragments */
6468 
6469 	prevmp = NULL;
6470 	for (nmp = fep->itpfe_fraglist; nmp; nmp = nmp->b_next) {
6471 		ipha_t *niph;
6472 		ipha_t *oniph;
6473 		ip6_t *nip6h;
6474 		ip_pkt_t nipp;
6475 		ip6_frag_t *nfraghdr;
6476 		uint16_t nip6_hdr_length;
6477 		uint8_t *nv6_proto_p;
6478 		int nfirstbyte, nlastbyte;
6479 		char *data, *ndata;
6480 		mblk_t *ndata_mp = (inbound ? nmp->b_cont : nmp);
6481 		int hdr_len;
6482 
6483 		oniph  = (ipha_t *)mp->b_rptr;
6484 		nip6h = NULL;
6485 		niph = NULL;
6486 
6487 		/*
6488 		 * Determine outer header type and length and set
6489 		 * pointers appropriately
6490 		 */
6491 
6492 		if (IPH_HDR_VERSION(oniph) == IPV4_VERSION) {
6493 			hdr_len = ((outer_hdr_len != 0) ?
6494 			    IPH_HDR_LENGTH(oiph) : 0);
6495 			niph = (ipha_t *)(ndata_mp->b_rptr + hdr_len);
6496 		} else {
6497 			ASSERT(IPH_HDR_VERSION(oniph) == IPV6_VERSION);
6498 			ASSERT(ndata_mp->b_cont == NULL);
6499 			nip6h = (ip6_t *)ndata_mp->b_rptr;
6500 			(void) ip_hdr_length_nexthdr_v6(ndata_mp, nip6h,
6501 			    &nip6_hdr_length, &v6_proto_p);
6502 			hdr_len = ((outer_hdr_len != 0) ? nip6_hdr_length : 0);
6503 		}
6504 
6505 		/*
6506 		 * Determine inner header type and length and set
6507 		 * pointers appropriately
6508 		 */
6509 
6510 		if (is_v4) {
6511 			if (niph == NULL) {
6512 				/* Was v6 outer */
6513 				niph = (ipha_t *)(ndata_mp->b_rptr + hdr_len);
6514 			}
6515 			nfirstbyte = V4_FRAG_OFFSET(niph);
6516 			nlastbyte = nfirstbyte + ntohs(niph->ipha_length) -
6517 			    IPH_HDR_LENGTH(niph);
6518 		} else {
6519 			ASSERT(ndata_mp->b_cont == NULL);
6520 			nip6h = (ip6_t *)(ndata_mp->b_rptr + hdr_len);
6521 			if (!ip_hdr_length_nexthdr_v6(ndata_mp, nip6h,
6522 			    &nip6_hdr_length, &nv6_proto_p)) {
6523 				mutex_exit(&frag->itpf_lock);
6524 				ip_drop_packet_chain(nmp, inbound, NULL,
6525 				    DROPPER(ipss, ipds_spd_malformed_frag),
6526 				    &ipss->ipsec_spd_dropper);
6527 				ipsec_freemsg_chain(ndata_mp);
6528 				if (inbound)
6529 					(void) ip_recv_attr_free_mblk(iramp);
6530 				return (NULL);
6531 			}
6532 			bzero(&nipp, sizeof (nipp));
6533 			(void) ip_find_hdr_v6(ndata_mp, nip6h, B_FALSE, &nipp,
6534 			    NULL);
6535 			nfraghdr = nipp.ipp_fraghdr;
6536 			nfirstbyte = ntohs(nfraghdr->ip6f_offlg &
6537 			    IP6F_OFF_MASK);
6538 			nlastbyte  = nfirstbyte + ntohs(nip6h->ip6_plen) +
6539 			    sizeof (ip6_t) - nip6_hdr_length;
6540 		}
6541 
6542 		/* Check for overlapping fragments */
6543 		if (firstbyte >= nfirstbyte && firstbyte < nlastbyte) {
6544 			/*
6545 			 * Overlap Check:
6546 			 *  ~~~~---------		# Check if the newly
6547 			 * ~	ndata_mp|		# received fragment
6548 			 *  ~~~~---------		# overlaps with the
6549 			 *	 ---------~~~~~~	# current fragment.
6550 			 *	|    mp		~
6551 			 *	 ---------~~~~~~
6552 			 */
6553 			if (is_v4) {
6554 				data  = (char *)iph  + IPH_HDR_LENGTH(iph) +
6555 				    firstbyte - nfirstbyte;
6556 				ndata = (char *)niph + IPH_HDR_LENGTH(niph);
6557 			} else {
6558 				data  = (char *)ip6h  +
6559 				    nip6_hdr_length + firstbyte -
6560 				    nfirstbyte;
6561 				ndata = (char *)nip6h + nip6_hdr_length;
6562 			}
6563 			if (bcmp(data, ndata, MIN(lastbyte, nlastbyte) -
6564 			    firstbyte)) {
6565 				/* Overlapping data does not match */
6566 				(void) fragcache_delentry(i, fep, frag, ipss);
6567 				mutex_exit(&frag->itpf_lock);
6568 				ip_drop_packet(mp, inbound, NULL,
6569 				    DROPPER(ipss, ipds_spd_overlap_frag),
6570 				    &ipss->ipsec_spd_dropper);
6571 				if (inbound)
6572 					(void) ip_recv_attr_free_mblk(iramp);
6573 				return (NULL);
6574 			}
6575 			/* Part of defense for jolt2.c fragmentation attack */
6576 			if (firstbyte >= nfirstbyte && lastbyte <= nlastbyte) {
6577 				/*
6578 				 * Check for identical or subset fragments:
6579 				 *  ----------	    ~~~~--------~~~~~
6580 				 * |    nmp   | or  ~	   nmp	    ~
6581 				 *  ----------	    ~~~~--------~~~~~
6582 				 *  ----------		  ------
6583 				 * |	mp    |		 |  mp  |
6584 				 *  ----------		  ------
6585 				 */
6586 				mutex_exit(&frag->itpf_lock);
6587 				ip_drop_packet(mp, inbound, NULL,
6588 				    DROPPER(ipss, ipds_spd_evil_frag),
6589 				    &ipss->ipsec_spd_dropper);
6590 				if (inbound)
6591 					(void) ip_recv_attr_free_mblk(iramp);
6592 				return (NULL);
6593 			}
6594 
6595 		}
6596 
6597 		/* Correct location for this fragment? */
6598 		if (firstbyte <= nfirstbyte) {
6599 			/*
6600 			 * Check if the tail end of the new fragment overlaps
6601 			 * with the head of the current fragment.
6602 			 *	  --------~~~~~~~
6603 			 *	 |    nmp	~
6604 			 *	  --------~~~~~~~
6605 			 *  ~~~~~--------
6606 			 *  ~	mp	 |
6607 			 *  ~~~~~--------
6608 			 */
6609 			if (lastbyte > nfirstbyte) {
6610 				/* Fragments overlap */
6611 				data  = (char *)iph  + IPH_HDR_LENGTH(iph) +
6612 				    firstbyte - nfirstbyte;
6613 				ndata = (char *)niph + IPH_HDR_LENGTH(niph);
6614 				if (is_v4) {
6615 					data  = (char *)iph +
6616 					    IPH_HDR_LENGTH(iph) + firstbyte -
6617 					    nfirstbyte;
6618 					ndata = (char *)niph +
6619 					    IPH_HDR_LENGTH(niph);
6620 				} else {
6621 					data  = (char *)ip6h  +
6622 					    nip6_hdr_length + firstbyte -
6623 					    nfirstbyte;
6624 					ndata = (char *)nip6h + nip6_hdr_length;
6625 				}
6626 				if (bcmp(data, ndata, MIN(lastbyte, nlastbyte)
6627 				    - nfirstbyte)) {
6628 					/* Overlap mismatch */
6629 					(void) fragcache_delentry(i, fep, frag,
6630 					    ipss);
6631 					mutex_exit(&frag->itpf_lock);
6632 					ip_drop_packet(mp, inbound, NULL,
6633 					    DROPPER(ipss,
6634 					    ipds_spd_overlap_frag),
6635 					    &ipss->ipsec_spd_dropper);
6636 					if (inbound) {
6637 						(void) ip_recv_attr_free_mblk(
6638 						    iramp);
6639 					}
6640 					return (NULL);
6641 				}
6642 			}
6643 
6644 			/*
6645 			 * Fragment does not illegally overlap and can now
6646 			 * be inserted into the chain
6647 			 */
6648 			break;
6649 		}
6650 
6651 		prevmp = nmp;
6652 	}
6653 	/* Prepend the attributes before we link it in */
6654 	if (iramp != NULL) {
6655 		ASSERT(iramp->b_cont == NULL);
6656 		iramp->b_cont = mp;
6657 		mp = iramp;
6658 		iramp = NULL;
6659 	}
6660 	mp->b_next = nmp;
6661 
6662 	if (prevmp == NULL) {
6663 		fep->itpfe_fraglist = mp;
6664 	} else {
6665 		prevmp->b_next = mp;
6666 	}
6667 	if (last)
6668 		fep->itpfe_last = 1;
6669 
6670 	/* Part of defense for jolt2.c fragmentation attack */
6671 	if (++(fep->itpfe_depth) > IPSEC_MAX_FRAGS) {
6672 		(void) fragcache_delentry(i, fep, frag, ipss);
6673 		mutex_exit(&frag->itpf_lock);
6674 		if (inbound)
6675 			mp = ip_recv_attr_free_mblk(mp);
6676 
6677 		ip_drop_packet(mp, inbound, NULL,
6678 		    DROPPER(ipss, ipds_spd_max_frags),
6679 		    &ipss->ipsec_spd_dropper);
6680 		return (NULL);
6681 	}
6682 
6683 	/* Check for complete packet */
6684 
6685 	if (!fep->itpfe_last) {
6686 		mutex_exit(&frag->itpf_lock);
6687 #ifdef FRAGCACHE_DEBUG
6688 		cmn_err(CE_WARN, "Fragment cached, last not yet seen.\n");
6689 #endif
6690 		return (NULL);
6691 	}
6692 
6693 	offset = 0;
6694 	for (mp = fep->itpfe_fraglist; mp; mp = mp->b_next) {
6695 		mblk_t *data_mp = (inbound ? mp->b_cont : mp);
6696 		int hdr_len;
6697 
6698 		oiph  = (ipha_t *)data_mp->b_rptr;
6699 		ip6h = NULL;
6700 		iph = NULL;
6701 
6702 		if (IPH_HDR_VERSION(oiph) == IPV4_VERSION) {
6703 			hdr_len = ((outer_hdr_len != 0) ?
6704 			    IPH_HDR_LENGTH(oiph) : 0);
6705 			iph = (ipha_t *)(data_mp->b_rptr + hdr_len);
6706 		} else {
6707 			ASSERT(IPH_HDR_VERSION(oiph) == IPV6_VERSION);
6708 			ASSERT(data_mp->b_cont == NULL);
6709 			ip6h = (ip6_t *)data_mp->b_rptr;
6710 			(void) ip_hdr_length_nexthdr_v6(data_mp, ip6h,
6711 			    &ip6_hdr_length, &v6_proto_p);
6712 			hdr_len = ((outer_hdr_len != 0) ? ip6_hdr_length : 0);
6713 		}
6714 
6715 		/* Calculate current fragment start/end */
6716 		if (is_v4) {
6717 			if (iph == NULL) {
6718 				/* Was v6 outer */
6719 				iph = (ipha_t *)(data_mp->b_rptr + hdr_len);
6720 			}
6721 			firstbyte = V4_FRAG_OFFSET(iph);
6722 			lastbyte = firstbyte + ntohs(iph->ipha_length) -
6723 			    IPH_HDR_LENGTH(iph);
6724 		} else {
6725 			ASSERT(data_mp->b_cont == NULL);
6726 			ip6h = (ip6_t *)(data_mp->b_rptr + hdr_len);
6727 			if (!ip_hdr_length_nexthdr_v6(data_mp, ip6h,
6728 			    &ip6_hdr_length, &v6_proto_p)) {
6729 				mutex_exit(&frag->itpf_lock);
6730 				ip_drop_packet_chain(mp, inbound, NULL,
6731 				    DROPPER(ipss, ipds_spd_malformed_frag),
6732 				    &ipss->ipsec_spd_dropper);
6733 				return (NULL);
6734 			}
6735 			v6_proto = *v6_proto_p;
6736 			bzero(&ipp, sizeof (ipp));
6737 			(void) ip_find_hdr_v6(data_mp, ip6h, B_FALSE, &ipp,
6738 			    NULL);
6739 			fraghdr = ipp.ipp_fraghdr;
6740 			firstbyte = ntohs(fraghdr->ip6f_offlg &
6741 			    IP6F_OFF_MASK);
6742 			lastbyte  = firstbyte + ntohs(ip6h->ip6_plen) +
6743 			    sizeof (ip6_t) - ip6_hdr_length;
6744 		}
6745 
6746 		/*
6747 		 * If this fragment starts beyond the current offset,
6748 		 * we have a missing fragment, so return NULL.
6749 		 */
6750 		if (firstbyte > offset) {
6751 			mutex_exit(&frag->itpf_lock);
6752 #ifdef FRAGCACHE_DEBUG
6753 			/*
6754 			 * Note, this can happen when the last frag
6755 			 * gets sent through because it is smaller
6756 			 * than the MTU.  It is not necessarily an
6757 			 * error condition.
6758 			 */
6759 			cmn_err(CE_WARN, "Frag greater than offset! : "
6760 			    "missing fragment: firstbyte = %d, offset = %d, "
6761 			    "mp = %p\n", firstbyte, offset, mp);
6762 #endif
6763 			return (NULL);
6764 		}
6765 #ifdef FRAGCACHE_DEBUG
6766 		cmn_err(CE_WARN, "Frag offsets : "
6767 		    "firstbyte = %d, offset = %d, mp = %p\n",
6768 		    firstbyte, offset, mp);
6769 #endif
6770 
6771 		/*
6772 		 * If we are at the last fragment, we have the complete
6773 		 * packet, so rechain things and return it to caller
6774 		 * for processing
6775 		 */
6776 
6777 		if ((is_v4 && !V4_MORE_FRAGS(iph)) ||
6778 		    (!is_v4 && !(fraghdr->ip6f_offlg & IP6F_MORE_FRAG))) {
6779 			mp = fep->itpfe_fraglist;
6780 			fep->itpfe_fraglist = NULL;
6781 			(void) fragcache_delentry(i, fep, frag, ipss);
6782 			mutex_exit(&frag->itpf_lock);
6783 
6784 			if ((is_v4 && (firstbyte + ntohs(iph->ipha_length) >
6785 			    65535)) || (!is_v4 && (firstbyte +
6786 			    ntohs(ip6h->ip6_plen) > 65535))) {
6787 				/* It is an invalid "ping-o-death" packet */
6788 				/* Discard it */
6789 				ip_drop_packet_chain(mp, inbound, NULL,
6790 				    DROPPER(ipss, ipds_spd_evil_frag),
6791 				    &ipss->ipsec_spd_dropper);
6792 				return (NULL);
6793 			}
6794 #ifdef FRAGCACHE_DEBUG
6795 			cmn_err(CE_WARN, "Fragcache returning mp = %p, "
6796 			    "mp->b_next = %p", mp, mp->b_next);
6797 #endif
6798 			/*
6799 			 * For inbound case, mp has attrmp b_next'd chain
6800 			 * For outbound case, it is just data mp chain
6801 			 */
6802 			return (mp);
6803 		}
6804 
6805 		/*
6806 		 * Update new ending offset if this
6807 		 * fragment extends the packet
6808 		 */
6809 		if (offset < lastbyte)
6810 			offset = lastbyte;
6811 	}
6812 
6813 	mutex_exit(&frag->itpf_lock);
6814 
6815 	/* Didn't find last fragment, so return NULL */
6816 	return (NULL);
6817 }
6818 
6819 static void
6820 ipsec_fragcache_clean(ipsec_fragcache_t *frag, ipsec_stack_t *ipss)
6821 {
6822 	ipsec_fragcache_entry_t *fep;
6823 	int i;
6824 	ipsec_fragcache_entry_t *earlyfep = NULL;
6825 	time_t itpf_time;
6826 	int earlyexp;
6827 	int earlyi = 0;
6828 
6829 	ASSERT(MUTEX_HELD(&frag->itpf_lock));
6830 
6831 	itpf_time = gethrestime_sec();
6832 	earlyexp = itpf_time + 10000;
6833 
6834 	for (i = 0; i < IPSEC_FRAG_HASH_SLOTS; i++) {
6835 		fep = (frag->itpf_ptr)[i];
6836 		while (fep) {
6837 			if (fep->itpfe_exp < itpf_time) {
6838 				/* found */
6839 				fep = fragcache_delentry(i, fep, frag, ipss);
6840 			} else {
6841 				if (fep->itpfe_exp < earlyexp) {
6842 					earlyfep = fep;
6843 					earlyexp = fep->itpfe_exp;
6844 					earlyi = i;
6845 				}
6846 				fep = fep->itpfe_next;
6847 			}
6848 		}
6849 	}
6850 
6851 	frag->itpf_expire_hint = earlyexp;
6852 
6853 	/* if (!found) */
6854 	if (frag->itpf_freelist == NULL)
6855 		(void) fragcache_delentry(earlyi, earlyfep, frag, ipss);
6856 }
6857 
6858 static ipsec_fragcache_entry_t *
6859 fragcache_delentry(int slot, ipsec_fragcache_entry_t *fep,
6860     ipsec_fragcache_t *frag, ipsec_stack_t *ipss)
6861 {
6862 	ipsec_fragcache_entry_t *targp;
6863 	ipsec_fragcache_entry_t *nextp = fep->itpfe_next;
6864 
6865 	ASSERT(MUTEX_HELD(&frag->itpf_lock));
6866 
6867 	/* Free up any fragment list still in cache entry */
6868 	if (fep->itpfe_fraglist != NULL) {
6869 		ip_drop_packet_chain(fep->itpfe_fraglist,
6870 		    ip_recv_attr_is_mblk(fep->itpfe_fraglist), NULL,
6871 		    DROPPER(ipss, ipds_spd_expired_frags),
6872 		    &ipss->ipsec_spd_dropper);
6873 	}
6874 	fep->itpfe_fraglist = NULL;
6875 
6876 	targp = (frag->itpf_ptr)[slot];
6877 	ASSERT(targp != 0);
6878 
6879 	if (targp == fep) {
6880 		/* unlink from head of hash chain */
6881 		(frag->itpf_ptr)[slot] = nextp;
6882 		/* link into free list */
6883 		fep->itpfe_next = frag->itpf_freelist;
6884 		frag->itpf_freelist = fep;
6885 		return (nextp);
6886 	}
6887 
6888 	/* maybe should use a doubly linked list to make updates faster */
6889 	/* must be past front of chain */
6890 	while (targp) {
6891 		if (targp->itpfe_next == fep) {
6892 			/* unlink from hash chain */
6893 			targp->itpfe_next = nextp;
6894 			/* link into free list */
6895 			fep->itpfe_next = frag->itpf_freelist;
6896 			frag->itpf_freelist = fep;
6897 			return (nextp);
6898 		}
6899 		targp = targp->itpfe_next;
6900 		ASSERT(targp != 0);
6901 	}
6902 	/* NOTREACHED */
6903 	return (NULL);
6904 }
6905