xref: /titanic_50/usr/src/uts/common/inet/ip/ipsecesp.c (revision 18c2aff776a775d34a4c9893a4c72e0434d68e36)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/stream.h>
30 #include <sys/stropts.h>
31 #include <sys/errno.h>
32 #include <sys/strlog.h>
33 #include <sys/tihdr.h>
34 #include <sys/socket.h>
35 #include <sys/ddi.h>
36 #include <sys/sunddi.h>
37 #include <sys/kmem.h>
38 #include <sys/sysmacros.h>
39 #include <sys/cmn_err.h>
40 #include <sys/vtrace.h>
41 #include <sys/debug.h>
42 #include <sys/atomic.h>
43 #include <sys/strsun.h>
44 #include <sys/random.h>
45 #include <netinet/in.h>
46 #include <net/if.h>
47 #include <netinet/ip6.h>
48 #include <net/pfkeyv2.h>
49 
50 #include <inet/common.h>
51 #include <inet/mi.h>
52 #include <inet/nd.h>
53 #include <inet/ip.h>
54 #include <inet/ip6.h>
55 #include <inet/sadb.h>
56 #include <inet/ipsec_info.h>
57 #include <inet/ipsec_impl.h>
58 #include <inet/ipsecesp.h>
59 #include <inet/ipdrop.h>
60 #include <inet/tcp.h>
61 #include <sys/kstat.h>
62 #include <sys/policy.h>
63 #include <sys/strsun.h>
64 #include <inet/udp_impl.h>
65 #include <sys/taskq.h>
66 
67 #include <sys/iphada.h>
68 
69 /* Packet dropper for ESP drops. */
70 static ipdropper_t esp_dropper;
71 
72 static kmutex_t ipsecesp_param_lock; /* Protects ipsecesp_param_arr[] below. */
73 /*
74  * Table of ND variables supported by ipsecesp. These are loaded into
75  * ipsecesp_g_nd in ipsecesp_ddi_init().
76  * All of these are alterable, within the min/max values given, at run time.
77  */
78 static	ipsecespparam_t	ipsecesp_param_arr[] = {
79 	/* min	max			value	name */
80 	{ 0,	3,			0,	"ipsecesp_debug"},
81 	{ 125,	32000, SADB_AGE_INTERVAL_DEFAULT, "ipsecesp_age_interval"},
82 	{ 1,	10,			1,	"ipsecesp_reap_delay"},
83 	{ 1,	SADB_MAX_REPLAY,	64,	"ipsecesp_replay_size"},
84 	{ 1,	300,			15,	"ipsecesp_acquire_timeout"},
85 	{ 1,	1800,			90,	"ipsecesp_larval_timeout"},
86 	/* Default lifetime values for ACQUIRE messages. */
87 	{ 0,	0xffffffffU,	0,	"ipsecesp_default_soft_bytes"},
88 	{ 0,	0xffffffffU,	0,	"ipsecesp_default_hard_bytes"},
89 	{ 0,	0xffffffffU,	24000,	"ipsecesp_default_soft_addtime"},
90 	{ 0,	0xffffffffU,	28800,	"ipsecesp_default_hard_addtime"},
91 	{ 0,	0xffffffffU,	0,	"ipsecesp_default_soft_usetime"},
92 	{ 0,	0xffffffffU,	0,	"ipsecesp_default_hard_usetime"},
93 	{ 0,	1,		0,	"ipsecesp_log_unknown_spi"},
94 	{ 0,	2,		1,	"ipsecesp_padding_check"},
95 };
96 #define	ipsecesp_debug		ipsecesp_param_arr[0].ipsecesp_param_value
97 #define	ipsecesp_age_interval	ipsecesp_param_arr[1].ipsecesp_param_value
98 #define	ipsecesp_age_int_max	ipsecesp_param_arr[1].ipsecesp_param_max
99 #define	ipsecesp_reap_delay	ipsecesp_param_arr[2].ipsecesp_param_value
100 #define	ipsecesp_replay_size	ipsecesp_param_arr[3].ipsecesp_param_value
101 #define	ipsecesp_acquire_timeout ipsecesp_param_arr[4].ipsecesp_param_value
102 #define	ipsecesp_larval_timeout ipsecesp_param_arr[5].ipsecesp_param_value
103 #define	ipsecesp_default_soft_bytes \
104 	ipsecesp_param_arr[6].ipsecesp_param_value
105 #define	ipsecesp_default_hard_bytes \
106 	ipsecesp_param_arr[7].ipsecesp_param_value
107 #define	ipsecesp_default_soft_addtime \
108 	ipsecesp_param_arr[8].ipsecesp_param_value
109 #define	ipsecesp_default_hard_addtime \
110 	ipsecesp_param_arr[9].ipsecesp_param_value
111 #define	ipsecesp_default_soft_usetime \
112 	ipsecesp_param_arr[10].ipsecesp_param_value
113 #define	ipsecesp_default_hard_usetime \
114 	ipsecesp_param_arr[11].ipsecesp_param_value
115 #define	ipsecesp_log_unknown_spi \
116 	ipsecesp_param_arr[12].ipsecesp_param_value
117 #define	ipsecesp_padding_check \
118 	ipsecesp_param_arr[13].ipsecesp_param_value
119 
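/*
 * The parameters above surface as ndd(1M) tunables once this module is
 * plumbed.  For example, assuming the usual /dev/ipsecesp node, a value can
 * be inspected or changed roughly like so:
 *
 *	# ndd /dev/ipsecesp ipsecesp_replay_size
 *	# ndd -set /dev/ipsecesp ipsecesp_replay_size 128
 *
 * ipsecesp_param_set() below rejects values outside the min/max columns of
 * ipsecesp_param_arr[].
 */
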
120 #define	esp0dbg(a)	printf a
121 /* NOTE:  != 0 instead of > 0 so lint doesn't complain. */
122 #define	esp1dbg(a)	if (ipsecesp_debug != 0) printf a
123 #define	esp2dbg(a)	if (ipsecesp_debug > 1) printf a
124 #define	esp3dbg(a)	if (ipsecesp_debug > 2) printf a
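
/*
 * Each espNdbg() macro takes a single parenthesized printf() argument list,
 * which is why callers wrap their arguments in double parentheses, e.g.
 *
 *	esp1dbg(("ipsecesp: Can't reassign ip_q.\n"));
 *	esp2dbg((dump_msg(data_mp)));
 */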
125 
126 static IDP ipsecesp_g_nd;
127 
128 static int ipsecesp_open(queue_t *, dev_t *, int, int, cred_t *);
129 static int ipsecesp_close(queue_t *);
130 static void ipsecesp_rput(queue_t *, mblk_t *);
131 static void ipsecesp_wput(queue_t *, mblk_t *);
132 static void esp_send_acquire(ipsacq_t *, mblk_t *);
133 
134 static ipsec_status_t esp_outbound_accelerated(mblk_t *, uint_t);
135 static ipsec_status_t esp_inbound_accelerated(mblk_t *, mblk_t *,
136     boolean_t, ipsa_t *);
137 
138 static boolean_t esp_register_out(uint32_t, uint32_t, uint_t);
139 static boolean_t esp_strip_header(mblk_t *, boolean_t, uint32_t,
140     kstat_named_t **);
141 static ipsec_status_t esp_submit_req_inbound(mblk_t *, ipsa_t *, uint_t);
142 static ipsec_status_t esp_submit_req_outbound(mblk_t *, ipsa_t *, uchar_t *,
143     uint_t);
144 
145 static struct module_info info = {
146 	5137, "ipsecesp", 0, INFPSZ, 65536, 1024
147 };
148 
149 static struct qinit rinit = {
150 	(pfi_t)ipsecesp_rput, NULL, ipsecesp_open, ipsecesp_close, NULL, &info,
151 	NULL
152 };
153 
154 static struct qinit winit = {
155 	(pfi_t)ipsecesp_wput, NULL, ipsecesp_open, ipsecesp_close, NULL, &info,
156 	NULL
157 };
158 
159 struct streamtab ipsecespinfo = {
160 	&rinit, &winit, NULL, NULL
161 };
162 
163 /*
164  * Keysock instance of ESP.  "There can be only one." :)
165  * Use casptr() on this because I don't set it until KEYSOCK_HELLO comes down.
166  * Paired up with the esp_pfkey_q is the esp_event, which will age SAs.
167  */
168 static queue_t *esp_pfkey_q;
169 static timeout_id_t esp_event;
170 static taskq_t *esp_taskq;
171 
172 /*
173  * OTOH, this one is set at open/close, and I'm D_MTQPAIR for now.
174  *
175  * Question:	Do I need this, given that all instances' esps->esps_wq point
176  *		to IP?
177  *
178  * Answer:	Yes, because I need to know which queue is BOUND to
179  *		IPPROTO_ESP
180  */
181 static mblk_t *esp_ip_unbind;
182 
183 /*
184  * Stats.  This may eventually become a full-blown SNMP MIB once that spec
185  * stabilizes.
186  */
187 
188 typedef struct {
189 	kstat_named_t esp_stat_num_aalgs;
190 	kstat_named_t esp_stat_good_auth;
191 	kstat_named_t esp_stat_bad_auth;
192 	kstat_named_t esp_stat_bad_padding;
193 	kstat_named_t esp_stat_replay_failures;
194 	kstat_named_t esp_stat_replay_early_failures;
195 	kstat_named_t esp_stat_keysock_in;
196 	kstat_named_t esp_stat_out_requests;
197 	kstat_named_t esp_stat_acquire_requests;
198 	kstat_named_t esp_stat_bytes_expired;
199 	kstat_named_t esp_stat_out_discards;
200 	kstat_named_t esp_stat_in_accelerated;
201 	kstat_named_t esp_stat_out_accelerated;
202 	kstat_named_t esp_stat_noaccel;
203 	kstat_named_t esp_stat_crypto_sync;
204 	kstat_named_t esp_stat_crypto_async;
205 	kstat_named_t esp_stat_crypto_failures;
206 	kstat_named_t esp_stat_num_ealgs;
207 	kstat_named_t esp_stat_bad_decrypt;
208 } esp_kstats_t;
209 
210 uint32_t esp_hash_size = IPSEC_DEFAULT_HASH_SIZE;
211 #define	ESP_BUMP_STAT(x) (esp_kstats->esp_stat_ ## x).value.ui64++
212 #define	ESP_DEBUMP_STAT(x) (esp_kstats->esp_stat_ ## x).value.ui64--
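
/*
 * ESP_BUMP_STAT(x) expands to a plain increment of the matching counter,
 * e.g. ESP_BUMP_STAT(good_auth) becomes
 * (esp_kstats->esp_stat_good_auth).value.ui64++.  The counters are exported
 * through the "ipsecesp:0:esp_stat" kstat created below, so (assuming the
 * usual kstat(1M) utility) they can be read with "kstat -m ipsecesp -n
 * esp_stat".
 */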
213 
214 static kstat_t *esp_ksp;
215 static esp_kstats_t *esp_kstats;
216 
217 static int	esp_kstat_update(kstat_t *, int);
218 
219 static boolean_t
220 esp_kstat_init(void)
221 {
222 	esp_ksp = kstat_create("ipsecesp", 0, "esp_stat", "net",
223 	    KSTAT_TYPE_NAMED, sizeof (*esp_kstats) / sizeof (kstat_named_t),
224 	    KSTAT_FLAG_PERSISTENT);
225 
226 	if (esp_ksp == NULL)
227 		return (B_FALSE);
228 
229 	esp_kstats = esp_ksp->ks_data;
230 
231 	esp_ksp->ks_update = esp_kstat_update;
232 
233 #define	K64 KSTAT_DATA_UINT64
234 #define	KI(x) kstat_named_init(&(esp_kstats->esp_stat_##x), #x, K64)
235 
236 	KI(num_aalgs);
237 	KI(num_ealgs);
238 	KI(good_auth);
239 	KI(bad_auth);
240 	KI(bad_padding);
241 	KI(replay_failures);
242 	KI(replay_early_failures);
243 	KI(keysock_in);
244 	KI(out_requests);
245 	KI(acquire_requests);
246 	KI(bytes_expired);
247 	KI(out_discards);
248 	KI(in_accelerated);
249 	KI(out_accelerated);
250 	KI(noaccel);
251 	KI(crypto_sync);
252 	KI(crypto_async);
253 	KI(crypto_failures);
254 	KI(bad_decrypt);
255 
256 #undef KI
257 #undef K64
258 
259 	kstat_install(esp_ksp);
260 
261 	return (B_TRUE);
262 }
263 
264 static int
265 esp_kstat_update(kstat_t *kp, int rw)
266 {
267 	esp_kstats_t *ekp;
268 
269 	if ((kp == NULL) || (kp->ks_data == NULL))
270 		return (EIO);
271 
272 	if (rw == KSTAT_WRITE)
273 		return (EACCES);
274 
275 	ASSERT(kp == esp_ksp);
276 	ekp = (esp_kstats_t *)kp->ks_data;
277 	ASSERT(ekp == esp_kstats);
278 
279 	mutex_enter(&alg_lock);
280 	ekp->esp_stat_num_aalgs.value.ui64 = ipsec_nalgs[IPSEC_ALG_AUTH];
281 	ekp->esp_stat_num_ealgs.value.ui64 = ipsec_nalgs[IPSEC_ALG_ENCR];
282 	mutex_exit(&alg_lock);
283 
284 	return (0);
285 }
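
/*
 * Note that only num_aalgs and num_ealgs are computed on demand here from
 * the global algorithm tables; every other counter in esp_kstats_t is
 * bumped inline via ESP_BUMP_STAT() as the corresponding event occurs.
 */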
286 
287 #ifdef DEBUG
288 /*
289  * Debug routine, useful to see pre-encryption data.
290  */
291 static char *
292 dump_msg(mblk_t *mp)
293 {
294 	char tmp_str[3], tmp_line[256];
295 
296 	while (mp != NULL) {
297 		unsigned char *ptr;
298 
299 		printf("mblk address 0x%p, length %ld, db_ref %d "
300 		    "type %d, base 0x%p, lim 0x%p\n",
301 		    (void *) mp, (long)(mp->b_wptr - mp->b_rptr),
302 		    mp->b_datap->db_ref, mp->b_datap->db_type,
303 		    (void *)mp->b_datap->db_base, (void *)mp->b_datap->db_lim);
304 		ptr = mp->b_rptr;
305 
306 		tmp_line[0] = '\0';
307 		while (ptr < mp->b_wptr) {
308 			uint_t diff;
309 
310 			diff = (ptr - mp->b_rptr);
311 			if (!(diff & 0x1f)) {
312 				if (strlen(tmp_line) > 0) {
313 					printf("bytes: %s\n", tmp_line);
314 					tmp_line[0] = '\0';
315 				}
316 			}
317 			if (!(diff & 0x3))
318 				(void) strcat(tmp_line, " ");
319 			(void) sprintf(tmp_str, "%02x", *ptr);
320 			(void) strcat(tmp_line, tmp_str);
321 			ptr++;
322 		}
323 		if (strlen(tmp_line) > 0)
324 			printf("bytes: %s\n", tmp_line);
325 
326 		mp = mp->b_cont;
327 	}
328 
329 	return ("\n");
330 }
331 
332 #else /* DEBUG */
333 static char *
334 dump_msg(mblk_t *mp)
335 {
336 	printf("Find value of mp %p.\n", mp);
337 	return ("\n");
338 }
339 #endif /* DEBUG */
340 
341 /*
342  * Don't have to lock age_interval, as only one thread will access it at
343  * a time, because I control the one function that does with timeout().
344  */
345 /* ARGSUSED */
346 static void
347 esp_ager(void *ignoreme)
348 {
349 	hrtime_t begin = gethrtime();
350 
351 	sadb_ager(&esp_sadb.s_v4, esp_pfkey_q, esp_sadb.s_ip_q,
352 	    ipsecesp_reap_delay);
353 	sadb_ager(&esp_sadb.s_v6, esp_pfkey_q, esp_sadb.s_ip_q,
354 	    ipsecesp_reap_delay);
355 
356 	esp_event = sadb_retimeout(begin, esp_pfkey_q, esp_ager,
357 	    &(ipsecesp_age_interval), ipsecesp_age_int_max, info.mi_idnum);
358 }
359 
360 /*
361  * Get an ESP NDD parameter.
362  */
363 /* ARGSUSED */
364 static int
365 ipsecesp_param_get(q, mp, cp, cr)
366 	queue_t	*q;
367 	mblk_t	*mp;
368 	caddr_t	cp;
369 	cred_t *cr;
370 {
371 	ipsecespparam_t	*ipsecesppa = (ipsecespparam_t *)cp;
372 	uint_t value;
373 
374 	mutex_enter(&ipsecesp_param_lock);
375 	value = ipsecesppa->ipsecesp_param_value;
376 	mutex_exit(&ipsecesp_param_lock);
377 
378 	(void) mi_mpprintf(mp, "%u", value);
379 	return (0);
380 }
381 
382 /*
383  * This routine sets an NDD variable in an ipsecespparam_t structure.
384  */
385 /* ARGSUSED */
386 static int
387 ipsecesp_param_set(q, mp, value, cp, cr)
388 	queue_t	*q;
389 	mblk_t	*mp;
390 	char	*value;
391 	caddr_t	cp;
392 	cred_t *cr;
393 {
394 	ulong_t	new_value;
395 	ipsecespparam_t	*ipsecesppa = (ipsecespparam_t *)cp;
396 
397 	/*
398 	 * Fail the request if the new value does not lie within the
399 	 * required bounds.
400 	 */
401 	if (ddi_strtoul(value, NULL, 10, &new_value) != 0 ||
402 	    new_value < ipsecesppa->ipsecesp_param_min ||
403 	    new_value > ipsecesppa->ipsecesp_param_max) {
404 		return (EINVAL);
405 	}
406 
407 	/* Set the new value */
408 	mutex_enter(&ipsecesp_param_lock);
409 	ipsecesppa->ipsecesp_param_value = new_value;
410 	mutex_exit(&ipsecesp_param_lock);
411 	return (0);
412 }
413 
414 /*
415  * Using lifetime NDD variables, fill in an extended combination's
416  * lifetime information.
417  */
418 void
419 ipsecesp_fill_defs(sadb_x_ecomb_t *ecomb)
420 {
421 	ecomb->sadb_x_ecomb_soft_bytes = ipsecesp_default_soft_bytes;
422 	ecomb->sadb_x_ecomb_hard_bytes = ipsecesp_default_hard_bytes;
423 	ecomb->sadb_x_ecomb_soft_addtime = ipsecesp_default_soft_addtime;
424 	ecomb->sadb_x_ecomb_hard_addtime = ipsecesp_default_hard_addtime;
425 	ecomb->sadb_x_ecomb_soft_usetime = ipsecesp_default_soft_usetime;
426 	ecomb->sadb_x_ecomb_hard_usetime = ipsecesp_default_hard_usetime;
427 }
428 
429 /*
430  * Initialize things for ESP at module load time.
431  */
432 boolean_t
433 ipsecesp_ddi_init(void)
434 {
435 	int count;
436 	ipsecespparam_t *espp = ipsecesp_param_arr;
437 
438 	for (count = A_CNT(ipsecesp_param_arr); count-- > 0; espp++) {
439 		if (espp->ipsecesp_param_name != NULL &&
440 		    espp->ipsecesp_param_name[0]) {
441 			if (!nd_load(&ipsecesp_g_nd, espp->ipsecesp_param_name,
442 			    ipsecesp_param_get, ipsecesp_param_set,
443 			    (caddr_t)espp)) {
444 				nd_free(&ipsecesp_g_nd);
445 				return (B_FALSE);
446 			}
447 		}
448 	}
449 
450 	if (!esp_kstat_init()) {
451 		nd_free(&ipsecesp_g_nd);
452 		return (B_FALSE);
453 	}
454 
455 	esp_sadb.s_acquire_timeout = &ipsecesp_acquire_timeout;
456 	esp_sadb.s_acqfn = esp_send_acquire;
457 	sadbp_init("ESP", &esp_sadb, SADB_SATYPE_ESP, esp_hash_size);
458 
459 	esp_taskq = taskq_create("esp_taskq", 1, minclsyspri,
460 	    IPSEC_TASKQ_MIN, IPSEC_TASKQ_MAX, 0);
461 
462 	mutex_init(&ipsecesp_param_lock, NULL, MUTEX_DEFAULT, 0);
463 
464 	ip_drop_register(&esp_dropper, "IPsec ESP");
465 
466 	return (B_TRUE);
467 }
468 
469 /*
470  * Destroy things for ESP at module unload time.
471  */
472 void
473 ipsecesp_ddi_destroy(void)
474 {
475 	esp1dbg(("In ipsecesp_ddi_destroy.\n"));
476 
477 	sadbp_destroy(&esp_sadb);
478 	ip_drop_unregister(&esp_dropper);
479 	taskq_destroy(esp_taskq);
480 	mutex_destroy(&ipsecesp_param_lock);
481 	nd_free(&ipsecesp_g_nd);
482 	kstat_delete(esp_ksp);
483 }
484 
485 /*
486  * ESP module open routine.
487  */
488 /* ARGSUSED */
489 static int
490 ipsecesp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
491 {
492 	if (secpolicy_net_config(credp, B_FALSE) != 0) {
493 		esp1dbg(("Non-privileged user trying to open ipsecesp.\n"));
494 		return (EPERM);
495 	}
496 
497 	if (q->q_ptr != NULL)
498 		return (0);  /* Re-open of an already open instance. */
499 
500 	if (sflag != MODOPEN)
501 		return (EINVAL);
502 
503 	/*
504 	 * ASSUMPTIONS (because I'm MT_OCEXCL):
505 	 *
506 	 *	* I'm being pushed on top of IP for all my opens (incl. #1).
507 	 *	* Only ipsecesp_open() can write into esp_sadb.s_ip_q.
508 	 *	* Because of this, I can check lazily for esp_sadb.s_ip_q.
509 	 *
510 	 *  If these assumptions are wrong, I'm in BIG trouble...
511 	 */
512 
513 	q->q_ptr = q; /* just so I know I'm open */
514 
515 	if (esp_sadb.s_ip_q == NULL) {
516 		struct T_unbind_req *tur;
517 
518 		esp_sadb.s_ip_q = WR(q);
519 		/* Allocate an unbind... */
520 		esp_ip_unbind = allocb(sizeof (struct T_unbind_req), BPRI_HI);
521 
522 		/*
523 		 * Send down T_BIND_REQ to bind IPPROTO_ESP.
524 		 * Handle the ACK here in ESP.
525 		 */
526 		qprocson(q);
527 		if (esp_ip_unbind == NULL ||
528 		    !sadb_t_bind_req(esp_sadb.s_ip_q, IPPROTO_ESP)) {
529 			if (esp_ip_unbind != NULL) {
530 				freeb(esp_ip_unbind);
531 				esp_ip_unbind = NULL;
532 			}
533 			q->q_ptr = NULL;
534 			return (ENOMEM);
535 		}
536 
537 		esp_ip_unbind->b_datap->db_type = M_PROTO;
538 		tur = (struct T_unbind_req *)esp_ip_unbind->b_rptr;
539 		tur->PRIM_type = T_UNBIND_REQ;
540 	} else {
541 		qprocson(q);
542 	}
543 
544 	/*
545 	 * For now, there's not much I can do.  I'll be getting a message
546 	 * passed down to me from keysock (in my wput), and a T_BIND_ACK
547 	 * up from IP (in my rput).
548 	 */
549 
550 	return (0);
551 }
552 
553 /*
554  * ESP module close routine.
555  */
556 static int
557 ipsecesp_close(queue_t *q)
558 {
559 	/*
560 	 * If esp_sadb.s_ip_q is attached to this instance, send a
561 	 * T_UNBIND_REQ to IP for the instance before doing
562 	 * a qprocsoff().
563 	 */
564 	if (WR(q) == esp_sadb.s_ip_q && esp_ip_unbind != NULL) {
565 		putnext(WR(q), esp_ip_unbind);
566 		esp_ip_unbind = NULL;
567 	}
568 
569 	/*
570 	 * Clean up q_ptr, if needed.
571 	 */
572 	qprocsoff(q);
573 
574 	/* Keysock queue check is safe, because of OCEXCL perimeter. */
575 
576 	if (q == esp_pfkey_q) {
577 		esp0dbg(("ipsecesp_close:  Ummm... keysock is closing ESP.\n"));
578 		esp_pfkey_q = NULL;
579 		/* Detach qtimeouts. */
580 		(void) quntimeout(q, esp_event);
581 	}
582 
583 	if (WR(q) == esp_sadb.s_ip_q) {
584 		/*
585 		 * If the esp_sadb.s_ip_q is attached to this instance, find
586 		 * another.  The OCEXCL outer perimeter helps us here.
587 		 */
588 		esp_sadb.s_ip_q = NULL;
589 
590 		/*
591 		 * Find a replacement queue for esp_sadb.s_ip_q.
592 		 */
593 		if (esp_pfkey_q != NULL && esp_pfkey_q != RD(q)) {
594 			/*
595 			 * See if we can use the pfkey_q.
596 			 */
597 			esp_sadb.s_ip_q = WR(esp_pfkey_q);
598 		}
599 
600 		if (esp_sadb.s_ip_q == NULL ||
601 		    !sadb_t_bind_req(esp_sadb.s_ip_q, IPPROTO_ESP)) {
602 			esp1dbg(("ipsecesp: Can't reassign ip_q.\n"));
603 			esp_sadb.s_ip_q = NULL;
604 		} else {
605 			esp_ip_unbind = allocb(sizeof (struct T_unbind_req),
606 			    BPRI_HI);
607 
608 			if (esp_ip_unbind != NULL) {
609 				struct T_unbind_req *tur;
610 
611 				esp_ip_unbind->b_datap->db_type = M_PROTO;
612 				tur = (struct T_unbind_req *)
613 				    esp_ip_unbind->b_rptr;
614 				tur->PRIM_type = T_UNBIND_REQ;
615 			}
616 			/* If it's NULL, I can't do much here. */
617 		}
618 	}
619 
620 	return (0);
621 }
622 
623 /*
624  * Add a number of bytes to what the SA has protected so far.  Return
625  * B_TRUE if the SA can still protect that many bytes.
626  *
627  * Caller must REFRELE the passed-in assoc.  This function must REFRELE
628  * any obtained peer SA.
629  */
630 static boolean_t
631 esp_age_bytes(ipsa_t *assoc, uint64_t bytes, boolean_t inbound)
632 {
633 	ipsa_t *inassoc, *outassoc;
634 	isaf_t *bucket;
635 	boolean_t inrc, outrc, isv6;
636 	sadb_t *sp;
637 	int outhash;
638 
639 	/* No peer?  No problem! */
640 	if (!assoc->ipsa_haspeer) {
641 		return (sadb_age_bytes(esp_pfkey_q, assoc, bytes,
642 		    B_TRUE));
643 	}
644 
645 	/*
646 	 * Otherwise, we want to grab both the original assoc and its peer.
647 	 * There might be a race for this, but if it's a real race, two
648 	 * expire messages may occur.  We limit this by only sending the
649  * expire message on one of the peers; we'll pick the inbound one
650  * arbitrarily.
651 	 *
652 	 * If we need tight synchronization on the peer SA, then we need to
653 	 * reconsider.
654 	 */
655 
656 	/* Use address length to select IPv6/IPv4 */
657 	isv6 = (assoc->ipsa_addrfam == AF_INET6);
658 	sp = isv6 ? &esp_sadb.s_v6 : &esp_sadb.s_v4;
659 
660 	if (inbound) {
661 		inassoc = assoc;
662 		if (isv6) {
663 			outhash = OUTBOUND_HASH_V6(sp, *((in6_addr_t *)
664 			    &inassoc->ipsa_dstaddr));
665 		} else {
666 			outhash = OUTBOUND_HASH_V4(sp, *((ipaddr_t *)
667 				&inassoc->ipsa_dstaddr));
668 		}
669 		bucket = &sp->sdb_of[outhash];
670 		mutex_enter(&bucket->isaf_lock);
671 		outassoc = ipsec_getassocbyspi(bucket, inassoc->ipsa_spi,
672 		    inassoc->ipsa_srcaddr, inassoc->ipsa_dstaddr,
673 		    inassoc->ipsa_addrfam);
674 		mutex_exit(&bucket->isaf_lock);
675 		if (outassoc == NULL) {
676 			/* Q: Do we wish to set haspeer == B_FALSE? */
677 			esp0dbg(("esp_age_bytes: "
678 			    "can't find peer for inbound.\n"));
679 			return (sadb_age_bytes(esp_pfkey_q, inassoc,
680 			    bytes, B_TRUE));
681 		}
682 	} else {
683 		outassoc = assoc;
684 		bucket = INBOUND_BUCKET(sp, outassoc->ipsa_spi);
685 		mutex_enter(&bucket->isaf_lock);
686 		inassoc = ipsec_getassocbyspi(bucket, outassoc->ipsa_spi,
687 		    outassoc->ipsa_srcaddr, outassoc->ipsa_dstaddr,
688 		    outassoc->ipsa_addrfam);
689 		mutex_exit(&bucket->isaf_lock);
690 		if (inassoc == NULL) {
691 			/* Q: Do we wish to set haspeer == B_FALSE? */
692 			esp0dbg(("esp_age_bytes: "
693 			    "can't find peer for outbound.\n"));
694 			return (sadb_age_bytes(esp_pfkey_q, outassoc,
695 			    bytes, B_TRUE));
696 		}
697 	}
698 
699 	inrc = sadb_age_bytes(esp_pfkey_q, inassoc, bytes, B_TRUE);
700 	outrc = sadb_age_bytes(esp_pfkey_q, outassoc, bytes, B_FALSE);
701 
702 	/*
703 	 * REFRELE any peer SA.
704 	 *
705 	 * Because of the multi-line macro nature of IPSA_REFRELE, keep
706 	 * them in { }.
707 	 */
708 	if (inbound) {
709 		IPSA_REFRELE(outassoc);
710 	} else {
711 		IPSA_REFRELE(inassoc);
712 	}
713 
714 	return (inrc && outrc);
715 }
716 
717 /*
718  * Do incoming NAT-T manipulations for packet.
719  */
720 static ipsec_status_t
721 esp_fix_natt_checksums(mblk_t *data_mp, ipsa_t *assoc)
722 {
723 	ipha_t *ipha = (ipha_t *)data_mp->b_rptr;
724 	tcpha_t *tcph;
725 	udpha_t *udpha;
726 	/* Initialize to our inbound cksum adjustment... */
727 	uint32_t sum = assoc->ipsa_inbound_cksum;
728 
729 	switch (ipha->ipha_protocol) {
730 	case IPPROTO_TCP:
731 		tcph = (tcpha_t *)(data_mp->b_rptr +
732 		    IPH_HDR_LENGTH(ipha));
733 
734 #define	DOWN_SUM(x) (x) = ((x) & 0xFFFF) + ((x) >> 16)
735 		sum += ~ntohs(tcph->tha_sum) & 0xFFFF;
736 		DOWN_SUM(sum);
737 		DOWN_SUM(sum);
738 		tcph->tha_sum = ~htons(sum);
739 		break;
740 	case IPPROTO_UDP:
741 		udpha = (udpha_t *)(data_mp->b_rptr + IPH_HDR_LENGTH(ipha));
742 
743 		if (udpha->uha_checksum != 0) {
744 			/* Adjust if the inbound one was not zero. */
745 			sum += ~ntohs(udpha->uha_checksum) & 0xFFFF;
746 			DOWN_SUM(sum);
747 			DOWN_SUM(sum);
748 			udpha->uha_checksum = ~htons(sum);
749 			if (udpha->uha_checksum == 0)
750 				udpha->uha_checksum = 0xFFFF;
751 		}
752 #undef DOWN_SUM
753 		break;
754 	case IPPROTO_IP:
755 		/*
756 		 * This case is only an issue for self-encapsulated
757 		 * packets.  So for now, fall through.
758 		 */
759 		break;
760 	}
761 	return (IPSEC_STATUS_SUCCESS);
762 }
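
/*
 * A quick sketch of the arithmetic above: the Internet checksum is the
 * one's complement of a one's-complement sum of 16-bit words, so the
 * adjustment is applied by adding the per-SA delta (ipsa_inbound_cksum) to
 * the complement of the current checksum and folding any carry back in.
 * DOWN_SUM() performs one fold; two folds always suffice.  For example, if
 * sum works out to 0x1abcd, the first fold yields 0xabcd + 0x1 = 0xabce,
 * the second fold leaves it unchanged, and ~0xabce (truncated to 16 bits)
 * is written back into the transport header.
 */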
763 
764 
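/*
 * For reference, the ESP-protected datagram handed to esp_strip_header()
 * looks roughly like this (per RFC 2406); the ICV, if any, has already been
 * trimmed from the tail by esp_in_done() before we get here:
 *
 *	[IP header (+ options / extension headers)]
 *	[ESP header: SPI, sequence number]
 *	[IV (ivlen bytes)]
 *	[payload data]
 *	[padding ... lastpad][pad length][next header]
 *
 * which is why nexthdr, padlen and lastpad are read from the last three
 * bytes of the message below.
 */
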
765 /*
766  * Strip ESP header and fix IP header
767  * Returns B_TRUE on success, B_FALSE if an error occurred.
768  */
769 static boolean_t
770 esp_strip_header(mblk_t *data_mp, boolean_t isv4, uint32_t ivlen,
771     kstat_named_t **counter)
772 {
773 	ipha_t *ipha;
774 	ip6_t *ip6h;
775 	uint_t divpoint;
776 	mblk_t *scratch;
777 	uint8_t nexthdr, padlen;
778 	uint8_t lastpad;
779 
780 	/*
781 	 * Strip ESP data and fix IP header.
782 	 *
783 	 * XXX In case the beginning of esp_inbound() changes to not do a
784 	 * pullup, this part of the code can remain unchanged.
785 	 */
786 	if (isv4) {
787 		ASSERT((data_mp->b_wptr - data_mp->b_rptr) >= sizeof (ipha_t));
788 		ipha = (ipha_t *)data_mp->b_rptr;
789 		ASSERT((data_mp->b_wptr - data_mp->b_rptr) >= sizeof (esph_t) +
790 		    IPH_HDR_LENGTH(ipha));
791 		divpoint = IPH_HDR_LENGTH(ipha);
792 	} else {
793 		ASSERT((data_mp->b_wptr - data_mp->b_rptr) >= sizeof (ip6_t));
794 		ip6h = (ip6_t *)data_mp->b_rptr;
795 		divpoint = ip_hdr_length_v6(data_mp, ip6h);
796 	}
797 
798 	scratch = data_mp;
799 	while (scratch->b_cont != NULL)
800 		scratch = scratch->b_cont;
801 
802 	ASSERT((scratch->b_wptr - scratch->b_rptr) >= 3);
803 
804 	/*
805 	 * "Next header" and padding length are the last two bytes in the
806 	 * ESP-protected datagram, thus the explicit - 1 and - 2.
807 	 * lastpad is the last byte of the padding, which can be used for
808 	 * a quick check to see if the padding is correct.
809 	 */
810 	nexthdr = *(scratch->b_wptr - 1);
811 	padlen = *(scratch->b_wptr - 2);
812 	lastpad = *(scratch->b_wptr - 3);
813 
814 	if (isv4) {
815 		/* Fix part of the IP header. */
816 		ipha->ipha_protocol = nexthdr;
817 		/*
818 		 * Reality check the padlen.  The explicit - 2 is for the
819 		 * padding length and the next-header bytes.
820 		 */
821 		if (padlen >= ntohs(ipha->ipha_length) - sizeof (ipha_t) - 2 -
822 		    sizeof (esph_t) - ivlen) {
823 			ESP_BUMP_STAT(bad_decrypt);
824 			ipsec_rl_strlog(info.mi_idnum, 0, 0, SL_ERROR | SL_WARN,
825 			    "Possibly corrupt ESP packet.");
826 			esp1dbg(("padlen (%d) is greater than:\n", padlen));
827 			esp1dbg(("pkt len(%d) - ip hdr - esp hdr - ivlen(%d) "
828 			    "= %d.\n", ntohs(ipha->ipha_length), ivlen,
829 			    (int)(ntohs(ipha->ipha_length) - sizeof (ipha_t) -
830 				2 - sizeof (esph_t) - ivlen)));
831 			*counter = &ipdrops_esp_bad_padlen;
832 			return (B_FALSE);
833 		}
834 
835 		/*
836 		 * Fix the rest of the header.  The explicit - 2 is for the
837 		 * padding length and the next-header bytes.
838 		 */
839 		ipha->ipha_length = htons(ntohs(ipha->ipha_length) - padlen -
840 		    2 - sizeof (esph_t) - ivlen);
841 		ipha->ipha_hdr_checksum = 0;
842 		ipha->ipha_hdr_checksum = (uint16_t)ip_csum_hdr(ipha);
843 	} else {
844 		if (ip6h->ip6_nxt == IPPROTO_ESP) {
845 			ip6h->ip6_nxt = nexthdr;
846 		} else {
847 			ip6_pkt_t ipp;
848 
849 			bzero(&ipp, sizeof (ipp));
850 			(void) ip_find_hdr_v6(data_mp, ip6h, &ipp, NULL);
851 			if (ipp.ipp_dstopts != NULL) {
852 				ipp.ipp_dstopts->ip6d_nxt = nexthdr;
853 			} else if (ipp.ipp_rthdr != NULL) {
854 				ipp.ipp_rthdr->ip6r_nxt = nexthdr;
855 			} else if (ipp.ipp_hopopts != NULL) {
856 				ipp.ipp_hopopts->ip6h_nxt = nexthdr;
857 			} else {
858 				/* Panic a DEBUG kernel. */
859 				ASSERT(ipp.ipp_hopopts != NULL);
860 				/* Otherwise, pretend it's IP + ESP. */
861 				cmn_err(CE_WARN, "ESP IPv6 headers wrong.\n");
862 				ip6h->ip6_nxt = nexthdr;
863 			}
864 		}
865 
866 		if (padlen >= ntohs(ip6h->ip6_plen) - 2 - sizeof (esph_t) -
867 		    ivlen) {
868 			ESP_BUMP_STAT(bad_decrypt);
869 			ipsec_rl_strlog(info.mi_idnum, 0, 0, SL_ERROR | SL_WARN,
870 			    "Possibly corrupt ESP packet.");
871 			esp1dbg(("padlen (%d) is greater than:\n", padlen));
872 			esp1dbg(("pkt len(%u) - ip hdr - esp hdr - ivlen(%d)"
873 			    " = %u.\n", (unsigned)(ntohs(ip6h->ip6_plen)
874 				+ sizeof (ip6_t)), ivlen,
875 			    (unsigned)(ntohs(ip6h->ip6_plen) - 2 -
876 				sizeof (esph_t) - ivlen)));
877 			*counter = &ipdrops_esp_bad_padlen;
878 			return (B_FALSE);
879 		}
880 
881 
882 		/*
883 		 * Fix the rest of the header.  The explicit - 2 is for the
884 		 * padding length and the next-header bytes.  IPv6 is nice,
885 		 * because there's no hdr checksum!
886 		 */
887 		ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) - padlen -
888 		    2 - sizeof (esph_t) - ivlen);
889 	}
890 
891 	if (ipsecesp_padding_check > 0 &&
892 		padlen != lastpad && padlen != 0) {
893 		ipsec_rl_strlog(info.mi_idnum, 0, 0, SL_ERROR | SL_WARN,
894 		    "Possibly corrupt ESP packet.");
895 		esp1dbg(("lastpad (%d) not equal to padlen (%d):\n",
896 		    lastpad, padlen));
897 		ESP_BUMP_STAT(bad_padding);
898 		*counter = &ipdrops_esp_bad_padding;
899 		return (B_FALSE);
900 	}
901 
902 	if (ipsecesp_padding_check > 1) {
903 		uint8_t *last = (uint8_t *)(scratch->b_wptr - 3);
904 		uint8_t lastval = *last;
905 
906 		/*
907 		 * this assert may have to become an if
908 		 * and a pullup if we start accepting
909 		 * multi-dblk mblks. Any packet here will
910 		 * have been pulled up in esp_inbound.
911 		 */
912 		ASSERT(MBLKL(scratch) >= lastval + 3);
913 
914 		while (lastval != 0) {
915 			if (lastval != *last) {
916 				ipsec_rl_strlog(info.mi_idnum, 0, 0,
917 				    SL_ERROR | SL_WARN,
918 				    "Possibly corrupt ESP packet.");
919 				esp1dbg(("padding not in correct"
920 				    " format:\n"));
921 				ESP_BUMP_STAT(bad_padding);
922 				*counter = &ipdrops_esp_bad_padding;
923 				return (B_FALSE);
924 			}
925 			lastval--; last--;
926 		}
927 	}
928 
929 	/* Trim off the padding. */
930 	ASSERT(data_mp->b_cont == NULL);
931 	data_mp->b_wptr -= (padlen + 2);
932 
933 	/*
934 	 * Remove the ESP header.
935 	 *
936 	 * The above assertions about data_mp's size will make this work.
937 	 *
938 	 * XXX  Question:  If I send up and get back a contiguous mblk,
939 	 * would it be quicker to bcopy over, or keep doing the dupb stuff?
940 	 * I go with copying for now.
941 	 */
942 
943 	if (IS_P2ALIGNED(data_mp->b_rptr, sizeof (uint32_t)) &&
944 	    IS_P2ALIGNED(ivlen, sizeof (uint32_t))) {
945 		uint8_t *start = data_mp->b_rptr;
946 		uint32_t *src, *dst;
947 
948 		src = (uint32_t *)(start + divpoint);
949 		dst = (uint32_t *)(start + divpoint + sizeof (esph_t) + ivlen);
950 
951 		ASSERT(IS_P2ALIGNED(dst, sizeof (uint32_t)) &&
952 		    IS_P2ALIGNED(src, sizeof (uint32_t)));
953 
954 		do {
955 			src--;
956 			dst--;
957 			*dst = *src;
958 		} while (src != (uint32_t *)start);
959 
960 		data_mp->b_rptr = (uchar_t *)dst;
961 	} else {
962 		uint8_t *start = data_mp->b_rptr;
963 		uint8_t *src, *dst;
964 
965 		src = start + divpoint;
966 		dst = src + sizeof (esph_t) + ivlen;
967 
968 		do {
969 			src--;
970 			dst--;
971 			*dst = *src;
972 		} while (src != start);
973 
974 		data_mp->b_rptr = dst;
975 	}
976 
977 	esp2dbg(("data_mp after inbound ESP adjustment:\n"));
978 	esp2dbg((dump_msg(data_mp)));
979 
980 	return (B_TRUE);
981 }
982 
983 /*
984  * Updating use times can be tricky business if the ipsa_haspeer flag is
985  * set.  This function is called once in an SA's lifetime.
986  *
987  * Caller has to REFRELE "assoc" which is passed in.  This function has
988  * to REFRELE any peer SA that is obtained.
989  */
990 static void
991 esp_set_usetime(ipsa_t *assoc, boolean_t inbound)
992 {
993 	ipsa_t *inassoc, *outassoc;
994 	isaf_t *bucket;
995 	sadb_t *sp;
996 	int outhash;
997 	boolean_t isv6;
998 
999 	/* No peer?  No problem! */
1000 	if (!assoc->ipsa_haspeer) {
1001 		sadb_set_usetime(assoc);
1002 		return;
1003 	}
1004 
1005 	/*
1006 	 * Otherwise, we want to grab both the original assoc and its peer.
1007 	 * There might be a race for this, but if it's a real race, the times
1008 	 * will be out-of-synch by at most a second, and since our time
1009 	 * granularity is a second, this won't be a problem.
1010 	 *
1011 	 * If we need tight synchronization on the peer SA, then we need to
1012 	 * reconsider.
1013 	 */
1014 
1015 	/* Use address length to select IPv6/IPv4 */
1016 	isv6 = (assoc->ipsa_addrfam == AF_INET6);
1017 	sp = isv6 ? &esp_sadb.s_v6 : &esp_sadb.s_v4;
1018 
1019 	if (inbound) {
1020 		inassoc = assoc;
1021 		if (isv6) {
1022 			outhash = OUTBOUND_HASH_V6(sp, *((in6_addr_t *)
1023 			    &inassoc->ipsa_dstaddr));
1024 		} else {
1025 			outhash = OUTBOUND_HASH_V4(sp, *((ipaddr_t *)
1026 				&inassoc->ipsa_dstaddr));
1027 		}
1028 		bucket = &sp->sdb_of[outhash];
1029 		mutex_enter(&bucket->isaf_lock);
1030 		outassoc = ipsec_getassocbyspi(bucket, inassoc->ipsa_spi,
1031 		    inassoc->ipsa_srcaddr, inassoc->ipsa_dstaddr,
1032 		    inassoc->ipsa_addrfam);
1033 		mutex_exit(&bucket->isaf_lock);
1034 		if (outassoc == NULL) {
1035 			/* Q: Do we wish to set haspeer == B_FALSE? */
1036 			esp0dbg(("esp_set_usetime: "
1037 			    "can't find peer for inbound.\n"));
1038 			sadb_set_usetime(inassoc);
1039 			return;
1040 		}
1041 	} else {
1042 		outassoc = assoc;
1043 		bucket = INBOUND_BUCKET(sp, outassoc->ipsa_spi);
1044 		mutex_enter(&bucket->isaf_lock);
1045 		inassoc = ipsec_getassocbyspi(bucket, outassoc->ipsa_spi,
1046 		    outassoc->ipsa_srcaddr, outassoc->ipsa_dstaddr,
1047 		    outassoc->ipsa_addrfam);
1048 		mutex_exit(&bucket->isaf_lock);
1049 		if (inassoc == NULL) {
1050 			/* Q: Do we wish to set haspeer == B_FALSE? */
1051 			esp0dbg(("esp_set_usetime: "
1052 			    "can't find peer for outbound.\n"));
1053 			sadb_set_usetime(outassoc);
1054 			return;
1055 		}
1056 	}
1057 
1058 	/* Update usetime on both. */
1059 	sadb_set_usetime(inassoc);
1060 	sadb_set_usetime(outassoc);
1061 
1062 	/*
1063 	 * REFRELE any peer SA.
1064 	 *
1065 	 * Because of the multi-line macro nature of IPSA_REFRELE, keep
1066 	 * them in { }.
1067 	 */
1068 	if (inbound) {
1069 		IPSA_REFRELE(outassoc);
1070 	} else {
1071 		IPSA_REFRELE(inassoc);
1072 	}
1073 }
1074 
1075 /*
1076  * Handle ESP inbound data for IPv4 and IPv6.
1077  * Returns an ipsec_status_t; on failure it returns IPSEC_STATUS_FAILED and
1078  * frees the mblk chain ipsec_in_mp.
1079  */
1080 ipsec_status_t
1081 esp_inbound(mblk_t *ipsec_in_mp, void *arg)
1082 {
1083 	mblk_t *data_mp = ipsec_in_mp->b_cont;
1084 	ipsec_in_t *ii = (ipsec_in_t *)ipsec_in_mp->b_rptr;
1085 	esph_t *esph = (esph_t *)arg;
1086 	ipsa_t *ipsa = ii->ipsec_in_esp_sa;
1087 
1088 	if (ipsa->ipsa_usetime == 0)
1089 		esp_set_usetime(ipsa, B_TRUE);
1090 
1091 	/*
1092 	 * We may wish to check replay in-range-only here as an optimization.
1093 	 * Include the reality check of ipsa->ipsa_replay >
1094 	 * ipsa->ipsa_replay_wsize for times when it's the first N packets,
1095 	 * where N == ipsa->ipsa_replay_wsize.
1096 	 *
1097 	 * Another check that may come here later is the "collision" check.
1098 	 * If legitimate packets flow quickly enough, this won't be a problem,
1099 	 * but collisions may cause authentication algorithm crunching to
1100 	 * take place when it doesn't need to.
1101 	 */
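	/*
	 * (sadb_replay_peek() gives a quick in-range check before we spend
	 * cycles on crypto; the full replay-window update is done by
	 * sadb_replay_check() in esp_in_done(), after the ICV has been
	 * verified.)
	 */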
1102 	if (!sadb_replay_peek(ipsa, esph->esph_replay)) {
1103 		ESP_BUMP_STAT(replay_early_failures);
1104 		IP_ESP_BUMP_STAT(in_discards);
1105 		/*
1106 		 * TODO: Extract inbound interface from the IPSEC_IN
1107 		 * message's ii->ipsec_in_rill_index.
1108 		 */
1109 		ip_drop_packet(ipsec_in_mp, B_TRUE, NULL, NULL,
1110 		    &ipdrops_esp_early_replay, &esp_dropper);
1111 		return (IPSEC_STATUS_FAILED);
1112 	}
1113 
1114 	/*
1115 	 * Has this packet already been processed by a hardware
1116 	 * IPsec accelerator?
1117 	 */
1118 	if (ii->ipsec_in_accelerated) {
1119 		ipsec_status_t rv;
1120 		esp3dbg(("esp_inbound: pkt processed by ill=%d isv6=%d\n",
1121 		    ii->ipsec_in_ill_index, !ii->ipsec_in_v4));
1122 		rv = esp_inbound_accelerated(ipsec_in_mp,
1123 		    data_mp, ii->ipsec_in_v4, ipsa);
1124 		return (rv);
1125 	}
1126 	ESP_BUMP_STAT(noaccel);
1127 
1128 	/*
1129 	 * Adjust the IP header's payload length to reflect the removal
1130 	 * of the ICV.
1131 	 */
1132 	if (!ii->ipsec_in_v4) {
1133 		ip6_t *ip6h = (ip6_t *)data_mp->b_rptr;
1134 		ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) -
1135 		    ipsa->ipsa_mac_len);
1136 	} else {
1137 		ipha_t *ipha = (ipha_t *)data_mp->b_rptr;
1138 		ipha->ipha_length = htons(ntohs(ipha->ipha_length) -
1139 		    ipsa->ipsa_mac_len);
1140 	}
1141 
1142 	/* submit the request to the crypto framework */
1143 	return (esp_submit_req_inbound(ipsec_in_mp, ipsa,
1144 	    (uint8_t *)esph - data_mp->b_rptr));
1145 }
1146 
1147 /*
1148  * Perform the really difficult work of inserting the proposed situation.
1149  * Called while holding the algorithm lock.
1150  */
1151 static void
1152 esp_insert_prop(sadb_prop_t *prop, ipsacq_t *acqrec, uint_t combs)
1153 {
1154 	sadb_comb_t *comb = (sadb_comb_t *)(prop + 1);
1155 	ipsec_out_t *io;
1156 	ipsec_action_t *ap;
1157 	ipsec_prot_t *prot;
1158 
1159 	ASSERT(MUTEX_HELD(&alg_lock));
1160 	io = (ipsec_out_t *)acqrec->ipsacq_mp->b_rptr;
1161 	ASSERT(io->ipsec_out_type == IPSEC_OUT);
1162 
1163 	prop->sadb_prop_exttype = SADB_EXT_PROPOSAL;
1164 	prop->sadb_prop_len = SADB_8TO64(sizeof (sadb_prop_t));
1165 	*(uint32_t *)(&prop->sadb_prop_replay) = 0;	/* Quick zero-out! */
1166 
1167 	prop->sadb_prop_replay = ipsecesp_replay_size;
1168 
1169 	/*
1170 	 * Based upon algorithm properties, and what-not, prioritize
1171 	 * a proposal.  If the IPSEC_OUT message has an algorithm specified,
1172 	 * use it first and foremost.
1173 	 *
1174 	 * For each action in policy list
1175 	 *   Add combination.  If I've hit limit, return.
1176 	 */
1177 
1178 	for (ap = acqrec->ipsacq_act; ap != NULL;
1179 	    ap = ap->ipa_next) {
1180 		ipsec_alginfo_t *ealg = NULL;
1181 		ipsec_alginfo_t *aalg = NULL;
1182 
1183 		if (ap->ipa_act.ipa_type != IPSEC_POLICY_APPLY)
1184 			continue;
1185 
1186 		prot = &ap->ipa_act.ipa_apply;
1187 
1188 		if (!(prot->ipp_use_esp))
1189 			continue;
1190 
1191 		if (prot->ipp_esp_auth_alg != 0) {
1192 			aalg = ipsec_alglists[IPSEC_ALG_AUTH]
1193 			    [prot->ipp_esp_auth_alg];
1194 			if (aalg == NULL || !ALG_VALID(aalg))
1195 				continue;
1196 		}
1197 
1198 		ASSERT(prot->ipp_encr_alg > 0);
1199 		ealg = ipsec_alglists[IPSEC_ALG_ENCR][prot->ipp_encr_alg];
1200 		if (ealg == NULL || !ALG_VALID(ealg))
1201 			continue;
1202 
1203 		comb->sadb_comb_flags = 0;
1204 		comb->sadb_comb_reserved = 0;
1205 		comb->sadb_comb_encrypt = ealg->alg_id;
1206 		comb->sadb_comb_encrypt_minbits =
1207 		    MAX(prot->ipp_espe_minbits, ealg->alg_ef_minbits);
1208 		comb->sadb_comb_encrypt_maxbits =
1209 		    MIN(prot->ipp_espe_maxbits, ealg->alg_ef_maxbits);
1210 		if (aalg == NULL) {
1211 			comb->sadb_comb_auth = 0;
1212 			comb->sadb_comb_auth_minbits = 0;
1213 			comb->sadb_comb_auth_maxbits = 0;
1214 		} else {
1215 			comb->sadb_comb_auth = aalg->alg_id;
1216 			comb->sadb_comb_auth_minbits =
1217 			    MAX(prot->ipp_espa_minbits, aalg->alg_ef_minbits);
1218 			comb->sadb_comb_auth_maxbits =
1219 			    MIN(prot->ipp_espa_maxbits, aalg->alg_ef_maxbits);
1220 		}
1221 
1222 		/*
1223 		 * The following may be based on algorithm
1224 		 * properties, but in the meantime, we just pick
1225 		 * some good, sensible numbers.  Key mgmt. can
1226 		 * (and perhaps should) be the place to finalize
1227 		 * such decisions.
1228 		 */
1229 
1230 		/*
1231 		 * No limits on allocations, since we really don't
1232 		 * support that concept currently.
1233 		 */
1234 		comb->sadb_comb_soft_allocations = 0;
1235 		comb->sadb_comb_hard_allocations = 0;
1236 
1237 		/*
1238 		 * These may want to come from the policy rule.
1239 		 */
1240 		comb->sadb_comb_soft_bytes = ipsecesp_default_soft_bytes;
1241 		comb->sadb_comb_hard_bytes = ipsecesp_default_hard_bytes;
1242 		comb->sadb_comb_soft_addtime = ipsecesp_default_soft_addtime;
1243 		comb->sadb_comb_hard_addtime = ipsecesp_default_hard_addtime;
1244 		comb->sadb_comb_soft_usetime = ipsecesp_default_soft_usetime;
1245 		comb->sadb_comb_hard_usetime = ipsecesp_default_hard_usetime;
1246 
1247 		prop->sadb_prop_len += SADB_8TO64(sizeof (*comb));
1248 		if (--combs == 0)
1249 			break;	/* out of space.. */
1250 		comb++;
1251 	}
1252 }
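
/*
 * The caller sizes the proposal for ipsec_nalgs[IPSEC_ALG_AUTH] *
 * ipsec_nalgs[IPSEC_ALG_ENCR] combinations, so, for example, three
 * registered auth algorithms and two encryption algorithms leave room for
 * six sadb_comb_t entries; combinations whose algorithms fail ALG_VALID()
 * are simply skipped above.
 */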
1253 
1254 /*
1255  * Prepare and actually send the SADB_ACQUIRE message to PF_KEY.
1256  */
1257 static void
1258 esp_send_acquire(ipsacq_t *acqrec, mblk_t *extended)
1259 {
1260 	mblk_t *pfkeymp, *msgmp;
1261 	uint_t allocsize, combs;
1262 	sadb_msg_t *samsg;
1263 	sadb_prop_t *prop;
1264 	uint8_t *cur, *end;
1265 
1266 	ESP_BUMP_STAT(acquire_requests);
1267 
1268 	ASSERT(MUTEX_HELD(&acqrec->ipsacq_lock));
1269 
1270 	pfkeymp = sadb_keysock_out(0);
1271 	if (pfkeymp == NULL) {
1272 		esp0dbg(("esp_send_acquire: 1st allocb() failed.\n"));
1273 		/* Just bail. */
1274 		goto done;
1275 	}
1276 
1277 	/*
1278 	 * First, allocate a basic ACQUIRE message.  Beyond that, certificate
1279 	 * IDs, proxy addresses, etc. would need additional room (see below).
1280 	 */
1281 	allocsize = sizeof (sadb_msg_t) + sizeof (sadb_address_t) +
1282 	    sizeof (sadb_address_t) + sizeof (sadb_prop_t);
1283 
1284 	switch (acqrec->ipsacq_addrfam) {
1285 	case AF_INET:
1286 		allocsize += 2 * sizeof (struct sockaddr_in);
1287 		break;
1288 	case AF_INET6:
1289 		allocsize += 2 * sizeof (struct sockaddr_in6);
1290 		break;
1291 	}
1292 
1293 	mutex_enter(&alg_lock);
1294 
1295 	combs = ipsec_nalgs[IPSEC_ALG_AUTH] * ipsec_nalgs[IPSEC_ALG_ENCR];
1296 
1297 	allocsize += combs * sizeof (sadb_comb_t);
1298 
1299 	/*
1300 	 * XXX If there are:
1301 	 *	certificate IDs
1302 	 *	proxy address
1303 	 *	<Others>
1304 	 * add additional allocation size.
1305 	 */
1306 
1307 	msgmp = allocb(allocsize, BPRI_HI);
1308 	if (msgmp == NULL) {
1309 		esp0dbg(("esp_send_acquire: 2nd allocb() failed.\n"));
1310 		/* Just bail. */
1311 		freemsg(pfkeymp);
1312 		pfkeymp = NULL;
1313 		goto done;
1314 	}
1315 
1316 	cur = msgmp->b_rptr;
1317 	end = cur + allocsize;
1318 	samsg = (sadb_msg_t *)cur;
1319 	pfkeymp->b_cont = msgmp;
1320 
1321 	/* Set up ACQUIRE. */
1322 	cur = sadb_setup_acquire(cur, end, acqrec);
1323 	if (cur == NULL) {
1324 		esp0dbg(("sadb_setup_acquire failed.\n"));
1325 		/* Just bail. */
1326 		freemsg(pfkeymp);
1327 		pfkeymp = NULL;
1328 		goto done;
1329 	}
1330 	samsg->sadb_msg_satype = SADB_SATYPE_ESP;
1331 
1332 	/* XXX Insert proxy address information here. */
1333 
1334 	/* XXX Insert identity information here. */
1335 
1336 	/* XXXMLS Insert sensitivity information here. */
1337 
1338 	/* Insert proposal here. */
1339 
1340 	prop = (sadb_prop_t *)(((uint64_t *)samsg) + samsg->sadb_msg_len);
1341 	esp_insert_prop(prop, acqrec, combs);
1342 	samsg->sadb_msg_len += prop->sadb_prop_len;
1343 	msgmp->b_wptr += SADB_64TO8(samsg->sadb_msg_len);
1344 
1345 done:
1346 	mutex_exit(&alg_lock);
1347 
1348 	/*
1349 	 * Must mutex_exit() before sending PF_KEY message up, in
1350 	 * order to avoid recursive mutex_enter() if there are no registered
1351 	 * listeners.
1352 	 *
1353 	 * Once I've sent the message, I'm cool anyway.
1354 	 */
1355 	mutex_exit(&acqrec->ipsacq_lock);
1356 	if (esp_pfkey_q != NULL && pfkeymp != NULL) {
1357 		if (extended != NULL) {
1358 			putnext(esp_pfkey_q, extended);
1359 		}
1360 		putnext(esp_pfkey_q, pfkeymp);
1361 		return;
1362 	}
1363 	/* XXX freemsg() works for extended == NULL. */
1364 	freemsg(extended);
1365 	freemsg(pfkeymp);
1366 }
1367 
1368 /*
1369  * Handle the SADB_GETSPI message.  Create a larval SA.
1370  */
1371 static void
1372 esp_getspi(mblk_t *mp, keysock_in_t *ksi)
1373 {
1374 	ipsa_t *newbie, *target;
1375 	isaf_t *outbound, *inbound;
1376 	int rc, diagnostic;
1377 	sadb_sa_t *assoc;
1378 	keysock_out_t *kso;
1379 	uint32_t newspi;
1380 
1381 	/*
1382 	 * Randomly generate a proposed SPI value
1383 	 */
1384 	(void) random_get_pseudo_bytes((uint8_t *)&newspi, sizeof (uint32_t));
1385 	newbie = sadb_getspi(ksi, newspi, &diagnostic);
1386 
1387 	if (newbie == NULL) {
1388 		sadb_pfkey_error(esp_pfkey_q, mp, ENOMEM, diagnostic,
1389 		    ksi->ks_in_serial);
1390 		return;
1391 	} else if (newbie == (ipsa_t *)-1) {
1392 		sadb_pfkey_error(esp_pfkey_q, mp, EINVAL, diagnostic,
1393 		    ksi->ks_in_serial);
1394 		return;
1395 	}
1396 
1397 	/*
1398 	 * XXX - We may randomly collide.  We really should recover from this.
1399 	 *	 Unfortunately, that could require spending way-too-much-time
1400 	 *	 in here.  For now, let the user retry.
1401 	 */
1402 
1403 	if (newbie->ipsa_addrfam == AF_INET6) {
1404 		outbound = OUTBOUND_BUCKET_V6(&esp_sadb.s_v6,
1405 		    *(uint32_t *)(newbie->ipsa_dstaddr));
1406 		inbound = INBOUND_BUCKET(&esp_sadb.s_v6, newbie->ipsa_spi);
1407 	} else {
1408 		ASSERT(newbie->ipsa_addrfam == AF_INET);
1409 		outbound = OUTBOUND_BUCKET_V4(&esp_sadb.s_v4,
1410 		    *(uint32_t *)(newbie->ipsa_dstaddr));
1411 		inbound = INBOUND_BUCKET(&esp_sadb.s_v4, newbie->ipsa_spi);
1412 	}
1413 
1414 	mutex_enter(&outbound->isaf_lock);
1415 	mutex_enter(&inbound->isaf_lock);
1416 
1417 	/*
1418 	 * Check for collisions (i.e. did sadb_getspi() return with something
1419 	 * that already exists?).
1420 	 *
1421 	 * Try outbound first.  Even though SADB_GETSPI is traditionally
1422 	 * for inbound SAs, you never know what a user might do.
1423 	 */
1424 	target = ipsec_getassocbyspi(outbound, newbie->ipsa_spi,
1425 	    newbie->ipsa_srcaddr, newbie->ipsa_dstaddr, newbie->ipsa_addrfam);
1426 	if (target == NULL) {
1427 		target = ipsec_getassocbyspi(inbound, newbie->ipsa_spi,
1428 		    newbie->ipsa_srcaddr, newbie->ipsa_dstaddr,
1429 		    newbie->ipsa_addrfam);
1430 	}
1431 
1432 	/*
1433 	 * I don't have collisions elsewhere!
1434 	 * (Nor will I because I'm still holding inbound/outbound locks.)
1435 	 */
1436 
1437 	if (target != NULL) {
1438 		rc = EEXIST;
1439 		IPSA_REFRELE(target);
1440 	} else {
1441 		/*
1442 		 * sadb_insertassoc() also checks for collisions, so
1443 		 * if there's a colliding entry, rc will be set
1444 		 * to EEXIST.
1445 		 */
1446 		rc = sadb_insertassoc(newbie, inbound);
1447 		(void) drv_getparm(TIME, &newbie->ipsa_hardexpiretime);
1448 		newbie->ipsa_hardexpiretime += ipsecesp_larval_timeout;
1449 	}
1450 
1451 	/*
1452 	 * Can exit outbound mutex.  Hold inbound until we're done
1453 	 * with newbie.
1454 	 */
1455 	mutex_exit(&outbound->isaf_lock);
1456 
1457 	if (rc != 0) {
1458 		mutex_exit(&inbound->isaf_lock);
1459 		IPSA_REFRELE(newbie);
1460 		sadb_pfkey_error(esp_pfkey_q, mp, rc, SADB_X_DIAGNOSTIC_NONE,
1461 		    ksi->ks_in_serial);
1462 		return;
1463 	}
1464 
1465 
1466 	/* Can write here because I'm still holding the bucket lock. */
1467 	newbie->ipsa_type = SADB_SATYPE_ESP;
1468 
1469 	/*
1470 	 * Construct successful return message.  We have one thing going
1471 	 * for us in PF_KEY v2.  That's the fact that
1472 	 *	sizeof (sadb_spirange_t) == sizeof (sadb_sa_t)
1473 	 */
1474 	assoc = (sadb_sa_t *)ksi->ks_in_extv[SADB_EXT_SPIRANGE];
1475 	assoc->sadb_sa_exttype = SADB_EXT_SA;
1476 	assoc->sadb_sa_spi = newbie->ipsa_spi;
1477 	*((uint64_t *)(&assoc->sadb_sa_replay)) = 0;
1478 	mutex_exit(&inbound->isaf_lock);
1479 
1480 	/* Convert KEYSOCK_IN to KEYSOCK_OUT. */
1481 	kso = (keysock_out_t *)ksi;
1482 	kso->ks_out_len = sizeof (*kso);
1483 	kso->ks_out_serial = ksi->ks_in_serial;
1484 	kso->ks_out_type = KEYSOCK_OUT;
1485 
1486 	/*
1487 	 * Can safely putnext() to esp_pfkey_q, because this is a turnaround
1488 	 * from the esp_pfkey_q.
1489 	 */
1490 	putnext(esp_pfkey_q, mp);
1491 }
1492 
1493 /*
1494  * Insert the ESP header into a packet.  Duplicate an mblk, and insert a newly
1495  * allocated mblk with the ESP header in between the two.
1496  */
1497 static boolean_t
1498 esp_insert_esp(mblk_t *mp, mblk_t *esp_mp, uint_t divpoint)
1499 {
1500 	mblk_t *split_mp = mp;
1501 	uint_t wheretodiv = divpoint;
1502 
1503 	while ((split_mp->b_wptr - split_mp->b_rptr) < wheretodiv) {
1504 		wheretodiv -= (split_mp->b_wptr - split_mp->b_rptr);
1505 		split_mp = split_mp->b_cont;
1506 		ASSERT(split_mp != NULL);
1507 	}
1508 
1509 	if (split_mp->b_wptr - split_mp->b_rptr != wheretodiv) {
1510 		mblk_t *scratch;
1511 
1512 		/* "scratch" is the 2nd half, split_mp is the first. */
1513 		scratch = dupb(split_mp);
1514 		if (scratch == NULL) {
1515 			esp1dbg(("esp_insert_esp: can't allocate scratch.\n"));
1516 			return (B_FALSE);
1517 		}
1518 		/* NOTE:  dupb() doesn't set b_cont appropriately. */
1519 		scratch->b_cont = split_mp->b_cont;
1520 		scratch->b_rptr += wheretodiv;
1521 		split_mp->b_wptr = split_mp->b_rptr + wheretodiv;
1522 		split_mp->b_cont = scratch;
1523 	}
1524 	/*
1525 	 * At this point, split_mp is exactly "wheretodiv" bytes long, and
1526 	 * holds the end of the pre-ESP part of the datagram.
1527 	 */
1528 	esp_mp->b_cont = split_mp->b_cont;
1529 	split_mp->b_cont = esp_mp;
1530 
1531 	return (B_TRUE);
1532 }
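
/*
 * Roughly, when divpoint falls in the middle of an mblk, esp_insert_esp()
 * turns
 *
 *	[ pre-ESP bytes | post-ESP bytes ] -> ...
 *
 * into
 *
 *	[ pre-ESP bytes ] -> [ ESP header ] -> [ post-ESP bytes ] -> ...
 *
 * where the first and third mblks share the original data block courtesy
 * of dupb().
 */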
1533 
1534 /*
1535  * Finish processing of an inbound ESP packet after processing by the
1536  * crypto framework.
1537  * - Remove the ESP header.
1538  * - Send packet back to IP.
1539  * If authentication was performed on the packet, this function is called
1540  * only if the authentication succeeded.
1541  * Returns an ipsec_status_t; on failure it returns IPSEC_STATUS_FAILED and
1542  * frees the mblk chain ipsec_in_mp.
1543  */
1544 static ipsec_status_t
1545 esp_in_done(mblk_t *ipsec_in_mp)
1546 {
1547 	ipsec_in_t *ii = (ipsec_in_t *)ipsec_in_mp->b_rptr;
1548 	mblk_t *data_mp;
1549 	ipsa_t *assoc;
1550 	uint_t espstart;
1551 	uint32_t ivlen = 0;
1552 	uint_t processed_len;
1553 	esph_t *esph;
1554 	kstat_named_t *counter;
1555 	boolean_t is_natt;
1556 
1557 	assoc = ii->ipsec_in_esp_sa;
1558 	ASSERT(assoc != NULL);
1559 
1560 	is_natt = ((assoc->ipsa_flags & IPSA_F_NATT) != 0);
1561 
1562 	/* get the pointer to the ESP header */
1563 	if (assoc->ipsa_encr_alg == SADB_EALG_NULL) {
1564 		/* authentication-only ESP */
1565 		espstart = ii->ipsec_in_crypto_data.cd_offset;
1566 		processed_len = ii->ipsec_in_crypto_data.cd_length;
1567 	} else {
1568 		/* encryption present */
1569 		ivlen = assoc->ipsa_iv_len;
1570 		if (assoc->ipsa_auth_alg == SADB_AALG_NONE) {
1571 			/* encryption-only ESP */
1572 			espstart = ii->ipsec_in_crypto_data.cd_offset -
1573 				sizeof (esph_t) - assoc->ipsa_iv_len;
1574 			processed_len = ii->ipsec_in_crypto_data.cd_length +
1575 				ivlen;
1576 		} else {
1577 			/* encryption with authentication */
1578 			espstart = ii->ipsec_in_crypto_dual_data.dd_offset1;
1579 			processed_len = ii->ipsec_in_crypto_dual_data.dd_len2 +
1580 			    ivlen;
1581 		}
1582 	}
1583 
1584 	data_mp = ipsec_in_mp->b_cont;
1585 	esph = (esph_t *)(data_mp->b_rptr + espstart);
1586 
1587 	if (assoc->ipsa_auth_alg != IPSA_AALG_NONE) {
1588 		/* authentication passed if we reach this point */
1589 		ESP_BUMP_STAT(good_auth);
1590 		data_mp->b_wptr -= assoc->ipsa_mac_len;
1591 
1592 		/*
1593 		 * Check replay window here!
1594 		 * For right now, assume keysock will set the replay window
1595 		 * size to zero for SAs that have an unspecified sender.
1596 		 * This may change...
1597 		 */
1598 
1599 		if (!sadb_replay_check(assoc, esph->esph_replay)) {
1600 			/*
1601 			 * Log the event. As of now we print out an event.
1602 			 * Do not print the replay failure number, or else
1603 			 * syslog cannot collate the error messages.  Printing
1604 			 * the replay number that failed opens a denial-of-
1605 			 * service attack.
1606 			 */
1607 			ipsec_assocfailure(info.mi_idnum, 0, 0,
1608 			    SL_ERROR | SL_WARN,
1609 			    "Replay failed for ESP spi 0x%x, dst %s.\n",
1610 			    assoc->ipsa_spi, assoc->ipsa_dstaddr,
1611 			    assoc->ipsa_addrfam);
1612 			ESP_BUMP_STAT(replay_failures);
1613 			counter = &ipdrops_esp_replay;
1614 			goto drop_and_bail;
1615 		}
1616 	}
1617 
1618 	if (!esp_age_bytes(assoc, processed_len, B_TRUE)) {
1619 		/* The ipsa has hit hard expiration, LOG and AUDIT. */
1620 		ipsec_assocfailure(info.mi_idnum, 0, 0,
1621 		    SL_ERROR | SL_WARN,
1622 		    "ESP association 0x%x, dst %s had bytes expire.\n",
1623 		    assoc->ipsa_spi, assoc->ipsa_dstaddr, assoc->ipsa_addrfam);
1624 		ESP_BUMP_STAT(bytes_expired);
1625 		counter = &ipdrops_esp_bytes_expire;
1626 		goto drop_and_bail;
1627 	}
1628 
1629 	/*
1630 	 * Remove ESP header and padding from packet.  I hope the compiler
1631 	 * spews "branch, predict taken" code for this.
1632 	 */
1633 
1634 	if (esp_strip_header(data_mp, ii->ipsec_in_v4, ivlen, &counter)) {
1635 		if (is_natt)
1636 			return (esp_fix_natt_checksums(data_mp, assoc));
1637 		return (IPSEC_STATUS_SUCCESS);
1638 	}
1639 
1640 	esp1dbg(("esp_in_done: esp_strip_header() failed\n"));
1641 drop_and_bail:
1642 	IP_ESP_BUMP_STAT(in_discards);
1643 	/*
1644 	 * TODO: Extract inbound interface from the IPSEC_IN message's
1645 	 * ii->ipsec_in_rill_index.
1646 	 */
1647 	ip_drop_packet(ipsec_in_mp, B_TRUE, NULL, NULL, counter, &esp_dropper);
1648 	return (IPSEC_STATUS_FAILED);
1649 }
1650 
1651 /*
1652  * Called upon failing the inbound ICV check. The message passed as
1653  * argument is freed.
1654  */
1655 static void
1656 esp_log_bad_auth(mblk_t *ipsec_in)
1657 {
1658 	ipsec_in_t *ii = (ipsec_in_t *)ipsec_in->b_rptr;
1659 	ipsa_t *assoc = ii->ipsec_in_esp_sa;
1660 
1661 	/*
1662 	 * Log the event.  Don't print to the console, in order to block a
1663 	 * potential denial-of-service attack.
1664 	 */
1665 	ESP_BUMP_STAT(bad_auth);
1666 
1667 	ipsec_assocfailure(info.mi_idnum, 0, 0, SL_ERROR | SL_WARN,
1668 	    "ESP Authentication failed for spi 0x%x, dst %s.\n",
1669 	    assoc->ipsa_spi, assoc->ipsa_dstaddr, assoc->ipsa_addrfam);
1670 
1671 	IP_ESP_BUMP_STAT(in_discards);
1672 	/*
1673 	 * TODO: Extract inbound interface from the IPSEC_IN
1674 	 * message's ii->ipsec_in_rill_index.
1675 	 */
1676 	ip_drop_packet(ipsec_in, B_TRUE, NULL, NULL, &ipdrops_esp_bad_auth,
1677 	    &esp_dropper);
1678 }
1679 
1680 
1681 /*
1682  * Invoked for outbound packets after ESP processing. If the packet
1683  * also requires AH, performs the AH SA selection and AH processing.
1684  * Returns B_TRUE if the AH processing was not needed or if it was
1685  * performed successfully. Returns B_FALSE and consumes the passed mblk
1686  * if AH processing was required but could not be performed.
1687  */
1688 static boolean_t
1689 esp_do_outbound_ah(mblk_t *ipsec_mp)
1690 {
1691 	ipsec_out_t *io = (ipsec_out_t *)ipsec_mp->b_rptr;
1692 	ipsec_status_t ipsec_rc;
1693 	ipsec_action_t *ap;
1694 
1695 	ap = io->ipsec_out_act;
1696 	if (ap == NULL) {
1697 		ipsec_policy_t *pp = io->ipsec_out_policy;
1698 		ap = pp->ipsp_act;
1699 	}
1700 
1701 	if (!ap->ipa_want_ah)
1702 		return (B_TRUE);
1703 
1704 	ASSERT(io->ipsec_out_ah_done == B_FALSE);
1705 
1706 	if (io->ipsec_out_ah_sa == NULL) {
1707 		if (!ipsec_outbound_sa(ipsec_mp, IPPROTO_AH)) {
1708 			sadb_acquire(ipsec_mp, io, B_TRUE, B_FALSE);
1709 			return (B_FALSE);
1710 		}
1711 	}
1712 	ASSERT(io->ipsec_out_ah_sa != NULL);
1713 
1714 	io->ipsec_out_ah_done = B_TRUE;
1715 	ipsec_rc = io->ipsec_out_ah_sa->ipsa_output_func(ipsec_mp);
1716 	return (ipsec_rc == IPSEC_STATUS_SUCCESS);
1717 }
1718 
1719 
1720 /*
1721  * Kernel crypto framework callback invoked after completion of async
1722  * crypto requests.
1723  */
1724 static void
1725 esp_kcf_callback(void *arg, int status)
1726 {
1727 	mblk_t *ipsec_mp = (mblk_t *)arg;
1728 	ipsec_in_t *ii = (ipsec_in_t *)ipsec_mp->b_rptr;
1729 	boolean_t is_inbound = (ii->ipsec_in_type == IPSEC_IN);
1730 
1731 	ASSERT(ipsec_mp->b_cont != NULL);
1732 
1733 	if (status == CRYPTO_SUCCESS) {
1734 		if (is_inbound) {
1735 			if (esp_in_done(ipsec_mp) != IPSEC_STATUS_SUCCESS)
1736 				return;
1737 
1738 			/* finish IPsec processing */
1739 			ip_fanout_proto_again(ipsec_mp, NULL, NULL, NULL);
1740 		} else {
1741 			/*
1742 			 * If an ICV was computed, it was stored by the
1743 			 * crypto framework at the end of the packet.
1744 			 */
1745 			ipha_t *ipha = (ipha_t *)ipsec_mp->b_cont->b_rptr;
1746 
1747 			/* do AH processing if needed */
1748 			if (!esp_do_outbound_ah(ipsec_mp))
1749 				return;
1750 
1751 			/* finish IPsec processing */
1752 			if (IPH_HDR_VERSION(ipha) == IP_VERSION) {
1753 				ip_wput_ipsec_out(NULL, ipsec_mp, ipha, NULL,
1754 				    NULL);
1755 			} else {
1756 				ip6_t *ip6h = (ip6_t *)ipha;
1757 				ip_wput_ipsec_out_v6(NULL, ipsec_mp, ip6h,
1758 				    NULL, NULL);
1759 			}
1760 		}
1761 
1762 	} else if (status == CRYPTO_INVALID_MAC) {
1763 		esp_log_bad_auth(ipsec_mp);
1764 
1765 	} else {
1766 		esp1dbg(("esp_kcf_callback: crypto failed with 0x%x\n",
1767 		    status));
1768 		ESP_BUMP_STAT(crypto_failures);
1769 		if (is_inbound)
1770 			IP_ESP_BUMP_STAT(in_discards);
1771 		else
1772 			ESP_BUMP_STAT(out_discards);
1773 		ip_drop_packet(ipsec_mp, is_inbound, NULL, NULL,
1774 		    &ipdrops_esp_crypto_failed, &esp_dropper);
1775 	}
1776 }
1777 
1778 /*
1779  * Invoked on crypto framework failure during inbound and outbound processing.
1780  */
1781 static void
1782 esp_crypto_failed(mblk_t *mp, boolean_t is_inbound, int kef_rc)
1783 {
1784 	esp1dbg(("crypto failed for %s ESP with 0x%x\n",
1785 	    is_inbound ? "inbound" : "outbound", kef_rc));
1786 	ip_drop_packet(mp, is_inbound, NULL, NULL, &ipdrops_esp_crypto_failed,
1787 	    &esp_dropper);
1788 	ESP_BUMP_STAT(crypto_failures);
1789 	if (is_inbound)
1790 		IP_ESP_BUMP_STAT(in_discards);
1791 	else
1792 		ESP_BUMP_STAT(out_discards);
1793 }
1794 
1795 #define	ESP_INIT_CALLREQ(_cr) {						\
1796 	(_cr)->cr_flag = CRYPTO_SKIP_REQID|CRYPTO_RESTRICTED;		\
1797 	(_cr)->cr_callback_arg = ipsec_mp;				\
1798 	(_cr)->cr_callback_func = esp_kcf_callback;			\
1799 }
1800 
1801 #define	ESP_INIT_CRYPTO_MAC(mac, icvlen, icvbuf) {			\
1802 	(mac)->cd_format = CRYPTO_DATA_RAW;				\
1803 	(mac)->cd_offset = 0;						\
1804 	(mac)->cd_length = icvlen;					\
1805 	(mac)->cd_raw.iov_base = (char *)icvbuf;			\
1806 	(mac)->cd_raw.iov_len = icvlen;					\
1807 }
1808 
1809 #define	ESP_INIT_CRYPTO_DATA(data, mp, off, len) {			\
1810 	if (MBLKL(mp) >= (len) + (off)) {				\
1811 		(data)->cd_format = CRYPTO_DATA_RAW;			\
1812 		(data)->cd_raw.iov_base = (char *)(mp)->b_rptr;		\
1813 		(data)->cd_raw.iov_len = MBLKL(mp);			\
1814 		(data)->cd_offset = off;				\
1815 	} else {							\
1816 		(data)->cd_format = CRYPTO_DATA_MBLK;			\
1817 		(data)->cd_mp = mp;			       		\
1818 		(data)->cd_offset = off;				\
1819 	}								\
1820 	(data)->cd_length = len;					\
1821 }
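/*
 * ESP_INIT_CRYPTO_DATA picks the crypto_data_t representation: if the
 * [off, off + len) region fits entirely within the first mblk it is
 * described as a single raw iovec (CRYPTO_DATA_RAW), which providers can
 * presumably consume more cheaply; otherwise the whole mblk chain is
 * handed over as CRYPTO_DATA_MBLK and the provider walks the chain.
 */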
1822 
1823 #define	ESP_INIT_CRYPTO_DUAL_DATA(data, mp, off1, len1, off2, len2) {	\
1824 	(data)->dd_format = CRYPTO_DATA_MBLK;				\
1825 	(data)->dd_mp = mp;						\
1826 	(data)->dd_len1 = len1;						\
1827 	(data)->dd_offset1 = off1;					\
1828 	(data)->dd_len2 = len2;						\
1829 	(data)->dd_offset2 = off2;					\
1830 }
1831 
1832 static ipsec_status_t
1833 esp_submit_req_inbound(mblk_t *ipsec_mp, ipsa_t *assoc, uint_t esph_offset)
1834 {
1835 	ipsec_in_t *ii = (ipsec_in_t *)ipsec_mp->b_rptr;
1836 	boolean_t do_auth;
1837 	uint_t auth_offset, msg_len, auth_len;
1838 	crypto_call_req_t call_req;
1839 	mblk_t *esp_mp;
1840 	int kef_rc = CRYPTO_FAILED;
1841 	uint_t icv_len = assoc->ipsa_mac_len;
1842 	crypto_ctx_template_t auth_ctx_tmpl;
1843 	boolean_t do_encr;
1844 	uint_t encr_offset, encr_len;
1845 	uint_t iv_len = assoc->ipsa_iv_len;
1846 	crypto_ctx_template_t encr_ctx_tmpl;
1847 
1848 	ASSERT(ii->ipsec_in_type == IPSEC_IN);
1849 
1850 	do_auth = assoc->ipsa_auth_alg != SADB_AALG_NONE;
1851 	do_encr = assoc->ipsa_encr_alg != SADB_EALG_NULL;
1852 
1853 	/*
1854 	 * An inbound packet is of the form:
1855 	 * IPSEC_IN -> [IP,options,ESP,IV,data,pad,ICV]
1856 	 */
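	/*
	 * A sketch of the offsets computed below, assuming (for example)
	 * an 8-byte IV and a 12-byte ICV: authentication covers everything
	 * from the ESP header up to, but not including, the trailing
	 * 12-byte ICV; decryption starts 8 bytes past the end of the ESP
	 * header (i.e. past the IV) and, in the dual encrypt+auth case,
	 * also stops short of the ICV.
	 */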
1857 	esp_mp = ipsec_mp->b_cont;
1858 	msg_len = MBLKL(esp_mp);
1859 
1860 	ESP_INIT_CALLREQ(&call_req);
1861 
1862 	if (do_auth) {
1863 		/* force asynchronous processing? */
1864 		if (ipsec_algs_exec_mode[IPSEC_ALG_AUTH] ==
1865 		    IPSEC_ALGS_EXEC_ASYNC)
1866 			call_req.cr_flag |= CRYPTO_ALWAYS_QUEUE;
1867 
1868 		/* authentication context template */
1869 		IPSEC_CTX_TMPL(assoc, ipsa_authtmpl, IPSEC_ALG_AUTH,
1870 		    auth_ctx_tmpl);
1871 
1872 		/* ICV to be verified */
1873 		ESP_INIT_CRYPTO_MAC(&ii->ipsec_in_crypto_mac,
1874 		    icv_len, esp_mp->b_wptr - icv_len);
1875 
1876 		/* authentication starts at the ESP header */
1877 		auth_offset = esph_offset;
1878 		auth_len = msg_len - auth_offset - icv_len;
1879 		if (!do_encr) {
1880 			/* authentication only */
1881 			/* initialize input data argument */
1882 			ESP_INIT_CRYPTO_DATA(&ii->ipsec_in_crypto_data,
1883 			    esp_mp, auth_offset, auth_len);
1884 
1885 			/* call the crypto framework */
1886 			kef_rc = crypto_mac_verify(&assoc->ipsa_amech,
1887 			    &ii->ipsec_in_crypto_data,
1888 			    &assoc->ipsa_kcfauthkey, auth_ctx_tmpl,
1889 			    &ii->ipsec_in_crypto_mac, &call_req);
1890 		}
1891 	}
1892 
1893 	if (do_encr) {
1894 		/* force asynchronous processing? */
1895 		if (ipsec_algs_exec_mode[IPSEC_ALG_ENCR] ==
1896 		    IPSEC_ALGS_EXEC_ASYNC)
1897 			call_req.cr_flag |= CRYPTO_ALWAYS_QUEUE;
1898 
1899 		/* encryption template */
1900 		IPSEC_CTX_TMPL(assoc, ipsa_encrtmpl, IPSEC_ALG_ENCR,
1901 		    encr_ctx_tmpl);
1902 
1903 		/* skip IV, since it is passed separately */
1904 		encr_offset = esph_offset + sizeof (esph_t) + iv_len;
1905 		encr_len = msg_len - encr_offset;
1906 
1907 		if (!do_auth) {
1908 			/* decryption only */
1909 			/* initialize input data argument */
1910 			ESP_INIT_CRYPTO_DATA(&ii->ipsec_in_crypto_data,
1911 			    esp_mp, encr_offset, encr_len);
1912 
1913 			/* specify IV */
1914 			ii->ipsec_in_crypto_data.cd_miscdata =
1915 			    (char *)esp_mp->b_rptr + sizeof (esph_t) +
1916 			    esph_offset;
1917 
1918 			/* call the crypto framework */
1919 			kef_rc = crypto_decrypt(&assoc->ipsa_emech,
1920 			    &ii->ipsec_in_crypto_data,
1921 			    &assoc->ipsa_kcfencrkey, encr_ctx_tmpl,
1922 			    NULL, &call_req);
1923 		}
1924 	}
1925 
1926 	if (do_auth && do_encr) {
1927 		/* dual operation */
1928 		/* initialize input data argument */
1929 		ESP_INIT_CRYPTO_DUAL_DATA(&ii->ipsec_in_crypto_dual_data,
1930 		    esp_mp, auth_offset, auth_len,
1931 		    encr_offset, encr_len - icv_len);
1932 
1933 		/* specify IV */
1934 		ii->ipsec_in_crypto_dual_data.dd_miscdata =
1935 		    (char *)esp_mp->b_rptr + sizeof (esph_t) + esph_offset;
1936 
1937 		/* call the framework */
1938 		kef_rc = crypto_mac_verify_decrypt(&assoc->ipsa_amech,
1939 		    &assoc->ipsa_emech, &ii->ipsec_in_crypto_dual_data,
1940 		    &assoc->ipsa_kcfauthkey, &assoc->ipsa_kcfencrkey,
1941 		    auth_ctx_tmpl, encr_ctx_tmpl, &ii->ipsec_in_crypto_mac,
1942 		    NULL, &call_req);
1943 	}
1944 
1945 	switch (kef_rc) {
1946 	case CRYPTO_SUCCESS:
1947 		ESP_BUMP_STAT(crypto_sync);
1948 		return (esp_in_done(ipsec_mp));
1949 	case CRYPTO_QUEUED:
1950 		/* esp_kcf_callback() will be invoked on completion */
1951 		ESP_BUMP_STAT(crypto_async);
1952 		return (IPSEC_STATUS_PENDING);
1953 	case CRYPTO_INVALID_MAC:
1954 		ESP_BUMP_STAT(crypto_sync);
1955 		esp_log_bad_auth(ipsec_mp);
1956 		return (IPSEC_STATUS_FAILED);
1957 	}
1958 
1959 	esp_crypto_failed(ipsec_mp, B_TRUE, kef_rc);
1960 	return (IPSEC_STATUS_FAILED);
1961 }
1962 
1963 static ipsec_status_t
1964 esp_submit_req_outbound(mblk_t *ipsec_mp, ipsa_t *assoc, uchar_t *icv_buf,
1965     uint_t payload_len)
1966 {
1967 	ipsec_out_t *io = (ipsec_out_t *)ipsec_mp->b_rptr;
1968 	uint_t auth_len;
1969 	crypto_call_req_t call_req;
1970 	mblk_t *esp_mp;
1971 	int kef_rc = CRYPTO_FAILED;
1972 	uint_t icv_len = assoc->ipsa_mac_len;
1973 	crypto_ctx_template_t auth_ctx_tmpl;
1974 	boolean_t do_auth;
1975 	boolean_t do_encr;
1976 	uint_t iv_len = assoc->ipsa_iv_len;
1977 	crypto_ctx_template_t encr_ctx_tmpl;
1978 	boolean_t is_natt = ((assoc->ipsa_flags & IPSA_F_NATT) != 0);
1979 	size_t esph_offset = (is_natt ? UDPH_SIZE : 0);
1980 
1981 	esp3dbg(("esp_submit_req_outbound:%s", is_natt ? "natt" : "not natt"));
1982 
1983 	ASSERT(io->ipsec_out_type == IPSEC_OUT);
1984 
1985 	do_encr = assoc->ipsa_encr_alg != SADB_EALG_NULL;
1986 	do_auth = assoc->ipsa_auth_alg != SADB_AALG_NONE;
1987 
1988 	/*
1989 	 * Outbound IPsec packets are of the form:
1990 	 * IPSEC_OUT -> [IP,options] -> [ESP,IV] -> [data] -> [pad,ICV]
1991 	 * unless it's NATT, then it's
1992 	 * IPSEC_OUT -> [IP,options] -> [udp][ESP,IV] -> [data] -> [pad,ICV]
1993 	 * Get a pointer to the mblk containing the ESP header.
1994 	 */
1995 	ASSERT(ipsec_mp->b_cont != NULL && ipsec_mp->b_cont->b_cont != NULL);
1996 	esp_mp = ipsec_mp->b_cont->b_cont;
1997 
1998 	ESP_INIT_CALLREQ(&call_req);
1999 
2000 	if (do_auth) {
2001 		/* force asynchronous processing? */
2002 		if (ipsec_algs_exec_mode[IPSEC_ALG_AUTH] ==
2003 		    IPSEC_ALGS_EXEC_ASYNC)
2004 			call_req.cr_flag |= CRYPTO_ALWAYS_QUEUE;
2005 
2006 		/* authentication context template */
2007 		IPSEC_CTX_TMPL(assoc, ipsa_authtmpl, IPSEC_ALG_AUTH,
2008 		    auth_ctx_tmpl);
2009 
2010 		/* where to store the computed mac */
2011 		ESP_INIT_CRYPTO_MAC(&io->ipsec_out_crypto_mac,
2012 		    icv_len, icv_buf);
2013 
2014 		/* authentication starts at the ESP header */
2015 		auth_len = payload_len + iv_len + sizeof (esph_t);
2016 		if (!do_encr) {
2017 			/* authentication only */
2018 			/* initialize input data argument */
2019 			ESP_INIT_CRYPTO_DATA(&io->ipsec_out_crypto_data,
2020 			    esp_mp, esph_offset, auth_len);
2021 
2022 			/* call the crypto framework */
2023 			kef_rc = crypto_mac(&assoc->ipsa_amech,
2024 			    &io->ipsec_out_crypto_data,
2025 			    &assoc->ipsa_kcfauthkey, auth_ctx_tmpl,
2026 			    &io->ipsec_out_crypto_mac, &call_req);
2027 		}
2028 	}
2029 
2030 	if (do_encr) {
2031 		/* force asynchronous processing? */
2032 		if (ipsec_algs_exec_mode[IPSEC_ALG_ENCR] ==
2033 		    IPSEC_ALGS_EXEC_ASYNC)
2034 			call_req.cr_flag |= CRYPTO_ALWAYS_QUEUE;
2035 
2036 		/* encryption context template */
2037 		IPSEC_CTX_TMPL(assoc, ipsa_encrtmpl, IPSEC_ALG_ENCR,
2038 		    encr_ctx_tmpl);
2039 
2040 		if (!do_auth) {
2041 			/* encryption only, skip mblk that contains ESP hdr */
2042 			/* initialize input data argument */
2043 			ESP_INIT_CRYPTO_DATA(&io->ipsec_out_crypto_data,
2044 			    esp_mp->b_cont, 0, payload_len);
2045 
2046 			/* specify IV */
2047 			io->ipsec_out_crypto_data.cd_miscdata =
2048 			    (char *)esp_mp->b_rptr + sizeof (esph_t) +
2049 			    esph_offset;
2050 
2051 			/* call the crypto framework */
2052 			kef_rc = crypto_encrypt(&assoc->ipsa_emech,
2053 			    &io->ipsec_out_crypto_data,
2054 			    &assoc->ipsa_kcfencrkey, encr_ctx_tmpl,
2055 			    NULL, &call_req);
2056 		}
2057 	}
2058 
2059 	if (do_auth && do_encr) {
2060 		/*
2061 		 * Encryption and authentication:
2062 		 * Pass the pointer to the mblk chain starting at the ESP
2063 		 * header to the framework. Skip the ESP header mblk
2064 		 * for encryption, which is reflected by an encryption
2065 		 * offset equal to the length of that mblk. Start
2066 		 * the authentication at the ESP header, i.e. use an
2067 		 * authentication offset of esph_offset (zero unless NAT-T).
2068 		 */
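		/*
		 * Concretely (a sketch): dd_offset1/dd_len1 describe the
		 * region to encrypt, starting MBLKL(esp_mp) bytes in (just
		 * past the ESP header and IV) and running for payload_len
		 * bytes, while dd_offset2/dd_len2 describe the region to
		 * authenticate, starting at esph_offset and running for
		 * auth_len bytes.
		 */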
2069 		ESP_INIT_CRYPTO_DUAL_DATA(&io->ipsec_out_crypto_dual_data,
2070 		    esp_mp, MBLKL(esp_mp), payload_len, esph_offset, auth_len);
2071 
2072 		/* specify IV */
2073 		io->ipsec_out_crypto_dual_data.dd_miscdata =
2074 		    (char *)esp_mp->b_rptr + sizeof (esph_t) + esph_offset;
2075 
2076 		/* call the framework */
2077 		kef_rc = crypto_encrypt_mac(&assoc->ipsa_emech,
2078 		    &assoc->ipsa_amech, NULL,
2079 		    &assoc->ipsa_kcfencrkey, &assoc->ipsa_kcfauthkey,
2080 		    encr_ctx_tmpl, auth_ctx_tmpl,
2081 		    &io->ipsec_out_crypto_dual_data,
2082 		    &io->ipsec_out_crypto_mac, &call_req);
2083 	}
2084 
2085 	switch (kef_rc) {
2086 	case CRYPTO_SUCCESS:
2087 		ESP_BUMP_STAT(crypto_sync);
2088 		return (IPSEC_STATUS_SUCCESS);
2089 	case CRYPTO_QUEUED:
2090 		/* esp_kcf_callback() will be invoked on completion */
2091 		ESP_BUMP_STAT(crypto_async);
2092 		return (IPSEC_STATUS_PENDING);
2093 	}
2094 
2095 	esp_crypto_failed(ipsec_mp, B_FALSE, kef_rc);
2096 	return (IPSEC_STATUS_FAILED);
2097 }
2098 
2099 /*
2100  * Handle outbound IPsec processing for IPv4 and IPv6.
2101  * Returns IPSEC_STATUS_SUCCESS or IPSEC_STATUS_PENDING on success; on
2102  * failure returns IPSEC_STATUS_FAILED and frees the mblk chain mp.
2103  */
2104 static ipsec_status_t
2105 esp_outbound(mblk_t *mp)
2106 {
2107 	mblk_t *ipsec_out_mp, *data_mp, *espmp, *tailmp;
2108 	ipsec_out_t *io;
2109 	ipha_t *ipha;
2110 	ip6_t *ip6h;
2111 	esph_t *esph;
2112 	uint_t af;
2113 	uint8_t *nhp;
2114 	uintptr_t divpoint, datalen, adj, padlen, i, alloclen;
2115 	uintptr_t esplen = sizeof (esph_t);
2116 	uint8_t protocol;
2117 	ipsa_t *assoc;
2118 	uint_t iv_len = 0, mac_len = 0;
2119 	uchar_t *icv_buf;
2120 	udpha_t *udpha;
2121 	boolean_t is_natt = B_FALSE;
2122 
2123 	ESP_BUMP_STAT(out_requests);
2124 
2125 	ipsec_out_mp = mp;
2126 	data_mp = ipsec_out_mp->b_cont;
2127 
2128 	/*
2129 	 * <sigh> We have to copy the message here, because TCP (for example)
2130 	 * keeps a dupb() of the message lying around for retransmission.
2131 	 * Since ESP changes the whole of the datagram, we have to create our
2132 	 * own copy lest we clobber TCP's data.  Since we have to copy anyway,
2133 	 * we might as well make use of msgpullup() and get the mblk into one
2134 	 * contiguous piece!
2135 	 */
2136 	ipsec_out_mp->b_cont = msgpullup(data_mp, -1);
2137 	if (ipsec_out_mp->b_cont == NULL) {
2138 		esp0dbg(("esp_outbound: msgpullup() failed, "
2139 		    "dropping packet.\n"));
2140 		ipsec_out_mp->b_cont = data_mp;
2141 		/*
2142 		 * TODO:  Find the outbound IRE for this packet and
2143 		 * pass it to ip_drop_packet().
2144 		 */
2145 		ip_drop_packet(ipsec_out_mp, B_FALSE, NULL, NULL,
2146 		    &ipdrops_esp_nomem, &esp_dropper);
2147 		return (IPSEC_STATUS_FAILED);
2148 	} else {
2149 		freemsg(data_mp);
2150 		data_mp = ipsec_out_mp->b_cont;
2151 	}
2152 
2153 	io = (ipsec_out_t *)ipsec_out_mp->b_rptr;
2154 
2155 	/*
2156 	 * Reality check....
2157 	 */
2158 
2159 	ipha = (ipha_t *)data_mp->b_rptr;  /* So we can call esp_acquire(). */
2160 
2161 	if (io->ipsec_out_v4) {
2162 		af = AF_INET;
2163 		divpoint = IPH_HDR_LENGTH(ipha);
2164 		datalen = ntohs(ipha->ipha_length) - divpoint;
2165 		nhp = (uint8_t *)&ipha->ipha_protocol;
2166 	} else {
2167 		ip6_pkt_t ipp;
2168 
2169 		af = AF_INET6;
2170 		ip6h = (ip6_t *)ipha;
2171 		bzero(&ipp, sizeof (ipp));
2172 		divpoint = ip_find_hdr_v6(data_mp, ip6h, &ipp, NULL);
2173 		if (ipp.ipp_dstopts != NULL &&
2174 		    ipp.ipp_dstopts->ip6d_nxt != IPPROTO_ROUTING) {
2175 			/*
2176 			 * Destination options are tricky.  If we get in here,
2177 			 * then we have a terminal header following the
2178 			 * destination options.  We need to adjust backwards
2179 			 * so we insert ESP BEFORE the destination options
2180 			 * bag.  (So that the dstopts get encrypted!)
2181 			 *
2182 			 * Since this is for outbound packets only, we know
2183 			 * that non-terminal destination options only precede
2184 			 * routing headers.
2185 			 */
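			/*
			 * For example (a sketch): given a header chain of
			 * [IPv6][dst-opts][TCP], divpoint initially points
			 * past the destination options; backing it up by
			 * ipp_dstoptslen inserts ESP in front of them, so
			 * the result is [IPv6][ESP][dst-opts][TCP] with the
			 * destination options inside the encrypted payload.
			 */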
2186 			divpoint -= ipp.ipp_dstoptslen;
2187 		}
2188 		datalen = ntohs(ip6h->ip6_plen) + sizeof (ip6_t) - divpoint;
2189 
2190 		if (ipp.ipp_rthdr != NULL) {
2191 			nhp = &ipp.ipp_rthdr->ip6r_nxt;
2192 		} else if (ipp.ipp_hopopts != NULL) {
2193 			nhp = &ipp.ipp_hopopts->ip6h_nxt;
2194 		} else {
2195 			ASSERT(divpoint == sizeof (ip6_t));
2196 			/* It's probably IP + ESP. */
2197 			nhp = &ip6h->ip6_nxt;
2198 		}
2199 	}
2200 	assoc = io->ipsec_out_esp_sa;
2201 	ASSERT(assoc != NULL);
2202 
2203 	if (assoc->ipsa_usetime == 0)
2204 		esp_set_usetime(assoc, B_FALSE);
2205 
2206 	if (assoc->ipsa_auth_alg != SADB_AALG_NONE)
2207 		mac_len = assoc->ipsa_mac_len;
2208 
2209 	if (assoc->ipsa_flags & IPSA_F_NATT) {
2210 		/* wedge in fake UDP */
2211 		is_natt = B_TRUE;
2212 		esplen += UDPH_SIZE;
2213 	}
2214 
2215 	if (assoc->ipsa_encr_alg != SADB_EALG_NULL)
2216 		iv_len = assoc->ipsa_iv_len;
2217 
2218 	/*
2219 	 * Set up ESP header and encryption padding for ENCR PI request.
2220 	 */
2221 
2222 	/*
2223 	 * Determine the padding length.  Pad to the cipher block size
2224 	 * (iv_len doubles as the block size here), or to 4 bytes when
2225 	 * NULL encryption is in use.  Include the two additional bytes
2226 	 * (hence the - 2) for the padding length and the next header
2227 	 * when calculating the actual length of the padding.
2228 	 */
2229 
2230 	if (assoc->ipsa_encr_alg != SADB_EALG_NULL) {
2231 		padlen = ((unsigned)(iv_len - datalen - 2)) % iv_len;
2232 	} else {
2233 		padlen = ((unsigned)(sizeof (uint32_t) - datalen - 2)) %
2234 		    sizeof (uint32_t);
2235 	}
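	/*
	 * Worked example (a sketch, assuming a block cipher whose IV length
	 * equals its block size): with iv_len == 16 and datalen == 100,
	 * padlen = (16 - 100 - 2) mod 16 = 10, so datalen + padlen + 2 ==
	 * 112 is a multiple of the block size.  These same bytes (plus the
	 * IV) are what is charged against the SA's byte lifetime below.
	 */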
2236 
2237 	/* Allocate ESP header and IV. */
2238 	esplen += iv_len;
2239 
2240 	/*
2241 	 * Update association byte-count lifetimes.  Don't forget to take
2242 	 * into account the padding length and next-header (hence the + 2).
2243 	 *
2244 	 * Use the amount of data fed into the "encryption algorithm".  This
2245 	 * is the IV, the data itself, the padding, and the final two bytes
2246 	 * (pad length and next header).
2247 	 *
2248 	 */
2249 
2250 	if (!esp_age_bytes(assoc, datalen + padlen + iv_len + 2, B_FALSE)) {
2251 		/*
2252 		 * TODO:  Find the outbound IRE for this packet and
2253 		 * pass it to ip_drop_packet().
2254 		 */
2255 		ip_drop_packet(mp, B_FALSE, NULL, NULL,
2256 		    &ipdrops_esp_bytes_expire, &esp_dropper);
2257 		return (IPSEC_STATUS_FAILED);
2258 	}
2259 
2260 	espmp = allocb(esplen, BPRI_HI);
2261 	if (espmp == NULL) {
2262 		ESP_BUMP_STAT(out_discards);
2263 		esp1dbg(("esp_outbound: can't allocate espmp.\n"));
2264 		/*
2265 		 * TODO:  Find the outbound IRE for this packet and
2266 		 * pass it to ip_drop_packet().
2267 		 */
2268 		ip_drop_packet(mp, B_FALSE, NULL, NULL, &ipdrops_esp_nomem,
2269 		    &esp_dropper);
2270 		return (IPSEC_STATUS_FAILED);
2271 	}
2272 	espmp->b_wptr += esplen;
2273 	esph = (esph_t *)espmp->b_rptr;
2274 
2275 	if (is_natt) {
2276 		esp3dbg(("esp_outbound: NATT"));
2277 
2278 		udpha = (udpha_t *)espmp->b_rptr;
2279 		udpha->uha_src_port = htons(IPPORT_IKE_NATT);
2280 		if (assoc->ipsa_remote_port != 0)
2281 			udpha->uha_dst_port = assoc->ipsa_remote_port;
2282 		else
2283 			udpha->uha_dst_port = htons(IPPORT_IKE_NATT);
2284 		/*
2285 		 * Set the checksum to 0, so that ip_wput_ipsec_out()
2286 		 * can do the right thing.
2287 		 */
2288 		udpha->uha_checksum = 0;
2289 		esph = (esph_t *)(udpha + 1);
2290 	}
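	/*
	 * The resulting NAT-T encapsulation is [IP][UDP][ESP]..., i.e.
	 * UDP-encapsulated ESP in the style of RFC 3948, with the source
	 * port fixed at IPPORT_IKE_NATT (the IKE NAT-traversal port,
	 * normally 4500) and the destination port taken from the SA when
	 * the peer supplied one.
	 */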
2291 
2292 	esph->esph_spi = assoc->ipsa_spi;
2293 
2294 	esph->esph_replay = htonl(atomic_add_32_nv(&assoc->ipsa_replay, 1));
2295 	if (esph->esph_replay == 0 && assoc->ipsa_replay_wsize != 0) {
2296 		/*
2297 		 * XXX We have replay counter wrapping.
2298 		 * We probably want to nuke this SA (and its peer).
2299 		 */
2300 		ipsec_assocfailure(info.mi_idnum, 0, 0,
2301 		    SL_ERROR | SL_CONSOLE | SL_WARN,
2302 		    "Outbound ESP SA (0x%x, %s) has wrapped sequence.\n",
2303 		    esph->esph_spi, assoc->ipsa_dstaddr, af);
2304 
2305 		ESP_BUMP_STAT(out_discards);
2306 		sadb_replay_delete(assoc);
2307 		/*
2308 		 * TODO:  Find the outbound IRE for this packet and
2309 		 * pass it to ip_drop_packet().
2310 		 */
2311 		ip_drop_packet(mp, B_FALSE, NULL, NULL, &ipdrops_esp_replay,
2312 		    &esp_dropper);
2313 		return (IPSEC_STATUS_FAILED);
2314 	}
2315 
2316 	/*
2317 	 * Set the IV to a random quantity.  We do not require the
2318 	 * highest quality random bits, but for best security with CBC
2319 	 * mode ciphers, the value must be unlikely to repeat and also
2320 	 * must not be known in advance to an adversary capable of
2321 	 * influencing the plaintext.
2322 	 */
2323 	(void) random_get_pseudo_bytes((uint8_t *)(esph + 1), iv_len);
2324 
2325 	/* Fix the IP header. */
2326 	alloclen = padlen + 2 + mac_len;
2327 	adj = alloclen + (espmp->b_wptr - espmp->b_rptr);
2328 
2329 	protocol = *nhp;
2330 
2331 	if (io->ipsec_out_v4) {
2332 		ipha->ipha_length = htons(ntohs(ipha->ipha_length) + adj);
2333 		if (is_natt) {
2334 			*nhp = IPPROTO_UDP;
2335 			udpha->uha_length = htons(ntohs(ipha->ipha_length) -
2336 			    IPH_HDR_LENGTH(ipha));
2337 		} else {
2338 			*nhp = IPPROTO_ESP;
2339 		}
2340 		ipha->ipha_hdr_checksum = 0;
2341 		ipha->ipha_hdr_checksum = (uint16_t)ip_csum_hdr(ipha);
2342 	} else {
2343 		ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) + adj);
2344 		*nhp = IPPROTO_ESP;
2345 	}
2346 
2347 	/* I've got the two ESP mblks, now insert them. */
2348 
2349 	esp2dbg(("data_mp before outbound ESP adjustment:\n"));
2350 	esp2dbg((dump_msg(data_mp)));
2351 
2352 	if (!esp_insert_esp(data_mp, espmp, divpoint)) {
2353 		ESP_BUMP_STAT(out_discards);
2354 		/* NOTE:  esp_insert_esp() only fails if there's no memory. */
2355 		/*
2356 		 * TODO:  Find the outbound IRE for this packet and
2357 		 * pass it to ip_drop_packet().
2358 		 */
2359 		ip_drop_packet(mp, B_FALSE, NULL, NULL, &ipdrops_esp_nomem,
2360 		    &esp_dropper);
2361 		freeb(espmp);
2362 		return (IPSEC_STATUS_FAILED);
2363 	}
2364 
2365 	/* Append padding (and leave room for ICV). */
2366 	for (tailmp = data_mp; tailmp->b_cont != NULL; tailmp = tailmp->b_cont)
2367 		;
2368 	if (tailmp->b_wptr + alloclen > tailmp->b_datap->db_lim) {
2369 		tailmp->b_cont = allocb(alloclen, BPRI_HI);
2370 		if (tailmp->b_cont == NULL) {
2371 			ESP_BUMP_STAT(out_discards);
2372 			esp0dbg(("esp_outbound:  Can't allocate tailmp.\n"));
2373 			/*
2374 			 * TODO:  Find the outbound IRE for this packet and
2375 			 * pass it to ip_drop_packet().
2376 			 */
2377 			ip_drop_packet(mp, B_FALSE, NULL, NULL,
2378 			    &ipdrops_esp_nomem, &esp_dropper);
2379 			return (IPSEC_STATUS_FAILED);
2380 		}
2381 		tailmp = tailmp->b_cont;
2382 	}
2383 
2384 	/*
2385 	 * If there's padding, N bytes of padding must be of the form 0x1,
2386 	 * 0x2, 0x3... 0xN.
2387 	 */
2388 	for (i = 0; i < padlen; ) {
2389 		i++;
2390 		*tailmp->b_wptr++ = i;
2391 	}
2392 	*tailmp->b_wptr++ = i;
2393 	*tailmp->b_wptr++ = protocol;
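	/*
	 * E.g. with padlen == 4 the trailer appended above is
	 * 0x01 0x02 0x03 0x04 (padding), 0x04 (pad length), then the saved
	 * next-header value; the ICV, if any, is written after these bytes
	 * by the crypto framework.
	 */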
2394 
2395 	esp2dbg(("data_mp before encryption:\n"));
2396 	esp2dbg((dump_msg(data_mp)));
2397 
2398 	/*
2399 	 * The packet is eligible for hardware acceleration if the
2400 	 * following conditions are satisfied:
2401 	 *
2402 	 * 1. the packet will not be fragmented
2403 	 * 2. the provider supports the algorithms specified by the SA
2404 	 * 3. there is no pending control message being exchanged
2405 	 * 4. snoop is not attached
2406 	 * 5. the destination address is not a multicast address
2407 	 *
2408 	 * All five of these conditions are checked by IP prior to
2409 	 * sending the packet to ESP.
2410 	 *
2411 	 * But We, and We Alone, can, nay MUST check if the packet
2412 	 * is over NATT, and then disqualify it from hardware
2413 	 * acceleration.
2414 	 */
2415 
2416 	if (io->ipsec_out_is_capab_ill && !(assoc->ipsa_flags & IPSA_F_NATT)) {
2417 		return (esp_outbound_accelerated(ipsec_out_mp, mac_len));
2418 	}
2419 	ESP_BUMP_STAT(noaccel);
2420 
2421 	/*
2422 	 * Okay.  I've set up the pre-encryption ESP.  Let's do it!
2423 	 */
2424 
2425 	if (mac_len > 0) {
2426 		ASSERT(tailmp->b_wptr + mac_len <= tailmp->b_datap->db_lim);
2427 		icv_buf = tailmp->b_wptr;
2428 		tailmp->b_wptr += mac_len;
2429 	} else {
2430 		icv_buf = NULL;
2431 	}
2432 
2433 	return (esp_submit_req_outbound(ipsec_out_mp, assoc, icv_buf,
2434 	    datalen + padlen + 2));
2435 }
2436 
2437 /*
2438  * IP calls this to validate the ICMP errors that
2439  * we got from the network.
2440  */
2441 ipsec_status_t
2442 ipsecesp_icmp_error(mblk_t *ipsec_mp)
2443 {
2444 	/*
2445 	 * Unless we get an entire packet back, this function is useless.
2446 	 * Why?
2447 	 *
2448 	 * 1.)	Partial packets are useless, because the "next header"
2449 	 *	is at the end of the decrypted ESP packet.  Without the
2450 	 *	whole packet, we cannot tell what it was protecting.
2451 	 *
2452 	 * 2.)	If we ever use a stateful cipher, such as a stream or a
2453 	 *	one-time pad, we can't do anything.
2454 	 *
2455 	 * Since the chances of us getting an entire packet back are very
2456 	 * very small, we discard here.
2457 	 */
2458 	IP_ESP_BUMP_STAT(in_discards);
2459 	ip_drop_packet(ipsec_mp, B_TRUE, NULL, NULL, &ipdrops_esp_icmp,
2460 	    &esp_dropper);
2461 	return (IPSEC_STATUS_FAILED);
2462 }
2463 
2464 /*
2465  * ESP module read put routine.
2466  */
2467 /* ARGSUSED */
2468 static void
2469 ipsecesp_rput(queue_t *q, mblk_t *mp)
2470 {
2471 	keysock_in_t *ksi;
2472 	int *addrtype;
2473 	ire_t *ire;
2474 	mblk_t *ire_mp, *last_mp;
2475 
2476 	switch (mp->b_datap->db_type) {
2477 	case M_CTL:
2478 		/*
2479 		 * IPsec request of some variety from IP.  IPSEC_{IN,OUT}
2480 		 * are the common cases, but even ICMP error messages from IP
2481 		 * may rise up here.
2482 		 *
2483 		 * Ummmm, actually, this can also be the reflected KEYSOCK_IN
2484 		 * message, with an IRE_DB_TYPE hung off at the end.
2485 		 */
2486 		switch (((ipsec_info_t *)(mp->b_rptr))->ipsec_info_type) {
2487 		case KEYSOCK_IN:
2488 			last_mp = mp;
2489 			while (last_mp->b_cont != NULL &&
2490 			    last_mp->b_cont->b_datap->db_type != IRE_DB_TYPE)
2491 				last_mp = last_mp->b_cont;
2492 
2493 			if (last_mp->b_cont == NULL) {
2494 				freemsg(mp);
2495 				break;	/* Out of switch. */
2496 			}
2497 
2498 			ire_mp = last_mp->b_cont;
2499 			last_mp->b_cont = NULL;
2500 
2501 			ksi = (keysock_in_t *)mp->b_rptr;
2502 
2503 			if (ksi->ks_in_srctype == KS_IN_ADDR_UNKNOWN)
2504 				addrtype = &ksi->ks_in_srctype;
2505 			else if (ksi->ks_in_dsttype == KS_IN_ADDR_UNKNOWN)
2506 				addrtype = &ksi->ks_in_dsttype;
2507 			else if (ksi->ks_in_proxytype == KS_IN_ADDR_UNKNOWN)
2508 				addrtype = &ksi->ks_in_proxytype;
2509 
2510 			ire = (ire_t *)ire_mp->b_rptr;
2511 
2512 			*addrtype = sadb_addrset(ire);
2513 
2514 			freemsg(ire_mp);
2515 			if (esp_pfkey_q != NULL) {
2516 				/*
2517 				 * Decrement counter to make up for
2518 				 * auto-increment in ipsecesp_wput().
2519 				 * I'm running all MT-hot through here, so
2520 				 * don't worry about perimeters and lateral
2521 				 * puts.
2522 				 */
2523 				ESP_DEBUMP_STAT(keysock_in);
2524 				ipsecesp_wput(WR(esp_pfkey_q), mp);
2525 			} else {
2526 				freemsg(mp);
2527 			}
2528 			break;
2529 		default:
2530 			freemsg(mp);
2531 			break;
2532 		}
2533 		break;
2534 	case M_PROTO:
2535 	case M_PCPROTO:
2536 		/* TPI message of some sort. */
2537 		switch (*((t_scalar_t *)mp->b_rptr)) {
2538 		case T_BIND_ACK:
2539 			esp3dbg(("Thank you IP from ESP for T_BIND_ACK\n"));
2540 			break;
2541 		case T_ERROR_ACK:
2542 			cmn_err(CE_WARN,
2543 			    "ipsecesp:  ESP received T_ERROR_ACK from IP.");
2544 			/*
2545 			 * Make esp_sadb.s_ip_q NULL, and in the
2546 			 * future, perhaps try again.
2547 			 */
2548 			esp_sadb.s_ip_q = NULL;
2549 			break;
2550 		case T_OK_ACK:
2551 			/* Probably from a (rarely sent) T_UNBIND_REQ. */
2552 			break;
2553 		default:
2554 			esp0dbg(("Unknown M_{,PC}PROTO message.\n"));
2555 		}
2556 		freemsg(mp);
2557 		break;
2558 	default:
2559 		/* For now, passthru message. */
2560 		esp2dbg(("ESP got unknown mblk type %d.\n",
2561 		    mp->b_datap->db_type));
2562 		putnext(q, mp);
2563 	}
2564 }
2565 
2566 /*
2567  * Construct an SADB_REGISTER message with the current algorithms.
2568  */
2569 static boolean_t
2570 esp_register_out(uint32_t sequence, uint32_t pid, uint_t serial)
2571 {
2572 	mblk_t *pfkey_msg_mp, *keysock_out_mp;
2573 	sadb_msg_t *samsg;
2574 	sadb_supported_t *sasupp_auth = NULL;
2575 	sadb_supported_t *sasupp_encr = NULL;
2576 	sadb_alg_t *saalg;
2577 	uint_t allocsize = sizeof (*samsg);
2578 	uint_t i, numalgs_snap;
2579 	int current_aalgs;
2580 	ipsec_alginfo_t **authalgs;
2581 	uint_t num_aalgs;
2582 	int current_ealgs;
2583 	ipsec_alginfo_t **encralgs;
2584 	uint_t num_ealgs;
2585 
2586 	/* Allocate the KEYSOCK_OUT. */
2587 	keysock_out_mp = sadb_keysock_out(serial);
2588 	if (keysock_out_mp == NULL) {
2589 		esp0dbg(("esp_register_out: couldn't allocate mblk.\n"));
2590 		return (B_FALSE);
2591 	}
2592 
2593 	/*
2594 	 * Allocate the PF_KEY message that follows KEYSOCK_OUT.
2595 	 */
2596 
2597 	mutex_enter(&alg_lock);
2598 
2599 	/*
2600 	 * Fill SADB_REGISTER message's algorithm descriptors.  Hold
2601 	 * down the lock while filling it.
2602 	 *
2603 	 * Return only valid algorithms, so the number of algorithms
2604 	 * to send up may be less than the number of algorithm entries
2605 	 * in the table.
2606 	 */
2607 	authalgs = ipsec_alglists[IPSEC_ALG_AUTH];
2608 	for (num_aalgs = 0, i = 0; i < IPSEC_MAX_ALGS; i++)
2609 		if (authalgs[i] != NULL && ALG_VALID(authalgs[i]))
2610 			num_aalgs++;
2611 
2612 	if (num_aalgs != 0) {
2613 		allocsize += (num_aalgs * sizeof (*saalg));
2614 		allocsize += sizeof (*sasupp_auth);
2615 	}
2616 	encralgs = ipsec_alglists[IPSEC_ALG_ENCR];
2617 	for (num_ealgs = 0, i = 0; i < IPSEC_MAX_ALGS; i++)
2618 		if (encralgs[i] != NULL && ALG_VALID(encralgs[i]))
2619 			num_ealgs++;
2620 
2621 	if (num_ealgs != 0) {
2622 		allocsize += (num_ealgs * sizeof (*saalg));
2623 		allocsize += sizeof (*sasupp_encr);
2624 	}
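	/*
	 * The reply being built is laid out as:
	 *	[sadb_msg_t]
	 *	[sadb_supported_t (auth)][sadb_alg_t x num_aalgs]  (if any)
	 *	[sadb_supported_t (encr)][sadb_alg_t x num_ealgs]  (if any)
	 * which is exactly what allocsize accounts for above.
	 */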
2625 	keysock_out_mp->b_cont = allocb(allocsize, BPRI_HI);
2626 	if (keysock_out_mp->b_cont == NULL) {
2627 		mutex_exit(&alg_lock);
2628 		freemsg(keysock_out_mp);
2629 		return (B_FALSE);
2630 	}
2631 
2632 	pfkey_msg_mp = keysock_out_mp->b_cont;
2633 	pfkey_msg_mp->b_wptr += allocsize;
2634 	if (num_aalgs != 0) {
2635 		sasupp_auth = (sadb_supported_t *)
2636 		    (pfkey_msg_mp->b_rptr + sizeof (*samsg));
2637 		saalg = (sadb_alg_t *)(sasupp_auth + 1);
2638 
2639 		ASSERT(((ulong_t)saalg & 0x7) == 0);
2640 
2641 		numalgs_snap = 0;
2642 		for (i = 0;
2643 		    ((i < IPSEC_MAX_ALGS) && (numalgs_snap < num_aalgs)); i++) {
2644 			if (authalgs[i] == NULL || !ALG_VALID(authalgs[i]))
2645 				continue;
2646 
2647 			saalg->sadb_alg_id = authalgs[i]->alg_id;
2648 			saalg->sadb_alg_ivlen = 0;
2649 			saalg->sadb_alg_minbits	= authalgs[i]->alg_ef_minbits;
2650 			saalg->sadb_alg_maxbits	= authalgs[i]->alg_ef_maxbits;
2651 			saalg->sadb_x_alg_defincr = authalgs[i]->alg_ef_default;
2652 			saalg->sadb_x_alg_increment =
2653 			    authalgs[i]->alg_increment;
2654 			numalgs_snap++;
2655 			saalg++;
2656 		}
2657 		ASSERT(numalgs_snap == num_aalgs);
2658 #ifdef DEBUG
2659 		/*
2660 		 * Reality check to make sure I snagged all of the
2661 		 * algorithms.
2662 		 */
2663 		for (; i < IPSEC_MAX_ALGS; i++) {
2664 			if (authalgs[i] != NULL && ALG_VALID(authalgs[i])) {
2665 				cmn_err(CE_PANIC, "esp_register_out()! "
2666 				    "Missed aalg #%d.\n", i);
2667 			}
2668 		}
2669 #endif /* DEBUG */
2670 	} else {
2671 		saalg = (sadb_alg_t *)(pfkey_msg_mp->b_rptr + sizeof (*samsg));
2672 	}
2673 
2674 	if (num_ealgs != 0) {
2675 		sasupp_encr = (sadb_supported_t *)saalg;
2676 		saalg = (sadb_alg_t *)(sasupp_encr + 1);
2677 
2678 		numalgs_snap = 0;
2679 		for (i = 0;
2680 		    ((i < IPSEC_MAX_ALGS) && (numalgs_snap < num_ealgs)); i++) {
2681 			if (encralgs[i] == NULL || !ALG_VALID(encralgs[i]))
2682 				continue;
2683 			saalg->sadb_alg_id = encralgs[i]->alg_id;
2684 			saalg->sadb_alg_ivlen = encralgs[i]->alg_datalen;
2685 			saalg->sadb_alg_minbits	= encralgs[i]->alg_ef_minbits;
2686 			saalg->sadb_alg_maxbits	= encralgs[i]->alg_ef_maxbits;
2687 			saalg->sadb_x_alg_defincr = encralgs[i]->alg_ef_default;
2688 			saalg->sadb_x_alg_increment =
2689 			    encralgs[i]->alg_increment;
2690 			numalgs_snap++;
2691 			saalg++;
2692 		}
2693 		ASSERT(numalgs_snap == num_ealgs);
2694 #ifdef DEBUG
2695 		/*
2696 		 * Reality check to make sure I snagged all of the
2697 		 * algorithms.
2698 		 */
2699 		for (; i < IPSEC_MAX_ALGS; i++) {
2700 			if (encralgs[i] != NULL && ALG_VALID(encralgs[i])) {
2701 				cmn_err(CE_PANIC, "esp_register_out()! "
2702 				    "Missed ealg #%d.\n", i);
2703 			}
2704 		}
2705 #endif /* DEBUG */
2706 	}
2707 
2708 	current_aalgs = num_aalgs;
2709 	current_ealgs = num_ealgs;
2710 
2711 	mutex_exit(&alg_lock);
2712 
2713 	/* Now fill the rest of the SADB_REGISTER message. */
2714 
2715 	samsg = (sadb_msg_t *)pfkey_msg_mp->b_rptr;
2716 	samsg->sadb_msg_version = PF_KEY_V2;
2717 	samsg->sadb_msg_type = SADB_REGISTER;
2718 	samsg->sadb_msg_errno = 0;
2719 	samsg->sadb_msg_satype = SADB_SATYPE_ESP;
2720 	samsg->sadb_msg_len = SADB_8TO64(allocsize);
2721 	samsg->sadb_msg_reserved = 0;
2722 	/*
2723 	 * Assume caller has sufficient sequence/pid number info.  If it's one
2724 	 * from me over a new alg., I could give two hoots about sequence.
2725 	 */
2726 	samsg->sadb_msg_seq = sequence;
2727 	samsg->sadb_msg_pid = pid;
2728 
2729 	if (sasupp_auth != NULL) {
2730 		sasupp_auth->sadb_supported_len =
2731 		    SADB_8TO64(sizeof (*sasupp_auth) +
2732 			sizeof (*saalg) * current_aalgs);
2733 		sasupp_auth->sadb_supported_exttype = SADB_EXT_SUPPORTED_AUTH;
2734 		sasupp_auth->sadb_supported_reserved = 0;
2735 	}
2736 
2737 	if (sasupp_encr != NULL) {
2738 		sasupp_encr->sadb_supported_len =
2739 		    SADB_8TO64(sizeof (*sasupp_encr) +
2740 			sizeof (*saalg) * current_ealgs);
2741 		sasupp_encr->sadb_supported_exttype =
2742 		    SADB_EXT_SUPPORTED_ENCRYPT;
2743 		sasupp_encr->sadb_supported_reserved = 0;
2744 	}
2745 
2746 	if (esp_pfkey_q != NULL)
2747 		putnext(esp_pfkey_q, keysock_out_mp);
2748 	else {
2749 		freemsg(keysock_out_mp);
2750 		return (B_FALSE);
2751 	}
2752 
2753 	return (B_TRUE);
2754 }
2755 
2756 /*
2757  * Invoked when the algorithm table changes. Causes SADB_REGISTER
2758  * messages containing the current list of algorithms to be
2759  * sent up to the ESP listeners.
2760  */
2761 void
2762 ipsecesp_algs_changed(void)
2763 {
2764 	/*
2765 	 * Time to send a PF_KEY SADB_REGISTER message to ESP listeners
2766 	 * everywhere.  (The function itself checks for NULL esp_pfkey_q.)
2767 	 */
2768 	(void) esp_register_out(0, 0, 0);
2769 }
2770 
2771 /*
2772  * taskq_dispatch handler.
2773  */
2774 static void
2775 inbound_task(void *arg)
2776 {
2777 	esph_t *esph;
2778 	mblk_t *mp = (mblk_t *)arg;
2779 	ipsec_in_t *ii = (ipsec_in_t *)mp->b_rptr;
2780 	int ipsec_rc;
2781 
2782 	esp2dbg(("in ESP inbound_task"));
2783 
2784 	esph = ipsec_inbound_esp_sa(mp);
2785 	if (esph == NULL)
2786 		return;
2787 	ASSERT(ii->ipsec_in_esp_sa != NULL);
2788 	ipsec_rc = ii->ipsec_in_esp_sa->ipsa_input_func(mp, esph);
2789 	if (ipsec_rc != IPSEC_STATUS_SUCCESS)
2790 		return;
2791 	ip_fanout_proto_again(mp, NULL, NULL, NULL);
2792 }
2793 
2794 /*
2795  * Now that the weak-key check has passed, actually ADD the security
2796  * association, and send back a reply ADD message.
2797  */
2798 static int
2799 esp_add_sa_finish(mblk_t *mp, sadb_msg_t *samsg, keysock_in_t *ksi)
2800 {
2801 	isaf_t *primary, *secondary, *inbound, *outbound;
2802 	sadb_sa_t *assoc = (sadb_sa_t *)ksi->ks_in_extv[SADB_EXT_SA];
2803 	sadb_address_t *dstext =
2804 	    (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_DST];
2805 	struct sockaddr_in *dst;
2806 	struct sockaddr_in6 *dst6;
2807 	boolean_t is_ipv4, clone = B_FALSE, is_inbound = B_FALSE;
2808 	uint32_t *dstaddr;
2809 	ipsa_t *larval = NULL;
2810 	ipsacq_t *acqrec;
2811 	iacqf_t *acq_bucket;
2812 	mblk_t *acq_msgs = NULL;
2813 	int rc;
2814 	sadb_t *sp;
2815 	int outhash;
2816 	mblk_t *lpkt;
2817 
2818 	/*
2819 	 * Locate the appropriate table(s).
2820 	 */
2821 
2822 	dst = (struct sockaddr_in *)(dstext + 1);
2823 	dst6 = (struct sockaddr_in6 *)dst;
2824 	is_ipv4 = (dst->sin_family == AF_INET);
2825 	if (is_ipv4) {
2826 		sp = &esp_sadb.s_v4;
2827 		dstaddr = (uint32_t *)(&dst->sin_addr);
2828 		outhash = OUTBOUND_HASH_V4(sp, *(ipaddr_t *)dstaddr);
2829 	} else {
2830 		sp = &esp_sadb.s_v6;
2831 		dstaddr = (uint32_t *)(&dst6->sin6_addr);
2832 		outhash = OUTBOUND_HASH_V6(sp, *(in6_addr_t *)dstaddr);
2833 	}
2834 
2835 	inbound = INBOUND_BUCKET(sp, assoc->sadb_sa_spi);
2836 	outbound = &sp->sdb_of[outhash];
2837 
2838 	switch (ksi->ks_in_dsttype) {
2839 	case KS_IN_ADDR_MBCAST:
2840 		clone = B_TRUE;	/* All mcast SAs can be bidirectional */
2841 		/* FALLTHRU */
2842 	case KS_IN_ADDR_ME:
2843 		primary = inbound;
2844 		secondary = outbound;
2845 		/*
2846 		 * If the source address is either one of mine, or unspecified
2847 		 * (which is best summed up by saying "not 'not mine'"),
2848 		 * then the association is potentially bi-directional,
2849 		 * in that it can be used for inbound traffic and outbound
2850 		 * traffic.  The best example of such an SA is a multicast
2851 		 * SA (which allows me to receive the outbound traffic).
2852 		 */
2853 		if (ksi->ks_in_srctype != KS_IN_ADDR_NOTME)
2854 			clone = B_TRUE;
2855 		is_inbound = B_TRUE;
2856 		break;
2857 	case KS_IN_ADDR_NOTME:
2858 		primary = outbound;
2859 		secondary = inbound;
2860 		/*
2861 		 * If the source address is literally not mine (either
2862 		 * unspecified or not mine), then this SA may have an
2863 		 * address that WILL be mine after some configuration.
2864 		 * We pay the price for this by making it a bi-directional
2865 		 * SA.
2866 		 */
2867 		if (ksi->ks_in_srctype != KS_IN_ADDR_ME)
2868 			clone = B_TRUE;
2869 		break;
2870 	default:
2871 		samsg->sadb_x_msg_diagnostic = SADB_X_DIAGNOSTIC_BAD_DST;
2872 		return (EINVAL);
2873 	}
2874 
2875 	/*
2876 	 * Find an ACQUIRE list entry if possible.  If we've added an SA that
2877 	 * suits the needs of an ACQUIRE list entry, we can eliminate the
2878 	 * ACQUIRE list entry and transmit the enqueued packets.  Use the
2879 	 * high-bit of the sequence number to queue it.  Key off destination
2880 	 * addr, and change acqrec's state.
2881 	 */
2882 
2883 	if (samsg->sadb_msg_seq & IACQF_LOWEST_SEQ) {
2884 		acq_bucket = &sp->sdb_acq[outhash];
2885 		mutex_enter(&acq_bucket->iacqf_lock);
2886 		for (acqrec = acq_bucket->iacqf_ipsacq; acqrec != NULL;
2887 		    acqrec = acqrec->ipsacq_next) {
2888 			mutex_enter(&acqrec->ipsacq_lock);
2889 			/*
2890 			 * Q:  I only check sequence.  Should I check dst?
2891 			 * A: Yes, check dest because those are the packets
2892 			 *    that are queued up.
2893 			 */
2894 			if (acqrec->ipsacq_seq == samsg->sadb_msg_seq &&
2895 			    IPSA_ARE_ADDR_EQUAL(dstaddr,
2896 				acqrec->ipsacq_dstaddr, acqrec->ipsacq_addrfam))
2897 				break;
2898 			mutex_exit(&acqrec->ipsacq_lock);
2899 		}
2900 		if (acqrec != NULL) {
2901 			/*
2902 			 * AHA!  I found an ACQUIRE record for this SA.
2903 			 * Grab the msg list, and free the acquire record.
2904 			 * I already am holding the lock for this record,
2905 			 * so all I have to do is free it.
2906 			 */
2907 			acq_msgs = acqrec->ipsacq_mp;
2908 			acqrec->ipsacq_mp = NULL;
2909 			mutex_exit(&acqrec->ipsacq_lock);
2910 			sadb_destroy_acquire(acqrec);
2911 		}
2912 		mutex_exit(&acq_bucket->iacqf_lock);
2913 	}
2914 
2915 	/*
2916 	 * Find PF_KEY message, and see if I'm an update.  If so, find entry
2917 	 * in larval list (if there).
2918 	 */
2919 
2920 	if (samsg->sadb_msg_type == SADB_UPDATE) {
2921 		mutex_enter(&inbound->isaf_lock);
2922 		larval = ipsec_getassocbyspi(inbound, assoc->sadb_sa_spi,
2923 		    ALL_ZEROES_PTR, dstaddr, dst->sin_family);
2924 		mutex_exit(&inbound->isaf_lock);
2925 
2926 		if (larval == NULL) {
2927 			esp0dbg(("Larval update, but larval disappeared.\n"));
2928 			return (ESRCH);
2929 		} /* Else sadb_common_add unlinks it for me! */
2930 	}
2931 
2932 	lpkt = NULL;
2933 	if (larval != NULL)
2934 		lpkt = sadb_clear_lpkt(larval);
2935 
2936 	rc = sadb_common_add(esp_sadb.s_ip_q, esp_pfkey_q, mp, samsg, ksi,
2937 	    primary, secondary, larval, clone, is_inbound);
2938 
2939 	if (rc == 0 && lpkt != NULL) {
2940 		rc = !taskq_dispatch(esp_taskq, inbound_task,
2941 			    (void *) lpkt, TQ_NOSLEEP);
2942 	}
2943 
2944 	if (rc != 0) {
2945 		ip_drop_packet(lpkt, B_TRUE, NULL, NULL,
2946 		    &ipdrops_sadb_inlarval_timeout, &esp_dropper);
2947 	}
2948 
2949 	/*
2950 	 * How much more stack will I create with all of these
2951 	 * esp_outbound() calls?
2952 	 */
2953 
2954 	while (acq_msgs != NULL) {
2955 		mblk_t *mp = acq_msgs;
2956 
2957 		acq_msgs = acq_msgs->b_next;
2958 		mp->b_next = NULL;
2959 		if (rc == 0) {
2960 			if (ipsec_outbound_sa(mp, IPPROTO_ESP)) {
2961 				((ipsec_out_t *)(mp->b_rptr))->
2962 				    ipsec_out_esp_done = B_TRUE;
2963 				if (esp_outbound(mp) == IPSEC_STATUS_SUCCESS) {
2964 					ipha_t *ipha = (ipha_t *)
2965 					    mp->b_cont->b_rptr;
2966 
2967 					/* do AH processing if needed */
2968 					if (!esp_do_outbound_ah(mp))
2969 						continue;
2970 
2971 					/* finish IPsec processing */
2972 					if (is_ipv4) {
2973 						ip_wput_ipsec_out(NULL, mp,
2974 						    ipha, NULL, NULL);
2975 					} else {
2976 						ip6_t *ip6h = (ip6_t *)ipha;
2977 						ip_wput_ipsec_out_v6(NULL,
2978 						    mp, ip6h, NULL, NULL);
2979 					}
2980 				}
2981 				continue;
2982 			}
2983 		}
2984 		ESP_BUMP_STAT(out_discards);
2985 		ip_drop_packet(mp, B_FALSE, NULL, NULL,
2986 		    &ipdrops_sadb_acquire_timeout, &esp_dropper);
2987 	}
2988 
2989 	return (rc);
2990 }
2991 
2992 /*
2993  * Add a new ESP security association.  This may become a generic AH/ESP
2994  * routine eventually.
2995  */
2996 static int
2997 esp_add_sa(mblk_t *mp, keysock_in_t *ksi, int *diagnostic)
2998 {
2999 	sadb_sa_t *assoc = (sadb_sa_t *)ksi->ks_in_extv[SADB_EXT_SA];
3000 	sadb_address_t *srcext =
3001 	    (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_SRC];
3002 	sadb_address_t *dstext =
3003 	    (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_DST];
3004 	sadb_address_t *nttext_loc =
3005 	    (sadb_address_t *)ksi->ks_in_extv[SADB_X_EXT_ADDRESS_NATT_LOC];
3006 	sadb_address_t *nttext_rem =
3007 	    (sadb_address_t *)ksi->ks_in_extv[SADB_X_EXT_ADDRESS_NATT_REM];
3008 	sadb_key_t *akey = (sadb_key_t *)ksi->ks_in_extv[SADB_EXT_KEY_AUTH];
3009 	sadb_key_t *ekey = (sadb_key_t *)ksi->ks_in_extv[SADB_EXT_KEY_ENCRYPT];
3010 	struct sockaddr_in *src, *dst;
3011 	struct sockaddr_in *natt_loc, *natt_rem;
3012 	struct sockaddr_in6 *natt_loc6, *natt_rem6;
3013 
3014 	sadb_lifetime_t *soft =
3015 	    (sadb_lifetime_t *)ksi->ks_in_extv[SADB_EXT_LIFETIME_SOFT];
3016 	sadb_lifetime_t *hard =
3017 	    (sadb_lifetime_t *)ksi->ks_in_extv[SADB_EXT_LIFETIME_HARD];
3018 
3019 	/* I need certain extensions present for an ADD message. */
3020 	if (srcext == NULL) {
3021 		*diagnostic = SADB_X_DIAGNOSTIC_MISSING_SRC;
3022 		return (EINVAL);
3023 	}
3024 	if (dstext == NULL) {
3025 		*diagnostic = SADB_X_DIAGNOSTIC_MISSING_DST;
3026 		return (EINVAL);
3027 	}
3028 	if (assoc == NULL) {
3029 		*diagnostic = SADB_X_DIAGNOSTIC_MISSING_SA;
3030 		return (EINVAL);
3031 	}
3032 	if (ekey == NULL && assoc->sadb_sa_encrypt != SADB_EALG_NULL) {
3033 		*diagnostic = SADB_X_DIAGNOSTIC_MISSING_EKEY;
3034 		return (EINVAL);
3035 	}
3036 
3037 	src = (struct sockaddr_in *)(srcext + 1);
3038 	dst = (struct sockaddr_in *)(dstext + 1);
3039 	natt_loc = (struct sockaddr_in *)(nttext_loc + 1);
3040 	natt_loc6 = (struct sockaddr_in6 *)(nttext_loc + 1);
3041 	natt_rem = (struct sockaddr_in *)(nttext_rem + 1);
3042 	natt_rem6 = (struct sockaddr_in6 *)(nttext_rem + 1);
3043 
3044 	/* Sundry ADD-specific reality checks. */
3045 	/* XXX STATS :  Logging/stats here? */
3046 	if (assoc->sadb_sa_state != SADB_SASTATE_MATURE) {
3047 		*diagnostic = SADB_X_DIAGNOSTIC_BAD_SASTATE;
3048 		return (EINVAL);
3049 	}
3050 	if (assoc->sadb_sa_encrypt == SADB_EALG_NONE) {
3051 		*diagnostic = SADB_X_DIAGNOSTIC_BAD_EALG;
3052 		return (EINVAL);
3053 	}
3054 
3055 	if (assoc->sadb_sa_encrypt == SADB_EALG_NULL &&
3056 	    assoc->sadb_sa_auth == SADB_AALG_NONE) {
3057 		*diagnostic = SADB_X_DIAGNOSTIC_BAD_AALG;
3058 		return (EINVAL);
3059 	}
3060 
3061 	if (assoc->sadb_sa_flags & ~(SADB_SAFLAGS_NOREPLAY |
3062 	    SADB_X_SAFLAGS_NATT_LOC | SADB_X_SAFLAGS_NATT_REM)) {
3063 		*diagnostic = SADB_X_DIAGNOSTIC_BAD_SAFLAGS;
3064 		return (EINVAL);
3065 	}
3066 
3067 	if ((*diagnostic = sadb_hardsoftchk(hard, soft)) != 0) {
3068 		return (EINVAL);
3069 	}
3070 	if (src->sin_family != dst->sin_family) {
3071 		*diagnostic = SADB_X_DIAGNOSTIC_AF_MISMATCH;
3072 		return (EINVAL);
3073 	}
3074 
3075 
3076 	if (assoc->sadb_sa_flags & SADB_X_SAFLAGS_NATT_LOC) {
3077 		if (nttext_loc == NULL) {
3078 			*diagnostic = SADB_X_DIAGNOSTIC_MISSING_NATT_LOC;
3079 			return (EINVAL);
3080 		}
3081 
3082 		if (natt_loc->sin_family == AF_INET6 &&
3083 		    !IN6_IS_ADDR_V4MAPPED(&natt_loc6->sin6_addr)) {
3084 			*diagnostic = SADB_X_DIAGNOSTIC_MALFORMED_NATT_LOC;
3085 			return (EINVAL);
3086 		}
3087 	}
3088 
3089 	if (assoc->sadb_sa_flags & SADB_X_SAFLAGS_NATT_REM) {
3090 		if (nttext_rem == NULL) {
3091 			*diagnostic = SADB_X_DIAGNOSTIC_MISSING_NATT_REM;
3092 			return (EINVAL);
3093 		}
3094 		if (natt_rem->sin_family == AF_INET6 &&
3095 		    !IN6_IS_ADDR_V4MAPPED(&natt_rem6->sin6_addr)) {
3096 			*diagnostic = SADB_X_DIAGNOSTIC_MALFORMED_NATT_REM;
3097 			return (EINVAL);
3098 		}
3099 	}
3100 
3101 
3102 	/* Stuff I don't support, for now.  XXX Diagnostic? */
3103 	if (ksi->ks_in_extv[SADB_EXT_LIFETIME_CURRENT] != NULL ||
3104 	    ksi->ks_in_extv[SADB_EXT_SENSITIVITY] != NULL)
3105 		return (EOPNOTSUPP);
3106 
3107 	/*
3108 	 * XXX Policy :  I'm not checking identities or sensitivity
3109 	 * labels at this time, but if I did, I'd do them here, before I sent
3110 	 * the weak key check up to the algorithm.
3111 	 */
3112 
3113 	mutex_enter(&alg_lock);
3114 
3115 	/*
3116 	 * First locate the authentication algorithm.
3117 	 */
3118 	if (akey != NULL) {
3119 		ipsec_alginfo_t *aalg;
3120 
3121 		aalg = ipsec_alglists[IPSEC_ALG_AUTH][assoc->sadb_sa_auth];
3122 		if (aalg == NULL || !ALG_VALID(aalg)) {
3123 			mutex_exit(&alg_lock);
3124 			esp1dbg(("Couldn't find auth alg #%d.\n",
3125 			    assoc->sadb_sa_auth));
3126 			*diagnostic = SADB_X_DIAGNOSTIC_BAD_AALG;
3127 			return (EINVAL);
3128 		}
3129 
3130 		/*
3131 		 * Sanity check key sizes.
3132 		 * Note: It's not possible to use SADB_AALG_NONE because
3133 		 * this auth_alg is not defined with ALG_FLAG_VALID. If this
3134 		 * ever changes, the same check for SADB_AALG_NONE and
3135 		 * an auth_key != NULL should be made here (see below).
3136 		 */
3137 		if (!ipsec_valid_key_size(akey->sadb_key_bits, aalg)) {
3138 			mutex_exit(&alg_lock);
3139 			*diagnostic = SADB_X_DIAGNOSTIC_BAD_AKEYBITS;
3140 			return (EINVAL);
3141 		}
3142 		ASSERT(aalg->alg_mech_type != CRYPTO_MECHANISM_INVALID);
3143 
3144 		/* check key and fix parity if needed */
3145 		if (ipsec_check_key(aalg->alg_mech_type, akey, B_TRUE,
3146 		    diagnostic) != 0) {
3147 			mutex_exit(&alg_lock);
3148 			return (EINVAL);
3149 		}
3150 	}
3151 
3152 	/*
3153 	 * Then locate the encryption algorithm.
3154 	 */
3155 	if (ekey != NULL) {
3156 		ipsec_alginfo_t *ealg;
3157 
3158 		ealg = ipsec_alglists[IPSEC_ALG_ENCR][assoc->sadb_sa_encrypt];
3159 		if (ealg == NULL || !ALG_VALID(ealg)) {
3160 			mutex_exit(&alg_lock);
3161 			esp1dbg(("Couldn't find encr alg #%d.\n",
3162 			    assoc->sadb_sa_encrypt));
3163 			*diagnostic = SADB_X_DIAGNOSTIC_BAD_EALG;
3164 			return (EINVAL);
3165 		}
3166 
3167 		/*
3168 		 * Sanity check key sizes. If the encryption algorithm is
3169 		 * SADB_EALG_NULL but the encryption key is NOT
3170 		 * NULL then complain.
3171 		 */
3172 		if ((assoc->sadb_sa_encrypt == SADB_EALG_NULL) ||
3173 		    (!ipsec_valid_key_size(ekey->sadb_key_bits, ealg))) {
3174 			mutex_exit(&alg_lock);
3175 			*diagnostic = SADB_X_DIAGNOSTIC_BAD_EKEYBITS;
3176 			return (EINVAL);
3177 		}
3178 		ASSERT(ealg->alg_mech_type != CRYPTO_MECHANISM_INVALID);
3179 
3180 		/* check key */
3181 		if (ipsec_check_key(ealg->alg_mech_type, ekey, B_FALSE,
3182 		    diagnostic) != 0) {
3183 			mutex_exit(&alg_lock);
3184 			return (EINVAL);
3185 		}
3186 	}
3187 	mutex_exit(&alg_lock);
3188 
3189 	return (esp_add_sa_finish(mp, (sadb_msg_t *)mp->b_cont->b_rptr, ksi));
3190 }
3191 
3192 /*
3193  * Update a security association.  Updates come in two varieties.  The first
3194  * is an update of lifetimes on a non-larval SA.  The second is an update of
3195  * a larval SA, which ends up looking a lot more like an add.
3196  */
3197 static int
3198 esp_update_sa(mblk_t *mp, keysock_in_t *ksi, int *diagnostic)
3199 {
3200 	sadb_address_t *dstext =
3201 	    (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_DST];
3202 	struct sockaddr_in *sin;
3203 
3204 	if (dstext == NULL) {
3205 		*diagnostic = SADB_X_DIAGNOSTIC_MISSING_DST;
3206 		return (EINVAL);
3207 	}
3208 
3209 	sin = (struct sockaddr_in *)(dstext + 1);
3210 	return (sadb_update_sa(mp, ksi,
3211 	    (sin->sin_family == AF_INET6) ? &esp_sadb.s_v6 : &esp_sadb.s_v4,
3212 	    diagnostic, esp_pfkey_q, esp_add_sa));
3213 }
3214 
3215 /*
3216  * Delete a security association.  This is REALLY likely to be code common to
3217  * both AH and ESP.  Find the association, then unlink it.
3218  */
3219 static int
3220 esp_del_sa(mblk_t *mp, keysock_in_t *ksi, int *diagnostic)
3221 {
3222 	sadb_sa_t *assoc = (sadb_sa_t *)ksi->ks_in_extv[SADB_EXT_SA];
3223 	sadb_address_t *dstext =
3224 	    (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_DST];
3225 	sadb_address_t *srcext =
3226 	    (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_SRC];
3227 	struct sockaddr_in *sin;
3228 
3229 	if (assoc == NULL) {
3230 		if (dstext != NULL) {
3231 			sin = (struct sockaddr_in *)(dstext + 1);
3232 		} else if (srcext != NULL) {
3233 			sin = (struct sockaddr_in *)(srcext + 1);
3234 		} else {
3235 			*diagnostic = SADB_X_DIAGNOSTIC_MISSING_SA;
3236 			return (EINVAL);
3237 		}
3238 		return (sadb_purge_sa(mp, ksi,
3239 		    (sin->sin_family == AF_INET6) ? &esp_sadb.s_v6 :
3240 		    &esp_sadb.s_v4,
3241 		    diagnostic, esp_pfkey_q, esp_sadb.s_ip_q));
3242 	}
3243 
3244 	return (sadb_del_sa(mp, ksi, &esp_sadb, diagnostic, esp_pfkey_q));
3245 }
3246 
3247 /*
3248  * Convert the entire contents of all of ESP's SA tables into PF_KEY SADB_DUMP
3249  * messages.
3250  */
3251 static void
3252 esp_dump(mblk_t *mp, keysock_in_t *ksi)
3253 {
3254 	int error;
3255 	sadb_msg_t *samsg;
3256 
3257 	/*
3258 	 * Dump each fanout, bailing if error is non-zero.
3259 	 */
3260 
3261 	error = sadb_dump(esp_pfkey_q, mp, ksi->ks_in_serial, &esp_sadb.s_v4);
3262 	if (error != 0)
3263 		goto bail;
3264 
3265 	error = sadb_dump(esp_pfkey_q, mp, ksi->ks_in_serial, &esp_sadb.s_v6);
3266 bail:
3267 	ASSERT(mp->b_cont != NULL);
3268 	samsg = (sadb_msg_t *)mp->b_cont->b_rptr;
3269 	samsg->sadb_msg_errno = (uint8_t)error;
3270 	sadb_pfkey_echo(esp_pfkey_q, mp, (sadb_msg_t *)mp->b_cont->b_rptr, ksi,
3271 	    NULL);
3272 }
3273 
3274 /*
3275  * ESP parsing of PF_KEY messages.  Keysock did most of the really silly
3276  * error cases.  What I receive is a fully-formed, syntactically legal
3277  * PF_KEY message.  I then need to check semantics...
3278  *
3279  * This code may become common to AH and ESP.  Stay tuned.
3280  *
3281  * I also make the assumption that db_ref's are cool.  If this assumption
3282  * is wrong, this means that someone other than keysock or me has been
3283  * mucking with PF_KEY messages.
3284  */
3285 static void
3286 esp_parse_pfkey(mblk_t *mp)
3287 {
3288 	mblk_t *msg = mp->b_cont;
3289 	sadb_msg_t *samsg;
3290 	keysock_in_t *ksi;
3291 	int error;
3292 	int diagnostic = SADB_X_DIAGNOSTIC_NONE;
3293 
3294 	ASSERT(msg != NULL);
3295 	samsg = (sadb_msg_t *)msg->b_rptr;
3296 	ksi = (keysock_in_t *)mp->b_rptr;
3297 
3298 	/*
3299 	 * If applicable, convert unspecified AF_INET6 to unspecified
3300 	 * AF_INET.
3301 	 */
3302 	sadb_srcaddrfix(ksi);
3303 
3304 	switch (samsg->sadb_msg_type) {
3305 	case SADB_ADD:
3306 		error = esp_add_sa(mp, ksi, &diagnostic);
3307 		if (error != 0) {
3308 			sadb_pfkey_error(esp_pfkey_q, mp, error, diagnostic,
3309 			    ksi->ks_in_serial);
3310 		}
3311 		/* else esp_add_sa() took care of things. */
3312 		break;
3313 	case SADB_DELETE:
3314 		error = esp_del_sa(mp, ksi, &diagnostic);
3315 		if (error != 0) {
3316 			sadb_pfkey_error(esp_pfkey_q, mp, error, diagnostic,
3317 			    ksi->ks_in_serial);
3318 		}
3319 		/* Else esp_del_sa() took care of things. */
3320 		break;
3321 	case SADB_GET:
3322 		error = sadb_get_sa(mp, ksi, &esp_sadb, &diagnostic,
3323 		    esp_pfkey_q);
3324 		if (error != 0) {
3325 			sadb_pfkey_error(esp_pfkey_q, mp, error, diagnostic,
3326 			    ksi->ks_in_serial);
3327 		}
3328 		/* Else sadb_get_sa() took care of things. */
3329 		break;
3330 	case SADB_FLUSH:
3331 		sadbp_flush(&esp_sadb);
3332 		sadb_pfkey_echo(esp_pfkey_q, mp, samsg, ksi, NULL);
3333 		break;
3334 	case SADB_REGISTER:
3335 		/*
3336 		 * Hmmm, let's do it!  Check for extensions (there should
3337 		 * be none), extract the fields, call esp_register_out(),
3338 		 * then either free or report an error.
3339 		 *
3340 		 * Keysock takes care of the PF_KEY bookkeeping for this.
3341 		 */
3342 		if (esp_register_out(samsg->sadb_msg_seq, samsg->sadb_msg_pid,
3343 		    ksi->ks_in_serial)) {
3344 			freemsg(mp);
3345 		} else {
3346 			/*
3347 			 * The only way this path is hit is if there is a
3348 			 * memory failure.  It will not return B_FALSE because
3349 			 * of a lack of esp_pfkey_q if I am in wput().
3350 			 */
3351 			sadb_pfkey_error(esp_pfkey_q, mp, ENOMEM, diagnostic,
3352 			    ksi->ks_in_serial);
3353 		}
3354 		break;
3355 	case SADB_UPDATE:
3356 		/*
3357 		 * Find a larval, if not there, find a full one and get
3358 		 * strict.
3359 		 */
3360 		error = esp_update_sa(mp, ksi, &diagnostic);
3361 		if (error != 0) {
3362 			sadb_pfkey_error(esp_pfkey_q, mp, error, diagnostic,
3363 			    ksi->ks_in_serial);
3364 		}
3365 		/* else esp_update_sa() took care of things. */
3366 		break;
3367 	case SADB_GETSPI:
3368 		/*
3369 		 * Reserve a new larval entry.
3370 		 */
3371 		esp_getspi(mp, ksi);
3372 		break;
3373 	case SADB_ACQUIRE:
3374 		/*
3375 		 * Find the larval and/or ACQUIRE record and kill it (them);
3376 		 * this is most likely an error.  Inbound ACQUIRE messages
3377 		 * should only have the base header.
3378 		 */
3379 		sadb_in_acquire(samsg, &esp_sadb, esp_pfkey_q);
3380 		freemsg(mp);
3381 		break;
3382 	case SADB_DUMP:
3383 		/*
3384 		 * Dump all entries.
3385 		 */
3386 		esp_dump(mp, ksi);
3387 		/* esp_dump will take care of the return message, etc. */
3388 		break;
3389 	case SADB_EXPIRE:
3390 		/* Should never reach me. */
3391 		sadb_pfkey_error(esp_pfkey_q, mp, EOPNOTSUPP, diagnostic,
3392 		    ksi->ks_in_serial);
3393 		break;
3394 	default:
3395 		sadb_pfkey_error(esp_pfkey_q, mp, EINVAL,
3396 		    SADB_X_DIAGNOSTIC_UNKNOWN_MSG, ksi->ks_in_serial);
3397 		break;
3398 	}
3399 }
3400 
3401 /*
3402  * Handle case where PF_KEY says it can't find a keysock for one of my
3403  * ACQUIRE messages.
3404  */
3405 static void
3406 esp_keysock_no_socket(mblk_t *mp)
3407 {
3408 	sadb_msg_t *samsg;
3409 	keysock_out_err_t *kse = (keysock_out_err_t *)mp->b_rptr;
3410 
3411 	if (mp->b_cont == NULL) {
3412 		freemsg(mp);
3413 		return;
3414 	}
3415 	samsg = (sadb_msg_t *)mp->b_cont->b_rptr;
3416 
3417 	/*
3418 	 * If keysock can't find any registered listeners, delete the acquire
3419 	 * immediately, and handle errors.
3420 	 */
3421 	if (samsg->sadb_msg_type == SADB_ACQUIRE) {
3422 		samsg->sadb_msg_errno = kse->ks_err_errno;
3423 		samsg->sadb_msg_len = SADB_8TO64(sizeof (*samsg));
3424 		/*
3425 		 * Use the write-side of the esp_pfkey_q, in case there is
3426 		 * no esp_sadb.s_ip_q.
3427 		 */
3428 		sadb_in_acquire(samsg, &esp_sadb, WR(esp_pfkey_q));
3429 	}
3430 
3431 	freemsg(mp);
3432 }
3433 
3434 /*
3435  * First-cut reality check for an inbound PF_KEY message.
3436  */
3437 static boolean_t
3438 esp_pfkey_reality_failures(mblk_t *mp, keysock_in_t *ksi)
3439 {
3440 	int diagnostic;
3441 
3442 	if (ksi->ks_in_extv[SADB_EXT_PROPOSAL] != NULL) {
3443 		diagnostic = SADB_X_DIAGNOSTIC_PROP_PRESENT;
3444 		goto badmsg;
3445 	}
3446 	if (ksi->ks_in_extv[SADB_EXT_SUPPORTED_AUTH] != NULL ||
3447 	    ksi->ks_in_extv[SADB_EXT_SUPPORTED_ENCRYPT] != NULL) {
3448 		diagnostic = SADB_X_DIAGNOSTIC_SUPP_PRESENT;
3449 		goto badmsg;
3450 	}
3451 	if (ksi->ks_in_srctype == KS_IN_ADDR_MBCAST) {
3452 		diagnostic = SADB_X_DIAGNOSTIC_BAD_SRC;
3453 		goto badmsg;
3454 	}
3455 	if (ksi->ks_in_dsttype == KS_IN_ADDR_UNSPEC) {
3456 		diagnostic = SADB_X_DIAGNOSTIC_BAD_DST;
3457 		goto badmsg;
3458 	}
3459 
3460 	return (B_FALSE);	/* False ==> no failures */
3461 
3462 badmsg:
3463 	sadb_pfkey_error(esp_pfkey_q, mp, EINVAL, diagnostic,
3464 	    ksi->ks_in_serial);
3465 	return (B_TRUE);	/* True ==> failures */
3466 }
3467 
3468 /*
3469  * ESP module write put routine.
3470  */
3471 static void
3472 ipsecesp_wput(queue_t *q, mblk_t *mp)
3473 {
3474 	ipsec_info_t *ii;
3475 	keysock_in_t *ksi;
3476 	int rc;
3477 	struct iocblk *iocp;
3478 
3479 	esp3dbg(("In esp_wput().\n"));
3480 
3481 	/* NOTE: Each case must take care of freeing or passing mp. */
3482 	switch (mp->b_datap->db_type) {
3483 	case M_CTL:
3484 		if ((mp->b_wptr - mp->b_rptr) < sizeof (ipsec_info_t)) {
3485 			/* Not big enough message. */
3486 			freemsg(mp);
3487 			break;
3488 		}
3489 		ii = (ipsec_info_t *)mp->b_rptr;
3490 
3491 		switch (ii->ipsec_info_type) {
3492 		case KEYSOCK_OUT_ERR:
3493 			esp1dbg(("Got KEYSOCK_OUT_ERR message.\n"));
3494 			esp_keysock_no_socket(mp);
3495 			break;
3496 		case KEYSOCK_IN:
3497 			ESP_BUMP_STAT(keysock_in);
3498 			esp3dbg(("Got KEYSOCK_IN message.\n"));
3499 			ksi = (keysock_in_t *)ii;
3500 			/*
3501 			 * Some common reality checks.
3502 			 */
3503 
3504 			if (esp_pfkey_reality_failures(mp, ksi))
3505 				return;
3506 
3507 			/*
3508 			 * Use 'q' instead of esp_sadb.s_ip_q, since
3509 			 * it's the write side already, and it'll go
3510 			 * down to IP.  Use esp_pfkey_q because we
3511 			 * wouldn't get here if that weren't set, and
3512 			 * the RD(q) has been done already.
3513 			 */
3514 			if (ksi->ks_in_srctype == KS_IN_ADDR_UNKNOWN) {
3515 				rc = sadb_addrcheck(q, esp_pfkey_q, mp,
3516 				    ksi->ks_in_extv[SADB_EXT_ADDRESS_SRC],
3517 				    ksi->ks_in_serial);
3518 				if (rc == KS_IN_ADDR_UNKNOWN)
3519 					return;
3520 				else
3521 					ksi->ks_in_srctype = rc;
3522 			}
3523 			if (ksi->ks_in_dsttype == KS_IN_ADDR_UNKNOWN) {
3524 				rc = sadb_addrcheck(q, esp_pfkey_q, mp,
3525 				    ksi->ks_in_extv[SADB_EXT_ADDRESS_DST],
3526 				    ksi->ks_in_serial);
3527 				if (rc == KS_IN_ADDR_UNKNOWN)
3528 					return;
3529 				else
3530 					ksi->ks_in_dsttype = rc;
3531 			}
3532 			/*
3533 			 * XXX Proxy may be a different address family.
3534 			 */
3535 			if (ksi->ks_in_proxytype == KS_IN_ADDR_UNKNOWN) {
3536 				rc = sadb_addrcheck(q, esp_pfkey_q, mp,
3537 				    ksi->ks_in_extv[SADB_EXT_ADDRESS_PROXY],
3538 				    ksi->ks_in_serial);
3539 				if (rc == KS_IN_ADDR_UNKNOWN)
3540 					return;
3541 				else
3542 					ksi->ks_in_proxytype = rc;
3543 			}
3544 			esp_parse_pfkey(mp);
3545 			break;
3546 		case KEYSOCK_HELLO:
3547 			sadb_keysock_hello(&esp_pfkey_q, q, mp,
3548 			    esp_ager, &esp_event, SADB_SATYPE_ESP);
3549 			break;
3550 		default:
3551 			esp2dbg(("Got M_CTL from above of 0x%x.\n",
3552 			    ii->ipsec_info_type));
3553 			freemsg(mp);
3554 			break;
3555 		}
3556 		break;
3557 	case M_IOCTL:
3558 		iocp = (struct iocblk *)mp->b_rptr;
3559 		switch (iocp->ioc_cmd) {
3560 		case ND_SET:
3561 		case ND_GET:
3562 			if (nd_getset(q, ipsecesp_g_nd, mp)) {
3563 				qreply(q, mp);
3564 				return;
3565 			} else {
3566 				iocp->ioc_error = ENOENT;
3567 			}
3568 			/* FALLTHRU */
3569 		default:
3570 			/* We really don't support any other ioctls, do we? */
3571 
3572 			/* Return EINVAL */
3573 			/* Return EINVAL, unless the ND code set ENOENT above. */
3574 				iocp->ioc_error = EINVAL;
3575 			iocp->ioc_count = 0;
3576 			mp->b_datap->db_type = M_IOCACK;
3577 			qreply(q, mp);
3578 			return;
3579 		}
3580 	default:
3581 		esp3dbg(("Got default message, type %d, passing to IP.\n",
3582 		    mp->b_datap->db_type));
3583 		putnext(q, mp);
3584 	}
3585 }
3586 
3587 /*
3588  * Process an outbound ESP packet that can be accelerated by an IPsec
3589  * hardware acceleration capable Provider.
3590  * The caller has already inserted and initialized the ESP header.
3591  * This function marks the packet's IPSEC_OUT M_CTL as accelerated,
3592  * and adds room at the end of the packet for the ICV if needed.
3593  *
3594  * On success returns IPSEC_STATUS_SUCCESS; on failure returns
3595  * IPSEC_STATUS_FAILED and frees the mblk chain ipsec_out.
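 *
 * The message handed in looks roughly like:
 *
 *	IPSEC_OUT (M_CTL) --> [ESP datagram mblk(s)] ( --> [ICV space] )
 *
 * where the trailing mblk is only allocated here if the last data
 * mblk does not have icv_len bytes of tailroom.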
3596  */
3597 static ipsec_status_t
3598 esp_outbound_accelerated(mblk_t *ipsec_out, uint_t icv_len)
3599 {
3600 	ipsec_out_t *io;
3601 	mblk_t *lastmp;
3602 
3603 	ESP_BUMP_STAT(out_accelerated);
3604 
3605 	io = (ipsec_out_t *)ipsec_out->b_rptr;
3606 
3607 	/* mark packet as being accelerated in IPSEC_OUT */
3608 	ASSERT(io->ipsec_out_accelerated == B_FALSE);
3609 	io->ipsec_out_accelerated = B_TRUE;
3610 
3611 	/*
3612 	 * add room at the end of the packet for the ICV if needed
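	 * (the hardware Provider will write the ICV it computes into
	 * this reserved space)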
3613 	 */
3614 	if (icv_len > 0) {
3615 		/* go to last mblk */
3616 		lastmp = ipsec_out;	/* For following while loop. */
3617 		do {
3618 			lastmp = lastmp->b_cont;
3619 		} while (lastmp->b_cont != NULL);
3620 
3621 		/* if not enough available room, allocate new mblk */
3622 		if ((lastmp->b_wptr + icv_len) > lastmp->b_datap->db_lim) {
3623 			lastmp->b_cont = allocb(icv_len, BPRI_HI);
3624 			if (lastmp->b_cont == NULL) {
3625 				ESP_BUMP_STAT(out_discards);
3626 				ip_drop_packet(ipsec_out, B_FALSE, NULL, NULL,
3627 				    &ipdrops_esp_nomem, &esp_dropper);
3628 				return (IPSEC_STATUS_FAILED);
3629 			}
3630 			lastmp = lastmp->b_cont;
3631 		}
3632 		lastmp->b_wptr += icv_len;
3633 	}
3634 
3635 	return (IPSEC_STATUS_SUCCESS);
3636 }
3637 
3638 /*
3639  * Process an inbound accelerated ESP packet.
3640  * On success returns IPSEC_STATUS_SUCCESS; on failure returns
3641  * IPSEC_STATUS_FAILED and frees the mblk chain ipsec_in.
3642  */
3643 static ipsec_status_t
3644 esp_inbound_accelerated(mblk_t *ipsec_in, mblk_t *data_mp, boolean_t isv4,
3645     ipsa_t *assoc)
3646 {
3647 	ipsec_in_t *ii;
3648 	mblk_t *hada_mp;
3649 	uint32_t icv_len = 0;
3650 	da_ipsec_t *hada;
3651 	ipha_t *ipha;
3652 	ip6_t *ip6h;
3653 	kstat_named_t *counter;
3654 
3655 	ESP_BUMP_STAT(in_accelerated);
3656 
3657 	ii = (ipsec_in_t *)ipsec_in->b_rptr;
3658 	hada_mp = ii->ipsec_in_da;
3659 	ASSERT(hada_mp != NULL);
3660 	hada = (da_ipsec_t *)hada_mp->b_rptr;
3661 
3662 	/*
3663 	 * We only support one level of decapsulation in hardware, so
3664 	 * nuke the pointer.
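	 * Any nested IPsec processing of this packet from here on is
	 * done in software.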
3665 	 */
3666 	ii->ipsec_in_da = NULL;
3667 	ii->ipsec_in_accelerated = B_FALSE;
3668 
3669 	if (assoc->ipsa_auth_alg != IPSA_AALG_NONE) {
3670 		/*
3671 		 * ESP with authentication. We expect the Provider to have
3672 		 * computed the ICV and placed it in the hardware acceleration
3673 		 * data attributes.
3674 		 *
3675 		 * Extract ICV length from attributes M_CTL and sanity check
3676 		 * its value. We allow the mblk to be smaller than da_ipsec_t
3677 		 * for a small ICV, as long as the entire ICV fits within the
3678 		 * mblk.
3679 		 *
3680 		 * Also ensure that the ICV length computed by the Provider
3681 		 * corresponds to the ICV length of the algorithm specified by
3682 		 * the SA.
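		 *
		 * Concretely, the check below requires that:
		 *	icv_len == ipsa_mac_len,
		 *	icv_len <= DA_ICV_MAX_LEN, and
		 *	MBLKL(hada_mp) >= sizeof (da_ipsec_t) -
		 *	    DA_ICV_MAX_LEN + icv_len.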
3683 		 */
3684 		icv_len = hada->da_icv_len;
3685 		if ((icv_len != assoc->ipsa_mac_len) ||
3686 		    (icv_len > DA_ICV_MAX_LEN) || (MBLKL(hada_mp) <
3687 			(sizeof (da_ipsec_t) - DA_ICV_MAX_LEN + icv_len))) {
3688 			esp0dbg(("esp_inbound_accelerated: "
3689 			    "ICV len (%u) incorrect or mblk too small (%u)\n",
3690 			    icv_len, (uint32_t)(MBLKL(hada_mp))));
3691 			counter = &ipdrops_esp_bad_auth;
3692 			goto esp_in_discard;
3693 		}
3694 	}
3695 
3696 	/* get pointers to IP header */
3697 	if (isv4) {
3698 		ipha = (ipha_t *)data_mp->b_rptr;
3699 	} else {
3700 		ip6h = (ip6_t *)data_mp->b_rptr;
3701 	}
3702 
3703 	/*
3704 	 * Compare ICV in ESP packet vs ICV computed by adapter.
3705 	 * We also remove the ICV from the end of the packet since
3706 	 * it will no longer be needed.
3707 	 *
3708 	 * Assume that esp_inbound() already ensured that the pkt
3709 	 * was in one mblk.
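	 * Backing b_wptr up by icv_len both trims the ICV off the
	 * datagram and leaves b_wptr pointing at the ICV bytes for the
	 * bcmp() below.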
3710 	 */
3711 	ASSERT(data_mp->b_cont == NULL);
3712 	data_mp->b_wptr -= icv_len;
3713 	/* adjust IP header */
3714 	if (isv4)
3715 		ipha->ipha_length = htons(ntohs(ipha->ipha_length) - icv_len);
3716 	else
3717 		ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) - icv_len);
3718 	if (icv_len && bcmp(hada->da_icv, data_mp->b_wptr, icv_len)) {
3719 		int af;
3720 		void *addr;
3721 
3722 		if (isv4) {
3723 			addr = &ipha->ipha_dst;
3724 			af = AF_INET;
3725 		} else {
3726 			addr = &ip6h->ip6_dst;
3727 			af = AF_INET6;
3728 		}
3729 
3730 		/*
3731 		 * Log the event.  Don't print to the console, to block a
3732 		 * potential denial-of-service attack.
3733 		 */
3734 		ESP_BUMP_STAT(bad_auth);
3735 		ipsec_assocfailure(info.mi_idnum, 0, 0, SL_ERROR | SL_WARN,
3736 		    "ESP Authentication failed spi %x, dst_addr %s",
3737 		    assoc->ipsa_spi, addr, af);
3738 		counter = &ipdrops_esp_bad_auth;
3739 		goto esp_in_discard;
3740 	}
3741 
3742 	esp3dbg(("esp_inbound_accelerated: ESP authentication succeeded, "
3743 	    "checking replay\n"));
3744 
3745 	ipsec_in->b_cont = data_mp;
3746 
3747 	/*
3748 	 * Remove ESP header and padding from packet.
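	 * On failure, esp_strip_header() points 'counter' at the
	 * appropriate drop statistic for the discard path below.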
3749 	 */
3750 	if (!esp_strip_header(data_mp, ii->ipsec_in_v4, assoc->ipsa_iv_len,
3751 		&counter)) {
3752 		esp1dbg(("esp_inbound_accelerated: "
3753 		    "esp_strip_header() failed\n"));
3754 		goto esp_in_discard;
3755 	}
3756 
3757 	freeb(hada_mp);
3758 
3759 	/*
3760 	 * Account for usage.
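	 * esp_age_bytes() charges this packet's size against the SA's
	 * byte lifetimes and returns B_FALSE once a hard limit has
	 * been exceeded.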
3761 	 */
3762 	if (!esp_age_bytes(assoc, msgdsize(data_mp), B_TRUE)) {
3763 		/* The ipsa has hit hard expiration, LOG and AUDIT. */
3764 		ESP_BUMP_STAT(bytes_expired);
3765 		IP_ESP_BUMP_STAT(in_discards);
3766 		ipsec_assocfailure(info.mi_idnum, 0, 0, SL_ERROR | SL_WARN,
3767 		    "ESP association 0x%x, dst %s had bytes expire.\n",
3768 		    assoc->ipsa_spi, assoc->ipsa_dstaddr, assoc->ipsa_addrfam);
3769 		ip_drop_packet(ipsec_in, B_TRUE, NULL, NULL,
3770 		    &ipdrops_esp_bytes_expire, &esp_dropper);
3771 		return (IPSEC_STATUS_FAILED);
3772 	}
3773 
3774 	/* done processing the packet */
3775 	return (IPSEC_STATUS_SUCCESS);
3776 
3777 esp_in_discard:
3778 	IP_ESP_BUMP_STAT(in_discards);
3779 	freeb(hada_mp);
3780 
3781 	ipsec_in->b_cont = data_mp;	/* For ip_drop_packet()'s sake... */
3782 	ip_drop_packet(ipsec_in, B_TRUE, NULL, NULL, counter, &esp_dropper);
3783 
3784 	return (IPSEC_STATUS_FAILED);
3785 }
3786 
3787 /*
3788  * Wrapper to allow IP to trigger an ESP association failure message
3789  * during inbound SA selection.
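 *
 * The failure is logged only if ipsecesp_log_unknown_spi is set; the
 * offending packet is dropped (and charged to ipdrops_esp_no_sa)
 * either way.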
3790  */
3791 void
3792 ipsecesp_in_assocfailure(mblk_t *mp, char level, ushort_t sl, char *fmt,
3793     uint32_t spi, void *addr, int af)
3794 {
3795 	if (ipsecesp_log_unknown_spi) {
3796 		ipsec_assocfailure(info.mi_idnum, 0, level, sl, fmt, spi,
3797 		    addr, af);
3798 	}
3799 
3800 	ip_drop_packet(mp, B_TRUE, NULL, NULL, &ipdrops_esp_no_sa,
3801 	    &esp_dropper);
3802 }
3803 
3804 /*
3805  * Initialize the ESP input and output processing functions.
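 * Only fill in the defaults (esp_outbound/esp_inbound) if nothing has
 * set these function pointers already.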
3806  */
3807 void
3808 ipsecesp_init_funcs(ipsa_t *sa)
3809 {
3810 	if (sa->ipsa_output_func == NULL)
3811 		sa->ipsa_output_func = esp_outbound;
3812 	if (sa->ipsa_input_func == NULL)
3813 		sa->ipsa_input_func = esp_inbound;
3814 }
3815