xref: /titanic_52/usr/src/uts/common/inet/ip/ipsecesp.c (revision 4bc0a2ef2b7ba50a7a717e7ddbf31472ad28e358)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/stream.h>
31 #include <sys/stropts.h>
32 #include <sys/errno.h>
33 #include <sys/strlog.h>
34 #include <sys/tihdr.h>
35 #include <sys/socket.h>
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/kmem.h>
39 #include <sys/sysmacros.h>
40 #include <sys/cmn_err.h>
41 #include <sys/vtrace.h>
42 #include <sys/debug.h>
43 #include <sys/atomic.h>
44 #include <sys/strsun.h>
45 #include <sys/random.h>
46 #include <netinet/in.h>
47 #include <net/if.h>
48 #include <netinet/ip6.h>
49 #include <net/pfkeyv2.h>
50 
51 #include <inet/common.h>
52 #include <inet/mi.h>
53 #include <inet/nd.h>
54 #include <inet/ip.h>
55 #include <inet/ip6.h>
56 #include <inet/sadb.h>
57 #include <inet/ipsec_info.h>
58 #include <inet/ipsec_impl.h>
59 #include <inet/ipsecesp.h>
60 #include <inet/ipdrop.h>
61 #include <inet/tcp.h>
62 #include <sys/kstat.h>
63 #include <sys/policy.h>
64 #include <sys/strsun.h>
65 #include <inet/udp_impl.h>
66 #include <sys/taskq.h>
67 
68 #include <sys/iphada.h>
69 
70 /* Packet dropper for ESP drops. */
71 static ipdropper_t esp_dropper;
72 
73 static kmutex_t ipsecesp_param_lock; /* Protects ipsecesp_param_arr[] below. */
74 /*
75  * Table of ND variables supported by ipsecesp. These are loaded into
76  * ipsecesp_g_nd in ipsecesp_ddi_init().
77  * All of these are alterable, within the min/max values given, at run time.
78  */
79 static	ipsecespparam_t	ipsecesp_param_arr[] = {
80 	/* min	max			value	name */
81 	{ 0,	3,			0,	"ipsecesp_debug"},
82 	{ 125,	32000, SADB_AGE_INTERVAL_DEFAULT, "ipsecesp_age_interval"},
83 	{ 1,	10,			1,	"ipsecesp_reap_delay"},
84 	{ 1,	SADB_MAX_REPLAY,	64,	"ipsecesp_replay_size"},
85 	{ 1,	300,			15,	"ipsecesp_acquire_timeout"},
86 	{ 1,	1800,			90,	"ipsecesp_larval_timeout"},
87 	/* Default lifetime values for ACQUIRE messages. */
88 	{ 0,	0xffffffffU,	0,	"ipsecesp_default_soft_bytes"},
89 	{ 0,	0xffffffffU,	0,	"ipsecesp_default_hard_bytes"},
90 	{ 0,	0xffffffffU,	24000,	"ipsecesp_default_soft_addtime"},
91 	{ 0,	0xffffffffU,	28800,	"ipsecesp_default_hard_addtime"},
92 	{ 0,	0xffffffffU,	0,	"ipsecesp_default_soft_usetime"},
93 	{ 0,	0xffffffffU,	0,	"ipsecesp_default_hard_usetime"},
94 	{ 0,	1,		0,	"ipsecesp_log_unknown_spi"},
95 	{ 0,	2,		1,	"ipsecesp_padding_check"},
96 };
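
/*
 * These tunables are exported through ndd; assuming the usual /dev/ipsecesp
 * node for this module, they can be inspected and changed at run time with
 * something like:
 *
 *	ndd /dev/ipsecesp ipsecesp_debug
 *	ndd -set /dev/ipsecesp ipsecesp_debug 1
 *
 * Values outside the min/max bounds above are rejected by
 * ipsecesp_param_set() with EINVAL.
 */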
97 #define	ipsecesp_debug		ipsecesp_param_arr[0].ipsecesp_param_value
98 #define	ipsecesp_age_interval	ipsecesp_param_arr[1].ipsecesp_param_value
99 #define	ipsecesp_age_int_max	ipsecesp_param_arr[1].ipsecesp_param_max
100 #define	ipsecesp_reap_delay	ipsecesp_param_arr[2].ipsecesp_param_value
101 #define	ipsecesp_replay_size	ipsecesp_param_arr[3].ipsecesp_param_value
102 #define	ipsecesp_acquire_timeout ipsecesp_param_arr[4].ipsecesp_param_value
103 #define	ipsecesp_larval_timeout ipsecesp_param_arr[5].ipsecesp_param_value
104 #define	ipsecesp_default_soft_bytes \
105 	ipsecesp_param_arr[6].ipsecesp_param_value
106 #define	ipsecesp_default_hard_bytes \
107 	ipsecesp_param_arr[7].ipsecesp_param_value
108 #define	ipsecesp_default_soft_addtime \
109 	ipsecesp_param_arr[8].ipsecesp_param_value
110 #define	ipsecesp_default_hard_addtime \
111 	ipsecesp_param_arr[9].ipsecesp_param_value
112 #define	ipsecesp_default_soft_usetime \
113 	ipsecesp_param_arr[10].ipsecesp_param_value
114 #define	ipsecesp_default_hard_usetime \
115 	ipsecesp_param_arr[11].ipsecesp_param_value
116 #define	ipsecesp_log_unknown_spi \
117 	ipsecesp_param_arr[12].ipsecesp_param_value
118 #define	ipsecesp_padding_check \
119 	ipsecesp_param_arr[13].ipsecesp_param_value
120 
121 #define	esp0dbg(a)	printf a
122 /* NOTE:  != 0 instead of > 0 so lint doesn't complain. */
123 #define	esp1dbg(a)	if (ipsecesp_debug != 0) printf a
124 #define	esp2dbg(a)	if (ipsecesp_debug > 1) printf a
125 #define	esp3dbg(a)	if (ipsecesp_debug > 2) printf a
126 
127 static IDP ipsecesp_g_nd;
128 
129 static int ipsecesp_open(queue_t *, dev_t *, int, int, cred_t *);
130 static int ipsecesp_close(queue_t *);
131 static void ipsecesp_rput(queue_t *, mblk_t *);
132 static void ipsecesp_wput(queue_t *, mblk_t *);
133 static void esp_send_acquire(ipsacq_t *, mblk_t *);
134 
135 static ipsec_status_t esp_outbound_accelerated(mblk_t *, uint_t);
136 static ipsec_status_t esp_inbound_accelerated(mblk_t *, mblk_t *,
137     boolean_t, ipsa_t *);
138 
139 static boolean_t esp_register_out(uint32_t, uint32_t, uint_t);
140 static boolean_t esp_strip_header(mblk_t *, boolean_t, uint32_t,
141     kstat_named_t **);
142 static ipsec_status_t esp_submit_req_inbound(mblk_t *, ipsa_t *, uint_t);
143 static ipsec_status_t esp_submit_req_outbound(mblk_t *, ipsa_t *, uchar_t *,
144     uint_t);
145 
146 static struct module_info info = {
147 	5137, "ipsecesp", 0, INFPSZ, 65536, 1024
148 };
149 
150 static struct qinit rinit = {
151 	(pfi_t)ipsecesp_rput, NULL, ipsecesp_open, ipsecesp_close, NULL, &info,
152 	NULL
153 };
154 
155 static struct qinit winit = {
156 	(pfi_t)ipsecesp_wput, NULL, ipsecesp_open, ipsecesp_close, NULL, &info,
157 	NULL
158 };
159 
160 struct streamtab ipsecespinfo = {
161 	&rinit, &winit, NULL, NULL
162 };
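
/*
 * ipsecesp is a STREAMS module:  it is pushed over IP so that it can bind
 * IPPROTO_ESP (see ipsecesp_open()), and it is also fed by keysock, which
 * passes PF_KEY messages down to ipsecesp_wput().
 */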
163 
164 /*
165  * Keysock instance of ESP.  "There can be only one." :)
166  * Use casptr() on this because I don't set it until KEYSOCK_HELLO comes down.
167  * Paired up with the esp_pfkey_q is the esp_event, which will age SAs.
168  */
169 static queue_t *esp_pfkey_q;
170 static timeout_id_t esp_event;
171 static taskq_t *esp_taskq;
172 
173 /*
174  * OTOH, this one is set at open/close, and I'm D_MTQPAIR for now.
175  *
176  * Question:	Do I need this, given that all instance's esps->esps_wq point
177  *		to IP?
178  *
179  * Answer:	Yes, because I need to know which queue is BOUND to
180  *		IPPROTO_ESP
181  */
182 static mblk_t *esp_ip_unbind;
183 
184 /*
185  * Stats.  This may eventually become a full-blown SNMP MIB once that spec
186  * stabilizes.
187  */
188 
189 typedef struct {
190 	kstat_named_t esp_stat_num_aalgs;
191 	kstat_named_t esp_stat_good_auth;
192 	kstat_named_t esp_stat_bad_auth;
193 	kstat_named_t esp_stat_bad_padding;
194 	kstat_named_t esp_stat_replay_failures;
195 	kstat_named_t esp_stat_replay_early_failures;
196 	kstat_named_t esp_stat_keysock_in;
197 	kstat_named_t esp_stat_out_requests;
198 	kstat_named_t esp_stat_acquire_requests;
199 	kstat_named_t esp_stat_bytes_expired;
200 	kstat_named_t esp_stat_out_discards;
201 	kstat_named_t esp_stat_in_accelerated;
202 	kstat_named_t esp_stat_out_accelerated;
203 	kstat_named_t esp_stat_noaccel;
204 	kstat_named_t esp_stat_crypto_sync;
205 	kstat_named_t esp_stat_crypto_async;
206 	kstat_named_t esp_stat_crypto_failures;
207 	kstat_named_t esp_stat_num_ealgs;
208 	kstat_named_t esp_stat_bad_decrypt;
209 } esp_kstats_t;
210 
211 uint32_t esp_hash_size = IPSEC_DEFAULT_HASH_SIZE;
212 #define	ESP_BUMP_STAT(x) (esp_kstats->esp_stat_ ## x).value.ui64++
213 #define	ESP_DEBUMP_STAT(x) (esp_kstats->esp_stat_ ## x).value.ui64--
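
/*
 * The counters above are exported as a named kstat (module "ipsecesp",
 * name "esp_stat"); from userland they can typically be viewed with
 * something like:
 *
 *	kstat -m ipsecesp -n esp_stat
 */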
214 
215 static kstat_t *esp_ksp;
216 static esp_kstats_t *esp_kstats;
217 
218 static int	esp_kstat_update(kstat_t *, int);
219 
220 static boolean_t
221 esp_kstat_init(void)
222 {
223 	esp_ksp = kstat_create("ipsecesp", 0, "esp_stat", "net",
224 	    KSTAT_TYPE_NAMED, sizeof (*esp_kstats) / sizeof (kstat_named_t),
225 	    KSTAT_FLAG_PERSISTENT);
226 
227 	if (esp_ksp == NULL)
228 		return (B_FALSE);
229 
230 	esp_kstats = esp_ksp->ks_data;
231 
232 	esp_ksp->ks_update = esp_kstat_update;
233 
234 #define	K64 KSTAT_DATA_UINT64
235 #define	KI(x) kstat_named_init(&(esp_kstats->esp_stat_##x), #x, K64)
236 
237 	KI(num_aalgs);
238 	KI(num_ealgs);
239 	KI(good_auth);
240 	KI(bad_auth);
241 	KI(bad_padding);
242 	KI(replay_failures);
243 	KI(replay_early_failures);
244 	KI(keysock_in);
245 	KI(out_requests);
246 	KI(acquire_requests);
247 	KI(bytes_expired);
248 	KI(out_discards);
249 	KI(in_accelerated);
250 	KI(out_accelerated);
251 	KI(noaccel);
252 	KI(crypto_sync);
253 	KI(crypto_async);
254 	KI(crypto_failures);
255 	KI(bad_decrypt);
256 
257 #undef KI
258 #undef K64
259 
260 	kstat_install(esp_ksp);
261 
262 	return (B_TRUE);
263 }
264 
265 static int
266 esp_kstat_update(kstat_t *kp, int rw)
267 {
268 	esp_kstats_t *ekp;
269 
270 	if ((kp == NULL) || (kp->ks_data == NULL))
271 		return (EIO);
272 
273 	if (rw == KSTAT_WRITE)
274 		return (EACCES);
275 
276 	ASSERT(kp == esp_ksp);
277 	ekp = (esp_kstats_t *)kp->ks_data;
278 	ASSERT(ekp == esp_kstats);
279 
280 	mutex_enter(&alg_lock);
281 	ekp->esp_stat_num_aalgs.value.ui64 = ipsec_nalgs[IPSEC_ALG_AUTH];
282 	ekp->esp_stat_num_ealgs.value.ui64 = ipsec_nalgs[IPSEC_ALG_ENCR];
283 	mutex_exit(&alg_lock);
284 
285 	return (0);
286 }
287 
288 #ifdef DEBUG
289 /*
290  * Debug routine, useful to see pre-encryption data.
291  */
292 static char *
293 dump_msg(mblk_t *mp)
294 {
295 	char tmp_str[3], tmp_line[256];
296 
297 	while (mp != NULL) {
298 		unsigned char *ptr;
299 
300 		printf("mblk address 0x%p, length %ld, db_ref %d "
301 		    "type %d, base 0x%p, lim 0x%p\n",
302 		    (void *) mp, (long)(mp->b_wptr - mp->b_rptr),
303 		    mp->b_datap->db_ref, mp->b_datap->db_type,
304 		    (void *)mp->b_datap->db_base, (void *)mp->b_datap->db_lim);
305 		ptr = mp->b_rptr;
306 
307 		tmp_line[0] = '\0';
308 		while (ptr < mp->b_wptr) {
309 			uint_t diff;
310 
311 			diff = (ptr - mp->b_rptr);
312 			if (!(diff & 0x1f)) {
313 				if (strlen(tmp_line) > 0) {
314 					printf("bytes: %s\n", tmp_line);
315 					tmp_line[0] = '\0';
316 				}
317 			}
318 			if (!(diff & 0x3))
319 				(void) strcat(tmp_line, " ");
320 			(void) sprintf(tmp_str, "%02x", *ptr);
321 			(void) strcat(tmp_line, tmp_str);
322 			ptr++;
323 		}
324 		if (strlen(tmp_line) > 0)
325 			printf("bytes: %s\n", tmp_line);
326 
327 		mp = mp->b_cont;
328 	}
329 
330 	return ("\n");
331 }
332 
333 #else /* DEBUG */
334 static char *
335 dump_msg(mblk_t *mp)
336 {
337 	printf("Find value of mp %p.\n", mp);
338 	return ("\n");
339 }
340 #endif /* DEBUG */
341 
342 /*
343  * Don't have to lock age_interval, as only one thread will access it at
344  * a time, because I control the one function that does so, via timeout().
345  */
346 /* ARGSUSED */
347 static void
348 esp_ager(void *ignoreme)
349 {
350 	hrtime_t begin = gethrtime();
351 
352 	sadb_ager(&esp_sadb.s_v4, esp_pfkey_q, esp_sadb.s_ip_q,
353 	    ipsecesp_reap_delay);
354 	sadb_ager(&esp_sadb.s_v6, esp_pfkey_q, esp_sadb.s_ip_q,
355 	    ipsecesp_reap_delay);
356 
357 	esp_event = sadb_retimeout(begin, esp_pfkey_q, esp_ager,
358 	    &(ipsecesp_age_interval), ipsecesp_age_int_max, info.mi_idnum);
359 }
360 
361 /*
362  * Get an ESP NDD parameter.
363  */
364 /* ARGSUSED */
365 static int
366 ipsecesp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
371 {
372 	ipsecespparam_t	*ipsecesppa = (ipsecespparam_t *)cp;
373 	uint_t value;
374 
375 	mutex_enter(&ipsecesp_param_lock);
376 	value = ipsecesppa->ipsecesp_param_value;
377 	mutex_exit(&ipsecesp_param_lock);
378 
379 	(void) mi_mpprintf(mp, "%u", value);
380 	return (0);
381 }
382 
383 /*
384  * This routine sets an NDD variable in an ipsecespparam_t structure.
385  */
386 /* ARGSUSED */
387 static int
388 ipsecesp_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr)
394 {
395 	ulong_t	new_value;
396 	ipsecespparam_t	*ipsecesppa = (ipsecespparam_t *)cp;
397 
398 	/*
399 	 * Fail the request if the new value does not lie within the
400 	 * required bounds.
401 	 */
402 	if (ddi_strtoul(value, NULL, 10, &new_value) != 0 ||
403 	    new_value < ipsecesppa->ipsecesp_param_min ||
404 	    new_value > ipsecesppa->ipsecesp_param_max) {
405 		return (EINVAL);
406 	}
407 
408 	/* Set the new value */
409 	mutex_enter(&ipsecesp_param_lock);
410 	ipsecesppa->ipsecesp_param_value = new_value;
411 	mutex_exit(&ipsecesp_param_lock);
412 	return (0);
413 }
414 
415 /*
416  * Using lifetime NDD variables, fill in an extended combination's
417  * lifetime information.
418  */
419 void
420 ipsecesp_fill_defs(sadb_x_ecomb_t *ecomb)
421 {
422 	ecomb->sadb_x_ecomb_soft_bytes = ipsecesp_default_soft_bytes;
423 	ecomb->sadb_x_ecomb_hard_bytes = ipsecesp_default_hard_bytes;
424 	ecomb->sadb_x_ecomb_soft_addtime = ipsecesp_default_soft_addtime;
425 	ecomb->sadb_x_ecomb_hard_addtime = ipsecesp_default_hard_addtime;
426 	ecomb->sadb_x_ecomb_soft_usetime = ipsecesp_default_soft_usetime;
427 	ecomb->sadb_x_ecomb_hard_usetime = ipsecesp_default_hard_usetime;
428 }
429 
430 /*
431  * Initialize things for ESP at module load time.
432  */
433 boolean_t
434 ipsecesp_ddi_init(void)
435 {
436 	int count;
437 	ipsecespparam_t *espp = ipsecesp_param_arr;
438 
439 	for (count = A_CNT(ipsecesp_param_arr); count-- > 0; espp++) {
440 		if (espp->ipsecesp_param_name != NULL &&
441 		    espp->ipsecesp_param_name[0]) {
442 			if (!nd_load(&ipsecesp_g_nd, espp->ipsecesp_param_name,
443 			    ipsecesp_param_get, ipsecesp_param_set,
444 			    (caddr_t)espp)) {
445 				nd_free(&ipsecesp_g_nd);
446 				return (B_FALSE);
447 			}
448 		}
449 	}
450 
451 	if (!esp_kstat_init()) {
452 		nd_free(&ipsecesp_g_nd);
453 		return (B_FALSE);
454 	}
455 
456 	esp_sadb.s_acquire_timeout = &ipsecesp_acquire_timeout;
457 	esp_sadb.s_acqfn = esp_send_acquire;
458 	sadbp_init("ESP", &esp_sadb, SADB_SATYPE_ESP, esp_hash_size);
459 
460 	esp_taskq = taskq_create("esp_taskq", 1, minclsyspri,
461 	    IPSEC_TASKQ_MIN, IPSEC_TASKQ_MAX, 0);
462 
463 	mutex_init(&ipsecesp_param_lock, NULL, MUTEX_DEFAULT, 0);
464 
465 	ip_drop_register(&esp_dropper, "IPsec ESP");
466 
467 	return (B_TRUE);
468 }
469 
470 /*
471  * Destroy things for ESP at module unload time.
472  */
473 void
474 ipsecesp_ddi_destroy(void)
475 {
476 	esp1dbg(("In ipsecesp_ddi_destroy.\n"));
477 
478 	sadbp_destroy(&esp_sadb);
479 	ip_drop_unregister(&esp_dropper);
480 	taskq_destroy(esp_taskq);
481 	mutex_destroy(&ipsecesp_param_lock);
482 	nd_free(&ipsecesp_g_nd);
483 	kstat_delete(esp_ksp);
484 }
485 
486 /*
487  * ESP module open routine.
488  */
489 /* ARGSUSED */
490 static int
491 ipsecesp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
492 {
493 	if (secpolicy_net_config(credp, B_FALSE) != 0) {
494 		esp1dbg(("Non-privileged user trying to open ipsecesp.\n"));
495 		return (EPERM);
496 	}
497 
498 	if (q->q_ptr != NULL)
499 		return (0);  /* Re-open of an already open instance. */
500 
501 	if (sflag != MODOPEN)
502 		return (EINVAL);
503 
504 	/*
505 	 * ASSUMPTIONS (because I'm MT_OCEXCL):
506 	 *
507 	 *	* I'm being pushed on top of IP for all my opens (incl. #1).
508 	 *	* Only ipsecesp_open() can write into esp_sadb.s_ip_q.
509 	 *	* Because of this, I can check lazily for esp_sadb.s_ip_q.
510 	 *
511 	 *  If these assumptions are wrong, I'm in BIG trouble...
512 	 */
513 
514 	q->q_ptr = q; /* just so I know I'm open */
515 
516 	if (esp_sadb.s_ip_q == NULL) {
517 		struct T_unbind_req *tur;
518 
519 		esp_sadb.s_ip_q = WR(q);
520 		/* Allocate an unbind... */
521 		esp_ip_unbind = allocb(sizeof (struct T_unbind_req), BPRI_HI);
522 
523 		/*
524 		 * Send down T_BIND_REQ to bind IPPROTO_ESP.
525 		 * Handle the ACK here in ESP.
526 		 */
527 		qprocson(q);
528 		if (esp_ip_unbind == NULL ||
529 		    !sadb_t_bind_req(esp_sadb.s_ip_q, IPPROTO_ESP)) {
530 			if (esp_ip_unbind != NULL) {
531 				freeb(esp_ip_unbind);
532 				esp_ip_unbind = NULL;
533 			}
534 			q->q_ptr = NULL;
535 			return (ENOMEM);
536 		}
537 
538 		esp_ip_unbind->b_datap->db_type = M_PROTO;
539 		tur = (struct T_unbind_req *)esp_ip_unbind->b_rptr;
540 		tur->PRIM_type = T_UNBIND_REQ;
541 	} else {
542 		qprocson(q);
543 	}
544 
545 	/*
546 	 * For now, there's not much I can do.  I'll be getting a message
547 	 * passed down to me from keysock (in my wput), and a T_BIND_ACK
548 	 * up from IP (in my rput).
549 	 */
550 
551 	return (0);
552 }
553 
554 /*
555  * ESP module close routine.
556  */
557 static int
558 ipsecesp_close(queue_t *q)
559 {
560 	/*
561 	 * If esp_sadb.s_ip_q is attached to this instance, send a
562 	 * T_UNBIND_REQ to IP for the instance before doing
563 	 * a qprocsoff().
564 	 */
565 	if (WR(q) == esp_sadb.s_ip_q && esp_ip_unbind != NULL) {
566 		putnext(WR(q), esp_ip_unbind);
567 		esp_ip_unbind = NULL;
568 	}
569 
570 	/*
571 	 * Clean up q_ptr, if needed.
572 	 */
573 	qprocsoff(q);
574 
575 	/* Keysock queue check is safe, because of OCEXCL perimeter. */
576 
577 	if (q == esp_pfkey_q) {
578 		esp0dbg(("ipsecesp_close:  Ummm... keysock is closing ESP.\n"));
579 		esp_pfkey_q = NULL;
580 		/* Detach qtimeouts. */
581 		(void) quntimeout(q, esp_event);
582 	}
583 
584 	if (WR(q) == esp_sadb.s_ip_q) {
585 		/*
586 		 * If the esp_sadb.s_ip_q is attached to this instance, find
587 		 * another.  The OCEXCL outer perimeter helps us here.
588 		 */
589 		esp_sadb.s_ip_q = NULL;
590 
591 		/*
592 		 * Find a replacement queue for esp_sadb.s_ip_q.
593 		 */
594 		if (esp_pfkey_q != NULL && esp_pfkey_q != RD(q)) {
595 			/*
596 			 * See if we can use the pfkey_q.
597 			 */
598 			esp_sadb.s_ip_q = WR(esp_pfkey_q);
599 		}
600 
601 		if (esp_sadb.s_ip_q == NULL ||
602 		    !sadb_t_bind_req(esp_sadb.s_ip_q, IPPROTO_ESP)) {
603 			esp1dbg(("ipsecesp: Can't reassign ip_q.\n"));
604 			esp_sadb.s_ip_q = NULL;
605 		} else {
606 			esp_ip_unbind = allocb(sizeof (struct T_unbind_req),
607 			    BPRI_HI);
608 
609 			if (esp_ip_unbind != NULL) {
610 				struct T_unbind_req *tur;
611 
612 				esp_ip_unbind->b_datap->db_type = M_PROTO;
613 				tur = (struct T_unbind_req *)
614 				    esp_ip_unbind->b_rptr;
615 				tur->PRIM_type = T_UNBIND_REQ;
616 			}
617 			/* If it's NULL, I can't do much here. */
618 		}
619 	}
620 
621 	return (0);
622 }
623 
624 /*
625  * Add a number of bytes to what the SA has protected so far.  Return
626  * B_TRUE if the SA can still protect that many bytes.
627  *
628  * Caller must REFRELE the passed-in assoc.  This function must REFRELE
629  * any obtained peer SA.
630  */
631 static boolean_t
632 esp_age_bytes(ipsa_t *assoc, uint64_t bytes, boolean_t inbound)
633 {
634 	ipsa_t *inassoc, *outassoc;
635 	isaf_t *bucket;
636 	boolean_t inrc, outrc, isv6;
637 	sadb_t *sp;
638 	int outhash;
639 
640 	/* No peer?  No problem! */
641 	if (!assoc->ipsa_haspeer) {
642 		return (sadb_age_bytes(esp_pfkey_q, assoc, bytes,
643 		    B_TRUE));
644 	}
645 
646 	/*
647 	 * Otherwise, we want to grab both the original assoc and its peer.
648 	 * There might be a race for this, but if it's a real race, two
649 	 * expire messages may occur.  We limit this by only sending the
650 	 * expire message on one of the peers, we'll pick the inbound
651 	 * arbitrarily.
652 	 *
653 	 * If we need tight synchronization on the peer SA, then we need to
654 	 * reconsider.
655 	 */
656 
657 	/* Use address length to select IPv6/IPv4 */
658 	isv6 = (assoc->ipsa_addrfam == AF_INET6);
659 	sp = isv6 ? &esp_sadb.s_v6 : &esp_sadb.s_v4;
660 
661 	if (inbound) {
662 		inassoc = assoc;
663 		if (isv6) {
664 			outhash = OUTBOUND_HASH_V6(sp, *((in6_addr_t *)
665 			    &inassoc->ipsa_dstaddr));
666 		} else {
667 			outhash = OUTBOUND_HASH_V4(sp, *((ipaddr_t *)
668 				&inassoc->ipsa_dstaddr));
669 		}
670 		bucket = &sp->sdb_of[outhash];
671 		mutex_enter(&bucket->isaf_lock);
672 		outassoc = ipsec_getassocbyspi(bucket, inassoc->ipsa_spi,
673 		    inassoc->ipsa_srcaddr, inassoc->ipsa_dstaddr,
674 		    inassoc->ipsa_addrfam);
675 		mutex_exit(&bucket->isaf_lock);
676 		if (outassoc == NULL) {
677 			/* Q: Do we wish to set haspeer == B_FALSE? */
678 			esp0dbg(("esp_age_bytes: "
679 			    "can't find peer for inbound.\n"));
680 			return (sadb_age_bytes(esp_pfkey_q, inassoc,
681 			    bytes, B_TRUE));
682 		}
683 	} else {
684 		outassoc = assoc;
685 		bucket = INBOUND_BUCKET(sp, outassoc->ipsa_spi);
686 		mutex_enter(&bucket->isaf_lock);
687 		inassoc = ipsec_getassocbyspi(bucket, outassoc->ipsa_spi,
688 		    outassoc->ipsa_srcaddr, outassoc->ipsa_dstaddr,
689 		    outassoc->ipsa_addrfam);
690 		mutex_exit(&bucket->isaf_lock);
691 		if (inassoc == NULL) {
692 			/* Q: Do we wish to set haspeer == B_FALSE? */
693 			esp0dbg(("esp_age_bytes: "
694 			    "can't find peer for outbound.\n"));
695 			return (sadb_age_bytes(esp_pfkey_q, outassoc,
696 			    bytes, B_TRUE));
697 		}
698 	}
699 
700 	inrc = sadb_age_bytes(esp_pfkey_q, inassoc, bytes, B_TRUE);
701 	outrc = sadb_age_bytes(esp_pfkey_q, outassoc, bytes, B_FALSE);
702 
703 	/*
704 	 * REFRELE any peer SA.
705 	 *
706 	 * Because of the multi-line macro nature of IPSA_REFRELE, keep
707 	 * them in { }.
708 	 */
709 	if (inbound) {
710 		IPSA_REFRELE(outassoc);
711 	} else {
712 		IPSA_REFRELE(inassoc);
713 	}
714 
715 	return (inrc && outrc);
716 }
717 
718 /*
719  * Do incoming NAT-T manipulations for packet.
720  * Do incoming NAT-T manipulations for the packet.
721 static ipsec_status_t
722 esp_fix_natt_checksums(mblk_t *data_mp, ipsa_t *assoc)
723 {
724 	ipha_t *ipha = (ipha_t *)data_mp->b_rptr;
725 	tcpha_t *tcph;
726 	udpha_t *udpha;
727 	/* Initialize to our inbound cksum adjustment... */
728 	uint32_t sum = assoc->ipsa_inbound_cksum;
729 
730 	switch (ipha->ipha_protocol) {
731 	case IPPROTO_TCP:
732 		tcph = (tcpha_t *)(data_mp->b_rptr +
733 		    IPH_HDR_LENGTH(ipha));
734 
735 #define	DOWN_SUM(x) (x) = ((x) & 0xFFFF) + ((x) >> 16)
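		/*
		 * Incremental one's-complement checksum update (in the
		 * spirit of RFC 1624):  add the complement of the old
		 * transport checksum to the stored adjustment, fold the
		 * carries back into the low 16 bits twice, and store the
		 * complement of the result.
		 */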
736 		sum += ~ntohs(tcph->tha_sum) & 0xFFFF;
737 		DOWN_SUM(sum);
738 		DOWN_SUM(sum);
739 		tcph->tha_sum = ~htons(sum);
740 		break;
741 	case IPPROTO_UDP:
742 		udpha = (udpha_t *)(data_mp->b_rptr + IPH_HDR_LENGTH(ipha));
743 
744 		if (udpha->uha_checksum != 0) {
745 			/* Adjust if the inbound one was not zero. */
746 			sum += ~ntohs(udpha->uha_checksum) & 0xFFFF;
747 			DOWN_SUM(sum);
748 			DOWN_SUM(sum);
749 			udpha->uha_checksum = ~htons(sum);
750 			if (udpha->uha_checksum == 0)
751 				udpha->uha_checksum = 0xFFFF;
752 		}
753 #undef DOWN_SUM
754 		break;
755 	case IPPROTO_IP:
756 		/*
757 		 * This case is only an issue for self-encapsulated
758 		 * packets.  So for now, fall through.
759 		 */
760 		break;
761 	}
762 	return (IPSEC_STATUS_SUCCESS);
763 }
764 
765 
766 /*
767  * Strip ESP header and fix IP header
768  * Returns B_TRUE on success, B_FALSE if an error occurred.
769  */
770 static boolean_t
771 esp_strip_header(mblk_t *data_mp, boolean_t isv4, uint32_t ivlen,
772     kstat_named_t **counter)
773 {
774 	ipha_t *ipha;
775 	ip6_t *ip6h;
776 	uint_t divpoint;
777 	mblk_t *scratch;
778 	uint8_t nexthdr, padlen;
779 	uint8_t lastpad;
780 
781 	/*
782 	 * Strip ESP data and fix IP header.
783 	 *
784 	 * XXX In case the beginning of esp_inbound() changes to not do a
785 	 * pullup, this part of the code can remain unchanged.
786 	 */
787 	if (isv4) {
788 		ASSERT((data_mp->b_wptr - data_mp->b_rptr) >= sizeof (ipha_t));
789 		ipha = (ipha_t *)data_mp->b_rptr;
790 		ASSERT((data_mp->b_wptr - data_mp->b_rptr) >= sizeof (esph_t) +
791 		    IPH_HDR_LENGTH(ipha));
792 		divpoint = IPH_HDR_LENGTH(ipha);
793 	} else {
794 		ASSERT((data_mp->b_wptr - data_mp->b_rptr) >= sizeof (ip6_t));
795 		ip6h = (ip6_t *)data_mp->b_rptr;
796 		divpoint = ip_hdr_length_v6(data_mp, ip6h);
797 	}
798 
799 	scratch = data_mp;
800 	while (scratch->b_cont != NULL)
801 		scratch = scratch->b_cont;
802 
803 	ASSERT((scratch->b_wptr - scratch->b_rptr) >= 3);
804 
805 	/*
806 	 * "Next header" and padding length are the last two bytes in the
807 	 * ESP-protected datagram, thus the explicit - 1 and - 2.
808 	 * lastpad is the last byte of the padding, which can be used for
809 	 * a quick check to see if the padding is correct.
810 	 */
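	/*
	 * For reference, the tail of the (already decrypted) datagram is,
	 * per the ESP spec (RFC 2406):
	 *
	 *	... payload ... | padding | pad length | next header
	 *
	 * Any ICV has already been stripped by the caller, so b_wptr points
	 * just past the next-header byte here.
	 */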
811 	nexthdr = *(scratch->b_wptr - 1);
812 	padlen = *(scratch->b_wptr - 2);
813 	lastpad = *(scratch->b_wptr - 3);
814 
815 	if (isv4) {
816 		/* Fix part of the IP header. */
817 		ipha->ipha_protocol = nexthdr;
818 		/*
819 		 * Reality check the padlen.  The explicit - 2 is for the
820 		 * padding length and the next-header bytes.
821 		 */
822 		if (padlen >= ntohs(ipha->ipha_length) - sizeof (ipha_t) - 2 -
823 		    sizeof (esph_t) - ivlen) {
824 			ESP_BUMP_STAT(bad_decrypt);
825 			ipsec_rl_strlog(info.mi_idnum, 0, 0, SL_ERROR | SL_WARN,
826 			    "Possibly corrupt ESP packet.");
827 			esp1dbg(("padlen (%d) is greater than:\n", padlen));
828 			esp1dbg(("pkt len(%d) - ip hdr - esp hdr - ivlen(%d) "
829 			    "= %d.\n", ntohs(ipha->ipha_length), ivlen,
830 			    (int)(ntohs(ipha->ipha_length) - sizeof (ipha_t) -
831 				2 - sizeof (esph_t) - ivlen)));
832 			*counter = &ipdrops_esp_bad_padlen;
833 			return (B_FALSE);
834 		}
835 
836 		/*
837 		 * Fix the rest of the header.  The explicit - 2 is for the
838 		 * padding length and the next-header bytes.
839 		 */
840 		ipha->ipha_length = htons(ntohs(ipha->ipha_length) - padlen -
841 		    2 - sizeof (esph_t) - ivlen);
842 		ipha->ipha_hdr_checksum = 0;
843 		ipha->ipha_hdr_checksum = (uint16_t)ip_csum_hdr(ipha);
844 	} else {
845 		if (ip6h->ip6_nxt == IPPROTO_ESP) {
846 			ip6h->ip6_nxt = nexthdr;
847 		} else {
848 			ip6_pkt_t ipp;
849 
850 			bzero(&ipp, sizeof (ipp));
851 			(void) ip_find_hdr_v6(data_mp, ip6h, &ipp, NULL);
852 			if (ipp.ipp_dstopts != NULL) {
853 				ipp.ipp_dstopts->ip6d_nxt = nexthdr;
854 			} else if (ipp.ipp_rthdr != NULL) {
855 				ipp.ipp_rthdr->ip6r_nxt = nexthdr;
856 			} else if (ipp.ipp_hopopts != NULL) {
857 				ipp.ipp_hopopts->ip6h_nxt = nexthdr;
858 			} else {
859 				/* Panic a DEBUG kernel. */
860 				ASSERT(ipp.ipp_hopopts != NULL);
861 				/* Otherwise, pretend it's IP + ESP. */
862 				cmn_err(CE_WARN, "ESP IPv6 headers wrong.\n");
863 				ip6h->ip6_nxt = nexthdr;
864 			}
865 		}
866 
867 		if (padlen >= ntohs(ip6h->ip6_plen) - 2 - sizeof (esph_t) -
868 		    ivlen) {
869 			ESP_BUMP_STAT(bad_decrypt);
870 			ipsec_rl_strlog(info.mi_idnum, 0, 0, SL_ERROR | SL_WARN,
871 			    "Possibly corrupt ESP packet.");
872 			esp1dbg(("padlen (%d) is greater than:\n", padlen));
873 			esp1dbg(("pkt len(%u) - ip hdr - esp hdr - ivlen(%d)"
874 			    " = %u.\n", (unsigned)(ntohs(ip6h->ip6_plen)
875 				+ sizeof (ip6_t)), ivlen,
876 			    (unsigned)(ntohs(ip6h->ip6_plen) - 2 -
877 				sizeof (esph_t) - ivlen)));
878 			*counter = &ipdrops_esp_bad_padlen;
879 			return (B_FALSE);
880 		}
881 
882 
883 		/*
884 		 * Fix the rest of the header.  The explicit - 2 is for the
885 		 * padding length and the next-header bytes.  IPv6 is nice,
886 		 * because there's no hdr checksum!
887 		 */
888 		ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) - padlen -
889 		    2 - sizeof (esph_t) - ivlen);
890 	}
891 
892 	if (ipsecesp_padding_check > 0 &&
893 		padlen != lastpad && padlen != 0) {
894 		ipsec_rl_strlog(info.mi_idnum, 0, 0, SL_ERROR | SL_WARN,
895 		    "Possibly corrupt ESP packet.");
896 		esp1dbg(("lastpad (%d) not equal to padlen (%d):\n",
897 		    lastpad, padlen));
898 		ESP_BUMP_STAT(bad_padding);
899 		*counter = &ipdrops_esp_bad_padding;
900 		return (B_FALSE);
901 	}
902 
903 	if (ipsecesp_padding_check > 1) {
904 		uint8_t *last = (uint8_t *)(scratch->b_wptr - 3);
905 		uint8_t lastval = *last;
906 
907 		/*
908 		 * this assert may have to become an if
909 		 * and a pullup if we start accepting
910 		 * multi-dblk mblks. Any packet here will
911 		 * have been pulled up in esp_inbound.
912 		 */
913 		ASSERT(MBLKL(scratch) >= lastval + 3);
914 
915 		while (lastval != 0) {
916 			if (lastval != *last) {
917 				ipsec_rl_strlog(info.mi_idnum, 0, 0,
918 				    SL_ERROR | SL_WARN,
919 				    "Possibly corrupt ESP packet.");
920 				esp1dbg(("padding not in correct"
921 				    " format:\n"));
922 				ESP_BUMP_STAT(bad_padding);
923 				*counter = &ipdrops_esp_bad_padding;
924 				return (B_FALSE);
925 			}
926 			lastval--; last--;
927 		}
928 	}
929 
930 	/* Trim off the padding. */
931 	ASSERT(data_mp->b_cont == NULL);
932 	data_mp->b_wptr -= (padlen + 2);
933 
934 	/*
935 	 * Remove the ESP header.
936 	 *
937 	 * The above assertions about data_mp's size will make this work.
938 	 *
939 	 * XXX  Question:  If I send up and get back a contiguous mblk,
940 	 * would it be quicker to bcopy over, or keep doing the dupb stuff?
941 	 * I go with copying for now.
942 	 */
943 
944 	if (IS_P2ALIGNED(data_mp->b_rptr, sizeof (uint32_t)) &&
945 	    IS_P2ALIGNED(ivlen, sizeof (uint32_t))) {
946 		uint8_t *start = data_mp->b_rptr;
947 		uint32_t *src, *dst;
948 
949 		src = (uint32_t *)(start + divpoint);
950 		dst = (uint32_t *)(start + divpoint + sizeof (esph_t) + ivlen);
951 
952 		ASSERT(IS_P2ALIGNED(dst, sizeof (uint32_t)) &&
953 		    IS_P2ALIGNED(src, sizeof (uint32_t)));
954 
955 		do {
956 			src--;
957 			dst--;
958 			*dst = *src;
959 		} while (src != (uint32_t *)start);
960 
961 		data_mp->b_rptr = (uchar_t *)dst;
962 	} else {
963 		uint8_t *start = data_mp->b_rptr;
964 		uint8_t *src, *dst;
965 
966 		src = start + divpoint;
967 		dst = src + sizeof (esph_t) + ivlen;
968 
969 		do {
970 			src--;
971 			dst--;
972 			*dst = *src;
973 		} while (src != start);
974 
975 		data_mp->b_rptr = dst;
976 	}
977 
978 	esp2dbg(("data_mp after inbound ESP adjustment:\n"));
979 	esp2dbg((dump_msg(data_mp)));
980 
981 	return (B_TRUE);
982 }
983 
984 /*
985  * Updating use times can be tricky business if the ipsa_haspeer flag is
986  * set.  This function is called once in an SA's lifetime.
987  *
988  * Caller has to REFRELE "assoc" which is passed in.  This function has
989  * to REFRELE any peer SA that is obtained.
990  */
991 static void
992 esp_set_usetime(ipsa_t *assoc, boolean_t inbound)
993 {
994 	ipsa_t *inassoc, *outassoc;
995 	isaf_t *bucket;
996 	sadb_t *sp;
997 	int outhash;
998 	boolean_t isv6;
999 
1000 	/* No peer?  No problem! */
1001 	if (!assoc->ipsa_haspeer) {
1002 		sadb_set_usetime(assoc);
1003 		return;
1004 	}
1005 
1006 	/*
1007 	 * Otherwise, we want to grab both the original assoc and its peer.
1008 	 * There might be a race for this, but if it's a real race, the times
1009 	 * will be out-of-synch by at most a second, and since our time
1010 	 * granularity is a second, this won't be a problem.
1011 	 *
1012 	 * If we need tight synchronization on the peer SA, then we need to
1013 	 * reconsider.
1014 	 */
1015 
1016 	/* Use address length to select IPv6/IPv4 */
1017 	isv6 = (assoc->ipsa_addrfam == AF_INET6);
1018 	sp = isv6 ? &esp_sadb.s_v6 : &esp_sadb.s_v4;
1019 
1020 	if (inbound) {
1021 		inassoc = assoc;
1022 		if (isv6) {
1023 			outhash = OUTBOUND_HASH_V6(sp, *((in6_addr_t *)
1024 			    &inassoc->ipsa_dstaddr));
1025 		} else {
1026 			outhash = OUTBOUND_HASH_V4(sp, *((ipaddr_t *)
1027 				&inassoc->ipsa_dstaddr));
1028 		}
1029 		bucket = &sp->sdb_of[outhash];
1030 		mutex_enter(&bucket->isaf_lock);
1031 		outassoc = ipsec_getassocbyspi(bucket, inassoc->ipsa_spi,
1032 		    inassoc->ipsa_srcaddr, inassoc->ipsa_dstaddr,
1033 		    inassoc->ipsa_addrfam);
1034 		mutex_exit(&bucket->isaf_lock);
1035 		if (outassoc == NULL) {
1036 			/* Q: Do we wish to set haspeer == B_FALSE? */
1037 			esp0dbg(("esp_set_usetime: "
1038 			    "can't find peer for inbound.\n"));
1039 			sadb_set_usetime(inassoc);
1040 			return;
1041 		}
1042 	} else {
1043 		outassoc = assoc;
1044 		bucket = INBOUND_BUCKET(sp, outassoc->ipsa_spi);
1045 		mutex_enter(&bucket->isaf_lock);
1046 		inassoc = ipsec_getassocbyspi(bucket, outassoc->ipsa_spi,
1047 		    outassoc->ipsa_srcaddr, outassoc->ipsa_dstaddr,
1048 		    outassoc->ipsa_addrfam);
1049 		mutex_exit(&bucket->isaf_lock);
1050 		if (inassoc == NULL) {
1051 			/* Q: Do we wish to set haspeer == B_FALSE? */
1052 			esp0dbg(("esp_set_usetime: "
1053 			    "can't find peer for outbound.\n"));
1054 			sadb_set_usetime(outassoc);
1055 			return;
1056 		}
1057 	}
1058 
1059 	/* Update usetime on both. */
1060 	sadb_set_usetime(inassoc);
1061 	sadb_set_usetime(outassoc);
1062 
1063 	/*
1064 	 * REFRELE any peer SA.
1065 	 *
1066 	 * Because of the multi-line macro nature of IPSA_REFRELE, keep
1067 	 * them in { }.
1068 	 */
1069 	if (inbound) {
1070 		IPSA_REFRELE(outassoc);
1071 	} else {
1072 		IPSA_REFRELE(inassoc);
1073 	}
1074 }
1075 
1076 /*
1077  * Handle ESP inbound data for IPv4 and IPv6.
1078  * Returns an ipsec_status_t; on failure, returns IPSEC_STATUS_FAILED and
1079  * frees the mblk chain ipsec_in_mp.
1080  */
1081 ipsec_status_t
1082 esp_inbound(mblk_t *ipsec_in_mp, void *arg)
1083 {
1084 	mblk_t *data_mp = ipsec_in_mp->b_cont;
1085 	ipsec_in_t *ii = (ipsec_in_t *)ipsec_in_mp->b_rptr;
1086 	esph_t *esph = (esph_t *)arg;
1087 	ipsa_t *ipsa = ii->ipsec_in_esp_sa;
1088 
1089 	if (ipsa->ipsa_usetime == 0)
1090 		esp_set_usetime(ipsa, B_TRUE);
1091 
1092 	/*
1093 	 * We may wish to check replay in-range-only here as an optimization.
1094 	 * Include the reality check of ipsa->ipsa_replay >
1095 	 * ipsa->ipsa_replay_wsize for times when it's the first N packets,
1096 	 * where N == ipsa->ipsa_replay_wsize.
1097 	 *
1098 	 * Another check that may come here later is the "collision" check.
1099 	 * If legitimate packets flow quickly enough, this won't be a problem,
1100 	 * but collisions may cause authentication algorithm crunching to
1101 	 * take place when it doesn't need to.
1102 	 */
1103 	if (!sadb_replay_peek(ipsa, esph->esph_replay)) {
1104 		ESP_BUMP_STAT(replay_early_failures);
1105 		IP_ESP_BUMP_STAT(in_discards);
1106 		/*
1107 		 * TODO: Extract inbound interface from the IPSEC_IN
1108 		 * message's ii->ipsec_in_rill_index.
1109 		 */
1110 		ip_drop_packet(ipsec_in_mp, B_TRUE, NULL, NULL,
1111 		    &ipdrops_esp_early_replay, &esp_dropper);
1112 		return (IPSEC_STATUS_FAILED);
1113 	}
1114 
1115 	/*
1116 	 * Has this packet already been processed by a hardware
1117 	 * IPsec accelerator?
1118 	 */
1119 	if (ii->ipsec_in_accelerated) {
1120 		ipsec_status_t rv;
1121 		esp3dbg(("esp_inbound: pkt processed by ill=%d isv6=%d\n",
1122 		    ii->ipsec_in_ill_index, !ii->ipsec_in_v4));
1123 		rv = esp_inbound_accelerated(ipsec_in_mp,
1124 		    data_mp, ii->ipsec_in_v4, ipsa);
1125 		return (rv);
1126 	}
1127 	ESP_BUMP_STAT(noaccel);
1128 
1129 	/*
1130 	 * Adjust the IP header's payload length to reflect the removal
1131 	 * of the ICV.
1132 	 */
1133 	if (!ii->ipsec_in_v4) {
1134 		ip6_t *ip6h = (ip6_t *)data_mp->b_rptr;
1135 		ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) -
1136 		    ipsa->ipsa_mac_len);
1137 	} else {
1138 		ipha_t *ipha = (ipha_t *)data_mp->b_rptr;
1139 		ipha->ipha_length = htons(ntohs(ipha->ipha_length) -
1140 		    ipsa->ipsa_mac_len);
1141 	}
1142 
1143 	/* submit the request to the crypto framework */
1144 	return (esp_submit_req_inbound(ipsec_in_mp, ipsa,
1145 	    (uint8_t *)esph - data_mp->b_rptr));
1146 }
1147 
1148 /*
1149  * Perform the really difficult work of inserting the proposed situation.
1150  * Called while holding the algorithm lock.
1151  */
1152 static void
1153 esp_insert_prop(sadb_prop_t *prop, ipsacq_t *acqrec, uint_t combs)
1154 {
1155 	sadb_comb_t *comb = (sadb_comb_t *)(prop + 1);
1156 	ipsec_out_t *io;
1157 	ipsec_action_t *ap;
1158 	ipsec_prot_t *prot;
1159 
1160 	ASSERT(MUTEX_HELD(&alg_lock));
1161 	io = (ipsec_out_t *)acqrec->ipsacq_mp->b_rptr;
1162 	ASSERT(io->ipsec_out_type == IPSEC_OUT);
1163 
1164 	prop->sadb_prop_exttype = SADB_EXT_PROPOSAL;
1165 	prop->sadb_prop_len = SADB_8TO64(sizeof (sadb_prop_t));
1166 	*(uint32_t *)(&prop->sadb_prop_replay) = 0;	/* Quick zero-out! */
1167 
1168 	prop->sadb_prop_replay = ipsecesp_replay_size;
1169 
1170 	/*
1171 	 * Based upon algorithm properties, and what-not, prioritize
1172 	 * a proposal.  If the IPSEC_OUT message has an algorithm specified,
1173 	 * use it first and foremost.
1174 	 *
1175 	 * For each action in policy list
1176 	 *   Add combination.  If I've hit limit, return.
1177 	 */
1178 
1179 	for (ap = acqrec->ipsacq_act; ap != NULL;
1180 	    ap = ap->ipa_next) {
1181 		ipsec_alginfo_t *ealg = NULL;
1182 		ipsec_alginfo_t *aalg = NULL;
1183 
1184 		if (ap->ipa_act.ipa_type != IPSEC_POLICY_APPLY)
1185 			continue;
1186 
1187 		prot = &ap->ipa_act.ipa_apply;
1188 
1189 		if (!(prot->ipp_use_esp))
1190 			continue;
1191 
1192 		if (prot->ipp_esp_auth_alg != 0) {
1193 			aalg = ipsec_alglists[IPSEC_ALG_AUTH]
1194 			    [prot->ipp_esp_auth_alg];
1195 			if (aalg == NULL || !ALG_VALID(aalg))
1196 				continue;
1197 		}
1198 
1199 		ASSERT(prot->ipp_encr_alg > 0);
1200 		ealg = ipsec_alglists[IPSEC_ALG_ENCR][prot->ipp_encr_alg];
1201 		if (ealg == NULL || !ALG_VALID(ealg))
1202 			continue;
1203 
1204 		comb->sadb_comb_flags = 0;
1205 		comb->sadb_comb_reserved = 0;
1206 		comb->sadb_comb_encrypt = ealg->alg_id;
1207 		comb->sadb_comb_encrypt_minbits = prot->ipp_espe_minbits;
1208 		comb->sadb_comb_encrypt_maxbits = prot->ipp_espe_maxbits;
1209 		if (aalg == NULL) {
1210 			comb->sadb_comb_auth = 0;
1211 			comb->sadb_comb_auth_minbits = 0;
1212 			comb->sadb_comb_auth_maxbits = 0;
1213 		} else {
1214 			comb->sadb_comb_auth = aalg->alg_id;
1215 			comb->sadb_comb_auth_minbits = prot->ipp_espa_minbits;
1216 			comb->sadb_comb_auth_maxbits = prot->ipp_espa_maxbits;
1217 		}
1218 
1219 		/*
1220 		 * The following may be based on algorithm
1221 		 * properties, but in the meantime, we just pick
1222 		 * some good, sensible numbers.  Key mgmt. can
1223 		 * (and perhaps should) be the place to finalize
1224 		 * such decisions.
1225 		 */
1226 
1227 		/*
1228 		 * No limits on allocations, since we really don't
1229 		 * support that concept currently.
1230 		 */
1231 		comb->sadb_comb_soft_allocations = 0;
1232 		comb->sadb_comb_hard_allocations = 0;
1233 
1234 		/*
1235 		 * These may want to come from policy rule..
1236 		 * These may want to come from the policy rule.
1237 		comb->sadb_comb_soft_bytes = ipsecesp_default_soft_bytes;
1238 		comb->sadb_comb_hard_bytes = ipsecesp_default_hard_bytes;
1239 		comb->sadb_comb_soft_addtime = ipsecesp_default_soft_addtime;
1240 		comb->sadb_comb_hard_addtime = ipsecesp_default_hard_addtime;
1241 		comb->sadb_comb_soft_usetime = ipsecesp_default_soft_usetime;
1242 		comb->sadb_comb_hard_usetime = ipsecesp_default_hard_usetime;
1243 
1244 		prop->sadb_prop_len += SADB_8TO64(sizeof (*comb));
1245 		if (--combs == 0)
1246 			break;	/* out of space.. */
1247 		comb++;
1248 	}
1249 }
1250 
1251 /*
1252  * Prepare and actually send the SADB_ACQUIRE message to PF_KEY.
1253  */
1254 static void
1255 esp_send_acquire(ipsacq_t *acqrec, mblk_t *extended)
1256 {
1257 	mblk_t *pfkeymp, *msgmp;
1258 	uint_t allocsize, combs;
1259 	sadb_msg_t *samsg;
1260 	sadb_prop_t *prop;
1261 	uint8_t *cur, *end;
1262 
1263 	ESP_BUMP_STAT(acquire_requests);
1264 
1265 	ASSERT(MUTEX_HELD(&acqrec->ipsacq_lock));
1266 
1267 	pfkeymp = sadb_keysock_out(0);
1268 	if (pfkeymp == NULL) {
1269 		esp0dbg(("esp_send_acquire: 1st allocb() failed.\n"));
1270 		/* Just bail. */
1271 		goto done;
1272 	}
1273 
1274 	/*
1275 	 * First, allocate a basic ACQUIRE message.  Beyond that, additional
1276 	 * room may be needed for certificate info (see the XXX note below).
1277 	 */
1278 	allocsize = sizeof (sadb_msg_t) + sizeof (sadb_address_t) +
1279 	    sizeof (sadb_address_t) + sizeof (sadb_prop_t);
1280 
1281 	switch (acqrec->ipsacq_addrfam) {
1282 	case AF_INET:
1283 		allocsize += 2 * sizeof (struct sockaddr_in);
1284 		break;
1285 	case AF_INET6:
1286 		allocsize += 2 * sizeof (struct sockaddr_in6);
1287 		break;
1288 	}
1289 
1290 	mutex_enter(&alg_lock);
1291 
1292 	combs = ipsec_nalgs[IPSEC_ALG_AUTH] * ipsec_nalgs[IPSEC_ALG_ENCR];
1293 
1294 	allocsize += combs * sizeof (sadb_comb_t);
1295 
1296 	/*
1297 	 * XXX If there are:
1298 	 *	certificate IDs
1299 	 *	proxy address
1300 	 *	<Others>
1301 	 * add additional allocation size.
1302 	 */
1303 
1304 	msgmp = allocb(allocsize, BPRI_HI);
1305 	if (msgmp == NULL) {
1306 		esp0dbg(("esp_send_acquire: 2nd allocb() failed.\n"));
1307 		/* Just bail. */
1308 		freemsg(pfkeymp);
1309 		pfkeymp = NULL;
1310 		goto done;
1311 	}
1312 
1313 	cur = msgmp->b_rptr;
1314 	end = cur + allocsize;
1315 	samsg = (sadb_msg_t *)cur;
1316 	pfkeymp->b_cont = msgmp;
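	/*
	 * The outbound chain is now:  keysock_out mblk -> sadb_msg mblk.
	 * The ACQUIRE extensions and the proposal are filled into the
	 * latter below.
	 */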
1317 
1318 	/* Set up ACQUIRE. */
1319 	cur = sadb_setup_acquire(cur, end, acqrec);
1320 	if (cur == NULL) {
1321 		esp0dbg(("sadb_setup_acquire failed.\n"));
1322 		/* Just bail. */
1323 		freemsg(pfkeymp);
1324 		pfkeymp = NULL;
1325 		goto done;
1326 	}
1327 	samsg->sadb_msg_satype = SADB_SATYPE_ESP;
1328 
1329 	/* XXX Insert proxy address information here. */
1330 
1331 	/* XXX Insert identity information here. */
1332 
1333 	/* XXXMLS Insert sensitivity information here. */
1334 
1335 	/* Insert proposal here. */
1336 
1337 	prop = (sadb_prop_t *)(((uint64_t *)samsg) + samsg->sadb_msg_len);
1338 	esp_insert_prop(prop, acqrec, combs);
1339 	samsg->sadb_msg_len += prop->sadb_prop_len;
1340 	msgmp->b_wptr += SADB_64TO8(samsg->sadb_msg_len);
1341 
1342 done:
1343 	mutex_exit(&alg_lock);
1344 
1345 	/*
1346 	 * Must mutex_exit() before sending PF_KEY message up, in
1347 	 * order to avoid recursive mutex_enter() if there are no registered
1348 	 * listeners.
1349 	 *
1350 	 * Once I've sent the message, I'm cool anyway.
1351 	 */
1352 	mutex_exit(&acqrec->ipsacq_lock);
1353 	if (esp_pfkey_q != NULL && pfkeymp != NULL) {
1354 		if (extended != NULL) {
1355 			putnext(esp_pfkey_q, extended);
1356 		}
1357 		putnext(esp_pfkey_q, pfkeymp);
1358 		return;
1359 	}
1360 	/* XXX freemsg() works for extended == NULL. */
1361 	freemsg(extended);
1362 	freemsg(pfkeymp);
1363 }
1364 
1365 /*
1366  * Handle the SADB_GETSPI message.  Create a larval SA.
1367  */
1368 static void
1369 esp_getspi(mblk_t *mp, keysock_in_t *ksi)
1370 {
1371 	ipsa_t *newbie, *target;
1372 	isaf_t *outbound, *inbound;
1373 	int rc, diagnostic;
1374 	sadb_sa_t *assoc;
1375 	keysock_out_t *kso;
1376 	uint32_t newspi;
1377 
1378 	/*
1379 	 * Randomly generate a proposed SPI value
1380 	 */
1381 	(void) random_get_pseudo_bytes((uint8_t *)&newspi, sizeof (uint32_t));
1382 	newbie = sadb_getspi(ksi, newspi, &diagnostic);
1383 
1384 	if (newbie == NULL) {
1385 		sadb_pfkey_error(esp_pfkey_q, mp, ENOMEM, diagnostic,
1386 		    ksi->ks_in_serial);
1387 		return;
1388 	} else if (newbie == (ipsa_t *)-1) {
1389 		sadb_pfkey_error(esp_pfkey_q, mp, EINVAL, diagnostic,
1390 		    ksi->ks_in_serial);
1391 		return;
1392 	}
1393 
1394 	/*
1395 	 * XXX - We may randomly collide.  We really should recover from this.
1396 	 *	 Unfortunately, that could require spending way-too-much-time
1397 	 *	 in here.  For now, let the user retry.
1398 	 */
1399 
1400 	if (newbie->ipsa_addrfam == AF_INET6) {
1401 		outbound = OUTBOUND_BUCKET_V6(&esp_sadb.s_v6,
1402 		    *(uint32_t *)(newbie->ipsa_dstaddr));
1403 		inbound = INBOUND_BUCKET(&esp_sadb.s_v6, newbie->ipsa_spi);
1404 	} else {
1405 		ASSERT(newbie->ipsa_addrfam == AF_INET);
1406 		outbound = OUTBOUND_BUCKET_V4(&esp_sadb.s_v4,
1407 		    *(uint32_t *)(newbie->ipsa_dstaddr));
1408 		inbound = INBOUND_BUCKET(&esp_sadb.s_v4, newbie->ipsa_spi);
1409 	}
1410 
1411 	mutex_enter(&outbound->isaf_lock);
1412 	mutex_enter(&inbound->isaf_lock);
1413 
1414 	/*
1415 	 * Check for collisions (i.e. did sadb_getspi() return with something
1416 	 * that already exists?).
1417 	 *
1418 	 * Try outbound first.  Even though SADB_GETSPI is traditionally
1419 	 * for inbound SAs, you never know what a user might do.
1420 	 */
1421 	target = ipsec_getassocbyspi(outbound, newbie->ipsa_spi,
1422 	    newbie->ipsa_srcaddr, newbie->ipsa_dstaddr, newbie->ipsa_addrfam);
1423 	if (target == NULL) {
1424 		target = ipsec_getassocbyspi(inbound, newbie->ipsa_spi,
1425 		    newbie->ipsa_srcaddr, newbie->ipsa_dstaddr,
1426 		    newbie->ipsa_addrfam);
1427 	}
1428 
1429 	/*
1430 	 * I don't have collisions elsewhere!
1431 	 * (Nor will I because I'm still holding inbound/outbound locks.)
1432 	 */
1433 
1434 	if (target != NULL) {
1435 		rc = EEXIST;
1436 		IPSA_REFRELE(target);
1437 	} else {
1438 		/*
1439 		 * sadb_insertassoc() also checks for collisions, so
1440 		 * if there's a colliding entry, rc will be set
1441 		 * to EEXIST.
1442 		 */
1443 		rc = sadb_insertassoc(newbie, inbound);
1444 		(void) drv_getparm(TIME, &newbie->ipsa_hardexpiretime);
1445 		newbie->ipsa_hardexpiretime += ipsecesp_larval_timeout;
1446 	}
1447 
1448 	/*
1449 	 * Can exit outbound mutex.  Hold inbound until we're done
1450 	 * with newbie.
1451 	 */
1452 	mutex_exit(&outbound->isaf_lock);
1453 
1454 	if (rc != 0) {
1455 		mutex_exit(&inbound->isaf_lock);
1456 		IPSA_REFRELE(newbie);
1457 		sadb_pfkey_error(esp_pfkey_q, mp, rc, SADB_X_DIAGNOSTIC_NONE,
1458 		    ksi->ks_in_serial);
1459 		return;
1460 	}
1461 
1462 
1463 	/* Can write here because I'm still holding the bucket lock. */
1464 	newbie->ipsa_type = SADB_SATYPE_ESP;
1465 
1466 	/*
1467 	 * Construct successful return message.  We have one thing going
1468 	 * for us in PF_KEY v2.  That's the fact that
1469 	 *	sizeof (sadb_spirange_t) == sizeof (sadb_sa_t)
1470 	 */
1471 	assoc = (sadb_sa_t *)ksi->ks_in_extv[SADB_EXT_SPIRANGE];
1472 	assoc->sadb_sa_exttype = SADB_EXT_SA;
1473 	assoc->sadb_sa_spi = newbie->ipsa_spi;
1474 	*((uint64_t *)(&assoc->sadb_sa_replay)) = 0;
1475 	mutex_exit(&inbound->isaf_lock);
1476 
1477 	/* Convert KEYSOCK_IN to KEYSOCK_OUT. */
1478 	kso = (keysock_out_t *)ksi;
1479 	kso->ks_out_len = sizeof (*kso);
1480 	kso->ks_out_serial = ksi->ks_in_serial;
1481 	kso->ks_out_type = KEYSOCK_OUT;
1482 
1483 	/*
1484 	 * Can safely putnext() to esp_pfkey_q, because this is a turnaround
1485 	 * from the esp_pfkey_q.
1486 	 */
1487 	putnext(esp_pfkey_q, mp);
1488 }
1489 
1490 /*
1491  * Insert the ESP header into a packet.  Duplicate an mblk, and insert a newly
1492  * allocated mblk with the ESP header in between the two.
1493  */
1494 static boolean_t
1495 esp_insert_esp(mblk_t *mp, mblk_t *esp_mp, uint_t divpoint)
1496 {
1497 	mblk_t *split_mp = mp;
1498 	uint_t wheretodiv = divpoint;
1499 
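	/*
	 * Walk the chain to the mblk that contains the division point,
	 * converting wheretodiv into an offset within that mblk.
	 */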
1500 	while ((split_mp->b_wptr - split_mp->b_rptr) < wheretodiv) {
1501 		wheretodiv -= (split_mp->b_wptr - split_mp->b_rptr);
1502 		split_mp = split_mp->b_cont;
1503 		ASSERT(split_mp != NULL);
1504 	}
1505 
1506 	if (split_mp->b_wptr - split_mp->b_rptr != wheretodiv) {
1507 		mblk_t *scratch;
1508 
1509 		/* "scratch" is the 2nd half, split_mp is the first. */
1510 		scratch = dupb(split_mp);
1511 		if (scratch == NULL) {
1512 			esp1dbg(("esp_insert_esp: can't allocate scratch.\n"));
1513 			return (B_FALSE);
1514 		}
1515 		/* NOTE:  dupb() doesn't set b_cont appropriately. */
1516 		scratch->b_cont = split_mp->b_cont;
1517 		scratch->b_rptr += wheretodiv;
1518 		split_mp->b_wptr = split_mp->b_rptr + wheretodiv;
1519 		split_mp->b_cont = scratch;
1520 	}
1521 	/*
1522 	 * At this point, split_mp is exactly "wheretodiv" bytes long, and
1523 	 * holds the end of the pre-ESP part of the datagram.
1524 	 */
1525 	esp_mp->b_cont = split_mp->b_cont;
1526 	split_mp->b_cont = esp_mp;
1527 
1528 	return (B_TRUE);
1529 }
1530 
1531 /*
1532  * Finish processing of an inbound ESP packet after processing by the
1533  * crypto framework.
1534  * - Remove the ESP header.
1535  * - Send packet back to IP.
1536  * If authentication was performed on the packet, this function is called
1537  * only if the authentication succeeded.
1538  * On success returns IPSEC_STATUS_SUCCESS; on failure returns
1539  * IPSEC_STATUS_FAILED and frees the mblk chain ipsec_in_mp.
1540  */
1541 static ipsec_status_t
1542 esp_in_done(mblk_t *ipsec_in_mp)
1543 {
1544 	ipsec_in_t *ii = (ipsec_in_t *)ipsec_in_mp->b_rptr;
1545 	mblk_t *data_mp;
1546 	ipsa_t *assoc;
1547 	uint_t espstart;
1548 	uint32_t ivlen = 0;
1549 	uint_t processed_len;
1550 	esph_t *esph;
1551 	kstat_named_t *counter;
1552 	boolean_t is_natt;
1553 
1554 	assoc = ii->ipsec_in_esp_sa;
1555 	ASSERT(assoc != NULL);
1556 
1557 	is_natt = ((assoc->ipsa_flags & IPSA_F_NATT) != 0);
1558 
1559 	/* get the pointer to the ESP header */
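	/*
	 * The crypto offsets describe different regions depending on the
	 * mode:  for authentication-only ESP the data offset is the ESP
	 * header itself; for encryption-only ESP it points past the ESP
	 * header and IV, so back up by both; for combined mode, offset1 of
	 * the dual-data descriptor marks the ESP header.
	 */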
1560 	if (assoc->ipsa_encr_alg == SADB_EALG_NULL) {
1561 		/* authentication-only ESP */
1562 		espstart = ii->ipsec_in_crypto_data.cd_offset;
1563 		processed_len = ii->ipsec_in_crypto_data.cd_length;
1564 	} else {
1565 		/* encryption present */
1566 		ivlen = assoc->ipsa_iv_len;
1567 		if (assoc->ipsa_auth_alg == SADB_AALG_NONE) {
1568 			/* encryption-only ESP */
1569 			espstart = ii->ipsec_in_crypto_data.cd_offset -
1570 				sizeof (esph_t) - assoc->ipsa_iv_len;
1571 			processed_len = ii->ipsec_in_crypto_data.cd_length +
1572 				ivlen;
1573 		} else {
1574 			/* encryption with authentication */
1575 			espstart = ii->ipsec_in_crypto_dual_data.dd_offset1;
1576 			processed_len = ii->ipsec_in_crypto_dual_data.dd_len2 +
1577 			    ivlen;
1578 		}
1579 	}
1580 
1581 	data_mp = ipsec_in_mp->b_cont;
1582 	esph = (esph_t *)(data_mp->b_rptr + espstart);
1583 
1584 	if (assoc->ipsa_auth_alg != IPSA_AALG_NONE) {
1585 		/* authentication passed if we reach this point */
1586 		ESP_BUMP_STAT(good_auth);
1587 		data_mp->b_wptr -= assoc->ipsa_mac_len;
1588 
1589 		/*
1590 		 * Check replay window here!
1591 		 * For right now, assume keysock will set the replay window
1592 		 * size to zero for SAs that have an unspecified sender.
1593 		 * This may change...
1594 		 */
1595 
1596 		if (!sadb_replay_check(assoc, esph->esph_replay)) {
1597 			/*
1598 			 * Log the event.  Do not print the replay failure
1599 			 * number, or else syslog cannot collate the error
1600 			 * messages.  Printing the replay number that failed
1601 			 * also opens a denial-of-service attack.
1603 			 */
1604 			ipsec_assocfailure(info.mi_idnum, 0, 0,
1605 			    SL_ERROR | SL_WARN,
1606 			    "Replay failed for ESP spi 0x%x, dst %s.\n",
1607 			    assoc->ipsa_spi, assoc->ipsa_dstaddr,
1608 			    assoc->ipsa_addrfam);
1609 			ESP_BUMP_STAT(replay_failures);
1610 			counter = &ipdrops_esp_replay;
1611 			goto drop_and_bail;
1612 		}
1613 	}
1614 
1615 	if (!esp_age_bytes(assoc, processed_len, B_TRUE)) {
1616 		/* The ipsa has hit hard expiration, LOG and AUDIT. */
1617 		ipsec_assocfailure(info.mi_idnum, 0, 0,
1618 		    SL_ERROR | SL_WARN,
1619 		    "ESP association 0x%x, dst %s had bytes expire.\n",
1620 		    assoc->ipsa_spi, assoc->ipsa_dstaddr, assoc->ipsa_addrfam);
1621 		ESP_BUMP_STAT(bytes_expired);
1622 		counter = &ipdrops_esp_bytes_expire;
1623 		goto drop_and_bail;
1624 	}
1625 
1626 	/*
1627 	 * Remove ESP header and padding from packet.  I hope the compiler
1628 	 * spews "branch, predict taken" code for this.
1629 	 */
1630 
1631 	if (esp_strip_header(data_mp, ii->ipsec_in_v4, ivlen, &counter)) {
1632 		if (is_natt)
1633 			return (esp_fix_natt_checksums(data_mp, assoc));
1634 		return (IPSEC_STATUS_SUCCESS);
1635 	}
1636 
1637 	esp1dbg(("esp_in_done: esp_strip_header() failed\n"));
1638 drop_and_bail:
1639 	IP_ESP_BUMP_STAT(in_discards);
1640 	/*
1641 	 * TODO: Extract inbound interface from the IPSEC_IN message's
1642 	 * ii->ipsec_in_rill_index.
1643 	 */
1644 	ip_drop_packet(ipsec_in_mp, B_TRUE, NULL, NULL, counter, &esp_dropper);
1645 	return (IPSEC_STATUS_FAILED);
1646 }
1647 
1648 /*
1649  * Called upon failing the inbound ICV check. The message passed as
1650  * argument is freed.
1651  */
1652 static void
1653 esp_log_bad_auth(mblk_t *ipsec_in)
1654 {
1655 	ipsec_in_t *ii = (ipsec_in_t *)ipsec_in->b_rptr;
1656 	ipsa_t *assoc = ii->ipsec_in_esp_sa;
1657 
1658 	/*
1659 	 * Log the event.  Don't print to the console, to block a
1660 	 * potential denial-of-service attack.
1661 	 */
1662 	ESP_BUMP_STAT(bad_auth);
1663 
1664 	ipsec_assocfailure(info.mi_idnum, 0, 0, SL_ERROR | SL_WARN,
1665 	    "ESP Authentication failed for spi 0x%x, dst %s.\n",
1666 	    assoc->ipsa_spi, assoc->ipsa_dstaddr, assoc->ipsa_addrfam);
1667 
1668 	IP_ESP_BUMP_STAT(in_discards);
1669 	/*
1670 	 * TODO: Extract inbound interface from the IPSEC_IN
1671 	 * message's ii->ipsec_in_rill_index.
1672 	 */
1673 	ip_drop_packet(ipsec_in, B_TRUE, NULL, NULL, &ipdrops_esp_bad_auth,
1674 	    &esp_dropper);
1675 }
1676 
1677 
1678 /*
1679  * Invoked for outbound packets after ESP processing. If the packet
1680  * also requires AH, performs the AH SA selection and AH processing.
1681  * Returns B_TRUE if the AH processing was not needed or if it was
1682  * performed successfully. Returns B_FALSE and consumes the passed mblk
1683  * if AH processing was required but could not be performed.
1684  */
1685 static boolean_t
1686 esp_do_outbound_ah(mblk_t *ipsec_mp)
1687 {
1688 	ipsec_out_t *io = (ipsec_out_t *)ipsec_mp->b_rptr;
1689 	ipsec_status_t ipsec_rc;
1690 	ipsec_action_t *ap;
1691 
1692 	ap = io->ipsec_out_act;
1693 	if (ap == NULL) {
1694 		ipsec_policy_t *pp = io->ipsec_out_policy;
1695 		ap = pp->ipsp_act;
1696 	}
1697 
1698 	if (!ap->ipa_want_ah)
1699 		return (B_TRUE);
1700 
1701 	ASSERT(io->ipsec_out_ah_done == B_FALSE);
1702 
1703 	if (io->ipsec_out_ah_sa == NULL) {
1704 		if (!ipsec_outbound_sa(ipsec_mp, IPPROTO_AH)) {
1705 			sadb_acquire(ipsec_mp, io, B_TRUE, B_FALSE);
1706 			return (B_FALSE);
1707 		}
1708 	}
1709 	ASSERT(io->ipsec_out_ah_sa != NULL);
1710 
1711 	io->ipsec_out_ah_done = B_TRUE;
1712 	ipsec_rc = io->ipsec_out_ah_sa->ipsa_output_func(ipsec_mp);
1713 	return (ipsec_rc == IPSEC_STATUS_SUCCESS);
1714 }
1715 
1716 
1717 /*
1718  * Kernel crypto framework callback invoked after completion of async
1719  * crypto requests.
1720  */
1721 static void
1722 esp_kcf_callback(void *arg, int status)
1723 {
1724 	mblk_t *ipsec_mp = (mblk_t *)arg;
1725 	ipsec_in_t *ii = (ipsec_in_t *)ipsec_mp->b_rptr;
1726 	boolean_t is_inbound = (ii->ipsec_in_type == IPSEC_IN);
1727 
1728 	ASSERT(ipsec_mp->b_cont != NULL);
1729 
1730 	if (status == CRYPTO_SUCCESS) {
1731 		if (is_inbound) {
1732 			if (esp_in_done(ipsec_mp) != IPSEC_STATUS_SUCCESS)
1733 				return;
1734 
1735 			/* finish IPsec processing */
1736 			ip_fanout_proto_again(ipsec_mp, NULL, NULL, NULL);
1737 		} else {
1738 			/*
1739 			 * If an ICV was computed, it was stored by the
1740 			 * crypto framework at the end of the packet.
1741 			 */
1742 			ipha_t *ipha = (ipha_t *)ipsec_mp->b_cont->b_rptr;
1743 
1744 			/* do AH processing if needed */
1745 			if (!esp_do_outbound_ah(ipsec_mp))
1746 				return;
1747 
1748 			/* finish IPsec processing */
1749 			if (IPH_HDR_VERSION(ipha) == IP_VERSION) {
1750 				ip_wput_ipsec_out(NULL, ipsec_mp, ipha, NULL,
1751 				    NULL);
1752 			} else {
1753 				ip6_t *ip6h = (ip6_t *)ipha;
1754 				ip_wput_ipsec_out_v6(NULL, ipsec_mp, ip6h,
1755 				    NULL, NULL);
1756 			}
1757 		}
1758 
1759 	} else if (status == CRYPTO_INVALID_MAC) {
1760 		esp_log_bad_auth(ipsec_mp);
1761 
1762 	} else {
1763 		esp1dbg(("esp_kcf_callback: crypto failed with 0x%x\n",
1764 		    status));
1765 		ESP_BUMP_STAT(crypto_failures);
1766 		if (is_inbound)
1767 			IP_ESP_BUMP_STAT(in_discards);
1768 		else
1769 			ESP_BUMP_STAT(out_discards);
1770 		ip_drop_packet(ipsec_mp, is_inbound, NULL, NULL,
1771 		    &ipdrops_esp_crypto_failed, &esp_dropper);
1772 	}
1773 }
1774 
1775 /*
1776  * Invoked on crypto framework failure during inbound and outbound processing.
1777  */
1778 static void
1779 esp_crypto_failed(mblk_t *mp, boolean_t is_inbound, int kef_rc)
1780 {
1781 	esp1dbg(("crypto failed for %s ESP with 0x%x\n",
1782 	    is_inbound ? "inbound" : "outbound", kef_rc));
1783 	ip_drop_packet(mp, is_inbound, NULL, NULL, &ipdrops_esp_crypto_failed,
1784 	    &esp_dropper);
1785 	ESP_BUMP_STAT(crypto_failures);
1786 	if (is_inbound)
1787 		IP_ESP_BUMP_STAT(in_discards);
1788 	else
1789 		ESP_BUMP_STAT(out_discards);
1790 }
1791 
1792 #define	ESP_INIT_CALLREQ(_cr) {						\
1793 	(_cr)->cr_flag = CRYPTO_SKIP_REQID|CRYPTO_RESTRICTED;		\
1794 	(_cr)->cr_callback_arg = ipsec_mp;				\
1795 	(_cr)->cr_callback_func = esp_kcf_callback;			\
1796 }
1797 
1798 #define	ESP_INIT_CRYPTO_MAC(mac, icvlen, icvbuf) {			\
1799 	(mac)->cd_format = CRYPTO_DATA_RAW;				\
1800 	(mac)->cd_offset = 0;						\
1801 	(mac)->cd_length = icvlen;					\
1802 	(mac)->cd_raw.iov_base = (char *)icvbuf;			\
1803 	(mac)->cd_raw.iov_len = icvlen;					\
1804 }
1805 
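/*
 * Describe the [off, off + len) region for the crypto framework.  If the
 * region is contiguous within the first mblk, use CRYPTO_DATA_RAW (a
 * simple iovec); otherwise hand over the whole mblk chain as
 * CRYPTO_DATA_MBLK.
 */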
1806 #define	ESP_INIT_CRYPTO_DATA(data, mp, off, len) {			\
1807 	if (MBLKL(mp) >= (len) + (off)) {				\
1808 		(data)->cd_format = CRYPTO_DATA_RAW;			\
1809 		(data)->cd_raw.iov_base = (char *)(mp)->b_rptr;		\
1810 		(data)->cd_raw.iov_len = MBLKL(mp);			\
1811 		(data)->cd_offset = off;				\
1812 	} else {							\
1813 		(data)->cd_format = CRYPTO_DATA_MBLK;			\
1814 		(data)->cd_mp = mp;			       		\
1815 		(data)->cd_offset = off;				\
1816 	}								\
1817 	(data)->cd_length = len;					\
1818 }
1819 
1820 #define	ESP_INIT_CRYPTO_DUAL_DATA(data, mp, off1, len1, off2, len2) {	\
1821 	(data)->dd_format = CRYPTO_DATA_MBLK;				\
1822 	(data)->dd_mp = mp;						\
1823 	(data)->dd_len1 = len1;						\
1824 	(data)->dd_offset1 = off1;					\
1825 	(data)->dd_len2 = len2;						\
1826 	(data)->dd_offset2 = off2;					\
1827 }
1828 
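/*
 * Submit an inbound ESP packet to the crypto framework for ICV
 * verification and/or decryption, as dictated by the SA.  Returns
 * IPSEC_STATUS_SUCCESS, IPSEC_STATUS_PENDING (completion arrives via
 * esp_kcf_callback()), or IPSEC_STATUS_FAILED.
 */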
1829 static ipsec_status_t
1830 esp_submit_req_inbound(mblk_t *ipsec_mp, ipsa_t *assoc, uint_t esph_offset)
1831 {
1832 	ipsec_in_t *ii = (ipsec_in_t *)ipsec_mp->b_rptr;
1833 	boolean_t do_auth;
1834 	uint_t auth_offset, msg_len, auth_len;
1835 	crypto_call_req_t call_req;
1836 	mblk_t *esp_mp;
1837 	int kef_rc = CRYPTO_FAILED;
1838 	uint_t icv_len = assoc->ipsa_mac_len;
1839 	crypto_ctx_template_t auth_ctx_tmpl;
1840 	boolean_t do_encr;
1841 	uint_t encr_offset, encr_len;
1842 	uint_t iv_len = assoc->ipsa_iv_len;
1843 	crypto_ctx_template_t encr_ctx_tmpl;
1844 
1845 	ASSERT(ii->ipsec_in_type == IPSEC_IN);
1846 
1847 	do_auth = assoc->ipsa_auth_alg != SADB_AALG_NONE;
1848 	do_encr = assoc->ipsa_encr_alg != SADB_EALG_NULL;
1849 
1850 	/*
1851 	 * An inbound packet is of the form:
1852 	 * IPSEC_IN -> [IP,options,ESP,IV,data,ICV,pad]
1853 	 */
1854 	esp_mp = ipsec_mp->b_cont;
1855 	msg_len = MBLKL(esp_mp);
1856 
1857 	ESP_INIT_CALLREQ(&call_req);
1858 
1859 	if (do_auth) {
1860 		/* force asynchronous processing? */
1861 		if (ipsec_algs_exec_mode[IPSEC_ALG_AUTH] ==
1862 		    IPSEC_ALGS_EXEC_ASYNC)
1863 			call_req.cr_flag |= CRYPTO_ALWAYS_QUEUE;
1864 
1865 		/* authentication context template */
1866 		IPSEC_CTX_TMPL(assoc, ipsa_authtmpl, IPSEC_ALG_AUTH,
1867 		    auth_ctx_tmpl);
1868 
1869 		/* ICV to be verified */
1870 		ESP_INIT_CRYPTO_MAC(&ii->ipsec_in_crypto_mac,
1871 		    icv_len, esp_mp->b_wptr - icv_len);
1872 
1873 		/* authentication starts at the ESP header */
1874 		auth_offset = esph_offset;
1875 		auth_len = msg_len - auth_offset - icv_len;
1876 		if (!do_encr) {
1877 			/* authentication only */
1878 			/* initialize input data argument */
1879 			ESP_INIT_CRYPTO_DATA(&ii->ipsec_in_crypto_data,
1880 			    esp_mp, auth_offset, auth_len);
1881 
1882 			/* call the crypto framework */
1883 			kef_rc = crypto_mac_verify(&assoc->ipsa_amech,
1884 			    &ii->ipsec_in_crypto_data,
1885 			    &assoc->ipsa_kcfauthkey, auth_ctx_tmpl,
1886 			    &ii->ipsec_in_crypto_mac, &call_req);
1887 		}
1888 	}
1889 
1890 	if (do_encr) {
1891 		/* force asynchronous processing? */
1892 		if (ipsec_algs_exec_mode[IPSEC_ALG_ENCR] ==
1893 		    IPSEC_ALGS_EXEC_ASYNC)
1894 			call_req.cr_flag |= CRYPTO_ALWAYS_QUEUE;
1895 
1896 		/* encryption template */
1897 		IPSEC_CTX_TMPL(assoc, ipsa_encrtmpl, IPSEC_ALG_ENCR,
1898 		    encr_ctx_tmpl);
1899 
1900 		/* skip IV, since it is passed separately */
1901 		encr_offset = esph_offset + sizeof (esph_t) + iv_len;
1902 		encr_len = msg_len - encr_offset;
1903 
1904 		if (!do_auth) {
1905 			/* decryption only */
1906 			/* initialize input data argument */
1907 			ESP_INIT_CRYPTO_DATA(&ii->ipsec_in_crypto_data,
1908 			    esp_mp, encr_offset, encr_len);
1909 
1910 			/* specify IV */
1911 			ii->ipsec_in_crypto_data.cd_miscdata =
1912 			    (char *)esp_mp->b_rptr + sizeof (esph_t) +
1913 			    esph_offset;
1914 
1915 			/* call the crypto framework */
1916 			kef_rc = crypto_decrypt(&assoc->ipsa_emech,
1917 			    &ii->ipsec_in_crypto_data,
1918 			    &assoc->ipsa_kcfencrkey, encr_ctx_tmpl,
1919 			    NULL, &call_req);
1920 		}
1921 	}
1922 
1923 	if (do_auth && do_encr) {
1924 		/* dual operation */
1925 		/* initialize input data argument */
1926 		ESP_INIT_CRYPTO_DUAL_DATA(&ii->ipsec_in_crypto_dual_data,
1927 		    esp_mp, auth_offset, auth_len,
1928 		    encr_offset, encr_len - icv_len);
1929 
1930 		/* specify IV */
1931 		ii->ipsec_in_crypto_dual_data.dd_miscdata =
1932 		    (char *)esp_mp->b_rptr + sizeof (esph_t) + esph_offset;
1933 
1934 		/* call the framework */
1935 		kef_rc = crypto_mac_verify_decrypt(&assoc->ipsa_amech,
1936 		    &assoc->ipsa_emech, &ii->ipsec_in_crypto_dual_data,
1937 		    &assoc->ipsa_kcfauthkey, &assoc->ipsa_kcfencrkey,
1938 		    auth_ctx_tmpl, encr_ctx_tmpl, &ii->ipsec_in_crypto_mac,
1939 		    NULL, &call_req);
1940 	}
1941 
1942 	switch (kef_rc) {
1943 	case CRYPTO_SUCCESS:
1944 		ESP_BUMP_STAT(crypto_sync);
1945 		return (esp_in_done(ipsec_mp));
1946 	case CRYPTO_QUEUED:
1947 		/* esp_kcf_callback() will be invoked on completion */
1948 		ESP_BUMP_STAT(crypto_async);
1949 		return (IPSEC_STATUS_PENDING);
1950 	case CRYPTO_INVALID_MAC:
1951 		ESP_BUMP_STAT(crypto_sync);
1952 		esp_log_bad_auth(ipsec_mp);
1953 		return (IPSEC_STATUS_FAILED);
1954 	}
1955 
1956 	esp_crypto_failed(ipsec_mp, B_TRUE, kef_rc);
1957 	return (IPSEC_STATUS_FAILED);
1958 }
1959 
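/*
 * Submit an outbound ESP packet to the crypto framework for encryption
 * and/or ICV computation, as dictated by the SA.  Returns
 * IPSEC_STATUS_SUCCESS, IPSEC_STATUS_PENDING (completion arrives via
 * esp_kcf_callback()), or IPSEC_STATUS_FAILED.
 */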
1960 static ipsec_status_t
1961 esp_submit_req_outbound(mblk_t *ipsec_mp, ipsa_t *assoc, uchar_t *icv_buf,
1962     uint_t payload_len)
1963 {
1964 	ipsec_out_t *io = (ipsec_out_t *)ipsec_mp->b_rptr;
1965 	uint_t auth_len;
1966 	crypto_call_req_t call_req;
1967 	mblk_t *esp_mp;
1968 	int kef_rc = CRYPTO_FAILED;
1969 	uint_t icv_len = assoc->ipsa_mac_len;
1970 	crypto_ctx_template_t auth_ctx_tmpl;
1971 	boolean_t do_auth;
1972 	boolean_t do_encr;
1973 	uint_t iv_len = assoc->ipsa_iv_len;
1974 	crypto_ctx_template_t encr_ctx_tmpl;
1975 	boolean_t is_natt = ((assoc->ipsa_flags & IPSA_F_NATT) != 0);
1976 	size_t esph_offset = (is_natt ? UDPH_SIZE : 0);
1977 
1978 	esp3dbg(("esp_submit_req_outbound:%s", is_natt ? "natt" : "not natt"));
1979 
1980 	ASSERT(io->ipsec_out_type == IPSEC_OUT);
1981 
1982 	do_encr = assoc->ipsa_encr_alg != SADB_EALG_NULL;
1983 	do_auth = assoc->ipsa_auth_alg != SADB_AALG_NONE;
1984 
1985 	/*
1986 	 * Outbound IPsec packets are of the form:
1987 	 * IPSEC_OUT -> [IP,options] -> [ESP,IV] -> [data] -> [pad,ICV]
1988 	 * unless it's NATT, then it's
1989 	 * IPSEC_OUT -> [IP,options] -> [udp][ESP,IV] -> [data] -> [pad,ICV]
1990 	 * Get a pointer to the mblk containing the ESP header.
1991 	 */
1992 	ASSERT(ipsec_mp->b_cont != NULL && ipsec_mp->b_cont->b_cont != NULL);
1993 	esp_mp = ipsec_mp->b_cont->b_cont;
1994 
1995 	ESP_INIT_CALLREQ(&call_req);
1996 
1997 	if (do_auth) {
1998 		/* force asynchronous processing? */
1999 		if (ipsec_algs_exec_mode[IPSEC_ALG_AUTH] ==
2000 		    IPSEC_ALGS_EXEC_ASYNC)
2001 			call_req.cr_flag |= CRYPTO_ALWAYS_QUEUE;
2002 
2003 		/* authentication context template */
2004 		IPSEC_CTX_TMPL(assoc, ipsa_authtmpl, IPSEC_ALG_AUTH,
2005 		    auth_ctx_tmpl);
2006 
2007 		/* where to store the computed mac */
2008 		ESP_INIT_CRYPTO_MAC(&io->ipsec_out_crypto_mac,
2009 		    icv_len, icv_buf);
2010 
2011 		/* authentication starts at the ESP header */
2012 		auth_len = payload_len + iv_len + sizeof (esph_t);
2013 		if (!do_encr) {
2014 			/* authentication only */
2015 			/* initialize input data argument */
2016 			ESP_INIT_CRYPTO_DATA(&io->ipsec_out_crypto_data,
2017 			    esp_mp, esph_offset, auth_len);
2018 
2019 			/* call the crypto framework */
2020 			kef_rc = crypto_mac(&assoc->ipsa_amech,
2021 			    &io->ipsec_out_crypto_data,
2022 			    &assoc->ipsa_kcfauthkey, auth_ctx_tmpl,
2023 			    &io->ipsec_out_crypto_mac, &call_req);
2024 		}
2025 	}
2026 
2027 	if (do_encr) {
2028 		/* force asynchronous processing? */
2029 		if (ipsec_algs_exec_mode[IPSEC_ALG_ENCR] ==
2030 		    IPSEC_ALGS_EXEC_ASYNC)
2031 			call_req.cr_flag |= CRYPTO_ALWAYS_QUEUE;
2032 
2033 		/* encryption context template */
2034 		IPSEC_CTX_TMPL(assoc, ipsa_encrtmpl, IPSEC_ALG_ENCR,
2035 		    encr_ctx_tmpl);
2036 
2037 		if (!do_auth) {
2038 			/* encryption only, skip mblk that contains ESP hdr */
2039 			/* initialize input data argument */
2040 			ESP_INIT_CRYPTO_DATA(&io->ipsec_out_crypto_data,
2041 			    esp_mp->b_cont, 0, payload_len);
2042 
2043 			/* specify IV */
2044 			io->ipsec_out_crypto_data.cd_miscdata =
2045 			    (char *)esp_mp->b_rptr + sizeof (esph_t) +
2046 			    esph_offset;
2047 
2048 			/* call the crypto framework */
2049 			kef_rc = crypto_encrypt(&assoc->ipsa_emech,
2050 			    &io->ipsec_out_crypto_data,
2051 			    &assoc->ipsa_kcfencrkey, encr_ctx_tmpl,
2052 			    NULL, &call_req);
2053 		}
2054 	}
2055 
2056 	if (do_auth && do_encr) {
2057 		/*
2058 		 * Encryption and authentication:
2059 		 * Pass the pointer to the mblk chain starting at the ESP
2060 		 * header to the framework. Skip the ESP header mblk
2061 		 * for encryption, which is reflected by an encryption
2062 		 * offset equal to the length of that mblk. Start
2063 		 * the authentication at the ESP header, i.e. use an
2064 		 * authentication offset of zero.
2065 		 */
2066 		ESP_INIT_CRYPTO_DUAL_DATA(&io->ipsec_out_crypto_dual_data,
2067 		    esp_mp, MBLKL(esp_mp), payload_len, esph_offset, auth_len);
2068 
2069 		/* specify IV */
2070 		io->ipsec_out_crypto_dual_data.dd_miscdata =
2071 		    (char *)esp_mp->b_rptr + sizeof (esph_t) + esph_offset;
2072 
2073 		/* call the framework */
2074 		kef_rc = crypto_encrypt_mac(&assoc->ipsa_emech,
2075 		    &assoc->ipsa_amech, NULL,
2076 		    &assoc->ipsa_kcfencrkey, &assoc->ipsa_kcfauthkey,
2077 		    encr_ctx_tmpl, auth_ctx_tmpl,
2078 		    &io->ipsec_out_crypto_dual_data,
2079 		    &io->ipsec_out_crypto_mac, &call_req);
2080 	}
2081 
2082 	switch (kef_rc) {
2083 	case CRYPTO_SUCCESS:
2084 		ESP_BUMP_STAT(crypto_sync);
2085 		return (IPSEC_STATUS_SUCCESS);
2086 	case CRYPTO_QUEUED:
2087 		/* esp_kcf_callback() will be invoked on completion */
2088 		ESP_BUMP_STAT(crypto_async);
2089 		return (IPSEC_STATUS_PENDING);
2090 	}
2091 
2092 	esp_crypto_failed(ipsec_mp, B_FALSE, kef_rc);
2093 	return (IPSEC_STATUS_FAILED);
2094 }
2095 
2096 /*
2097  * Handle outbound IPsec (ESP) processing for IPv4 and IPv6.
2098  * Returns IPSEC_STATUS_SUCCESS or IPSEC_STATUS_PENDING on success; on
2099  * failure, frees the mblk chain mp and returns IPSEC_STATUS_FAILED.
2100  */
2101 static ipsec_status_t
2102 esp_outbound(mblk_t *mp)
2103 {
2104 	mblk_t *ipsec_out_mp, *data_mp, *espmp, *tailmp;
2105 	ipsec_out_t *io;
2106 	ipha_t *ipha;
2107 	ip6_t *ip6h;
2108 	esph_t *esph;
2109 	uint_t af;
2110 	uint8_t *nhp;
2111 	uintptr_t divpoint, datalen, adj, padlen, i, alloclen;
2112 	uintptr_t esplen = sizeof (esph_t);
2113 	uint8_t protocol;
2114 	ipsa_t *assoc;
2115 	uint_t iv_len = 0, mac_len = 0;
2116 	uchar_t *icv_buf;
2117 	udpha_t *udpha;
2118 	boolean_t is_natt = B_FALSE;
2119 
2120 	ESP_BUMP_STAT(out_requests);
2121 
2122 	ipsec_out_mp = mp;
2123 	data_mp = ipsec_out_mp->b_cont;
2124 
2125 	/*
2126 	 * <sigh> We have to copy the message here, because TCP (for example)
2127 	 * keeps a dupb() of the message lying around for retransmission.
2128 	 * Since ESP changes the whole of the datagram, we have to create our
2129 	 * own copy lest we clobber TCP's data.  Since we have to copy anyway,
2130 	 * we might as well make use of msgpullup() and get the mblk into one
2131 	 * contiguous piece!
2132 	 */
2133 	ipsec_out_mp->b_cont = msgpullup(data_mp, -1);
2134 	if (ipsec_out_mp->b_cont == NULL) {
2135 		esp0dbg(("esp_outbound: msgpullup() failed, "
2136 		    "dropping packet.\n"));
2137 		ipsec_out_mp->b_cont = data_mp;
2138 		/*
2139 		 * TODO:  Find the outbound IRE for this packet and
2140 		 * pass it to ip_drop_packet().
2141 		 */
2142 		ip_drop_packet(ipsec_out_mp, B_FALSE, NULL, NULL,
2143 		    &ipdrops_esp_nomem, &esp_dropper);
2144 		return (IPSEC_STATUS_FAILED);
2145 	} else {
2146 		freemsg(data_mp);
2147 		data_mp = ipsec_out_mp->b_cont;
2148 	}
2149 
2150 	io = (ipsec_out_t *)ipsec_out_mp->b_rptr;
2151 
2152 	/*
2153 	 * Reality check....
2154 	 */
2155 
2156 	ipha = (ipha_t *)data_mp->b_rptr;  /* So we can call esp_acquire(). */
2157 
2158 	if (io->ipsec_out_v4) {
2159 		af = AF_INET;
2160 		divpoint = IPH_HDR_LENGTH(ipha);
2161 		datalen = ntohs(ipha->ipha_length) - divpoint;
2162 		nhp = (uint8_t *)&ipha->ipha_protocol;
2163 	} else {
2164 		ip6_pkt_t ipp;
2165 
2166 		af = AF_INET6;
2167 		ip6h = (ip6_t *)ipha;
2168 		bzero(&ipp, sizeof (ipp));
2169 		divpoint = ip_find_hdr_v6(data_mp, ip6h, &ipp, NULL);
2170 		if (ipp.ipp_dstopts != NULL &&
2171 		    ipp.ipp_dstopts->ip6d_nxt != IPPROTO_ROUTING) {
2172 			/*
2173 			 * Destination options are tricky.  If we get in here,
2174 			 * then we have a terminal header following the
2175 			 * destination options.  We need to adjust backwards
2176 			 * so we insert ESP BEFORE the destination options
2177 			 * bag.  (So that the dstopts get encrypted!)
2178 			 *
2179 			 * Since this is for outbound packets only, we know
2180 			 * that non-terminal destination options only precede
2181 			 * routing headers.
2182 			 */
2183 			divpoint -= ipp.ipp_dstoptslen;
2184 		}
2185 		datalen = ntohs(ip6h->ip6_plen) + sizeof (ip6_t) - divpoint;
2186 
2187 		if (ipp.ipp_rthdr != NULL) {
2188 			nhp = &ipp.ipp_rthdr->ip6r_nxt;
2189 		} else if (ipp.ipp_hopopts != NULL) {
2190 			nhp = &ipp.ipp_hopopts->ip6h_nxt;
2191 		} else {
2192 			ASSERT(divpoint == sizeof (ip6_t));
2193 			/* It's probably IP + ESP. */
2194 			nhp = &ip6h->ip6_nxt;
2195 		}
2196 	}
2197 	assoc = io->ipsec_out_esp_sa;
2198 	ASSERT(assoc != NULL);
2199 
2200 	if (assoc->ipsa_usetime == 0)
2201 		esp_set_usetime(assoc, B_FALSE);
2202 
2203 	if (assoc->ipsa_auth_alg != SADB_AALG_NONE)
2204 		mac_len = assoc->ipsa_mac_len;
2205 
2206 	if (assoc->ipsa_flags & IPSA_F_NATT) {
2207 		/* wedge in fake UDP */
2208 		is_natt = B_TRUE;
2209 		esplen += UDPH_SIZE;
2210 	}
2211 
2212 	if (assoc->ipsa_encr_alg != SADB_EALG_NULL)
2213 		iv_len = assoc->ipsa_iv_len;
2214 
2215 	/*
2216 	 * Set up ESP header and encryption padding for ENCR PI request.
2217 	 */
2218 
2219 	/*
2220 	 * Determine the padding length.  Pad to the cipher block size
2221 	 * (iv_len) when encrypting, otherwise to 4 bytes.
2222 	 * Include the two additional bytes (hence the - 2) for the padding
2223 	 * length and the next header.  Take this into account when
2224 	 * calculating the actual length of the padding.
2225 	 */
2226 
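	/*
	 * For example, with AES-CBC (iv_len == 16) and datalen == 22,
	 * padlen = (16 - 22 - 2) % 16 = 8, so datalen + padlen + 2 == 32,
	 * a multiple of the 16-byte block size.
	 */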
2227 	if (assoc->ipsa_encr_alg != SADB_EALG_NULL) {
2228 		padlen = ((unsigned)(iv_len - datalen - 2)) % iv_len;
2229 	} else {
2230 		padlen = ((unsigned)(sizeof (uint32_t) - datalen - 2)) %
2231 		    sizeof (uint32_t);
2232 	}
2233 
2234 	/* Allocate ESP header and IV. */
2235 	esplen += iv_len;
2236 
2237 	/*
2238 	 * Update association byte-count lifetimes.  Don't forget to take
2239 	 * into account the padding length and next-header (hence the + 2).
2240 	 *
2241 	 * Use the amount of data fed into the "encryption algorithm".  This
2242 	 * is the IV, the data length, the padding length, and the final two
2243 	 * bytes (padlen, and next-header).
2244 	 *
2245 	 */
2246 
2247 	if (!esp_age_bytes(assoc, datalen + padlen + iv_len + 2, B_FALSE)) {
2248 		/*
2249 		 * TODO:  Find the outbound IRE for this packet and
2250 		 * pass it to ip_drop_packet().
2251 		 */
2252 		ip_drop_packet(mp, B_FALSE, NULL, NULL,
2253 		    &ipdrops_esp_bytes_expire, &esp_dropper);
2254 		return (IPSEC_STATUS_FAILED);
2255 	}
2256 
2257 	espmp = allocb(esplen, BPRI_HI);
2258 	if (espmp == NULL) {
2259 		ESP_BUMP_STAT(out_discards);
2260 		esp1dbg(("esp_outbound: can't allocate espmp.\n"));
2261 		/*
2262 		 * TODO:  Find the outbound IRE for this packet and
2263 		 * pass it to ip_drop_packet().
2264 		 */
2265 		ip_drop_packet(mp, B_FALSE, NULL, NULL, &ipdrops_esp_nomem,
2266 		    &esp_dropper);
2267 		return (IPSEC_STATUS_FAILED);
2268 	}
2269 	espmp->b_wptr += esplen;
2270 	esph = (esph_t *)espmp->b_rptr;
2271 
2272 	if (is_natt) {
2273 		esp3dbg(("esp_outbound: NATT"));
2274 
2275 		udpha = (udpha_t *)espmp->b_rptr;
2276 		udpha->uha_src_port = htons(IPPORT_IKE_NATT);
2277 		if (assoc->ipsa_remote_port != 0)
2278 			udpha->uha_dst_port = assoc->ipsa_remote_port;
2279 		else
2280 			udpha->uha_dst_port = htons(IPPORT_IKE_NATT);
2281 		/*
2282 		 * Set the checksum to 0, so that ip_wput_ipsec_out()
2283 		 * can do the right thing.
2284 		 */
2285 		udpha->uha_checksum = 0;
2286 		esph = (esph_t *)(udpha + 1);
2287 	}
2288 
2289 	esph->esph_spi = assoc->ipsa_spi;
2290 
2291 	esph->esph_replay = htonl(atomic_add_32_nv(&assoc->ipsa_replay, 1));
2292 	if (esph->esph_replay == 0 && assoc->ipsa_replay_wsize != 0) {
2293 		/*
2294 		 * XXX We have replay counter wrapping.
2295 		 * We probably want to nuke this SA (and its peer).
2296 		 */
2297 		ipsec_assocfailure(info.mi_idnum, 0, 0,
2298 		    SL_ERROR | SL_CONSOLE | SL_WARN,
2299 		    "Outbound ESP SA (0x%x, %s) has wrapped sequence.\n",
2300 		    esph->esph_spi, assoc->ipsa_dstaddr, af);
2301 
2302 		ESP_BUMP_STAT(out_discards);
2303 		sadb_replay_delete(assoc);
2304 		/*
2305 		 * TODO:  Find the outbound IRE for this packet and
2306 		 * pass it to ip_drop_packet().
2307 		 */
2308 		ip_drop_packet(mp, B_FALSE, NULL, NULL, &ipdrops_esp_replay,
2309 		    &esp_dropper);
2310 		return (IPSEC_STATUS_FAILED);
2311 	}
2312 
2313 	/*
2314 	 * Set the IV to a random quantity.  We do not require the
2315 	 * highest quality random bits, but for best security with CBC
2316 	 * mode ciphers, the value must be unlikely to repeat and also
2317 	 * must not be known in advance to an adversary capable of
2318 	 * influencing the plaintext.
2319 	 */
2320 	(void) random_get_pseudo_bytes((uint8_t *)(esph + 1), iv_len);
2321 
2322 	/* Fix the IP header. */
2323 	alloclen = padlen + 2 + mac_len;
2324 	adj = alloclen + (espmp->b_wptr - espmp->b_rptr);
2325 
2326 	protocol = *nhp;
2327 
2328 	if (io->ipsec_out_v4) {
2329 		ipha->ipha_length = htons(ntohs(ipha->ipha_length) + adj);
2330 		if (is_natt) {
2331 			*nhp = IPPROTO_UDP;
2332 			udpha->uha_length = htons(ntohs(ipha->ipha_length) -
2333 			    IPH_HDR_LENGTH(ipha));
2334 		} else {
2335 			*nhp = IPPROTO_ESP;
2336 		}
2337 		ipha->ipha_hdr_checksum = 0;
2338 		ipha->ipha_hdr_checksum = (uint16_t)ip_csum_hdr(ipha);
2339 	} else {
2340 		ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) + adj);
2341 		*nhp = IPPROTO_ESP;
2342 	}
2343 
2344 	/* I've got the two ESP mblks, now insert them. */
2345 
2346 	esp2dbg(("data_mp before outbound ESP adjustment:\n"));
2347 	esp2dbg((dump_msg(data_mp)));
2348 
2349 	if (!esp_insert_esp(data_mp, espmp, divpoint)) {
2350 		ESP_BUMP_STAT(out_discards);
2351 		/* NOTE:  esp_insert_esp() only fails if there's no memory. */
2352 		/*
2353 		 * TODO:  Find the outbound IRE for this packet and
2354 		 * pass it to ip_drop_packet().
2355 		 */
2356 		ip_drop_packet(mp, B_FALSE, NULL, NULL, &ipdrops_esp_nomem,
2357 		    &esp_dropper);
2358 		freeb(espmp);
2359 		return (IPSEC_STATUS_FAILED);
2360 	}
2361 
2362 	/* Append padding (and leave room for ICV). */
2363 	for (tailmp = data_mp; tailmp->b_cont != NULL; tailmp = tailmp->b_cont)
2364 		;
2365 	if (tailmp->b_wptr + alloclen > tailmp->b_datap->db_lim) {
2366 		tailmp->b_cont = allocb(alloclen, BPRI_HI);
2367 		if (tailmp->b_cont == NULL) {
2368 			ESP_BUMP_STAT(out_discards);
2369 			esp0dbg(("esp_outbound:  Can't allocate tailmp.\n"));
2370 			/*
2371 			 * TODO:  Find the outbound IRE for this packet and
2372 			 * pass it to ip_drop_packet().
2373 			 */
2374 			ip_drop_packet(mp, B_FALSE, NULL, NULL,
2375 			    &ipdrops_esp_nomem, &esp_dropper);
2376 			return (IPSEC_STATUS_FAILED);
2377 		}
2378 		tailmp = tailmp->b_cont;
2379 	}
2380 
2381 	/*
2382 	 * If there's padding, N bytes of padding must be of the form 0x1,
2383 	 * 0x2, 0x3... 0xN.
2384 	 */
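	/*
	 * For example, padlen == 3 yields the trailer bytes 0x01 0x02 0x03,
	 * followed by the pad length (3) and the original next-header value.
	 */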
2385 	for (i = 0; i < padlen; ) {
2386 		i++;
2387 		*tailmp->b_wptr++ = i;
2388 	}
2389 	*tailmp->b_wptr++ = i;
2390 	*tailmp->b_wptr++ = protocol;
2391 
2392 	esp2dbg(("data_mp before encryption:\n"));
2393 	esp2dbg((dump_msg(data_mp)));
2394 
2395 	/*
2396 	 * The packet is eligible for hardware acceleration if the
2397 	 * following conditions are satisfied:
2398 	 *
2399 	 * 1. the packet will not be fragmented
2400 	 * 2. the provider supports the algorithms specified by SA
2401 	 * 3. there is no pending control message being exchanged
2402 	 * 4. snoop is not attached
2403 	 * 5. the destination address is not a multicast address
2404 	 *
2405 	 * All five of these conditions are checked by IP prior to
2406 	 * sending the packet to ESP.
2407 	 *
2408 	 * But We, and We Alone, can, nay MUST check if the packet
2409 	 * is over NATT, and then disqualify it from hardware
2410 	 * acceleration.
2411 	 */
2412 
2413 	if (io->ipsec_out_is_capab_ill && !(assoc->ipsa_flags & IPSA_F_NATT)) {
2414 		return (esp_outbound_accelerated(ipsec_out_mp, mac_len));
2415 	}
2416 	ESP_BUMP_STAT(noaccel);
2417 
2418 	/*
2419 	 * Okay.  I've set up the pre-encryption ESP.  Let's do it!
2420 	 */
2421 
2422 	if (mac_len > 0) {
2423 		ASSERT(tailmp->b_wptr + mac_len <= tailmp->b_datap->db_lim);
2424 		icv_buf = tailmp->b_wptr;
2425 		tailmp->b_wptr += mac_len;
2426 	} else {
2427 		icv_buf = NULL;
2428 	}
2429 
2430 	return (esp_submit_req_outbound(ipsec_out_mp, assoc, icv_buf,
2431 	    datalen + padlen + 2));
2432 }
2433 
2434 /*
2435  * IP calls this to validate the ICMP errors that
2436  * we got from the network.
2437  */
2438 ipsec_status_t
2439 ipsecesp_icmp_error(mblk_t *ipsec_mp)
2440 {
2441 	/*
2442 	 * Unless we get an entire packet back, this function is useless.
2443 	 * Why?
2444 	 *
2445 	 * 1.)	Partial packets are useless, because the "next header"
2446 	 *	is at the end of the decrypted ESP packet.  Without the
2447 	 *	whole packet, this is useless.
2448 	 *
2449 	 * 2.)	If we ever use a stateful cipher, such as a stream or a
2450 	 *	one-time pad, we can't do anything.
2451 	 *
2452 	 * Since the chances of us getting an entire packet back are very
2453 	 * very small, we discard here.
2454 	 */
2455 	IP_ESP_BUMP_STAT(in_discards);
2456 	ip_drop_packet(ipsec_mp, B_TRUE, NULL, NULL, &ipdrops_esp_icmp,
2457 	    &esp_dropper);
2458 	return (IPSEC_STATUS_FAILED);
2459 }
2460 
2461 /*
2462  * ESP module read put routine.
2463  */
2464 /* ARGSUSED */
2465 static void
2466 ipsecesp_rput(queue_t *q, mblk_t *mp)
2467 {
2468 	keysock_in_t *ksi;
2469 	int *addrtype;
2470 	ire_t *ire;
2471 	mblk_t *ire_mp, *last_mp;
2472 
2473 	switch (mp->b_datap->db_type) {
2474 	case M_CTL:
2475 		/*
2476 		 * IPsec request of some variety from IP.  IPSEC_{IN,OUT}
2477 		 * are the common cases, but even ICMP error messages from IP
2478 		 * may rise up here.
2479 		 *
2480 		 * Ummmm, actually, this can also be the reflected KEYSOCK_IN
2481 		 * message, with an IRE_DB_TYPE hung off at the end.
2482 		 */
2483 		switch (((ipsec_info_t *)(mp->b_rptr))->ipsec_info_type) {
2484 		case KEYSOCK_IN:
2485 			last_mp = mp;
2486 			while (last_mp->b_cont != NULL &&
2487 			    last_mp->b_cont->b_datap->db_type != IRE_DB_TYPE)
2488 				last_mp = last_mp->b_cont;
2489 
2490 			if (last_mp->b_cont == NULL) {
2491 				freemsg(mp);
2492 				break;	/* Out of switch. */
2493 			}
2494 
2495 			ire_mp = last_mp->b_cont;
2496 			last_mp->b_cont = NULL;
2497 
2498 			ksi = (keysock_in_t *)mp->b_rptr;
2499 
2500 			if (ksi->ks_in_srctype == KS_IN_ADDR_UNKNOWN)
2501 				addrtype = &ksi->ks_in_srctype;
2502 			else if (ksi->ks_in_dsttype == KS_IN_ADDR_UNKNOWN)
2503 				addrtype = &ksi->ks_in_dsttype;
2504 			else if (ksi->ks_in_proxytype == KS_IN_ADDR_UNKNOWN)
2505 				addrtype = &ksi->ks_in_proxytype;
2506 
2507 			ire = (ire_t *)ire_mp->b_rptr;
2508 
2509 			*addrtype = sadb_addrset(ire);
2510 
2511 			freemsg(ire_mp);
2512 			if (esp_pfkey_q != NULL) {
2513 				/*
2514 				 * Decrement counter to make up for
2515 				 * auto-increment in ipsecesp_wput().
2516 				 * I'm running all MT-hot through here, so
2517 				 * don't worry about perimeters and lateral
2518 				 * puts.
2519 				 */
2520 				ESP_DEBUMP_STAT(keysock_in);
2521 				ipsecesp_wput(WR(esp_pfkey_q), mp);
2522 			} else {
2523 				freemsg(mp);
2524 			}
2525 			break;
2526 		default:
2527 			freemsg(mp);
2528 			break;
2529 		}
2530 		break;
2531 	case M_PROTO:
2532 	case M_PCPROTO:
2533 		/* TPI message of some sort. */
2534 		switch (*((t_scalar_t *)mp->b_rptr)) {
2535 		case T_BIND_ACK:
2536 			esp3dbg(("Thank you IP from ESP for T_BIND_ACK\n"));
2537 			break;
2538 		case T_ERROR_ACK:
2539 			cmn_err(CE_WARN,
2540 			    "ipsecesp:  ESP received T_ERROR_ACK from IP.");
2541 			/*
2542 			 * Make esp_sadb.s_ip_q NULL, and in the
2543 			 * future, perhaps try again.
2544 			 */
2545 			esp_sadb.s_ip_q = NULL;
2546 			break;
2547 		case T_OK_ACK:
2548 			/* Probably from a (rarely sent) T_UNBIND_REQ. */
2549 			break;
2550 		default:
2551 			esp0dbg(("Unknown M_{,PC}PROTO message.\n"));
2552 		}
2553 		freemsg(mp);
2554 		break;
2555 	default:
2556 		/* For now, passthru message. */
2557 		esp2dbg(("ESP got unknown mblk type %d.\n",
2558 		    mp->b_datap->db_type));
2559 		putnext(q, mp);
2560 	}
2561 }
2562 
2563 /*
2564  * Construct an SADB_REGISTER message with the current algorithms.
2565  */
2566 static boolean_t
2567 esp_register_out(uint32_t sequence, uint32_t pid, uint_t serial)
2568 {
2569 	mblk_t *pfkey_msg_mp, *keysock_out_mp;
2570 	sadb_msg_t *samsg;
2571 	sadb_supported_t *sasupp_auth = NULL;
2572 	sadb_supported_t *sasupp_encr = NULL;
2573 	sadb_alg_t *saalg;
2574 	uint_t allocsize = sizeof (*samsg);
2575 	uint_t i, numalgs_snap;
2576 	int current_aalgs;
2577 	ipsec_alginfo_t **authalgs;
2578 	uint_t num_aalgs;
2579 	int current_ealgs;
2580 	ipsec_alginfo_t **encralgs;
2581 	uint_t num_ealgs;
2582 
2583 	/* Allocate the KEYSOCK_OUT. */
2584 	keysock_out_mp = sadb_keysock_out(serial);
2585 	if (keysock_out_mp == NULL) {
2586 		esp0dbg(("esp_register_out: couldn't allocate mblk.\n"));
2587 		return (B_FALSE);
2588 	}
2589 
2590 	/*
2591 	 * Allocate the PF_KEY message that follows KEYSOCK_OUT.
2592 	 */
2593 
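	/*
	 * The PF_KEY message built below is laid out as:  sadb_msg_t, then
	 * (if there are valid auth algorithms) an SADB_EXT_SUPPORTED_AUTH
	 * sadb_supported_t followed by one sadb_alg_t per algorithm, then
	 * (if there are valid encryption algorithms) an
	 * SADB_EXT_SUPPORTED_ENCRYPT sadb_supported_t followed by one
	 * sadb_alg_t per algorithm.
	 */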
2594 	mutex_enter(&alg_lock);
2595 
2596 	/*
2597 	 * Fill SADB_REGISTER message's algorithm descriptors.  Hold
2598 	 * down the lock while filling it.
2599 	 *
2600 	 * Return only valid algorithms, so the number of algorithms
2601 	 * to send up may be less than the number of algorithm entries
2602 	 * in the table.
2603 	 */
2604 	authalgs = ipsec_alglists[IPSEC_ALG_AUTH];
2605 	for (num_aalgs = 0, i = 0; i < IPSEC_MAX_ALGS; i++)
2606 		if (authalgs[i] != NULL && ALG_VALID(authalgs[i]))
2607 			num_aalgs++;
2608 
2609 	if (num_aalgs != 0) {
2610 		allocsize += (num_aalgs * sizeof (*saalg));
2611 		allocsize += sizeof (*sasupp_auth);
2612 	}
2613 	encralgs = ipsec_alglists[IPSEC_ALG_ENCR];
2614 	for (num_ealgs = 0, i = 0; i < IPSEC_MAX_ALGS; i++)
2615 		if (encralgs[i] != NULL && ALG_VALID(encralgs[i]))
2616 			num_ealgs++;
2617 
2618 	if (num_ealgs != 0) {
2619 		allocsize += (num_ealgs * sizeof (*saalg));
2620 		allocsize += sizeof (*sasupp_encr);
2621 	}
2622 	keysock_out_mp->b_cont = allocb(allocsize, BPRI_HI);
2623 	if (keysock_out_mp->b_cont == NULL) {
2624 		mutex_exit(&alg_lock);
2625 		freemsg(keysock_out_mp);
2626 		return (B_FALSE);
2627 	}
2628 
2629 	pfkey_msg_mp = keysock_out_mp->b_cont;
2630 	pfkey_msg_mp->b_wptr += allocsize;
2631 	if (num_aalgs != 0) {
2632 		sasupp_auth = (sadb_supported_t *)
2633 		    (pfkey_msg_mp->b_rptr + sizeof (*samsg));
2634 		saalg = (sadb_alg_t *)(sasupp_auth + 1);
2635 
2636 		ASSERT(((ulong_t)saalg & 0x7) == 0);
2637 
2638 		numalgs_snap = 0;
2639 		for (i = 0;
2640 		    ((i < IPSEC_MAX_ALGS) && (numalgs_snap < num_aalgs)); i++) {
2641 			if (authalgs[i] == NULL || !ALG_VALID(authalgs[i]))
2642 				continue;
2643 
2644 			saalg->sadb_alg_id = authalgs[i]->alg_id;
2645 			saalg->sadb_alg_ivlen = 0;
2646 			saalg->sadb_alg_minbits	= authalgs[i]->alg_ef_minbits;
2647 			saalg->sadb_alg_maxbits	= authalgs[i]->alg_ef_maxbits;
2648 			saalg->sadb_x_alg_defincr = authalgs[i]->alg_ef_default;
2649 			saalg->sadb_x_alg_increment =
2650 			    authalgs[i]->alg_increment;
2651 			numalgs_snap++;
2652 			saalg++;
2653 		}
2654 		ASSERT(numalgs_snap == num_aalgs);
2655 #ifdef DEBUG
2656 		/*
2657 		 * Reality check to make sure I snagged all of the
2658 		 * algorithms.
2659 		 */
2660 		for (; i < IPSEC_MAX_ALGS; i++) {
2661 			if (authalgs[i] != NULL && ALG_VALID(authalgs[i])) {
2662 				cmn_err(CE_PANIC, "esp_register_out()! "
2663 				    "Missed aalg #%d.\n", i);
2664 			}
2665 		}
2666 #endif /* DEBUG */
2667 	} else {
2668 		saalg = (sadb_alg_t *)(pfkey_msg_mp->b_rptr + sizeof (*samsg));
2669 	}
2670 
2671 	if (num_ealgs != 0) {
2672 		sasupp_encr = (sadb_supported_t *)saalg;
2673 		saalg = (sadb_alg_t *)(sasupp_encr + 1);
2674 
2675 		numalgs_snap = 0;
2676 		for (i = 0;
2677 		    ((i < IPSEC_MAX_ALGS) && (numalgs_snap < num_ealgs)); i++) {
2678 			if (encralgs[i] == NULL || !ALG_VALID(encralgs[i]))
2679 				continue;
2680 			saalg->sadb_alg_id = encralgs[i]->alg_id;
2681 			saalg->sadb_alg_ivlen = encralgs[i]->alg_datalen;
2682 			saalg->sadb_alg_minbits	= encralgs[i]->alg_ef_minbits;
2683 			saalg->sadb_alg_maxbits	= encralgs[i]->alg_ef_maxbits;
2684 			saalg->sadb_x_alg_defincr = encralgs[i]->alg_ef_default;
2685 			saalg->sadb_x_alg_increment =
2686 			    encralgs[i]->alg_increment;
2687 			numalgs_snap++;
2688 			saalg++;
2689 		}
2690 		ASSERT(numalgs_snap == num_ealgs);
2691 #ifdef DEBUG
2692 		/*
2693 		 * Reality check to make sure I snagged all of the
2694 		 * algorithms.
2695 		 */
2696 		for (; i < IPSEC_MAX_ALGS; i++) {
2697 			if (encralgs[i] != NULL && ALG_VALID(encralgs[i])) {
2698 				cmn_err(CE_PANIC, "esp_register_out()! "
2699 				    "Missed ealg #%d.\n", i);
2700 			}
2701 		}
2702 #endif /* DEBUG */
2703 	}
2704 
2705 	current_aalgs = num_aalgs;
2706 	current_ealgs = num_ealgs;
2707 
2708 	mutex_exit(&alg_lock);
2709 
2710 	/* Now fill the rest of the SADB_REGISTER message. */
2711 
2712 	samsg = (sadb_msg_t *)pfkey_msg_mp->b_rptr;
2713 	samsg->sadb_msg_version = PF_KEY_V2;
2714 	samsg->sadb_msg_type = SADB_REGISTER;
2715 	samsg->sadb_msg_errno = 0;
2716 	samsg->sadb_msg_satype = SADB_SATYPE_ESP;
2717 	samsg->sadb_msg_len = SADB_8TO64(allocsize);
2718 	samsg->sadb_msg_reserved = 0;
2719 	/*
2720 	 * Assume caller has sufficient sequence/pid number info.  If it's one
2721 	 * from me over a new alg., I could give two hoots about sequence.
2722 	 */
2723 	samsg->sadb_msg_seq = sequence;
2724 	samsg->sadb_msg_pid = pid;
2725 
2726 	if (sasupp_auth != NULL) {
2727 		sasupp_auth->sadb_supported_len =
2728 		    SADB_8TO64(sizeof (*sasupp_auth) +
2729 			sizeof (*saalg) * current_aalgs);
2730 		sasupp_auth->sadb_supported_exttype = SADB_EXT_SUPPORTED_AUTH;
2731 		sasupp_auth->sadb_supported_reserved = 0;
2732 	}
2733 
2734 	if (sasupp_encr != NULL) {
2735 		sasupp_encr->sadb_supported_len =
2736 		    SADB_8TO64(sizeof (*sasupp_encr) +
2737 			sizeof (*saalg) * current_ealgs);
2738 		sasupp_encr->sadb_supported_exttype =
2739 		    SADB_EXT_SUPPORTED_ENCRYPT;
2740 		sasupp_encr->sadb_supported_reserved = 0;
2741 	}
2742 
2743 	if (esp_pfkey_q != NULL)
2744 		putnext(esp_pfkey_q, keysock_out_mp);
2745 	else {
2746 		freemsg(keysock_out_mp);
2747 		return (B_FALSE);
2748 	}
2749 
2750 	return (B_TRUE);
2751 }
2752 
2753 /*
2754  * Invoked when the algorithm table changes. Causes SADB_REGISTER
2755  * messages containing the current list of algorithms to be
2756  * sent up to the ESP listeners.
2757  */
2758 void
2759 ipsecesp_algs_changed(void)
2760 {
2761 	/*
2762 	 * Time to send a PF_KEY SADB_REGISTER message to ESP listeners
2763 	 * everywhere.  (The function itself checks for NULL esp_pfkey_q.)
2764 	 */
2765 	(void) esp_register_out(0, 0, 0);
2766 }
2767 
2768 /*
2769  * taskq_dispatch handler.
2770  */
2771 static void
2772 inbound_task(void *arg)
2773 {
2774 	esph_t *esph;
2775 	mblk_t *mp = (mblk_t *)arg;
2776 	ipsec_in_t *ii = (ipsec_in_t *)mp->b_rptr;
2777 	int ipsec_rc;
2778 
2779 	esp2dbg(("in ESP inbound_task"));
2780 
2781 	esph = ipsec_inbound_esp_sa(mp);
2782 	if (esph == NULL)
2783 		return;
2784 	ASSERT(ii->ipsec_in_esp_sa != NULL);
2785 	ipsec_rc = ii->ipsec_in_esp_sa->ipsa_input_func(mp, esph);
2786 	if (ipsec_rc != IPSEC_STATUS_SUCCESS)
2787 		return;
2788 	ip_fanout_proto_again(mp, NULL, NULL, NULL);
2789 }
2790 
2791 /*
2792  * Now that weak-key passed, actually ADD the security association, and
2793  * send back a reply ADD message.
2794  */
2795 static int
2796 esp_add_sa_finish(mblk_t *mp, sadb_msg_t *samsg, keysock_in_t *ksi)
2797 {
2798 	isaf_t *primary, *secondary, *inbound, *outbound;
2799 	sadb_sa_t *assoc = (sadb_sa_t *)ksi->ks_in_extv[SADB_EXT_SA];
2800 	sadb_address_t *dstext =
2801 	    (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_DST];
2802 	struct sockaddr_in *dst;
2803 	struct sockaddr_in6 *dst6;
2804 	boolean_t is_ipv4, clone = B_FALSE, is_inbound = B_FALSE;
2805 	uint32_t *dstaddr;
2806 	ipsa_t *larval = NULL;
2807 	ipsacq_t *acqrec;
2808 	iacqf_t *acq_bucket;
2809 	mblk_t *acq_msgs = NULL;
2810 	int rc;
2811 	sadb_t *sp;
2812 	int outhash;
2813 	mblk_t *lpkt;
2814 
2815 	/*
2816 	 * Locate the appropriate table(s).
2817 	 */
2818 
2819 	dst = (struct sockaddr_in *)(dstext + 1);
2820 	dst6 = (struct sockaddr_in6 *)dst;
2821 	is_ipv4 = (dst->sin_family == AF_INET);
2822 	if (is_ipv4) {
2823 		sp = &esp_sadb.s_v4;
2824 		dstaddr = (uint32_t *)(&dst->sin_addr);
2825 		outhash = OUTBOUND_HASH_V4(sp, *(ipaddr_t *)dstaddr);
2826 	} else {
2827 		sp = &esp_sadb.s_v6;
2828 		dstaddr = (uint32_t *)(&dst6->sin6_addr);
2829 		outhash = OUTBOUND_HASH_V6(sp, *(in6_addr_t *)dstaddr);
2830 	}
2831 
2832 	inbound = INBOUND_BUCKET(sp, assoc->sadb_sa_spi);
2833 	outbound = &sp->sdb_of[outhash];
2834 
2835 	switch (ksi->ks_in_dsttype) {
2836 	case KS_IN_ADDR_MBCAST:
2837 		clone = B_TRUE;	/* All mcast SAs can be bidirectional */
2838 		/* FALLTHRU */
2839 	case KS_IN_ADDR_ME:
2840 		primary = inbound;
2841 		secondary = outbound;
2842 		/*
2843 		 * If the source address is either one of mine, or unspecified
2844 		 * (which is best summed up by saying "not 'not mine'"),
2845 		 * then the association is potentially bi-directional,
2846 		 * in that it can be used for inbound traffic and outbound
2847 		 * traffic.  The best example of such an SA is a multicast
2848 		 * SA (which allows me to receive the outbound traffic).
2849 		 */
2850 		if (ksi->ks_in_srctype != KS_IN_ADDR_NOTME)
2851 			clone = B_TRUE;
2852 		is_inbound = B_TRUE;
2853 		break;
2854 	case KS_IN_ADDR_NOTME:
2855 		primary = outbound;
2856 		secondary = inbound;
2857 		/*
2858 		 * If the source address is literally not mine (either
2859 		 * unspecified or not mine), then this SA may have an
2860 		 * address that WILL be mine after some configuration.
2861 		 * We pay the price for this by making it a bi-directional
2862 		 * SA.
2863 		 */
2864 		if (ksi->ks_in_srctype != KS_IN_ADDR_ME)
2865 			clone = B_TRUE;
2866 		break;
2867 	default:
2868 		samsg->sadb_x_msg_diagnostic = SADB_X_DIAGNOSTIC_BAD_DST;
2869 		return (EINVAL);
2870 	}
2871 
2872 	/*
2873 	 * Find an ACQUIRE list entry if possible.  If we've added an SA that
2874 	 * suits the needs of an ACQUIRE list entry, we can eliminate the
2875 	 * ACQUIRE list entry and transmit the enqueued packets.  Use the
2876 	 * high-bit of the sequence number to queue it.  Key off destination
2877 	 * addr, and change acqrec's state.
2878 	 */
2879 
2880 	if (samsg->sadb_msg_seq & IACQF_LOWEST_SEQ) {
2881 		acq_bucket = &sp->sdb_acq[outhash];
2882 		mutex_enter(&acq_bucket->iacqf_lock);
2883 		for (acqrec = acq_bucket->iacqf_ipsacq; acqrec != NULL;
2884 		    acqrec = acqrec->ipsacq_next) {
2885 			mutex_enter(&acqrec->ipsacq_lock);
2886 			/*
2887 			 * Q:  I only check sequence.  Should I check dst?
2888 			 * A: Yes, check dest because those are the packets
2889 			 *    that are queued up.
2890 			 */
2891 			if (acqrec->ipsacq_seq == samsg->sadb_msg_seq &&
2892 			    IPSA_ARE_ADDR_EQUAL(dstaddr,
2893 				acqrec->ipsacq_dstaddr, acqrec->ipsacq_addrfam))
2894 				break;
2895 			mutex_exit(&acqrec->ipsacq_lock);
2896 		}
2897 		if (acqrec != NULL) {
2898 			/*
2899 			 * AHA!  I found an ACQUIRE record for this SA.
2900 			 * Grab the msg list, and free the acquire record.
2901 			 * I already am holding the lock for this record,
2902 			 * so all I have to do is free it.
2903 			 */
2904 			acq_msgs = acqrec->ipsacq_mp;
2905 			acqrec->ipsacq_mp = NULL;
2906 			mutex_exit(&acqrec->ipsacq_lock);
2907 			sadb_destroy_acquire(acqrec);
2908 		}
2909 		mutex_exit(&acq_bucket->iacqf_lock);
2910 	}
2911 
2912 	/*
2913 	 * Find PF_KEY message, and see if I'm an update.  If so, find entry
2914 	 * in larval list (if there).
2915 	 */
2916 
2917 	if (samsg->sadb_msg_type == SADB_UPDATE) {
2918 		mutex_enter(&inbound->isaf_lock);
2919 		larval = ipsec_getassocbyspi(inbound, assoc->sadb_sa_spi,
2920 		    ALL_ZEROES_PTR, dstaddr, dst->sin_family);
2921 		mutex_exit(&inbound->isaf_lock);
2922 
2923 		if (larval == NULL) {
2924 			esp0dbg(("Larval update, but larval disappeared.\n"));
2925 			return (ESRCH);
2926 		} /* Else sadb_common_add unlinks it for me! */
2927 	}
2928 
2929 	lpkt = NULL;
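	/*
	 * If the larval SA had an inbound packet queued on it awaiting
	 * keying material, pull it off now so it can be dispatched to
	 * inbound_task() once the completed SA has been added.
	 */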
2930 	if (larval != NULL)
2931 		lpkt = sadb_clear_lpkt(larval);
2932 
2933 	rc = sadb_common_add(esp_sadb.s_ip_q, esp_pfkey_q, mp, samsg, ksi,
2934 	    primary, secondary, larval, clone, is_inbound);
2935 
2936 	if (rc == 0 && lpkt != NULL) {
2937 		rc = !taskq_dispatch(esp_taskq, inbound_task,
2938 			    (void *) lpkt, TQ_NOSLEEP);
2939 	}
2940 
2941 	if (rc != 0) {
2942 		ip_drop_packet(lpkt, B_TRUE, NULL, NULL,
2943 		    &ipdrops_sadb_inlarval_timeout, &esp_dropper);
2944 	}
2945 
2946 	/*
2947 	 * How much more stack will I create with all of these
2948 	 * esp_outbound() calls?
2949 	 */
2950 
2951 	while (acq_msgs != NULL) {
2952 		mblk_t *mp = acq_msgs;
2953 
2954 		acq_msgs = acq_msgs->b_next;
2955 		mp->b_next = NULL;
2956 		if (rc == 0) {
2957 			if (ipsec_outbound_sa(mp, IPPROTO_ESP)) {
2958 				((ipsec_out_t *)(mp->b_rptr))->
2959 				    ipsec_out_esp_done = B_TRUE;
2960 				if (esp_outbound(mp) == IPSEC_STATUS_SUCCESS) {
2961 					ipha_t *ipha = (ipha_t *)
2962 					    mp->b_cont->b_rptr;
2963 
2964 					/* do AH processing if needed */
2965 					if (!esp_do_outbound_ah(mp))
2966 						continue;
2967 
2968 					/* finish IPsec processing */
2969 					if (is_ipv4) {
2970 						ip_wput_ipsec_out(NULL, mp,
2971 						    ipha, NULL, NULL);
2972 					} else {
2973 						ip6_t *ip6h = (ip6_t *)ipha;
2974 						ip_wput_ipsec_out_v6(NULL,
2975 						    mp, ip6h, NULL, NULL);
2976 					}
2977 				}
2978 				continue;
2979 			}
2980 		}
2981 		ESP_BUMP_STAT(out_discards);
2982 		ip_drop_packet(mp, B_FALSE, NULL, NULL,
2983 		    &ipdrops_sadb_acquire_timeout, &esp_dropper);
2984 	}
2985 
2986 	return (rc);
2987 }
2988 
2989 /*
2990  * Add new ESP security association.  This may become a generic AH/ESP
2991  * routine eventually.
2992  */
2993 static int
2994 esp_add_sa(mblk_t *mp, keysock_in_t *ksi, int *diagnostic)
2995 {
2996 	sadb_sa_t *assoc = (sadb_sa_t *)ksi->ks_in_extv[SADB_EXT_SA];
2997 	sadb_address_t *srcext =
2998 	    (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_SRC];
2999 	sadb_address_t *dstext =
3000 	    (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_DST];
3001 	sadb_address_t *nttext_loc =
3002 	    (sadb_address_t *)ksi->ks_in_extv[SADB_X_EXT_ADDRESS_NATT_LOC];
3003 	sadb_address_t *nttext_rem =
3004 	    (sadb_address_t *)ksi->ks_in_extv[SADB_X_EXT_ADDRESS_NATT_REM];
3005 	sadb_key_t *akey = (sadb_key_t *)ksi->ks_in_extv[SADB_EXT_KEY_AUTH];
3006 	sadb_key_t *ekey = (sadb_key_t *)ksi->ks_in_extv[SADB_EXT_KEY_ENCRYPT];
3007 	struct sockaddr_in *src, *dst;
3008 	struct sockaddr_in *natt_loc, *natt_rem;
3009 	struct sockaddr_in6 *natt_loc6, *natt_rem6;
3010 
3011 	sadb_lifetime_t *soft =
3012 	    (sadb_lifetime_t *)ksi->ks_in_extv[SADB_EXT_LIFETIME_SOFT];
3013 	sadb_lifetime_t *hard =
3014 	    (sadb_lifetime_t *)ksi->ks_in_extv[SADB_EXT_LIFETIME_HARD];
3015 
3016 	/* I need certain extensions present for an ADD message. */
3017 	if (srcext == NULL) {
3018 		*diagnostic = SADB_X_DIAGNOSTIC_MISSING_SRC;
3019 		return (EINVAL);
3020 	}
3021 	if (dstext == NULL) {
3022 		*diagnostic = SADB_X_DIAGNOSTIC_MISSING_DST;
3023 		return (EINVAL);
3024 	}
3025 	if (assoc == NULL) {
3026 		*diagnostic = SADB_X_DIAGNOSTIC_MISSING_SA;
3027 		return (EINVAL);
3028 	}
3029 	if (ekey == NULL && assoc->sadb_sa_encrypt != SADB_EALG_NULL) {
3030 		*diagnostic = SADB_X_DIAGNOSTIC_MISSING_EKEY;
3031 		return (EINVAL);
3032 	}
3033 
3034 	src = (struct sockaddr_in *)(srcext + 1);
3035 	dst = (struct sockaddr_in *)(dstext + 1);
3036 	natt_loc = (struct sockaddr_in *)(nttext_loc + 1);
3037 	natt_loc6 = (struct sockaddr_in6 *)(nttext_loc + 1);
3038 	natt_rem = (struct sockaddr_in *)(nttext_rem + 1);
3039 	natt_rem6 = (struct sockaddr_in6 *)(nttext_rem + 1);
3040 
3041 	/* Sundry ADD-specific reality checks. */
3042 	/* XXX STATS :  Logging/stats here? */
3043 	if (assoc->sadb_sa_state != SADB_SASTATE_MATURE) {
3044 		*diagnostic = SADB_X_DIAGNOSTIC_BAD_SASTATE;
3045 		return (EINVAL);
3046 	}
3047 	if (assoc->sadb_sa_encrypt == SADB_EALG_NONE) {
3048 		*diagnostic = SADB_X_DIAGNOSTIC_BAD_EALG;
3049 		return (EINVAL);
3050 	}
3051 
3052 	if (assoc->sadb_sa_encrypt == SADB_EALG_NULL &&
3053 	    assoc->sadb_sa_auth == SADB_AALG_NONE) {
3054 		*diagnostic = SADB_X_DIAGNOSTIC_BAD_AALG;
3055 		return (EINVAL);
3056 	}
3057 
3058 	if (assoc->sadb_sa_flags & ~(SADB_SAFLAGS_NOREPLAY |
3059 	    SADB_X_SAFLAGS_NATT_LOC | SADB_X_SAFLAGS_NATT_REM)) {
3060 		*diagnostic = SADB_X_DIAGNOSTIC_BAD_SAFLAGS;
3061 		return (EINVAL);
3062 	}
3063 
3064 	if ((*diagnostic = sadb_hardsoftchk(hard, soft)) != 0) {
3065 		return (EINVAL);
3066 	}
3067 	if (src->sin_family != dst->sin_family) {
3068 		*diagnostic = SADB_X_DIAGNOSTIC_AF_MISMATCH;
3069 		return (EINVAL);
3070 	}
3071 
3073 	if (assoc->sadb_sa_flags & SADB_X_SAFLAGS_NATT_LOC) {
3074 		if (nttext_loc == NULL) {
3075 			*diagnostic = SADB_X_DIAGNOSTIC_MISSING_NATT_LOC;
3076 			return (EINVAL);
3077 		}
3078 
3079 		if (natt_loc->sin_family == AF_INET6 &&
3080 		    !IN6_IS_ADDR_V4MAPPED(&natt_loc6->sin6_addr)) {
3081 			*diagnostic = SADB_X_DIAGNOSTIC_MALFORMED_NATT_LOC;
3082 			return (EINVAL);
3083 		}
3084 	}
3085 
3086 	if (assoc->sadb_sa_flags & SADB_X_SAFLAGS_NATT_REM) {
3087 		if (nttext_rem == NULL) {
3088 			*diagnostic = SADB_X_DIAGNOSTIC_MISSING_NATT_REM;
3089 			return (EINVAL);
3090 		}
3091 		if (natt_rem->sin_family == AF_INET6 &&
3092 		    !IN6_IS_ADDR_V4MAPPED(&natt_rem6->sin6_addr)) {
3093 			*diagnostic = SADB_X_DIAGNOSTIC_MALFORMED_NATT_REM;
3094 			return (EINVAL);
3095 		}
3096 	}
3097 
3099 	/* Stuff I don't support, for now.  XXX Diagnostic? */
3100 	if (ksi->ks_in_extv[SADB_EXT_LIFETIME_CURRENT] != NULL ||
3101 	    ksi->ks_in_extv[SADB_EXT_SENSITIVITY] != NULL)
3102 		return (EOPNOTSUPP);
3103 
3104 	/*
3105 	 * XXX Policy :  I'm not checking identities or sensitivity
3106 	 * labels at this time, but if I did, I'd do them here, before I sent
3107 	 * the weak key check up to the algorithm.
3108 	 */
3109 
3110 	mutex_enter(&alg_lock);
3111 
3112 	/*
3113 	 * First locate the authentication algorithm.
3114 	 */
3115 	if (akey != NULL) {
3116 		ipsec_alginfo_t *aalg;
3117 
3118 		aalg = ipsec_alglists[IPSEC_ALG_AUTH][assoc->sadb_sa_auth];
3119 		if (aalg == NULL || !ALG_VALID(aalg)) {
3120 			mutex_exit(&alg_lock);
3121 			esp1dbg(("Couldn't find auth alg #%d.\n",
3122 			    assoc->sadb_sa_auth));
3123 			*diagnostic = SADB_X_DIAGNOSTIC_BAD_AALG;
3124 			return (EINVAL);
3125 		}
3126 		ASSERT(aalg->alg_mech_type != CRYPTO_MECHANISM_INVALID);
3127 
3128 		/* sanity check key sizes */
3129 		if (!ipsec_valid_key_size(akey->sadb_key_bits, aalg)) {
3130 			mutex_exit(&alg_lock);
3131 			*diagnostic = SADB_X_DIAGNOSTIC_BAD_AKEYBITS;
3132 			return (EINVAL);
3133 		}
3134 
3135 		/* check key and fix parity if needed */
3136 		if (ipsec_check_key(aalg->alg_mech_type, akey, B_TRUE,
3137 		    diagnostic) != 0) {
3138 			mutex_exit(&alg_lock);
3139 			return (EINVAL);
3140 		}
3141 	}
3142 
3143 	/*
3144 	 * Then locate the encryption algorithm.
3145 	 */
3146 	if (ekey != NULL) {
3147 		ipsec_alginfo_t *ealg;
3148 
3149 		ealg = ipsec_alglists[IPSEC_ALG_ENCR][assoc->sadb_sa_encrypt];
3150 		if (ealg == NULL || !ALG_VALID(ealg)) {
3151 			mutex_exit(&alg_lock);
3152 			esp1dbg(("Couldn't find encr alg #%d.\n",
3153 			    assoc->sadb_sa_encrypt));
3154 			*diagnostic = SADB_X_DIAGNOSTIC_BAD_EALG;
3155 			return (EINVAL);
3156 		}
3157 		ASSERT(ealg->alg_mech_type != CRYPTO_MECHANISM_INVALID);
3158 
3159 		/* sanity check key sizes */
3160 		if (!ipsec_valid_key_size(ekey->sadb_key_bits, ealg)) {
3161 			mutex_exit(&alg_lock);
3162 			*diagnostic = SADB_X_DIAGNOSTIC_BAD_EKEYBITS;
3163 			return (EINVAL);
3164 		}
3165 
3166 		/* check key */
3167 		if (ipsec_check_key(ealg->alg_mech_type, ekey, B_FALSE,
3168 		    diagnostic) != 0) {
3169 			mutex_exit(&alg_lock);
3170 			return (EINVAL);
3171 		}
3172 	}
3173 	mutex_exit(&alg_lock);
3174 
3175 	return (esp_add_sa_finish(mp, (sadb_msg_t *)mp->b_cont->b_rptr, ksi));
3176 }
3177 
3178 /*
3179  * Update a security association.  Updates come in two varieties.  The first
3180  * is an update of lifetimes on a non-larval SA.  The second is an update of
3181  * a larval SA, which ends up looking a lot more like an add.
3182  */
3183 static int
3184 esp_update_sa(mblk_t *mp, keysock_in_t *ksi, int *diagnostic)
3185 {
3186 	sadb_address_t *dstext =
3187 	    (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_DST];
3188 	struct sockaddr_in *sin;
3189 
3190 	if (dstext == NULL) {
3191 		*diagnostic = SADB_X_DIAGNOSTIC_MISSING_DST;
3192 		return (EINVAL);
3193 	}
3194 
3195 	sin = (struct sockaddr_in *)(dstext + 1);
3196 	return (sadb_update_sa(mp, ksi,
3197 	    (sin->sin_family == AF_INET6) ? &esp_sadb.s_v6 : &esp_sadb.s_v4,
3198 	    diagnostic, esp_pfkey_q, esp_add_sa));
3199 }
3200 
3201 /*
3202  * Delete a security association.  This is REALLY likely to be code common to
3203  * both AH and ESP.  Find the association, then unlink it.
3204  */
3205 static int
3206 esp_del_sa(mblk_t *mp, keysock_in_t *ksi, int *diagnostic)
3207 {
3208 	sadb_sa_t *assoc = (sadb_sa_t *)ksi->ks_in_extv[SADB_EXT_SA];
3209 	sadb_address_t *dstext =
3210 	    (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_DST];
3211 	sadb_address_t *srcext =
3212 	    (sadb_address_t *)ksi->ks_in_extv[SADB_EXT_ADDRESS_SRC];
3213 	struct sockaddr_in *sin;
3214 
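	/*
	 * With no SA extension present, treat this as a request to purge
	 * all ESP SAs for the given destination (or source) address rather
	 * than a delete of a single SA.
	 */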
3215 	if (assoc == NULL) {
3216 		if (dstext != NULL) {
3217 			sin = (struct sockaddr_in *)(dstext + 1);
3218 		} else if (srcext != NULL) {
3219 			sin = (struct sockaddr_in *)(srcext + 1);
3220 		} else {
3221 			*diagnostic = SADB_X_DIAGNOSTIC_MISSING_SA;
3222 			return (EINVAL);
3223 		}
3224 		return (sadb_purge_sa(mp, ksi,
3225 		    (sin->sin_family == AF_INET6) ? &esp_sadb.s_v6 :
3226 		    &esp_sadb.s_v4,
3227 		    diagnostic, esp_pfkey_q, esp_sadb.s_ip_q));
3228 	}
3229 
3230 	return (sadb_del_sa(mp, ksi, &esp_sadb, diagnostic, esp_pfkey_q));
3231 }
3232 
3233 /*
3234  * Convert the entire contents of all of ESP's SA tables into PF_KEY SADB_DUMP
3235  * messages.
3236  */
3237 static void
3238 esp_dump(mblk_t *mp, keysock_in_t *ksi)
3239 {
3240 	int error;
3241 	sadb_msg_t *samsg;
3242 
3243 	/*
3244 	 * Dump each fanout, bailing if error is non-zero.
3245 	 */
3246 
3247 	error = sadb_dump(esp_pfkey_q, mp, ksi->ks_in_serial, &esp_sadb.s_v4);
3248 	if (error != 0)
3249 		goto bail;
3250 
3251 	error = sadb_dump(esp_pfkey_q, mp, ksi->ks_in_serial, &esp_sadb.s_v6);
3252 bail:
3253 	ASSERT(mp->b_cont != NULL);
3254 	samsg = (sadb_msg_t *)mp->b_cont->b_rptr;
3255 	samsg->sadb_msg_errno = (uint8_t)error;
3256 	sadb_pfkey_echo(esp_pfkey_q, mp, (sadb_msg_t *)mp->b_cont->b_rptr, ksi,
3257 	    NULL);
3258 }
3259 
3260 /*
3261  * ESP parsing of PF_KEY messages.  Keysock did most of the really silly
3262  * error cases.  What I receive is a fully-formed, syntactically legal
3263  * PF_KEY message.  I then need to check semantics...
3264  *
3265  * This code may become common to AH and ESP.  Stay tuned.
3266  *
3267  * I also make the assumption that db_ref's are cool.  If this assumption
3268  * is wrong, this means that someone other than keysock or me has been
3269  * mucking with PF_KEY messages.
3270  */
3271 static void
3272 esp_parse_pfkey(mblk_t *mp)
3273 {
3274 	mblk_t *msg = mp->b_cont;
3275 	sadb_msg_t *samsg;
3276 	keysock_in_t *ksi;
3277 	int error;
3278 	int diagnostic = SADB_X_DIAGNOSTIC_NONE;
3279 
3280 	ASSERT(msg != NULL);
3281 	samsg = (sadb_msg_t *)msg->b_rptr;
3282 	ksi = (keysock_in_t *)mp->b_rptr;
3283 
3284 	/*
3285 	 * If applicable, convert unspecified AF_INET6 to unspecified
3286 	 * AF_INET.
3287 	 */
3288 	sadb_srcaddrfix(ksi);
3289 
3290 	switch (samsg->sadb_msg_type) {
3291 	case SADB_ADD:
3292 		error = esp_add_sa(mp, ksi, &diagnostic);
3293 		if (error != 0) {
3294 			sadb_pfkey_error(esp_pfkey_q, mp, error, diagnostic,
3295 			    ksi->ks_in_serial);
3296 		}
3297 		/* else esp_add_sa() took care of things. */
3298 		break;
3299 	case SADB_DELETE:
3300 		error = esp_del_sa(mp, ksi, &diagnostic);
3301 		if (error != 0) {
3302 			sadb_pfkey_error(esp_pfkey_q, mp, error, diagnostic,
3303 			    ksi->ks_in_serial);
3304 		}
3305 		/* Else esp_del_sa() took care of things. */
3306 		break;
3307 	case SADB_GET:
3308 		error = sadb_get_sa(mp, ksi, &esp_sadb, &diagnostic,
3309 		    esp_pfkey_q);
3310 		if (error != 0) {
3311 			sadb_pfkey_error(esp_pfkey_q, mp, error, diagnostic,
3312 			    ksi->ks_in_serial);
3313 		}
3314 		/* Else sadb_get_sa() took care of things. */
3315 		break;
3316 	case SADB_FLUSH:
3317 		sadbp_flush(&esp_sadb);
3318 		sadb_pfkey_echo(esp_pfkey_q, mp, samsg, ksi, NULL);
3319 		break;
3320 	case SADB_REGISTER:
3321 		/*
3322 		 * Hmmm, let's do it!  Check for extensions (there should
3323 		 * be none), extract the fields, call esp_register_out(),
3324 		 * then either free or report an error.
3325 		 *
3326 		 * Keysock takes care of the PF_KEY bookkeeping for this.
3327 		 */
3328 		if (esp_register_out(samsg->sadb_msg_seq, samsg->sadb_msg_pid,
3329 		    ksi->ks_in_serial)) {
3330 			freemsg(mp);
3331 		} else {
3332 			/*
3333 			 * Only way this path hits is if there is a memory
3334 			 * failure.  It will not return B_FALSE because of
3335 			 * lack of esp_pfkey_q if I am in wput().
3336 			 */
3337 			sadb_pfkey_error(esp_pfkey_q, mp, ENOMEM, diagnostic,
3338 			    ksi->ks_in_serial);
3339 		}
3340 		break;
3341 	case SADB_UPDATE:
3342 		/*
3343 		 * Find a larval, if not there, find a full one and get
3344 		 * strict.
3345 		 */
3346 		error = esp_update_sa(mp, ksi, &diagnostic);
3347 		if (error != 0) {
3348 			sadb_pfkey_error(esp_pfkey_q, mp, error, diagnostic,
3349 			    ksi->ks_in_serial);
3350 		}
3351 		/* else esp_update_sa() took care of things. */
3352 		break;
3353 	case SADB_GETSPI:
3354 		/*
3355 		 * Reserve a new larval entry.
3356 		 */
3357 		esp_getspi(mp, ksi);
3358 		break;
3359 	case SADB_ACQUIRE:
3360 		/*
3361 		 * Find larval and/or ACQUIRE record and kill it (them); an
3362 		 * inbound ACQUIRE here is most likely an error.  Such messages
3363 		 * should only have the base header.
3364 		 */
3365 		sadb_in_acquire(samsg, &esp_sadb, esp_pfkey_q);
3366 		freemsg(mp);
3367 		break;
3368 	case SADB_DUMP:
3369 		/*
3370 		 * Dump all entries.
3371 		 */
3372 		esp_dump(mp, ksi);
3373 		/* esp_dump will take care of the return message, etc. */
3374 		break;
3375 	case SADB_EXPIRE:
3376 		/* Should never reach me. */
3377 		sadb_pfkey_error(esp_pfkey_q, mp, EOPNOTSUPP, diagnostic,
3378 		    ksi->ks_in_serial);
3379 		break;
3380 	default:
3381 		sadb_pfkey_error(esp_pfkey_q, mp, EINVAL,
3382 		    SADB_X_DIAGNOSTIC_UNKNOWN_MSG, ksi->ks_in_serial);
3383 		break;
3384 	}
3385 }
3386 
3387 /*
3388  * Handle case where PF_KEY says it can't find a keysock for one of my
3389  * ACQUIRE messages.
3390  */
3391 static void
3392 esp_keysock_no_socket(mblk_t *mp)
3393 {
3394 	sadb_msg_t *samsg;
3395 	keysock_out_err_t *kse = (keysock_out_err_t *)mp->b_rptr;
3396 
3397 	if (mp->b_cont == NULL) {
3398 		freemsg(mp);
3399 		return;
3400 	}
3401 	samsg = (sadb_msg_t *)mp->b_cont->b_rptr;
3402 
3403 	/*
3404 	 * If keysock can't find any registered, delete the acquire record
3405 	 * immediately, and handle errors.
3406 	 */
3407 	if (samsg->sadb_msg_type == SADB_ACQUIRE) {
3408 		samsg->sadb_msg_errno = kse->ks_err_errno;
3409 		samsg->sadb_msg_len = SADB_8TO64(sizeof (*samsg));
3410 		/*
3411 		 * Use the write-side of the esp_pfkey_q, in case there is
3412 		 * no esp_sadb.s_ip_q.
3413 		 */
3414 		sadb_in_acquire(samsg, &esp_sadb, WR(esp_pfkey_q));
3415 	}
3416 
3417 	freemsg(mp);
3418 }
3419 
3420 /*
3421  * First-cut reality check for an inbound PF_KEY message.
3422  */
3423 static boolean_t
3424 esp_pfkey_reality_failures(mblk_t *mp, keysock_in_t *ksi)
3425 {
3426 	int diagnostic;
3427 
3428 	if (ksi->ks_in_extv[SADB_EXT_PROPOSAL] != NULL) {
3429 		diagnostic = SADB_X_DIAGNOSTIC_PROP_PRESENT;
3430 		goto badmsg;
3431 	}
3432 	if (ksi->ks_in_extv[SADB_EXT_SUPPORTED_AUTH] != NULL ||
3433 	    ksi->ks_in_extv[SADB_EXT_SUPPORTED_ENCRYPT] != NULL) {
3434 		diagnostic = SADB_X_DIAGNOSTIC_SUPP_PRESENT;
3435 		goto badmsg;
3436 	}
3437 	if (ksi->ks_in_srctype == KS_IN_ADDR_MBCAST) {
3438 		diagnostic = SADB_X_DIAGNOSTIC_BAD_SRC;
3439 		goto badmsg;
3440 	}
3441 	if (ksi->ks_in_dsttype == KS_IN_ADDR_UNSPEC) {
3442 		diagnostic = SADB_X_DIAGNOSTIC_BAD_DST;
3443 		goto badmsg;
3444 	}
3445 
3446 	return (B_FALSE);	/* False ==> no failures */
3447 
3448 badmsg:
3449 	sadb_pfkey_error(esp_pfkey_q, mp, EINVAL, diagnostic,
3450 	    ksi->ks_in_serial);
3451 	return (B_TRUE);	/* True ==> failures */
3452 }
3453 
3454 /*
3455  * ESP module write put routine.
3456  */
3457 static void
3458 ipsecesp_wput(queue_t *q, mblk_t *mp)
3459 {
3460 	ipsec_info_t *ii;
3461 	keysock_in_t *ksi;
3462 	int rc;
3463 	struct iocblk *iocp;
3464 
3465 	esp3dbg(("In ipsecesp_wput().\n"));
3466 
3467 	/* NOTE: Each case must take care of freeing or passing mp. */
3468 	switch (mp->b_datap->db_type) {
3469 	case M_CTL:
3470 		if ((mp->b_wptr - mp->b_rptr) < sizeof (ipsec_info_t)) {
3471 			/* Not big enough message. */
3472 			freemsg(mp);
3473 			break;
3474 		}
3475 		ii = (ipsec_info_t *)mp->b_rptr;
3476 
3477 		switch (ii->ipsec_info_type) {
3478 		case KEYSOCK_OUT_ERR:
3479 			esp1dbg(("Got KEYSOCK_OUT_ERR message.\n"));
3480 			esp_keysock_no_socket(mp);
3481 			break;
3482 		case KEYSOCK_IN:
3483 			ESP_BUMP_STAT(keysock_in);
3484 			esp3dbg(("Got KEYSOCK_IN message.\n"));
3485 			ksi = (keysock_in_t *)ii;
3486 			/*
3487 			 * Some common reality checks.
3488 			 */
3489 
3490 			if (esp_pfkey_reality_failures(mp, ksi))
3491 				return;
3492 
3493 			/*
3494 			 * Use 'q' instead of esp_sadb.s_ip_q, since
3495 			 * it's the write side already, and it'll go
3496 			 * down to IP.  Use esp_pfkey_q because we
3497 			 * wouldn't get here if that weren't set, and
3498 			 * the RD(q) has been done already.
3499 			 */
3500 			if (ksi->ks_in_srctype == KS_IN_ADDR_UNKNOWN) {
3501 				rc = sadb_addrcheck(q, esp_pfkey_q, mp,
3502 				    ksi->ks_in_extv[SADB_EXT_ADDRESS_SRC],
3503 				    ksi->ks_in_serial);
3504 				if (rc == KS_IN_ADDR_UNKNOWN)
3505 					return;
3506 				else
3507 					ksi->ks_in_srctype = rc;
3508 			}
3509 			if (ksi->ks_in_dsttype == KS_IN_ADDR_UNKNOWN) {
3510 				rc = sadb_addrcheck(q, esp_pfkey_q, mp,
3511 				    ksi->ks_in_extv[SADB_EXT_ADDRESS_DST],
3512 				    ksi->ks_in_serial);
3513 				if (rc == KS_IN_ADDR_UNKNOWN)
3514 					return;
3515 				else
3516 					ksi->ks_in_dsttype = rc;
3517 			}
3518 			/*
3519 			 * XXX Proxy may be a different address family.
3520 			 */
3521 			if (ksi->ks_in_proxytype == KS_IN_ADDR_UNKNOWN) {
3522 				rc = sadb_addrcheck(q, esp_pfkey_q, mp,
3523 				    ksi->ks_in_extv[SADB_EXT_ADDRESS_PROXY],
3524 				    ksi->ks_in_serial);
3525 				if (rc == KS_IN_ADDR_UNKNOWN)
3526 					return;
3527 				else
3528 					ksi->ks_in_proxytype = rc;
3529 			}
3530 			esp_parse_pfkey(mp);
3531 			break;
3532 		case KEYSOCK_HELLO:
3533 			sadb_keysock_hello(&esp_pfkey_q, q, mp,
3534 			    esp_ager, &esp_event, SADB_SATYPE_ESP);
3535 			break;
3536 		default:
3537 			esp2dbg(("Got M_CTL from above of 0x%x.\n",
3538 			    ii->ipsec_info_type));
3539 			freemsg(mp);
3540 			break;
3541 		}
3542 		break;
3543 	case M_IOCTL:
3544 		iocp = (struct iocblk *)mp->b_rptr;
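		/*
		 * ND_GET/ND_SET ioctls (typically issued via ndd(1M)) read
		 * or set the tunable ipsecesp_* parameters.
		 */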
3545 		switch (iocp->ioc_cmd) {
3546 		case ND_SET:
3547 		case ND_GET:
3548 			if (nd_getset(q, ipsecesp_g_nd, mp)) {
3549 				qreply(q, mp);
3550 				return;
3551 			} else {
3552 				iocp->ioc_error = ENOENT;
3553 			}
3554 			/* FALLTHRU */
3555 		default:
3556 			/* We really don't support any other ioctls, do we? */
3557 
3558 			/* Return EINVAL */
3559 			if (iocp->ioc_error != ENOENT)
3560 				iocp->ioc_error = EINVAL;
3561 			iocp->ioc_count = 0;
3562 			mp->b_datap->db_type = M_IOCACK;
3563 			qreply(q, mp);
3564 			return;
3565 		}
3566 	default:
3567 		esp3dbg(("Got default message, type %d, passing to IP.\n",
3568 		    mp->b_datap->db_type));
3569 		putnext(q, mp);
3570 	}
3571 }
3572 
3573 /*
3574  * Process an outbound ESP packet that can be accelerated by an IPsec
3575  * hardware acceleration capable Provider.
3576  * The caller already inserted and initialized the ESP header.
3577  * This function marks the packet as accelerated in its IPSEC_OUT and
3578  * adds room at the end of the packet for the ICV if authentication is needed.
3579  *
3580  * On success returns IPSEC_STATUS_SUCCESS; on failure returns
3581  * IPSEC_STATUS_FAILED and frees the mblk chain ipsec_out.
3582  */
3583 static ipsec_status_t
3584 esp_outbound_accelerated(mblk_t *ipsec_out, uint_t icv_len)
3585 {
3586 	ipsec_out_t *io;
3587 	mblk_t *lastmp;
3588 
3589 	ESP_BUMP_STAT(out_accelerated);
3590 
3591 	io = (ipsec_out_t *)ipsec_out->b_rptr;
3592 
3593 	/* mark packet as being accelerated in IPSEC_OUT */
3594 	ASSERT(io->ipsec_out_accelerated == B_FALSE);
3595 	io->ipsec_out_accelerated = B_TRUE;
3596 
3597 	/*
3598 	 * add room at the end of the packet for the ICV if needed
3599 	 */
3600 	if (icv_len > 0) {
3601 		/* go to last mblk */
3602 		lastmp = ipsec_out;	/* For following while loop. */
3603 		do {
3604 			lastmp = lastmp->b_cont;
3605 		} while (lastmp->b_cont != NULL);
3606 
3607 		/* if not enough available room, allocate new mblk */
3608 		if ((lastmp->b_wptr + icv_len) > lastmp->b_datap->db_lim) {
3609 			lastmp->b_cont = allocb(icv_len, BPRI_HI);
3610 			if (lastmp->b_cont == NULL) {
3611 				ESP_BUMP_STAT(out_discards);
3612 				ip_drop_packet(ipsec_out, B_FALSE, NULL, NULL,
3613 				    &ipdrops_esp_nomem, &esp_dropper);
3614 				return (IPSEC_STATUS_FAILED);
3615 			}
3616 			lastmp = lastmp->b_cont;
3617 		}
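		/*
		 * Reserve icv_len bytes at the tail; the Provider is
		 * expected to fill in the ICV here.
		 */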
3618 		lastmp->b_wptr += icv_len;
3619 	}
3620 
3621 	return (IPSEC_STATUS_SUCCESS);
3622 }
3623 
3624 /*
3625  * Process an inbound accelerated ESP packet.
3626  * On success returns IPSEC_STATUS_SUCCESS; on failure returns
3627  * IPSEC_STATUS_FAILED and frees the mblk chain ipsec_in.
3628  */
3629 static ipsec_status_t
3630 esp_inbound_accelerated(mblk_t *ipsec_in, mblk_t *data_mp, boolean_t isv4,
3631     ipsa_t *assoc)
3632 {
3633 	ipsec_in_t *ii;
3634 	mblk_t *hada_mp;
3635 	uint32_t icv_len = 0;
3636 	da_ipsec_t *hada;
3637 	ipha_t *ipha;
3638 	ip6_t *ip6h;
3639 	kstat_named_t *counter;
3640 
3641 	ESP_BUMP_STAT(in_accelerated);
3642 
3643 	ii = (ipsec_in_t *)ipsec_in->b_rptr;
3644 	hada_mp = ii->ipsec_in_da;
3645 	ASSERT(hada_mp != NULL);
3646 	hada = (da_ipsec_t *)hada_mp->b_rptr;
3647 
3648 	/*
3649 	 * We only support one level of decapsulation in hardware, so
3650 	 * nuke the pointer.
3651 	 */
3652 	ii->ipsec_in_da = NULL;
3653 	ii->ipsec_in_accelerated = B_FALSE;
3654 
3655 	if (assoc->ipsa_auth_alg != IPSA_AALG_NONE) {
3656 		/*
3657 		 * ESP with authentication. We expect the Provider to have
3658 		 * computed the ICV and placed it in the hardware acceleration
3659 		 * data attributes.
3660 		 *
3661 		 * Extract ICV length from attributes M_CTL and sanity check
3662 		 * its value. We allow the mblk to be smaller than da_ipsec_t
3663 		 * for a small ICV, as long as the entire ICV fits within the
3664 		 * mblk.
3665 		 *
3666 		 * Also ensure that the ICV length computed by the Provider
3667 		 * corresponds to the ICV length of the algorithm specified by
3668 		 * the SA.
3669 		 */
3670 		icv_len = hada->da_icv_len;
3671 		if ((icv_len != assoc->ipsa_mac_len) ||
3672 		    (icv_len > DA_ICV_MAX_LEN) || (MBLKL(hada_mp) <
3673 		    (sizeof (da_ipsec_t) - DA_ICV_MAX_LEN + icv_len))) {
3674 			esp0dbg(("esp_inbound_accelerated: "
3675 			    "ICV len (%u) incorrect or mblk too small (%u)\n",
3676 			    icv_len, (uint32_t)(MBLKL(hada_mp))));
3677 			counter = &ipdrops_esp_bad_auth;
3678 			goto esp_in_discard;
3679 		}
3680 	}
3681 
3682 	/* get pointers to IP header */
3683 	if (isv4) {
3684 		ipha = (ipha_t *)data_mp->b_rptr;
3685 	} else {
3686 		ip6h = (ip6_t *)data_mp->b_rptr;
3687 	}
3688 
3689 	/*
3690 	 * Compare ICV in ESP packet vs ICV computed by adapter.
3691 	 * We also remove the ICV from the end of the packet since
3692 	 * it will no longer be needed.
3693 	 *
3694 	 * Assume that esp_inbound() already ensured that the pkt
3695 	 * was in one mblk.
3696 	 */
3697 	ASSERT(data_mp->b_cont == NULL);
3698 	data_mp->b_wptr -= icv_len;
3699 	/* adjust IP header */
3700 	if (isv4)
3701 		ipha->ipha_length = htons(ntohs(ipha->ipha_length) - icv_len);
3702 	else
3703 		ip6h->ip6_plen = htons(ntohs(ip6h->ip6_plen) - icv_len);
3704 	if (icv_len && bcmp(hada->da_icv, data_mp->b_wptr, icv_len)) {
3705 		int af;
3706 		void *addr;
3707 
3708 		if (isv4) {
3709 			addr = &ipha->ipha_dst;
3710 			af = AF_INET;
3711 		} else {
3712 			addr = &ip6h->ip6_dst;
3713 			af = AF_INET6;
3714 		}
3715 
3716 		/*
3717 		 * Log the event.  Don't print to the console, to avoid a
3718 		 * potential denial-of-service attack.
3719 		 */
3720 		ESP_BUMP_STAT(bad_auth);
3721 		ipsec_assocfailure(info.mi_idnum, 0, 0, SL_ERROR | SL_WARN,
3722 		    "ESP Authentication failed spi %x, dst_addr %s",
3723 		    assoc->ipsa_spi, addr, af);
3724 		counter = &ipdrops_esp_bad_auth;
3725 		goto esp_in_discard;
3726 	}
3727 
3728 	esp3dbg(("esp_inbound_accelerated: ESP authentication succeeded, "
3729 	    "checking replay\n"));
3730 
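	/* Attach the data to the IPSEC_IN for the rest of the processing. */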
3731 	ipsec_in->b_cont = data_mp;
3732 
3733 	/*
3734 	 * Remove ESP header and padding from packet.
3735 	 */
3736 	if (!esp_strip_header(data_mp, ii->ipsec_in_v4, assoc->ipsa_iv_len,
3737 	    &counter)) {
3738 		esp1dbg(("esp_inbound_accelerated: "
3739 		    "esp_strip_header() failed\n"));
3740 		goto esp_in_discard;
3741 	}
3742 
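	/* Done with the hardware acceleration attributes. */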
3743 	freeb(hada_mp);
3744 
3745 	/*
3746 	 * Account for usage against the SA's byte lifetimes.
3747 	 */
3748 	if (!esp_age_bytes(assoc, msgdsize(data_mp), B_TRUE)) {
3749 		/* The ipsa has hit hard expiration, LOG and AUDIT. */
3750 		ESP_BUMP_STAT(bytes_expired);
3751 		IP_ESP_BUMP_STAT(in_discards);
3752 		ipsec_assocfailure(info.mi_idnum, 0, 0, SL_ERROR | SL_WARN,
3753 		    "ESP association 0x%x, dst %s had bytes expire.\n",
3754 		    assoc->ipsa_spi, assoc->ipsa_dstaddr, assoc->ipsa_addrfam);
3755 		ip_drop_packet(ipsec_in, B_TRUE, NULL, NULL,
3756 		    &ipdrops_esp_bytes_expire, &esp_dropper);
3757 		return (IPSEC_STATUS_FAILED);
3758 	}
3759 
3760 	/* done processing the packet */
3761 	return (IPSEC_STATUS_SUCCESS);
3762 
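	/*
	 * Common discard path: count the discard, release the acceleration
	 * attributes, and hand the chain to the ESP dropper with the chosen
	 * drop counter.
	 */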
3763 esp_in_discard:
3764 	IP_ESP_BUMP_STAT(in_discards);
3765 	freeb(hada_mp);
3766 
3767 	ipsec_in->b_cont = data_mp;	/* For ip_drop_packet()'s sake... */
3768 	ip_drop_packet(ipsec_in, B_TRUE, NULL, NULL, counter, &esp_dropper);
3769 
3770 	return (IPSEC_STATUS_FAILED);
3771 }
3772 
3773 /*
3774  * Wrapper to allow IP to trigger an ESP association failure message
3775  * during inbound SA selection.
3776  */
3777 void
3778 ipsecesp_in_assocfailure(mblk_t *mp, char level, ushort_t sl, char *fmt,
3779     uint32_t spi, void *addr, int af)
3780 {
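	/*
	 * Log only if the ipsecesp_log_unknown_spi tunable is set; the
	 * packet is dropped either way.
	 */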
3781 	if (ipsecesp_log_unknown_spi) {
3782 		ipsec_assocfailure(info.mi_idnum, 0, level, sl, fmt, spi,
3783 		    addr, af);
3784 	}
3785 
3786 	ip_drop_packet(mp, B_TRUE, NULL, NULL, &ipdrops_esp_no_sa,
3787 	    &esp_dropper);
3788 }
3789 
3790 /*
3791  * Initialize the ESP input and output processing functions.
3792  */
3793 void
3794 ipsecesp_init_funcs(ipsa_t *sa)
3795 {
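	/* Don't override handlers that are already installed on the SA. */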
3796 	if (sa->ipsa_output_func == NULL)
3797 		sa->ipsa_output_func = esp_outbound;
3798 	if (sa->ipsa_input_func == NULL)
3799 		sa->ipsa_input_func = esp_inbound;
3800 }
3801