xref: /titanic_41/usr/src/uts/common/crypto/api/kcf_dual.c (revision fa9e4066f08beec538e775443c5be79dd423fcab)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/errno.h>
30 #include <sys/types.h>
31 #include <sys/kmem.h>
32 #include <sys/crypto/common.h>
33 #include <sys/crypto/impl.h>
34 #include <sys/crypto/api.h>
35 #include <sys/crypto/spi.h>
36 #include <sys/crypto/sched_impl.h>
37 
38 
39 static int crypto_mac_decrypt_common(crypto_mechanism_t *,
40     crypto_mechanism_t *, crypto_dual_data_t *, crypto_key_t *, crypto_key_t *,
41     crypto_ctx_template_t, crypto_ctx_template_t, crypto_data_t *,
42     crypto_data_t *, crypto_call_req_t *, boolean_t);
43 
/*
 * Performs a dual encrypt/mac atomic operation. The provider and session
 * to use are determined by the KCF dispatcher.
 *
 * Returns CRYPTO_SUCCESS, CRYPTO_QUEUED (asynchronous completion reported
 * through crq's callback), or an error code.  On every return path the
 * provider hold taken by kcf_get_dual_provider(), the tried-provider
 * list, and the emulation request (next_req) have been released/freed.
 */
int
crypto_encrypt_mac(crypto_mechanism_t *encr_mech,
    crypto_mechanism_t *mac_mech, crypto_data_t *pt,
    crypto_key_t *encr_key, crypto_key_t *mac_key,
    crypto_ctx_template_t encr_tmpl, crypto_ctx_template_t mac_tmpl,
    crypto_dual_data_t *ct, crypto_data_t *mac, crypto_call_req_t *crq)
{
	/*
	 * First try to find a provider for the encryption mechanism, that
	 * is also capable of the MAC mechanism.
	 */
	int error;
	kcf_mech_entry_t *me;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_encr_tmpl, *ctx_mac_tmpl;
	kcf_req_params_t params;
	kcf_encrypt_mac_ops_params_t *cmops;
	crypto_spi_ctx_template_t spi_encr_tmpl = NULL, spi_mac_tmpl = NULL;
	crypto_mech_type_t prov_encr_mechid, prov_mac_mechid;
	kcf_prov_tried_t *list = NULL;
	boolean_t encr_tmpl_checked = B_FALSE;
	boolean_t mac_tmpl_checked = B_FALSE;
	kcf_dual_req_t *next_req = NULL;

retry:
	/* pd is returned held on success */
	pd = kcf_get_dual_provider(encr_mech, mac_mech, &me, &prov_encr_mechid,
	    &prov_mac_mechid, &error, list,
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_ENCRYPT_MAC_ATOMIC,
	    CRYPTO_FG_MAC_ATOMIC | CRYPTO_FG_ENCRYPT_MAC_ATOMIC,
	    CHECK_RESTRICT(crq), ct->dd_len1);
	if (pd == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		if (next_req != NULL)
			kmem_free(next_req, sizeof (kcf_dual_req_t));
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template
	 * It is very rare that the generation number mis-matches, so
	 * is acceptable to fail here, and let the consumer recover by
	 * freeing this tmpl and create a new one for the key and new SW
	 * provider
	 * Warning! will need to change when multiple software providers
	 * per mechanism are supported.
	 */

	if ((!encr_tmpl_checked) && (pd->pd_prov_type == CRYPTO_SW_PROVIDER)) {
		if (encr_tmpl != NULL) {
			ctx_encr_tmpl = (kcf_ctx_template_t *)encr_tmpl;
			if (ctx_encr_tmpl->ct_generation != me->me_gen_swprov) {

				if (next_req != NULL)
					kmem_free(next_req,
					    sizeof (kcf_dual_req_t));
				if (list != NULL)
					kcf_free_triedlist(list);

				KCF_PROV_REFRELE(pd);
				/* Which one is the old one ? */
				return (CRYPTO_OLD_CTX_TEMPLATE);
			}
			spi_encr_tmpl = ctx_encr_tmpl->ct_prov_tmpl;
		}
		encr_tmpl_checked = B_TRUE;
	}

	/*
	 * The chosen provider can encrypt but cannot do the MAC mechanism:
	 * emulate the dual operation with an encrypt step followed by a
	 * separate MAC step.
	 */
	if (prov_mac_mechid == CRYPTO_MECH_INVALID) {
		crypto_call_req_t encr_req;

		/* Need to emulate with 2 internal calls */
		/* Allocate and initialize the MAC req for the callback */

		if (crq != NULL) {
			if (next_req == NULL) {
				next_req = kcf_alloc_req(crq);

				if (next_req == NULL) {
					KCF_PROV_REFRELE(pd);
					if (list != NULL)
						kcf_free_triedlist(list);
					return (CRYPTO_HOST_MEMORY);
				}
				/*
				 * Careful! we're wrapping-in mac_tmpl instead
				 * of an spi_mac_tmpl. The callback routine will
				 * have to validate mac_tmpl, and use the
				 * mac_ctx_tmpl, once it picks a MAC provider.
				 */
				KCF_WRAP_MAC_OPS_PARAMS(&(next_req->kr_params),
				    KCF_OP_ATOMIC, NULL, mac_mech, mac_key,
				    (crypto_data_t *)ct, mac, mac_tmpl);
			}

			/*
			 * The encrypt step completes into kcf_next_req(),
			 * which will then submit the wrapped MAC request.
			 */
			encr_req.cr_flag = crq->cr_flag;
			encr_req.cr_callback_func = kcf_next_req;
			encr_req.cr_callback_arg = next_req;
		}

		/* A NULL 'pt' means encrypt in-place inside 'ct'. */
		if (pt == NULL) {
			KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC,
			    pd->pd_sid, encr_mech, encr_key,
			    (crypto_data_t *)ct, NULL, spi_encr_tmpl);
		} else {
			KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_ATOMIC,
			    pd->pd_sid, encr_mech, encr_key, pt,
			    (crypto_data_t *)ct, spi_encr_tmpl);
		}

		error = kcf_submit_request(pd, NULL, (crq == NULL) ? NULL :
		    &encr_req, &params, B_TRUE);

		switch (error) {
		case CRYPTO_SUCCESS: {
			off_t saveoffset;
			size_t savelen;

			/*
			 * The encryption step is done. Reuse the encr_req
			 * for submitting the MAC step.
			 */
			if (next_req == NULL) {
				saveoffset = ct->dd_offset1;
				savelen = ct->dd_len1;
			} else {
				saveoffset = next_req->kr_saveoffset =
				    ct->dd_offset1;
				savelen = next_req->kr_savelen = ct->dd_len1;
				encr_req.cr_callback_func = kcf_last_req;
			}

			/*
			 * Temporarily retarget the dual data at its second
			 * region (the ciphertext) so crypto_mac() consumes
			 * it; restored below unless the MAC was queued, in
			 * which case kcf_last_req restores it.
			 */
			ct->dd_offset1 = ct->dd_offset2;
			ct->dd_len1 = ct->dd_len2;

			error = crypto_mac(mac_mech, (crypto_data_t *)ct,
			    mac_key, mac_tmpl, mac, (crq == NULL) ? NULL :
			    &encr_req);

			if (error != CRYPTO_QUEUED) {
				ct->dd_offset1 = saveoffset;
				ct->dd_len1 = savelen;
			}
			break;
		}

		case CRYPTO_QUEUED:
			if ((crq != NULL) &&
			    !(crq->cr_flag & CRYPTO_SKIP_REQID))
				crq->cr_reqid = encr_req.cr_reqid;
			break;

		default:

			/* Add pd to the linked list of providers tried. */
			if (IS_RECOVERABLE(error)) {
				if (kcf_insert_triedlist(&list, pd,
				    KCF_KMFLAG(crq)) != NULL)
					goto retry;
			}
		}
		/*
		 * next_req is consumed by the async callback only when the
		 * request was queued; free it on any synchronous outcome.
		 */
		if (error != CRYPTO_QUEUED && next_req != NULL)
			kmem_free(next_req, sizeof (kcf_dual_req_t));
		if (list != NULL)
			kcf_free_triedlist(list);
		KCF_PROV_REFRELE(pd);
		return (error);
	}
	if ((!mac_tmpl_checked) && (pd->pd_prov_type == CRYPTO_SW_PROVIDER)) {
		if ((mac_tmpl != NULL) &&
		    (prov_mac_mechid != CRYPTO_MECH_INVALID)) {
			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {

				if (next_req != NULL)
					kmem_free(next_req,
					    sizeof (kcf_dual_req_t));
				if (list != NULL)
					kcf_free_triedlist(list);

				KCF_PROV_REFRELE(pd);
				/* Which one is the old one ? */
				return (CRYPTO_OLD_CTX_TEMPLATE);
			}
			spi_mac_tmpl = ctx_mac_tmpl->ct_prov_tmpl;
		}
		mac_tmpl_checked = B_TRUE;
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lencr_mech;
		crypto_mechanism_t lmac_mech;

		/* careful! structs assignments */
		lencr_mech = *encr_mech;
		lencr_mech.cm_type = prov_encr_mechid;
		lmac_mech = *mac_mech;
		lmac_mech.cm_type = prov_mac_mechid;

		error = KCF_PROV_ENCRYPT_MAC_ATOMIC(pd, pd->pd_sid,
		    &lencr_mech, encr_key, &lmac_mech, mac_key, pt, ct,
		    mac, spi_encr_tmpl, spi_mac_tmpl, KCF_SWFP_RHNDL(crq));

		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC,
		    pd->pd_sid, encr_key, mac_key, pt, ct, mac, spi_encr_tmpl,
		    spi_mac_tmpl);

		cmops = &(params.rp_u.encrypt_mac_params);

		/* careful! structs assignments */
		cmops->em_encr_mech = *encr_mech;
		cmops->em_encr_mech.cm_type = prov_encr_mechid;
		cmops->em_framework_encr_mechtype = encr_mech->cm_type;
		cmops->em_mac_mech = *mac_mech;
		cmops->em_mac_mech.cm_type = prov_mac_mechid;
		cmops->em_framework_mac_mechtype = mac_mech->cm_type;

		error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (next_req != NULL)
		kmem_free(next_req, sizeof (kcf_dual_req_t));

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
288 
/*
 * Starts a multi-part dual encrypt/mac operation. The provider and session
 * to use are determined by the KCF dispatcher.
 *
 * On success, *ctxp receives the new dual context; its kc_secondctx links
 * the cipher context to the MAC context when the operation is emulated
 * with two single contexts.
 */
/* ARGSUSED */
int
crypto_encrypt_mac_init(crypto_mechanism_t *encr_mech,
    crypto_mechanism_t *mac_mech, crypto_key_t *encr_key,
    crypto_key_t *mac_key, crypto_ctx_template_t encr_tmpl,
    crypto_ctx_template_t mac_tmpl, crypto_context_t *ctxp,
    crypto_call_req_t *cr)
{
	/*
	 * First try to find a provider for the encryption mechanism, that
	 * is also capable of the MAC mechanism.
	 */
	int error;
	kcf_mech_entry_t *me;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_encr_tmpl, *ctx_mac_tmpl;
	kcf_req_params_t params;
	kcf_encrypt_mac_ops_params_t *cmops;
	crypto_spi_ctx_template_t spi_encr_tmpl = NULL, spi_mac_tmpl = NULL;
	crypto_mech_type_t prov_encr_mechid, prov_mac_mechid;
	kcf_prov_tried_t *list = NULL;
	boolean_t encr_tmpl_checked = B_FALSE;
	boolean_t mac_tmpl_checked = B_FALSE;
	crypto_ctx_t *ctx = NULL;
	kcf_context_t *encr_kcf_context = NULL, *mac_kcf_context;
	crypto_call_flag_t save_flag;

retry:
	/* pd is returned held on success */
	pd = kcf_get_dual_provider(encr_mech, mac_mech, &me, &prov_encr_mechid,
	    &prov_mac_mechid, &error, list,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_MAC, CRYPTO_FG_MAC,
	    CHECK_RESTRICT(cr), 0);
	if (pd == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template
	 * It is very rare that the generation number mis-matches, so
	 * is acceptable to fail here, and let the consumer recover by
	 * freeing this tmpl and create a new one for the key and new SW
	 * provider
	 * Warning! will need to change when multiple software providers
	 * per mechanism are supported.
	 */

	if ((!encr_tmpl_checked) && (pd->pd_prov_type == CRYPTO_SW_PROVIDER)) {
		if (encr_tmpl != NULL) {
			ctx_encr_tmpl = (kcf_ctx_template_t *)encr_tmpl;
			if (ctx_encr_tmpl->ct_generation != me->me_gen_swprov) {

				if (list != NULL)
					kcf_free_triedlist(list);
				if (encr_kcf_context != NULL)
					KCF_CONTEXT_REFRELE(encr_kcf_context);

				KCF_PROV_REFRELE(pd);
				/* Which one is the old one ? */
				return (CRYPTO_OLD_CTX_TEMPLATE);
			}
			spi_encr_tmpl = ctx_encr_tmpl->ct_prov_tmpl;
		}
		encr_tmpl_checked = B_TRUE;
	}

	if (prov_mac_mechid == CRYPTO_MECH_INVALID) {
		/* Need to emulate with 2 internal calls */

		/*
		 * We avoid code complexity by limiting the pure async.
		 * case to be done using only a SW provider.
		 * XXX - Redo the emulation code below so that we can
		 * remove this limitation.
		 */
		if (cr != NULL && pd->pd_prov_type == CRYPTO_HW_PROVIDER) {
			if ((kcf_insert_triedlist(&list, pd, KCF_KMFLAG(cr))
			    != NULL))
				goto retry;
			if (list != NULL)
				kcf_free_triedlist(list);
			if (encr_kcf_context != NULL)
				KCF_CONTEXT_REFRELE(encr_kcf_context);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_HOST_MEMORY);
		}

		/* Allocate the cipher-side context only once across retries. */
		if (ctx == NULL && pd->pd_prov_type == CRYPTO_SW_PROVIDER) {
			ctx = kcf_new_ctx(cr, pd, pd->pd_sid);
			if (ctx == NULL) {
				if (list != NULL)
					kcf_free_triedlist(list);
				if (encr_kcf_context != NULL)
					KCF_CONTEXT_REFRELE(encr_kcf_context);
				KCF_PROV_REFRELE(pd);
				return (CRYPTO_HOST_MEMORY);
			}
			encr_kcf_context = (kcf_context_t *)
			    ctx->cc_framework_private;
		}
		/*
		 * Trade-off speed vs avoidance of code complexity and
		 * duplication:
		 * Could do all the combinations of fastpath / synch / asynch
		 * for the encryption and the mac steps. Early attempts
		 * showed the code grew wild and bug-prone, for little gain.
		 * Therefore, the adaptative asynch case is not implemented.
		 * It's either pure synchronous, or pure asynchronous.
		 * We still preserve a fastpath for the pure synchronous
		 * requests to SW providers.
		 */
		if (cr == NULL) {
			crypto_context_t mac_context;

			if (pd->pd_prov_type == CRYPTO_SW_PROVIDER) {
				crypto_mechanism_t lmech = *encr_mech;

				lmech.cm_type = prov_encr_mechid;

				error = KCF_PROV_ENCRYPT_INIT(pd, ctx, &lmech,
				    encr_key, spi_encr_tmpl,
				    KCF_RHNDL(KM_SLEEP));
			} else {
				/*
				 * If we did the 'goto retry' then ctx may not
				 * be NULL.  In general, we can't reuse another
				 * provider's context, so we free it now so
				 * we don't leak it.
				 */
				if (ctx != NULL) {
					KCF_CONTEXT_REFRELE((kcf_context_t *)
					    ctx->cc_framework_private);
					encr_kcf_context = NULL;
				}
				/*
				 * NOTE(review): the framework template
				 * encr_tmpl is passed by address here, not
				 * spi_encr_tmpl as in the SW branch above —
				 * confirm crypto_encrypt_init_prov() expects
				 * a crypto_ctx_template_t * of the framework
				 * template.
				 */
				error = crypto_encrypt_init_prov(pd, pd->pd_sid,
				    encr_mech, encr_key, &encr_tmpl,
				    (crypto_context_t *)&ctx, NULL);

				if (error == CRYPTO_SUCCESS) {
					encr_kcf_context = (kcf_context_t *)
					    ctx->cc_framework_private;
				}
			}
			KCF_PROV_INCRSTATS(pd, error);

			KCF_PROV_REFRELE(pd);

			if (error != CRYPTO_SUCCESS) {
				/* Can't be CRYPTO_QUEUED. return the failure */
				if (list != NULL)
					kcf_free_triedlist(list);
				if (encr_kcf_context != NULL)
					KCF_CONTEXT_REFRELE(encr_kcf_context);

				return (error);
			}
			error = crypto_mac_init(mac_mech, mac_key, mac_tmpl,
			    &mac_context, NULL);

			if (list != NULL)
				kcf_free_triedlist(list);

			if (error != CRYPTO_SUCCESS) {
				/* Should this be an ASSERT() ? */

				KCF_CONTEXT_REFRELE(encr_kcf_context);
			} else {
				/*
				 * Chain the MAC context off the cipher
				 * context so the update/final entry points
				 * can drive both halves of the emulation.
				 */
				encr_kcf_context = (kcf_context_t *)
				    ctx->cc_framework_private;
				mac_kcf_context = (kcf_context_t *)
				    ((crypto_ctx_t *)mac_context)->
				    cc_framework_private;

				encr_kcf_context->kc_secondctx =
				    mac_kcf_context;
				KCF_CONTEXT_REFHOLD(mac_kcf_context);

				*ctxp = (crypto_context_t)ctx;
			}

			return (error);
		}

		/* submit a pure asynchronous request. */
		save_flag = cr->cr_flag;
		cr->cr_flag |= CRYPTO_ALWAYS_QUEUE;

		KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(&params, KCF_OP_INIT,
		    pd->pd_sid, encr_key, mac_key, NULL, NULL, NULL,
		    spi_encr_tmpl, spi_mac_tmpl);

		cmops = &(params.rp_u.encrypt_mac_params);

		/* careful! structs assignments */
		cmops->em_encr_mech = *encr_mech;
		/*
		 * cmops->em_encr_mech.cm_type will be set when we get to
		 * kcf_emulate_dual() routine.
		 */
		cmops->em_framework_encr_mechtype = encr_mech->cm_type;
		cmops->em_mac_mech = *mac_mech;

		/*
		 * cmops->em_mac_mech.cm_type will be set when we know the
		 * MAC provider.
		 */
		cmops->em_framework_mac_mechtype = mac_mech->cm_type;

		/*
		 * non-NULL ctx->kc_secondctx tells common_submit_request
		 * that this request uses separate cipher and MAC contexts.
		 * That function will set ctx->kc_secondctx to the new
		 * MAC context, once it gets one.
		 */
		encr_kcf_context->kc_secondctx = encr_kcf_context;

		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);

		cr->cr_flag = save_flag;

		if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED) {
			KCF_CONTEXT_REFRELE(encr_kcf_context);
		}
		if (list != NULL)
			kcf_free_triedlist(list);
		*ctxp = (crypto_context_t)ctx;
		KCF_PROV_REFRELE(pd);
		return (error);
	}

	if ((!mac_tmpl_checked) && (pd->pd_prov_type == CRYPTO_SW_PROVIDER)) {
		if ((mac_tmpl != NULL) &&
		    (prov_mac_mechid != CRYPTO_MECH_INVALID)) {
			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {

				if (list != NULL)
					kcf_free_triedlist(list);

				KCF_PROV_REFRELE(pd);
				/* Which one is the old one ? */
				return (CRYPTO_OLD_CTX_TEMPLATE);
			}
			spi_mac_tmpl = ctx_mac_tmpl->ct_prov_tmpl;
		}
		mac_tmpl_checked = B_TRUE;
	}

	if (ctx == NULL) {
		ctx = kcf_new_ctx(cr, pd, pd->pd_sid);
		if (ctx == NULL) {
			if (list != NULL)
				kcf_free_triedlist(list);

			KCF_PROV_REFRELE(pd);
			return (CRYPTO_HOST_MEMORY);
		}
		encr_kcf_context = (kcf_context_t *)ctx->cc_framework_private;
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(cr, pd)) {
		crypto_mechanism_t lencr_mech;
		crypto_mechanism_t lmac_mech;

		/* careful! structs assignments */
		lencr_mech = *encr_mech;
		lencr_mech.cm_type = prov_encr_mechid;
		lmac_mech = *mac_mech;
		lmac_mech.cm_type = prov_mac_mechid;

		error = KCF_PROV_ENCRYPT_MAC_INIT(pd, ctx, &lencr_mech,
		    encr_key, &lmac_mech, mac_key, spi_encr_tmpl, spi_mac_tmpl,
		    KCF_SWFP_RHNDL(cr));

		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(&params, KCF_OP_INIT,
		    pd->pd_sid, encr_key, mac_key, NULL, NULL, NULL,
		    spi_encr_tmpl, spi_mac_tmpl);

		cmops = &(params.rp_u.encrypt_mac_params);

		/* careful! structs assignments */
		cmops->em_encr_mech = *encr_mech;
		cmops->em_encr_mech.cm_type = prov_encr_mechid;
		cmops->em_framework_encr_mechtype = encr_mech->cm_type;
		cmops->em_mac_mech = *mac_mech;
		cmops->em_mac_mech.cm_type = prov_mac_mechid;
		cmops->em_framework_mac_mechtype = mac_mech->cm_type;

		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED) {
		if ((IS_RECOVERABLE(error)) &&
		    (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(cr)) != NULL))
			goto retry;

		KCF_CONTEXT_REFRELE(encr_kcf_context);
	} else
		*ctxp = (crypto_context_t)ctx;

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
604 
/*
 * Continues a multi-part dual encrypt/mac operation.
 *
 * 'context' is the dual context returned by crypto_encrypt_mac_init().
 * When kc_secondctx is set, the operation is being emulated with two
 * separate contexts and both the encrypt and MAC steps are driven here.
 */
/* ARGSUSED */
int
crypto_encrypt_mac_update(crypto_context_t context,
    crypto_data_t *pt, crypto_dual_data_t *ct, crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context, *mac_ctx;
	kcf_context_t *kcf_ctx, *kcf_mac_ctx;
	kcf_provider_desc_t *pd;
	int error;
	kcf_req_params_t params;

	/* Validate the context chain before dereferencing any of it. */
	if ((ctx == NULL) ||
	    ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
	    ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
		return (CRYPTO_INVALID_CONTEXT);
	}

	KCF_PROV_REFHOLD(pd);

	/* Emulated dual operation: separate cipher and MAC contexts. */
	if ((kcf_mac_ctx = kcf_ctx->kc_secondctx) != NULL) {
		off_t save_offset;
		size_t save_len;
		crypto_call_flag_t save_flag;

		if (kcf_mac_ctx->kc_prov_desc == NULL) {
			error = CRYPTO_INVALID_CONTEXT;
			goto out;
		}
		mac_ctx = &kcf_mac_ctx->kc_glbl_ctx;

		/* First we submit the encryption request */
		if (cr == NULL) {
			/*
			 * 'ct' is always not NULL.
			 * A NULL 'pt' means in-place.
			 */
			if (pt == NULL)
				error = crypto_encrypt_update(context,
				    (crypto_data_t *)ct, NULL, NULL);
			else
				error = crypto_encrypt_update(context, pt,
				    (crypto_data_t *)ct, NULL);

			if (error != CRYPTO_SUCCESS)
				goto out;

			/*
			 * call  mac_update when there is data to throw in
			 * the mix. Either an explicitly non-zero ct->dd_len2,
			 * or the last ciphertext portion.
			 */
			save_offset = ct->dd_offset1;
			save_len = ct->dd_len1;
			if (ct->dd_len2 == 0) {
				/*
				 * The previous encrypt step was an
				 * accumulation only and didn't produce any
				 * partial output
				 */
				if (ct->dd_len1 == 0)
					goto out;
			} else {
				/*
				 * Retarget region 1 at the ciphertext
				 * (region 2) for the MAC step; restored
				 * right after.
				 */
				ct->dd_offset1 = ct->dd_offset2;
				ct->dd_len1 = ct->dd_len2;
			}
			error = crypto_mac_update((crypto_context_t)mac_ctx,
			    (crypto_data_t *)ct, NULL);

			ct->dd_offset1 = save_offset;
			ct->dd_len1 = save_len;

			goto out;
		}
		/* submit a pure asynchronous request. */
		save_flag = cr->cr_flag;
		cr->cr_flag |= CRYPTO_ALWAYS_QUEUE;

		KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(&params, KCF_OP_UPDATE,
		    pd->pd_sid, NULL, NULL, pt, ct, NULL, NULL, NULL)


		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);

		cr->cr_flag = save_flag;
		goto out;
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(cr, pd)) {
		error = KCF_PROV_ENCRYPT_MAC_UPDATE(pd, ctx, pt, ct, NULL);
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(&params, KCF_OP_UPDATE,
		    pd->pd_sid, NULL, NULL, pt, ct, NULL, NULL, NULL);

		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
	}
out:
	KCF_PROV_REFRELE(pd);
	return (error);
}
709 
/*
 * Terminates a multi-part dual encrypt/mac operation.
 *
 * Collects the final ciphertext chunk into 'ct' and the MAC into 'mac'.
 * On the emulated (two-context) synchronous path, a failure of the
 * encrypt step cancels the orphaned MAC context since the caller holds
 * no other reference to it.
 */
/* ARGSUSED */
int crypto_encrypt_mac_final(crypto_context_t context, crypto_dual_data_t *ct,
    crypto_data_t *mac, crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context, *mac_ctx;
	kcf_context_t *kcf_ctx, *kcf_mac_ctx;
	kcf_provider_desc_t *pd;
	int error;
	kcf_req_params_t params;

	/* Validate the context chain before dereferencing any of it. */
	if ((ctx == NULL) ||
	    ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
	    ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
		return (CRYPTO_INVALID_CONTEXT);
	}

	KCF_PROV_REFHOLD(pd);

	/* Emulated dual operation: separate cipher and MAC contexts. */
	if ((kcf_mac_ctx = kcf_ctx->kc_secondctx) != NULL) {
		off_t save_offset;
		size_t save_len;
		crypto_context_t mac_context;
		crypto_call_flag_t save_flag;

		if (kcf_mac_ctx->kc_prov_desc == NULL) {
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_INVALID_CONTEXT);
		}
		mac_ctx = &kcf_mac_ctx->kc_glbl_ctx;
		mac_context = (crypto_context_t)mac_ctx;

		if (cr == NULL) {
			/* Get the last chunk of ciphertext */
			error = crypto_encrypt_final(context,
			    (crypto_data_t *)ct, NULL);

			KCF_PROV_REFRELE(pd);

			if (error != CRYPTO_SUCCESS)  {
				/*
				 * Needed here, because the caller of
				 * crypto_encrypt_mac_final() lost all
				 * refs to the mac_ctx.
				 */
				crypto_cancel_ctx(mac_context);
				return (error);
			}
			if (ct->dd_len2 > 0) {
				/*
				 * Retarget region 1 at the final ciphertext
				 * (region 2) for the MAC update; restored
				 * right after.
				 */
				save_offset = ct->dd_offset1;
				save_len = ct->dd_len1;
				ct->dd_offset1 = ct->dd_offset2;
				ct->dd_len1 = ct->dd_len2;

				error = crypto_mac_update(mac_context,
				    (crypto_data_t *)ct, NULL);

				ct->dd_offset1 = save_offset;
				ct->dd_len1 = save_len;

				if (error != CRYPTO_SUCCESS)  {
					crypto_cancel_ctx(mac_context);
					return (error);
				}
			}

			/* and finally, collect the MAC */
			error = crypto_mac_final(mac_context, mac, NULL);

			return (error);
		}
		/* submit a pure asynchronous request. */
		save_flag = cr->cr_flag;
		cr->cr_flag |= CRYPTO_ALWAYS_QUEUE;

		KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(&params, KCF_OP_FINAL,
		    pd->pd_sid, NULL, NULL, NULL, ct, mac, NULL, NULL)


		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);

		cr->cr_flag = save_flag;
		KCF_PROV_REFRELE(pd);
		return (error);
	}
	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(cr, pd)) {
		error = KCF_PROV_ENCRYPT_MAC_FINAL(pd, ctx, ct, mac, NULL);
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_ENCRYPT_MAC_OPS_PARAMS(&params, KCF_OP_FINAL,
		    pd->pd_sid, NULL, NULL, NULL, ct, mac, NULL, NULL);
		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
	}
	/* Reached by fall-through only; no goto targets this label. */
out:
	KCF_PROV_REFRELE(pd);
	/* Release the hold done in kcf_new_ctx() during init step. */
	KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
	return (error);
}
812 
813 /*
814  * Performs an atomic dual mac/decrypt operation. The provider to use
815  * is determined by the KCF dispatcher.
816  */
817 int
818 crypto_mac_decrypt(crypto_mechanism_t *mac_mech,
819     crypto_mechanism_t *decr_mech, crypto_dual_data_t *ct,
820     crypto_key_t *mac_key, crypto_key_t *decr_key,
821     crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
822     crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *crq)
823 {
824 	return (crypto_mac_decrypt_common(mac_mech, decr_mech, ct, mac_key,
825 	    decr_key, mac_tmpl, decr_tmpl, mac, pt, crq, B_FALSE));
826 }
827 
828 /*
829  * Performs an atomic dual mac/decrypt operation. The provider to use
830  * is determined by the KCF dispatcher. 'mac' specifies the expected
831  * value for the MAC. The decryption is not performed if the computed
832  * MAC does not match the expected MAC.
833  */
834 int
835 crypto_mac_verify_decrypt(crypto_mechanism_t *mac_mech,
836     crypto_mechanism_t *decr_mech, crypto_dual_data_t *ct,
837     crypto_key_t *mac_key, crypto_key_t *decr_key,
838     crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
839     crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *crq)
840 {
841 	return (crypto_mac_decrypt_common(mac_mech, decr_mech, ct, mac_key,
842 	    decr_key, mac_tmpl, decr_tmpl, mac, pt, crq, B_TRUE));
843 }
844 
845 /*
846  * Called by both crypto_mac_decrypt() and crypto_mac_verify_decrypt().
847  * optionally verified if the MACs match before calling the decryption step.
848  */
849 static int
850 crypto_mac_decrypt_common(crypto_mechanism_t *mac_mech,
851     crypto_mechanism_t *decr_mech, crypto_dual_data_t *ct,
852     crypto_key_t *mac_key, crypto_key_t *decr_key,
853     crypto_ctx_template_t mac_tmpl, crypto_ctx_template_t decr_tmpl,
854     crypto_data_t *mac, crypto_data_t *pt, crypto_call_req_t *crq,
855     boolean_t do_verify)
856 {
857 	/*
858 	 * First try to find a provider for the decryption mechanism, that
859 	 * is also capable of the MAC mechanism.
860 	 * We still favor optimizing the costlier decryption.
861 	 */
862 	int error;
863 	kcf_mech_entry_t *me;
864 	kcf_provider_desc_t *pd;
865 	kcf_ctx_template_t *ctx_decr_tmpl, *ctx_mac_tmpl;
866 	kcf_req_params_t params;
867 	kcf_mac_decrypt_ops_params_t *cmops;
868 	crypto_spi_ctx_template_t spi_decr_tmpl = NULL, spi_mac_tmpl = NULL;
869 	crypto_mech_type_t prov_decr_mechid, prov_mac_mechid;
870 	kcf_prov_tried_t *list = NULL;
871 	boolean_t decr_tmpl_checked = B_FALSE;
872 	boolean_t mac_tmpl_checked = B_FALSE;
873 	kcf_dual_req_t *next_req = NULL;
874 	crypto_call_req_t mac_req, *mac_reqp = NULL;
875 
876 retry:
877 	/* pd is returned held on success */
878 	pd = kcf_get_dual_provider(decr_mech, mac_mech, &me, &prov_decr_mechid,
879 	    &prov_mac_mechid, &error, list,
880 	    CRYPTO_FG_DECRYPT_ATOMIC | CRYPTO_FG_MAC_DECRYPT_ATOMIC,
881 	    CRYPTO_FG_MAC_ATOMIC | CRYPTO_FG_MAC_DECRYPT_ATOMIC,
882 	    CHECK_RESTRICT(crq), ct->dd_len2);
883 	if (pd == NULL) {
884 		if (list != NULL)
885 			kcf_free_triedlist(list);
886 		if (next_req != NULL)
887 			kmem_free(next_req, sizeof (kcf_dual_req_t));
888 		return (CRYPTO_MECH_NOT_SUPPORTED);
889 	}
890 
891 	/*
892 	 * For SW providers, check the validity of the context template
893 	 * It is very rare that the generation number mis-matches, so
894 	 * is acceptable to fail here, and let the consumer recover by
895 	 * freeing this tmpl and create a new one for the key and new SW
896 	 * provider
897 	 */
898 
899 	if ((!decr_tmpl_checked) && (pd->pd_prov_type == CRYPTO_SW_PROVIDER)) {
900 		if (decr_tmpl != NULL) {
901 			ctx_decr_tmpl = (kcf_ctx_template_t *)decr_tmpl;
902 			if (ctx_decr_tmpl->ct_generation != me->me_gen_swprov) {
903 				if (next_req != NULL)
904 					kmem_free(next_req,
905 					    sizeof (kcf_dual_req_t));
906 				if (list != NULL)
907 					kcf_free_triedlist(list);
908 				KCF_PROV_REFRELE(pd);
909 
910 				/* Which one is the the old one ? */
911 				return (CRYPTO_OLD_CTX_TEMPLATE);
912 			}
913 			spi_decr_tmpl = ctx_decr_tmpl->ct_prov_tmpl;
914 		}
915 		decr_tmpl_checked = B_TRUE;
916 	}
917 	if (prov_mac_mechid == CRYPTO_MECH_INVALID) {
918 		/* Need to emulate with 2 internal calls */
919 
920 		/* Prepare the call_req to be submitted for the MAC step */
921 
922 		if (crq != NULL) {
923 
924 			if (next_req == NULL) {
925 				/*
926 				 * allocate, initialize and prepare the
927 				 * params for the next step only in the
928 				 * first pass (not on every retry).
929 				 */
930 				next_req = kcf_alloc_req(crq);
931 
932 				if (next_req == NULL) {
933 					KCF_PROV_REFRELE(pd);
934 					if (list != NULL)
935 						kcf_free_triedlist(list);
936 					return (CRYPTO_HOST_MEMORY);
937 				}
938 				KCF_WRAP_DECRYPT_OPS_PARAMS(
939 				    &(next_req->kr_params), KCF_OP_ATOMIC,
940 				    NULL, decr_mech, decr_key,
941 				    (crypto_data_t *)ct, pt, spi_decr_tmpl);
942 			}
943 
944 			mac_req.cr_flag = (crq != NULL) ? crq->cr_flag : 0;
945 			mac_req.cr_flag |= CRYPTO_SETDUAL;
946 			mac_req.cr_callback_func = kcf_next_req;
947 			mac_req.cr_callback_arg = next_req;
948 			mac_reqp = &mac_req;
949 		}
950 
951 		/* 'pd' is the decryption provider. */
952 
953 		if (do_verify)
954 			error = crypto_mac_verify(mac_mech, (crypto_data_t *)ct,
955 			    mac_key, mac_tmpl, mac,
956 			    (crq == NULL) ? NULL : mac_reqp);
957 		else
958 			error = crypto_mac(mac_mech, (crypto_data_t *)ct,
959 			    mac_key, mac_tmpl, mac,
960 			    (crq == NULL) ? NULL : mac_reqp);
961 
962 		switch (error) {
963 		case CRYPTO_SUCCESS: {
964 			off_t saveoffset;
965 			size_t savelen;
966 
967 			if (next_req == NULL) {
968 				saveoffset = ct->dd_offset1;
969 				savelen = ct->dd_len1;
970 			} else {
971 				saveoffset = next_req->kr_saveoffset =
972 				    ct->dd_offset1;
973 				savelen = next_req->kr_savelen = ct->dd_len1;
974 
975 				ASSERT(mac_reqp != NULL);
976 				mac_req.cr_flag &= ~CRYPTO_SETDUAL;
977 				mac_req.cr_callback_func = kcf_last_req;
978 			}
979 			ct->dd_offset1 = ct->dd_offset2;
980 			ct->dd_len1 = ct->dd_len2;
981 
982 			if (CHECK_FASTPATH(crq, pd)) {
983 				crypto_mechanism_t lmech;
984 
985 				lmech = *decr_mech;
986 				KCF_SET_PROVIDER_MECHNUM(decr_mech->cm_type,
987 				    pd, &lmech);
988 
989 				error = KCF_PROV_DECRYPT_ATOMIC(pd, pd->pd_sid,
990 				    &lmech, decr_key, (crypto_data_t *)ct,
991 				    (crypto_data_t *)pt, spi_decr_tmpl,
992 				    KCF_SWFP_RHNDL(mac_reqp));
993 
994 				KCF_PROV_INCRSTATS(pd, error);
995 			} else {
996 				KCF_WRAP_DECRYPT_OPS_PARAMS(&params,
997 				    KCF_OP_ATOMIC, pd->pd_sid, decr_mech,
998 				    decr_key, (crypto_data_t *)ct, pt,
999 				    spi_decr_tmpl);
1000 
1001 				error = kcf_submit_request(pd, NULL,
1002 				    (crq == NULL) ? NULL : mac_reqp,
1003 				    &params, B_FALSE);
1004 			}
1005 			if (error != CRYPTO_QUEUED) {
1006 				KCF_PROV_INCRSTATS(pd, error);
1007 				ct->dd_offset1 = saveoffset;
1008 				ct->dd_len1 = savelen;
1009 			}
1010 			break;
1011 		}
1012 
1013 		case CRYPTO_QUEUED:
1014 			if ((crq != NULL) && (crq->cr_flag & CRYPTO_SKIP_REQID))
1015 				crq->cr_reqid = mac_req.cr_reqid;
1016 			break;
1017 
1018 		default:
1019 			if (IS_RECOVERABLE(error)) {
1020 				if (kcf_insert_triedlist(&list, pd,
1021 				    KCF_KMFLAG(crq)) != NULL)
1022 					goto retry;
1023 			}
1024 		}
1025 		if (error != CRYPTO_QUEUED && next_req != NULL)
1026 			kmem_free(next_req, sizeof (kcf_dual_req_t));
1027 		if (list != NULL)
1028 			kcf_free_triedlist(list);
1029 		KCF_PROV_REFRELE(pd);
1030 		return (error);
1031 	}
1032 
1033 	if ((!mac_tmpl_checked) && (pd->pd_prov_type == CRYPTO_SW_PROVIDER)) {
1034 		if ((mac_tmpl != NULL) &&
1035 		    (prov_mac_mechid != CRYPTO_MECH_INVALID)) {
1036 			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
1037 			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
1038 				if (next_req != NULL)
1039 					kmem_free(next_req,
1040 					    sizeof (kcf_dual_req_t));
1041 				if (list != NULL)
1042 					kcf_free_triedlist(list);
1043 				KCF_PROV_REFRELE(pd);
1044 
1045 				/* Which one is the the old one ? */
1046 				return (CRYPTO_OLD_CTX_TEMPLATE);
1047 			}
1048 			spi_mac_tmpl = ctx_mac_tmpl->ct_prov_tmpl;
1049 		}
1050 		mac_tmpl_checked = B_TRUE;
1051 	}
1052 
1053 	/* The fast path for SW providers. */
1054 	if (CHECK_FASTPATH(crq, pd)) {
1055 		crypto_mechanism_t lmac_mech;
1056 		crypto_mechanism_t ldecr_mech;
1057 
1058 		/* careful! structs assignments */
1059 		ldecr_mech = *decr_mech;
1060 		ldecr_mech.cm_type = prov_decr_mechid;
1061 		lmac_mech = *mac_mech;
1062 		lmac_mech.cm_type = prov_mac_mechid;
1063 
1064 		if (do_verify)
1065 			error = KCF_PROV_MAC_VERIFY_DECRYPT_ATOMIC(pd,
1066 			    pd->pd_sid, &lmac_mech, mac_key, &ldecr_mech,
1067 			    decr_key, ct, mac, pt, spi_mac_tmpl, spi_decr_tmpl,
1068 			    KCF_SWFP_RHNDL(crq));
1069 		else
1070 			error = KCF_PROV_MAC_DECRYPT_ATOMIC(pd, pd->pd_sid,
1071 			    &lmac_mech, mac_key, &ldecr_mech, decr_key,
1072 			    ct, mac, pt, spi_mac_tmpl, spi_decr_tmpl,
1073 			    KCF_SWFP_RHNDL(crq));
1074 
1075 		KCF_PROV_INCRSTATS(pd, error);
1076 	} else {
1077 		KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(&params,
1078 		    (do_verify) ? KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC :
1079 		    KCF_OP_ATOMIC, pd->pd_sid, mac_key, decr_key, ct, mac, pt,
1080 		    spi_mac_tmpl, spi_decr_tmpl);
1081 
1082 		cmops = &(params.rp_u.mac_decrypt_params);
1083 
1084 		/* careful! structs assignments */
1085 		cmops->md_decr_mech = *decr_mech;
1086 		cmops->md_decr_mech.cm_type = prov_decr_mechid;
1087 		cmops->md_framework_decr_mechtype = decr_mech->cm_type;
1088 		cmops->md_mac_mech = *mac_mech;
1089 		cmops->md_mac_mech.cm_type = prov_mac_mechid;
1090 		cmops->md_framework_mac_mechtype = mac_mech->cm_type;
1091 
1092 		error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
1093 	}
1094 
1095 	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
1096 	    IS_RECOVERABLE(error)) {
1097 		/* Add pd to the linked list of providers tried. */
1098 		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
1099 			goto retry;
1100 	}
1101 
1102 	if (list != NULL)
1103 		kcf_free_triedlist(list);
1104 
1105 	if (next_req != NULL)
1106 		kmem_free(next_req, sizeof (kcf_dual_req_t));
1107 	KCF_PROV_REFRELE(pd);
1108 	return (error);
1109 }
1110 
1111 /*
1112  * Starts a multi-part dual mac/decrypt operation. The provider to
1113  * use is determined by the KCF dispatcher.
1114  */
1115 /* ARGSUSED */
int
crypto_mac_decrypt_init(crypto_mechanism_t *mac_mech,
    crypto_mechanism_t *decr_mech, crypto_key_t *mac_key,
    crypto_key_t *decr_key, crypto_ctx_template_t mac_tmpl,
    crypto_ctx_template_t decr_tmpl, crypto_context_t *ctxp,
    crypto_call_req_t *cr)
{
	/*
	 * First try to find a provider for the decryption mechanism, that
	 * is also capable of the MAC mechanism.
	 * We still favor optimizing the costlier decryption.
	 */
	int error;
	kcf_mech_entry_t *me;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_decr_tmpl, *ctx_mac_tmpl;
	kcf_req_params_t params;
	kcf_mac_decrypt_ops_params_t *mdops;
	crypto_spi_ctx_template_t spi_decr_tmpl = NULL, spi_mac_tmpl = NULL;
	crypto_mech_type_t prov_decr_mechid, prov_mac_mechid;
	kcf_prov_tried_t *list = NULL;	/* providers already tried (retries) */
	boolean_t decr_tmpl_checked = B_FALSE;
	boolean_t mac_tmpl_checked = B_FALSE;
	crypto_ctx_t *ctx = NULL;
	kcf_context_t *decr_kcf_context = NULL, *mac_kcf_context = NULL;
	crypto_call_flag_t save_flag;

retry:
	/* pd is returned held on success */
	pd = kcf_get_dual_provider(decr_mech, mac_mech, &me, &prov_decr_mechid,
	    &prov_mac_mechid, &error, list,
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_MAC_DECRYPT, CRYPTO_FG_MAC,
	    CHECK_RESTRICT(cr), 0);
	if (pd == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template.
	 * It is very rare that the generation number mis-matches, so it
	 * is acceptable to fail here, and let the consumer recover by
	 * freeing this tmpl and creating a new one for the key and new SW
	 * provider.
	 * Warning! will need to change when multiple software providers
	 * per mechanism are supported.
	 */

	if ((!decr_tmpl_checked) && (pd->pd_prov_type == CRYPTO_SW_PROVIDER)) {
		if (decr_tmpl != NULL) {
			ctx_decr_tmpl = (kcf_ctx_template_t *)decr_tmpl;
			if (ctx_decr_tmpl->ct_generation != me->me_gen_swprov) {

				if (list != NULL)
					kcf_free_triedlist(list);
				if (decr_kcf_context != NULL)
					KCF_CONTEXT_REFRELE(decr_kcf_context);

				KCF_PROV_REFRELE(pd);
				/* Which one is the old one ? */
				return (CRYPTO_OLD_CTX_TEMPLATE);
			}
			spi_decr_tmpl = ctx_decr_tmpl->ct_prov_tmpl;
		}
		decr_tmpl_checked = B_TRUE;
	}

	if (prov_mac_mechid == CRYPTO_MECH_INVALID) {
		/* Need to emulate with 2 internal calls */

		/*
		 * We avoid code complexity by limiting the pure async.
		 * case to be done using only a SW provider.
		 * XXX - Redo the emulation code below so that we can
		 * remove this limitation.
		 */
		if (cr != NULL && pd->pd_prov_type == CRYPTO_HW_PROVIDER) {
			/* Try another provider; give up if we can't record pd */
			if ((kcf_insert_triedlist(&list, pd, KCF_KMFLAG(cr))
			    != NULL))
				goto retry;
			if (list != NULL)
				kcf_free_triedlist(list);
			if (decr_kcf_context != NULL)
				KCF_CONTEXT_REFRELE(decr_kcf_context);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_HOST_MEMORY);
		}

		if (ctx == NULL && pd->pd_prov_type == CRYPTO_SW_PROVIDER) {
			/* Allocate the decrypt context returned to the caller */
			ctx = kcf_new_ctx(cr, pd, pd->pd_sid);
			if (ctx == NULL) {
				if (list != NULL)
					kcf_free_triedlist(list);
				if (decr_kcf_context != NULL)
					KCF_CONTEXT_REFRELE(decr_kcf_context);
				KCF_PROV_REFRELE(pd);
				return (CRYPTO_HOST_MEMORY);
			}
			decr_kcf_context = (kcf_context_t *)
			    ctx->cc_framework_private;
		}
		/*
		 * Trade-off speed vs avoidance of code complexity and
		 * duplication:
		 * Could do all the combinations of fastpath / synch / asynch
		 * for the decryption and the mac steps. Early attempts
		 * showed the code grew wild and bug-prone, for little gain.
		 * Therefore, the adaptative asynch case is not implemented.
		 * It's either pure synchronous, or pure asynchronous.
		 * We still preserve a fastpath for the pure synchronous
		 * requests to SW providers.
		 */
		if (cr == NULL) {
			crypto_context_t mac_context;

			/* Pure synchronous emulation: MAC context first. */
			error = crypto_mac_init(mac_mech, mac_key, mac_tmpl,
			    &mac_context, NULL);

			if (error != CRYPTO_SUCCESS) {
				/* Can't be CRYPTO_QUEUED. return the failure */
				/*
				 * NOTE(review): 'pd' is still held on this
				 * path; a KCF_PROV_REFRELE(pd) appears to be
				 * missing here -- confirm.
				 */
				if (list != NULL)
					kcf_free_triedlist(list);

				if (decr_kcf_context != NULL)
					KCF_CONTEXT_REFRELE(decr_kcf_context);
				return (error);
			}
			if (pd->pd_prov_type == CRYPTO_SW_PROVIDER) {
				crypto_mechanism_t lmech = *decr_mech;

				/* Translate to the provider's mech number */
				lmech.cm_type = prov_decr_mechid;

				error = KCF_PROV_DECRYPT_INIT(pd, ctx, &lmech,
				    decr_key, spi_decr_tmpl,
				    KCF_RHNDL(KM_SLEEP));
			} else {
				/*
				 * If we did the 'goto retry' then ctx may not
				 * be NULL.  In general, we can't reuse another
				 * provider's context, so we free it now so
				 * we don't leak it.
				 */
				if (ctx != NULL) {
					KCF_CONTEXT_REFRELE((kcf_context_t *)
					    ctx->cc_framework_private);
					decr_kcf_context = NULL;
				}
				error = crypto_decrypt_init_prov(pd, pd->pd_sid,
				    decr_mech, decr_key, &decr_tmpl,
				    (crypto_context_t *)&ctx, NULL);

				if (error == CRYPTO_SUCCESS) {
					decr_kcf_context = (kcf_context_t *)
					    ctx->cc_framework_private;
				}
			}

			KCF_PROV_INCRSTATS(pd, error);

			KCF_PROV_REFRELE(pd);

			if (error != CRYPTO_SUCCESS) {
				/* Can't be CRYPTO_QUEUED. return the failure */
				/*
				 * NOTE(review): mac_kcf_context is still NULL
				 * here (it is only assigned below), so the MAC
				 * context created by crypto_mac_init() above
				 * appears to be leaked on this path -- confirm.
				 */
				if (list != NULL)
					kcf_free_triedlist(list);
				if (mac_kcf_context != NULL)
					KCF_CONTEXT_REFRELE(mac_kcf_context);

				return (error);
			}
			mac_kcf_context = (kcf_context_t *)
			    ((crypto_ctx_t *)mac_context)->
			    cc_framework_private;

			decr_kcf_context = (kcf_context_t *)
			    ctx->cc_framework_private;

			/*
			 * Here also, the mac context is second. The callback
			 * case can't overwrite the context returned to
			 * the caller.
			 */
			decr_kcf_context->kc_secondctx = mac_kcf_context;
			KCF_CONTEXT_REFHOLD(mac_kcf_context);

			*ctxp = (crypto_context_t)ctx;

			return (error);
		}
		/* submit a pure asynchronous request. */
		save_flag = cr->cr_flag;
		cr->cr_flag |= CRYPTO_ALWAYS_QUEUE;

		KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(&params, KCF_OP_INIT,
		    pd->pd_sid, mac_key, decr_key, NULL, NULL, NULL,
		    spi_mac_tmpl, spi_decr_tmpl);

		mdops = &(params.rp_u.mac_decrypt_params);

		/* careful! structs assignments */
		mdops->md_decr_mech = *decr_mech;
		/*
		 * mdops->md_decr_mech.cm_type will be set when we get to
		 * kcf_emulate_dual() routine.
		 */
		mdops->md_framework_decr_mechtype = decr_mech->cm_type;
		mdops->md_mac_mech = *mac_mech;

		/*
		 * mdops->md_mac_mech.cm_type will be set when we know the
		 * MAC provider.
		 */
		mdops->md_framework_mac_mechtype = mac_mech->cm_type;

		/*
		 * non-NULL ctx->kc_secondctx tells common_submit_request
		 * that this request uses separate cipher and MAC contexts.
		 * That function will set the MAC context's kc_secondctx to
		 * this decrypt context.
		 */
		decr_kcf_context->kc_secondctx = decr_kcf_context;

		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);

		/* Restore the caller's flags after forcing ALWAYS_QUEUE */
		cr->cr_flag = save_flag;

		if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED) {
			KCF_CONTEXT_REFRELE(decr_kcf_context);
		}
		if (list != NULL)
			kcf_free_triedlist(list);
		*ctxp =  ctx;
		KCF_PROV_REFRELE(pd);
		return (error);
	}

	if ((!mac_tmpl_checked) && (pd->pd_prov_type == CRYPTO_SW_PROVIDER)) {
		if ((mac_tmpl != NULL) &&
		    (prov_mac_mechid != CRYPTO_MECH_INVALID)) {
			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {

				if (list != NULL)
					kcf_free_triedlist(list);

				KCF_PROV_REFRELE(pd);
				/* Which one is the old one ? */
				return (CRYPTO_OLD_CTX_TEMPLATE);
			}
			spi_mac_tmpl = ctx_mac_tmpl->ct_prov_tmpl;
		}
		mac_tmpl_checked = B_TRUE;
	}

	if (ctx == NULL) {
		/* One provider handles both mechanisms: a single context */
		ctx = kcf_new_ctx(cr, pd, pd->pd_sid);
		if (ctx == NULL) {
			error = CRYPTO_HOST_MEMORY;
			/*
			 * NOTE(review): 'pd' is still held on this path; a
			 * KCF_PROV_REFRELE(pd) appears to be missing.  Also
			 * the 'error' assignment above is never used --
			 * confirm.
			 */
			if (list != NULL)
				kcf_free_triedlist(list);
			return (CRYPTO_HOST_MEMORY);
		}
		decr_kcf_context = (kcf_context_t *)ctx->cc_framework_private;
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(cr, pd)) {
		crypto_mechanism_t ldecr_mech;
		crypto_mechanism_t lmac_mech;

		/* careful! structs assignments */
		ldecr_mech = *decr_mech;
		ldecr_mech.cm_type = prov_decr_mechid;
		lmac_mech = *mac_mech;
		lmac_mech.cm_type = prov_mac_mechid;

		error = KCF_PROV_MAC_DECRYPT_INIT(pd, ctx, &lmac_mech,
		    mac_key, &ldecr_mech, decr_key, spi_mac_tmpl, spi_decr_tmpl,
		    KCF_SWFP_RHNDL(cr));

		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(&params, KCF_OP_INIT,
		    pd->pd_sid, mac_key, decr_key, NULL, NULL, NULL,
		    spi_mac_tmpl, spi_decr_tmpl);

		mdops = &(params.rp_u.mac_decrypt_params);

		/* careful! structs assignments */
		mdops->md_decr_mech = *decr_mech;
		mdops->md_decr_mech.cm_type = prov_decr_mechid;
		mdops->md_framework_decr_mechtype = decr_mech->cm_type;
		mdops->md_mac_mech = *mac_mech;
		mdops->md_mac_mech.cm_type = prov_mac_mechid;
		mdops->md_framework_mac_mechtype = mac_mech->cm_type;

		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED) {
		/* On a recoverable failure, record pd and try another one */
		if ((IS_RECOVERABLE(error)) &&
		    (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(cr)) != NULL))
			goto retry;

		KCF_CONTEXT_REFRELE(decr_kcf_context);
	} else
		*ctxp = (crypto_context_t)ctx;

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
1431 
1432 /*
1433  * Continues a multi-part dual mac/decrypt operation.
1434  */
1435 /* ARGSUSED */
int
crypto_mac_decrypt_update(crypto_context_t context,
    crypto_dual_data_t *ct, crypto_data_t *pt, crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context, *mac_ctx;
	kcf_context_t *kcf_ctx, *kcf_mac_ctx;
	kcf_provider_desc_t *pd;
	int error;
	kcf_req_params_t params;

	/* Validate the context handle before touching provider state. */
	if ((ctx == NULL) ||
	    ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
	    ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
		return (CRYPTO_INVALID_CONTEXT);
	}

	/* Hold the provider for the duration of this call. */
	KCF_PROV_REFHOLD(pd);

	/*
	 * A non-NULL kc_secondctx means the init step set up emulation
	 * with separate MAC and decrypt contexts.
	 */
	if ((kcf_mac_ctx = kcf_ctx->kc_secondctx) != NULL) {
		off_t save_offset;
		size_t save_len;
		crypto_call_flag_t save_flag;

		if (kcf_mac_ctx->kc_prov_desc == NULL) {
			error = CRYPTO_INVALID_CONTEXT;
			goto out;
		}
		mac_ctx = &kcf_mac_ctx->kc_glbl_ctx;

		/* First we submit the MAC request */
		if (cr == NULL) {
			/*
			 * 'ct' is always not NULL.
			 */
			error = crypto_mac_update((crypto_context_t)mac_ctx,
			    (crypto_data_t *)ct, NULL);

			if (error != CRYPTO_SUCCESS)
				goto out;

			/* Decrypt a different length only when told so */

			save_offset = ct->dd_offset1;
			save_len = ct->dd_len1;

			/* Temporarily switch to the secondary region */
			if (ct->dd_len2 > 0) {
				ct->dd_offset1 = ct->dd_offset2;
				ct->dd_len1 = ct->dd_len2;
			}

			error = crypto_decrypt_update(context,
			    (crypto_data_t *)ct, pt, NULL);

			/* Restore the caller's view of the dual data */
			ct->dd_offset1 = save_offset;
			ct->dd_len1 = save_len;

			goto out;
		}
		/* submit a pure asynchronous request. */
		save_flag = cr->cr_flag;
		cr->cr_flag |= CRYPTO_ALWAYS_QUEUE;

		/*
		 * NOTE: no trailing semicolon here -- this relies on the
		 * macro expanding to a complete statement.
		 */
		KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE,
		    pd->pd_sid, NULL, NULL, ct, NULL, pt, NULL, NULL)


		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);

		/* Restore the caller's flags after forcing ALWAYS_QUEUE */
		cr->cr_flag = save_flag;
		goto out;
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(cr, pd)) {
		error = KCF_PROV_MAC_DECRYPT_UPDATE(pd, ctx, ct, pt, NULL);
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(&params, KCF_OP_UPDATE,
		    pd->pd_sid, NULL, NULL, ct, NULL, pt, NULL, NULL);

		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
	}
out:
	KCF_PROV_REFRELE(pd);
	return (error);
}
1522 
1523 /*
1524  * Terminates a multi-part dual mac/decrypt operation.
1525  */
1526 /* ARGSUSED */
int
crypto_mac_decrypt_final(crypto_context_t context, crypto_data_t *mac,
    crypto_data_t *pt, crypto_call_req_t *cr)
{
	crypto_ctx_t *ctx = (crypto_ctx_t *)context, *mac_ctx;
	kcf_context_t *kcf_ctx, *kcf_mac_ctx;
	kcf_provider_desc_t *pd;
	int error;
	kcf_req_params_t params;

	/* Validate the context handle before touching provider state. */
	if ((ctx == NULL) ||
	    ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
	    ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
		return (CRYPTO_INVALID_CONTEXT);
	}

	/* Hold the provider for the duration of this call. */
	KCF_PROV_REFHOLD(pd);

	/*
	 * A non-NULL kc_secondctx means the init step set up emulation
	 * with separate MAC and decrypt contexts.
	 *
	 * NOTE(review): both emulation paths below return without the
	 * KCF_CONTEXT_COND_RELEASE() done at 'out:' -- presumably the
	 * underlying crypto_mac_final()/crypto_decrypt_final() calls
	 * release their contexts; confirm.
	 */
	if ((kcf_mac_ctx = kcf_ctx->kc_secondctx) != NULL) {
		crypto_call_flag_t save_flag;

		if (kcf_mac_ctx->kc_prov_desc == NULL) {
			error = CRYPTO_INVALID_CONTEXT;
			goto out;
		}
		mac_ctx = &kcf_mac_ctx->kc_glbl_ctx;

		/* First we collect the MAC */
		if (cr == NULL) {

			error = crypto_mac_final((crypto_context_t)mac_ctx,
			    mac, NULL);

			if (error != CRYPTO_SUCCESS) {
				/* Tear down the decrypt half on MAC failure */
				crypto_cancel_ctx(ctx);
			} else {
				/* Get the last chunk of plaintext */
				error = crypto_decrypt_final(context, pt, NULL);
			}

			KCF_PROV_REFRELE(pd);
			return (error);
		}
		/* submit a pure asynchronous request. */
		save_flag = cr->cr_flag;
		cr->cr_flag |= CRYPTO_ALWAYS_QUEUE;

		/*
		 * NOTE: no trailing semicolon here -- this relies on the
		 * macro expanding to a complete statement.
		 */
		KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(&params, KCF_OP_FINAL,
		    pd->pd_sid, NULL, NULL, NULL, mac, pt, NULL, NULL)


		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);

		/* Restore the caller's flags after forcing ALWAYS_QUEUE */
		cr->cr_flag = save_flag;

		KCF_PROV_REFRELE(pd);
		return (error);
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(cr, pd)) {
		error = KCF_PROV_MAC_DECRYPT_FINAL(pd, ctx, mac, pt, NULL);
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_MAC_DECRYPT_OPS_PARAMS(&params, KCF_OP_FINAL,
		    pd->pd_sid, NULL, NULL, NULL, mac, pt, NULL, NULL);

		error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
	}
out:
	KCF_PROV_REFRELE(pd);
	/* Release the hold done in kcf_new_ctx() during init step. */
	KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
	return (error);
}
1602 
1603 /*
1604  * Digest/Encrypt dual operation. Project-private entry point, not part of
1605  * the k-API.
1606  */
1607 /* ARGSUSED */
int
crypto_digest_encrypt_update(crypto_context_t digest_ctx,
    crypto_context_t encrypt_ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_call_req_t *crq)
{
	/*
	 * Unimplemented stub: always fails with CRYPTO_NOT_SUPPORTED.
	 *
	 * RFE 4688647:
	 * core functions needed by ioctl interface missing from impl.h
	 */
	return (CRYPTO_NOT_SUPPORTED);
}
1619 
1620 /*
1621  * Decrypt/Digest dual operation. Project-private entry point, not part of
1622  * the k-API.
1623  */
1624 /* ARGSUSED */
int
crypto_decrypt_digest_update(crypto_context_t decryptctx,
    crypto_context_t encrypt_ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_call_req_t *crq)
{
	/*
	 * Unimplemented stub: always fails with CRYPTO_NOT_SUPPORTED.
	 *
	 * RFE 4688647:
	 * core functions needed by ioctl interface missing from impl.h
	 */
	return (CRYPTO_NOT_SUPPORTED);
}
1636 
1637 /*
1638  * Sign/Encrypt dual operation. Project-private entry point, not part of
1639  * the k-API.
1640  */
1641 /* ARGSUSED */
int
crypto_sign_encrypt_update(crypto_context_t sign_ctx,
    crypto_context_t encrypt_ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_call_req_t *crq)
{
	/*
	 * Unimplemented stub: always fails with CRYPTO_NOT_SUPPORTED.
	 *
	 * RFE 4688647:
	 * core functions needed by ioctl interface missing from impl.h
	 */
	return (CRYPTO_NOT_SUPPORTED);
}
1653 
1654 /*
1655  * Decrypt/Verify dual operation. Project-private entry point, not part of
1656  * the k-API.
1657  */
1658 /* ARGSUSED */
int
crypto_decrypt_verify_update(crypto_context_t decrypt_ctx,
    crypto_context_t verify_ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_call_req_t *crq)
{
	/*
	 * Unimplemented stub: always fails with CRYPTO_NOT_SUPPORTED.
	 *
	 * RFE 4688647:
	 * core functions needed by ioctl interface missing from impl.h
	 */
	return (CRYPTO_NOT_SUPPORTED);
}
1670