xref: /titanic_52/usr/src/uts/common/crypto/io/sha1_mod.c (revision ac448965596bc1c42f7accb3023f48d5fa9b8180)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/modctl.h>
30 #include <sys/cmn_err.h>
31 #include <sys/note.h>
32 #include <sys/crypto/common.h>
33 #include <sys/crypto/spi.h>
34 #include <sys/strsun.h>
35 #include <sys/systm.h>
36 #include <sys/sysmacros.h>
37 
38 #include <sys/sha1.h>
39 
40 /*
41  * The sha1 module is created with two module linkages:
42  * - a modlmisc that allows consumers to directly call the entry points
43  *   SHA1Init, SHA1Update, and SHA1Final.
44  * - a modlcrypto that allows the module to register with the Kernel
45  *   Cryptographic Framework (KCF) as a software provider for the SHA1
46  *   mechanisms.
47  */
48 
49 static struct modlmisc modlmisc = {
50 	&mod_miscops,
51 	"SHA1 Message-Digest Algorithm"
52 };
53 
54 static struct modlcrypto modlcrypto = {
55 	&mod_cryptoops,
56 	"SHA1 Kernel SW Provider 1.1"
57 };
58 
59 static struct modlinkage modlinkage = {
60 	MODREV_1, &modlmisc, &modlcrypto, NULL
61 };
62 
63 /*
64  * CSPI information (entry points, provider info, etc.)
65  */
66 
67 typedef enum sha1_mech_type {
68 	SHA1_MECH_INFO_TYPE,		/* SUN_CKM_SHA1 */
69 	SHA1_HMAC_MECH_INFO_TYPE,	/* SUN_CKM_SHA1_HMAC */
70 	SHA1_HMAC_GEN_MECH_INFO_TYPE	/* SUN_CKM_SHA1_HMAC_GENERAL */
71 } sha1_mech_type_t;
72 
73 #define	SHA1_DIGEST_LENGTH	20	/* SHA1 digest length in bytes */
74 #define	SHA1_HMAC_BLOCK_SIZE	64	/* SHA1-HMAC block size */
75 #define	SHA1_HMAC_MIN_KEY_LEN	8	/* SHA1-HMAC min key length in bits */
76 #define	SHA1_HMAC_MAX_KEY_LEN	INT_MAX /* SHA1-HMAC max key length in bits */
77 #define	SHA1_HMAC_INTS_PER_BLOCK	(SHA1_HMAC_BLOCK_SIZE/sizeof (uint32_t))
78 
79 /*
80  * Context for SHA1 mechanism.
81  */
82 typedef struct sha1_ctx {
83 	sha1_mech_type_t	sc_mech_type;	/* type of context */
84 	SHA1_CTX		sc_sha1_ctx;	/* SHA1 context */
85 } sha1_ctx_t;
86 
87 /*
88  * Context for SHA1-HMAC and SHA1-HMAC-GENERAL mechanisms.
89  */
90 typedef struct sha1_hmac_ctx {
91 	sha1_mech_type_t	hc_mech_type;	/* type of context */
92 	uint32_t		hc_digest_len;	/* digest len in bytes */
93 	SHA1_CTX		hc_icontext;	/* inner SHA1 context */
94 	SHA1_CTX		hc_ocontext;	/* outer SHA1 context */
95 } sha1_hmac_ctx_t;
96 
97 /*
98  * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
99  * by KCF to one of the entry points.
100  */
101 
102 #define	PROV_SHA1_CTX(ctx)	((sha1_ctx_t *)(ctx)->cc_provider_private)
103 #define	PROV_SHA1_HMAC_CTX(ctx)	((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)
104 
105 /* to extract the digest length passed as mechanism parameter */
106 #define	PROV_SHA1_GET_DIGEST_LEN(m, len) {				\
107 	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
108 		(len) = (uint32_t)*((ulong_t *)(m)->cm_param);		\
109 	else {								\
110 		ulong_t tmp_ulong;					\
111 		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
112 		(len) = (uint32_t)tmp_ulong;				\
113 	}								\
114 }
115 
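/*
 * Digest a key with plain SHA1.  Used by the HMAC code to shrink keys
 * that are longer than SHA1_HMAC_BLOCK_SIZE down to SHA1_DIGEST_LENGTH
 * bytes before the inner and outer contexts are initialized.
 */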
116 #define	PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) {	\
117 	SHA1Init(ctx);					\
118 	SHA1Update(ctx, key, len);			\
119 	SHA1Final(digest, ctx);				\
120 }
121 
122 /*
123  * Mechanism info structure passed to KCF during registration.
124  */
125 static crypto_mech_info_t sha1_mech_info_tab[] = {
126 	/* SHA1 */
127 	{SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
128 	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
129 	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
130 	/* SHA1-HMAC */
131 	{SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
132 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
133 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
134 	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
135 	/* SHA1-HMAC GENERAL */
136 	{SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
137 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
138 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
139 	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
140 };
141 
142 static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
143 
144 static crypto_control_ops_t sha1_control_ops = {
145 	sha1_provider_status
146 };
147 
148 static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
149     crypto_req_handle_t);
150 static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
151     crypto_req_handle_t);
152 static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
153     crypto_req_handle_t);
154 static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
155     crypto_req_handle_t);
156 static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
157     crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
158     crypto_req_handle_t);
159 
160 static crypto_digest_ops_t sha1_digest_ops = {
161 	sha1_digest_init,
162 	sha1_digest,
163 	sha1_digest_update,
164 	NULL,
165 	sha1_digest_final,
166 	sha1_digest_atomic
167 };
168 
169 static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
170     crypto_spi_ctx_template_t, crypto_req_handle_t);
171 static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
172     crypto_req_handle_t);
173 static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
174 static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
175     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
176     crypto_spi_ctx_template_t, crypto_req_handle_t);
177 static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
178     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
179     crypto_spi_ctx_template_t, crypto_req_handle_t);
180 
181 static crypto_mac_ops_t sha1_mac_ops = {
182 	sha1_mac_init,
183 	NULL,
184 	sha1_mac_update,
185 	sha1_mac_final,
186 	sha1_mac_atomic,
187 	sha1_mac_verify_atomic
188 };
189 
190 static int sha1_create_ctx_template(crypto_provider_handle_t,
191     crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
192     size_t *, crypto_req_handle_t);
193 static int sha1_free_context(crypto_ctx_t *);
194 
195 static crypto_ctx_ops_t sha1_ctx_ops = {
196 	sha1_create_ctx_template,
197 	sha1_free_context
198 };
199 
200 static crypto_ops_t sha1_crypto_ops = {
201 	&sha1_control_ops,
202 	&sha1_digest_ops,
203 	NULL,
204 	&sha1_mac_ops,
205 	NULL,
206 	NULL,
207 	NULL,
208 	NULL,
209 	NULL,
210 	NULL,
211 	NULL,
212 	NULL,
213 	NULL,
214 	&sha1_ctx_ops
215 };
216 
217 static crypto_provider_info_t sha1_prov_info = {
218 	CRYPTO_SPI_VERSION_1,
219 	"SHA1 Software Provider",
220 	CRYPTO_SW_PROVIDER,
221 	{&modlinkage},
222 	NULL,
223 	&sha1_crypto_ops,
224 	sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
225 	sha1_mech_info_tab
226 };
227 
228 static crypto_kcf_provider_handle_t sha1_prov_handle = NULL;
229 
230 int
231 _init()
232 {
233 	int ret;
234 
235 	if ((ret = mod_install(&modlinkage)) != 0)
236 		return (ret);
237 
238 	/*
239 	 * Register with KCF. If the registration fails, log an
240 	 * error but do not uninstall the module, since the functionality
241 	 * provided by misc/sha1 should still be available.
242 	 */
243 	if ((ret = crypto_register_provider(&sha1_prov_info,
244 	    &sha1_prov_handle)) != CRYPTO_SUCCESS)
245 		cmn_err(CE_WARN, "sha1 _init: "
246 		    "crypto_register_provider() failed (0x%x)", ret);
247 
248 	return (0);
249 }
250 
251 int
252 _info(struct modinfo *modinfop)
253 {
254 	return (mod_info(&modlinkage, modinfop));
255 }
256 
257 /*
258  * KCF software provider control entry points.
259  */
260 /* ARGSUSED */
261 static void
262 sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
263 {
264 	*status = CRYPTO_PROVIDER_READY;
265 }
266 
267 /*
268  * KCF software provider digest entry points.
269  */
270 
271 static int
272 sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
273     crypto_req_handle_t req)
274 {
275 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
276 		return (CRYPTO_MECHANISM_INVALID);
277 
278 	/*
279 	 * Allocate and initialize SHA1 context.
280 	 */
281 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
282 	    crypto_kmflag(req));
283 	if (ctx->cc_provider_private == NULL)
284 		return (CRYPTO_HOST_MEMORY);
285 
286 	PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
287 	SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
288 
289 	return (CRYPTO_SUCCESS);
290 }
291 
292 /*
293  * Helper SHA1 digest update function for uio data.
294  */
295 static int
296 sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
297 {
298 	off_t offset = data->cd_offset;
299 	size_t length = data->cd_length;
300 	uint_t vec_idx;
301 	size_t cur_len;
302 
303 	/* we support only kernel buffers */
304 	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
305 		return (CRYPTO_ARGUMENTS_BAD);
306 
307 	/*
308 	 * Jump to the first iovec containing data to be
309 	 * digested.
310 	 */
311 	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
312 	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
313 	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len);
314 	if (vec_idx == data->cd_uio->uio_iovcnt) {
315 		/*
316 		 * The caller specified an offset that is larger than the
317 		 * total size of the buffers it provided.
318 		 */
319 		return (CRYPTO_DATA_LEN_RANGE);
320 	}
321 
322 	/*
323 	 * Now do the digesting on the iovecs.
324 	 */
325 	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
326 		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
327 		    offset, length);
328 
329 		SHA1Update(sha1_ctx,
330 		    (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
331 		    cur_len);
332 
333 		length -= cur_len;
334 		vec_idx++;
335 		offset = 0;
336 	}
337 
338 	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
339 		/*
340 		 * The end of the specified iovecs was reached, but the
341 		 * requested length could not be processed; i.e., the caller
342 		 * requested to digest more data than it provided.
343 		 */
344 		return (CRYPTO_DATA_LEN_RANGE);
345 	}
346 
347 	return (CRYPTO_SUCCESS);
348 }
349 
350 /*
351  * Helper SHA1 digest final function for uio data.
352  * digest_len is the length of the desired digest. If digest_len
353  * is smaller than the default SHA1 digest length, the caller
354  * must pass a scratch buffer, digest_scratch, which must
355  * be at least SHA1_DIGEST_LENGTH bytes.
356  */
357 static int
358 sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
359     ulong_t digest_len, uchar_t *digest_scratch)
360 {
361 	off_t offset = digest->cd_offset;
362 	uint_t vec_idx;
363 
364 	/* we support only kernel buffer */
365 	/* we support only kernel buffers */
366 		return (CRYPTO_ARGUMENTS_BAD);
367 
368 	/*
369 	 * Jump to the first iovec containing ptr to the digest to
370 	 * be returned.
371 	 */
372 	for (vec_idx = 0; vec_idx < digest->cd_uio->uio_iovcnt &&
373 	    offset >= digest->cd_uio->uio_iov[vec_idx].iov_len;
374 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len);
375 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
376 		/*
377 		 * The caller specified an offset that is
378 		 * larger than the total size of the buffers
379 		 * it provided.
380 		 */
381 		return (CRYPTO_DATA_LEN_RANGE);
382 	}
383 
384 	if (offset + digest_len <=
385 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
386 		/*
387 		 * The computed SHA1 digest will fit in the current
388 		 * iovec.
389 		 */
390 		if (digest_len != SHA1_DIGEST_LENGTH) {
391 			/*
392 			 * The caller requested a short digest. Digest
393 			 * into a scratch buffer and return to
394 			 * the user only what was requested.
395 			 */
396 			SHA1Final(digest_scratch, sha1_ctx);
397 			bcopy(digest_scratch, (uchar_t *)digest->
398 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
399 			    digest_len);
400 		} else {
401 			SHA1Final((uchar_t *)digest->
402 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
403 			    sha1_ctx);
404 		}
405 	} else {
406 		/*
407 		 * The computed digest will cross one or more iovecs.
408 		 * This is bad performance-wise, but we need to support it.
409 		 * Allocate a small scratch buffer on the stack and
410 		 * copy it piecemeal to the specified digest iovecs.
411 		 */
412 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
413 		off_t scratch_offset = 0;
414 		size_t length = digest_len;
415 		size_t cur_len;
416 
417 		SHA1Final(digest_tmp, sha1_ctx);
418 
419 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
420 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
421 			    offset, length);
422 			bcopy(digest_tmp + scratch_offset,
423 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
424 			    cur_len);
425 
426 			length -= cur_len;
427 			vec_idx++;
428 			scratch_offset += cur_len;
429 			offset = 0;
430 		}
431 
432 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
433 			/*
434 			 * The end of the specified iovecs was reached, but
435 			 * the requested length could not be processed; i.e.,
436 			 * the caller requested to digest more data than it
437 			 * provided.
438 			 */
439 			return (CRYPTO_DATA_LEN_RANGE);
440 		}
441 	}
442 
443 	return (CRYPTO_SUCCESS);
444 }
445 
446 /*
447  * Helper SHA1 digest update for mblks.
448  */
449 static int
450 sha1_digest_update_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *data)
451 {
452 	off_t offset = data->cd_offset;
453 	size_t length = data->cd_length;
454 	mblk_t *mp;
455 	size_t cur_len;
456 
457 	/*
458 	 * Jump to the first mblk_t containing data to be digested.
459 	 */
460 	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
461 	    offset -= MBLKL(mp), mp = mp->b_cont);
462 	if (mp == NULL) {
463 		/*
464 		 * The caller specified an offset that is larger than the
465 		 * total size of the buffers it provided.
466 		 */
467 		return (CRYPTO_DATA_LEN_RANGE);
468 	}
469 
470 	/*
471 	 * Now do the digesting on the mblk chain.
472 	 */
473 	while (mp != NULL && length > 0) {
474 		cur_len = MIN(MBLKL(mp) - offset, length);
475 		SHA1Update(sha1_ctx, mp->b_rptr + offset, cur_len);
476 		length -= cur_len;
477 		offset = 0;
478 		mp = mp->b_cont;
479 	}
480 
481 	if (mp == NULL && length > 0) {
482 		/*
483 		 * The end of the mblk chain was reached, but the requested
484 		 * length could not be processed; i.e., the caller requested
485 		 * to digest more data than it provided.
486 		 */
487 		return (CRYPTO_DATA_LEN_RANGE);
488 	}
489 
490 	return (CRYPTO_SUCCESS);
491 }
492 
493 /*
494  * Helper SHA1 digest final for mblks.
495  * digest_len is the length of the desired digest. If digest_len
496  * is smaller than the default SHA1 digest length, the caller
497  * must pass a scratch buffer, digest_scratch, which must
498  * be at least SHA1_DIGEST_LENGTH bytes.
499  */
500 static int
501 sha1_digest_final_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
502     ulong_t digest_len, uchar_t *digest_scratch)
503 {
504 	off_t offset = digest->cd_offset;
505 	mblk_t *mp;
506 
507 	/*
508 	 * Jump to the first mblk_t that will be used to store the digest.
509 	 */
510 	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
511 	    offset -= MBLKL(mp), mp = mp->b_cont);
512 	if (mp == NULL) {
513 		/*
514 		 * The caller specified an offset that is larger than the
515 		 * total size of the buffers it provided.
516 		 */
517 		return (CRYPTO_DATA_LEN_RANGE);
518 	}
519 
520 	if (offset + digest_len <= MBLKL(mp)) {
521 		/*
522 		 * The computed SHA1 digest will fit in the current mblk.
523 		 * Do the SHA1Final() in-place.
524 		 */
525 		if (digest_len != SHA1_DIGEST_LENGTH) {
526 			/*
527 			 * The caller requested a short digest. Digest
528 			 * into a scratch buffer and return to
529 			 * the user only what was requested.
530 			 */
531 			SHA1Final(digest_scratch, sha1_ctx);
532 			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
533 		} else {
534 			SHA1Final(mp->b_rptr + offset, sha1_ctx);
535 		}
536 	} else {
537 		/*
538 		 * The computed digest will cross one or more mblks.
539 		 * This is bad performance-wise, but we need to support it.
540 		 * Allocate a small scratch buffer on the stack and
541 		 * copy it piecemeal to the specified digest mblks.
542 		 */
543 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
544 		off_t scratch_offset = 0;
545 		size_t length = digest_len;
546 		size_t cur_len;
547 
548 		SHA1Final(digest_tmp, sha1_ctx);
549 
550 		while (mp != NULL && length > 0) {
551 			cur_len = MIN(MBLKL(mp) - offset, length);
552 			bcopy(digest_tmp + scratch_offset,
553 			    mp->b_rptr + offset, cur_len);
554 
555 			length -= cur_len;
556 			mp = mp->b_cont;
557 			scratch_offset += cur_len;
558 			offset = 0;
559 		}
560 
561 		if (mp == NULL && length > 0) {
562 			/*
563 			 * The end of the mblk chain was reached, but the
564 			 * requested length could not be processed; i.e., the
565 			 * caller requested to digest more data than it
566 			 * provided.
567 			 */
568 			return (CRYPTO_DATA_LEN_RANGE);
569 		}
570 	}
571 
572 	return (CRYPTO_SUCCESS);
573 }
574 
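/*
 * Single-part digest entry point.  Updates the context with the input
 * data, writes the digest to the output buffer, and frees the context
 * whether or not the operation succeeds (except when the output buffer
 * is too small, in which case only the required length is returned).
 */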
575 /* ARGSUSED */
576 static int
577 sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
578     crypto_req_handle_t req)
579 {
580 	int ret = CRYPTO_SUCCESS;
581 
582 	ASSERT(ctx->cc_provider_private != NULL);
583 
584 	/*
585 	 * If the output buffer is too small, just return the length needed
586 	 * to store the digest; do not destroy the context in that case.
587 	 */
588 	if ((digest->cd_length == 0) ||
589 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
590 		digest->cd_length = SHA1_DIGEST_LENGTH;
591 		return (CRYPTO_BUFFER_TOO_SMALL);
592 	}
593 
594 	/*
595 	 * Do the SHA1 update on the specified input data.
596 	 */
597 	switch (data->cd_format) {
598 	case CRYPTO_DATA_RAW:
599 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
600 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
601 		    data->cd_length);
602 		break;
603 	case CRYPTO_DATA_UIO:
604 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
605 		    data);
606 		break;
607 	case CRYPTO_DATA_MBLK:
608 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
609 		    data);
610 		break;
611 	default:
612 		ret = CRYPTO_ARGUMENTS_BAD;
613 	}
614 
615 	if (ret != CRYPTO_SUCCESS) {
616 		/* the update failed, free context and bail */
617 		kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
618 		ctx->cc_provider_private = NULL;
619 		digest->cd_length = 0;
620 		return (ret);
621 	}
622 
623 	/*
624 	 * Do a SHA1 final; this must be done separately since the digest
625 	 * buffer format can be different from the input data format.
626 	 */
627 	switch (digest->cd_format) {
628 	case CRYPTO_DATA_RAW:
629 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
630 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
631 		break;
632 	case CRYPTO_DATA_UIO:
633 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
634 		    digest, SHA1_DIGEST_LENGTH, NULL);
635 		break;
636 	case CRYPTO_DATA_MBLK:
637 		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
638 		    digest, SHA1_DIGEST_LENGTH, NULL);
639 		break;
640 	default:
641 		ret = CRYPTO_ARGUMENTS_BAD;
642 	}
643 
644 	/* all done, free context and return */
645 
646 	if (ret == CRYPTO_SUCCESS) {
647 		digest->cd_length = SHA1_DIGEST_LENGTH;
648 	} else {
649 		digest->cd_length = 0;
650 	}
651 
652 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
653 	ctx->cc_provider_private = NULL;
654 	return (ret);
655 }
656 
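/*
 * Multi-part digest update.  Feeds additional raw, uio, or mblk data
 * into the SHA1 context set up by sha1_digest_init().
 */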
657 /* ARGSUSED */
658 static int
659 sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
660     crypto_req_handle_t req)
661 {
662 	int ret = CRYPTO_SUCCESS;
663 
664 	ASSERT(ctx->cc_provider_private != NULL);
665 
666 	/*
667 	 * Do the SHA1 update on the specified input data.
668 	 */
669 	switch (data->cd_format) {
670 	case CRYPTO_DATA_RAW:
671 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
672 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
673 		    data->cd_length);
674 		break;
675 	case CRYPTO_DATA_UIO:
676 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
677 		    data);
678 		break;
679 	case CRYPTO_DATA_MBLK:
680 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
681 		    data);
682 		break;
683 	default:
684 		ret = CRYPTO_ARGUMENTS_BAD;
685 	}
686 
687 	return (ret);
688 }
689 
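/*
 * Multi-part digest final.  Writes the completed digest to the caller's
 * buffer and frees the context, unless the buffer is too small.
 */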
690 /* ARGSUSED */
691 static int
692 sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
693     crypto_req_handle_t req)
694 {
695 	int ret = CRYPTO_SUCCESS;
696 
697 	ASSERT(ctx->cc_provider_private != NULL);
698 
699 	/*
700 	 * If the output buffer is too small, just return the length needed
701 	 * to store the digest; do not destroy the context in that case.
702 	 */
703 	if ((digest->cd_length == 0) ||
704 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
705 		digest->cd_length = SHA1_DIGEST_LENGTH;
706 		return (CRYPTO_BUFFER_TOO_SMALL);
707 	}
708 
709 	/*
710 	 * Do a SHA1 final.
711 	 */
712 	switch (digest->cd_format) {
713 	case CRYPTO_DATA_RAW:
714 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
715 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
716 		break;
717 	case CRYPTO_DATA_UIO:
718 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
719 		    digest, SHA1_DIGEST_LENGTH, NULL);
720 		break;
721 	case CRYPTO_DATA_MBLK:
722 		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
723 		    digest, SHA1_DIGEST_LENGTH, NULL);
724 		break;
725 	default:
726 		ret = CRYPTO_ARGUMENTS_BAD;
727 	}
728 
729 	/* all done, free context and return */
730 
731 	if (ret == CRYPTO_SUCCESS) {
732 		digest->cd_length = SHA1_DIGEST_LENGTH;
733 	} else {
734 		digest->cd_length = 0;
735 	}
736 
737 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
738 	ctx->cc_provider_private = NULL;
739 
740 	return (ret);
741 }
742 
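/*
 * Atomic digest entry point.  Performs init, update, and final in a
 * single call using a SHA1 context on the stack; no per-request state
 * is allocated.
 */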
743 /* ARGSUSED */
744 static int
745 sha1_digest_atomic(crypto_provider_handle_t provider,
746     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
747     crypto_data_t *data, crypto_data_t *digest,
748     crypto_req_handle_t req)
749 {
750 	int ret = CRYPTO_SUCCESS;
751 	SHA1_CTX sha1_ctx;
752 
753 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
754 		return (CRYPTO_MECHANISM_INVALID);
755 
756 	/*
757 	 * Do the SHA1 init.
758 	 */
759 	SHA1Init(&sha1_ctx);
760 
761 	/*
762 	 * Do the SHA1 update on the specified input data.
763 	 */
764 	switch (data->cd_format) {
765 	case CRYPTO_DATA_RAW:
766 		SHA1Update(&sha1_ctx,
767 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
768 		    data->cd_length);
769 		break;
770 	case CRYPTO_DATA_UIO:
771 		ret = sha1_digest_update_uio(&sha1_ctx, data);
772 		break;
773 	case CRYPTO_DATA_MBLK:
774 		ret = sha1_digest_update_mblk(&sha1_ctx, data);
775 		break;
776 	default:
777 		ret = CRYPTO_ARGUMENTS_BAD;
778 	}
779 
780 	if (ret != CRYPTO_SUCCESS) {
781 		/* the update failed, bail */
782 		digest->cd_length = 0;
783 		return (ret);
784 	}
785 
786 	/*
787 	 * Do a SHA1 final; this must be done separately since the digest
788 	 * buffer format can be different from the input data format.
789 	 */
790 	switch (digest->cd_format) {
791 	case CRYPTO_DATA_RAW:
792 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
793 		    digest->cd_offset, &sha1_ctx);
794 		break;
795 	case CRYPTO_DATA_UIO:
796 		ret = sha1_digest_final_uio(&sha1_ctx, digest,
797 		    SHA1_DIGEST_LENGTH, NULL);
798 		break;
799 	case CRYPTO_DATA_MBLK:
800 		ret = sha1_digest_final_mblk(&sha1_ctx, digest,
801 		    SHA1_DIGEST_LENGTH, NULL);
802 		break;
803 	default:
804 		ret = CRYPTO_ARGUMENTS_BAD;
805 	}
806 
807 	if (ret == CRYPTO_SUCCESS) {
808 		digest->cd_length = SHA1_DIGEST_LENGTH;
809 	} else {
810 		digest->cd_length = 0;
811 	}
812 
813 	return (ret);
814 }
815 
816 /*
817  * KCF software provider mac entry points.
818  *
819  * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
820  *
821  * Init:
822  * The initialization routine initializes what we denote
823  * as the inner and outer contexts by doing
824  * - for inner context: SHA1(key XOR ipad)
825  * - for outer context: SHA1(key XOR opad)
826  *
827  * Update:
828  * Each subsequent SHA1 HMAC update will result in an
829  * update of the inner context with the specified data.
830  *
831  * Final:
832  * The SHA1 HMAC final will do a SHA1 final operation on the
833  * inner context, and the resulting digest will be used
834  * as the data for an update on the outer context. Last
835  * but not least, a SHA1 final on the outer context will
836  * be performed to obtain the SHA1 HMAC digest to return
837  * to the user.
838  */
839 
840 /*
841  * Initialize a SHA1-HMAC context.
842  */
843 static void
844 sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
845 {
846 	uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
847 	uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
848 	uint_t i;
849 
850 	bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
851 	bzero(opad, SHA1_HMAC_BLOCK_SIZE);
852 
853 	bcopy(keyval, ipad, length_in_bytes);
854 	bcopy(keyval, opad, length_in_bytes);
855 
856 	/* XOR key with ipad (0x36) and opad (0x5c) */
857 	for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
858 		ipad[i] ^= 0x36363636;
859 		opad[i] ^= 0x5c5c5c5c;
860 	}
861 
862 	/* perform SHA1 on ipad */
863 	SHA1Init(&ctx->hc_icontext);
864 	SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
865 
866 	/* perform SHA1 on opad */
867 	SHA1Init(&ctx->hc_ocontext);
868 	SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
869 }
870 
871 /*
872  * KCF mac init entry point for SHA1-HMAC and SHA1-HMAC-GENERAL.
 */
873 static int
874 sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
875     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
876     crypto_req_handle_t req)
877 {
878 	int ret = CRYPTO_SUCCESS;
879 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
880 
881 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
882 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
883 		return (CRYPTO_MECHANISM_INVALID);
884 
885 	/* Add support for key by attributes (RFE 4706552) */
886 	if (key->ck_format != CRYPTO_KEY_RAW)
887 		return (CRYPTO_ARGUMENTS_BAD);
888 
889 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
890 	    crypto_kmflag(req));
891 	if (ctx->cc_provider_private == NULL)
892 		return (CRYPTO_HOST_MEMORY);
893 
894 	if (ctx_template != NULL) {
895 		/* reuse context template */
896 		bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
897 		    sizeof (sha1_hmac_ctx_t));
898 	} else {
899 		/* no context template, compute context */
900 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
901 			uchar_t digested_key[SHA1_DIGEST_LENGTH];
902 			sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
903 
904 			/*
905 			 * Hash the passed-in key to get a smaller key.
906 			 * The inner context is used since it hasn't been
907 			 * initialized yet.
908 			 */
909 			PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
910 			    key->ck_data, keylen_in_bytes, digested_key);
911 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
912 			    digested_key, SHA1_DIGEST_LENGTH);
913 		} else {
914 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
915 			    key->ck_data, keylen_in_bytes);
916 		}
917 	}
918 
919 	/*
920 	 * Get the mechanism parameters, if applicable.
921 	 */
922 	PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
923 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
924 		if (mechanism->cm_param == NULL ||
925 		    mechanism->cm_param_len != sizeof (ulong_t)) {
926 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
927 		} else {
928 			PROV_SHA1_GET_DIGEST_LEN(mechanism,
929 			    PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
930 			if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
931 			    SHA1_DIGEST_LENGTH)
				ret = CRYPTO_MECHANISM_PARAM_INVALID;
		}
932 	}
933 
934 	if (ret != CRYPTO_SUCCESS) {
935 		bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
936 		kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
937 		ctx->cc_provider_private = NULL;
938 	}
939 
940 	return (ret);
941 }
942 
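/*
 * Multi-part HMAC update.  Feeds additional data into the inner SHA1
 * context of the HMAC computation.
 */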
943 /* ARGSUSED */
944 static int
945 sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
946 {
947 	int ret = CRYPTO_SUCCESS;
948 
949 	ASSERT(ctx->cc_provider_private != NULL);
950 
951 	/*
952 	 * Do a SHA1 update of the inner context using the specified
953 	 * data.
954 	 */
955 	switch (data->cd_format) {
956 	case CRYPTO_DATA_RAW:
957 		SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
958 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
959 		    data->cd_length);
960 		break;
961 	case CRYPTO_DATA_UIO:
962 		ret = sha1_digest_update_uio(
963 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
964 		break;
965 	case CRYPTO_DATA_MBLK:
966 		ret = sha1_digest_update_mblk(
967 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
968 		break;
969 	default:
970 		ret = CRYPTO_ARGUMENTS_BAD;
971 	}
972 
973 	return (ret);
974 }
975 
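/*
 * Multi-part HMAC final.  Finishes the inner digest, runs it through
 * the outer context, and copies the (possibly truncated) MAC to the
 * caller's buffer before destroying the context.
 */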
976 /* ARGSUSED */
977 static int
978 sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
979 {
980 	int ret = CRYPTO_SUCCESS;
981 	uchar_t digest[SHA1_DIGEST_LENGTH];
982 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
983 
984 	ASSERT(ctx->cc_provider_private != NULL);
985 
986 	if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
987 	    SHA1_HMAC_GEN_MECH_INFO_TYPE)
988 		digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
989 
990 	/*
991 	 * If the output buffer is too small, just return the length needed
992 	 * to store the MAC; do not destroy the context in that case.
993 	 */
994 	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
995 		mac->cd_length = digest_len;
996 		return (CRYPTO_BUFFER_TOO_SMALL);
997 	}
998 
999 	/*
1000 	 * Do a SHA1 final on the inner context.
1001 	 */
1002 	SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);
1003 
1004 	/*
1005 	 * Do a SHA1 update on the outer context, feeding the inner
1006 	 * digest as data.
1007 	 */
1008 	SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
1009 	    SHA1_DIGEST_LENGTH);
1010 
1011 	/*
1012 	 * Do a SHA1 final on the outer context, storing the computed
1013 	 * digest in the user's buffer.
1014 	 */
1015 	switch (mac->cd_format) {
1016 	case CRYPTO_DATA_RAW:
1017 		if (digest_len != SHA1_DIGEST_LENGTH) {
1018 			/*
1019 			 * The caller requested a short digest. Digest
1020 			 * into a scratch buffer and return to
1021 			 * the user only what was requested.
1022 			 */
1023 			SHA1Final(digest,
1024 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
1025 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1026 			    mac->cd_offset, digest_len);
1027 		} else {
1028 			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
1029 			    mac->cd_offset,
1030 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
1031 		}
1032 		break;
1033 	case CRYPTO_DATA_UIO:
1034 		ret = sha1_digest_final_uio(
1035 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1036 		    digest_len, digest);
1037 		break;
1038 	case CRYPTO_DATA_MBLK:
1039 		ret = sha1_digest_final_mblk(
1040 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1041 		    digest_len, digest);
1042 		break;
1043 	default:
1044 		ret = CRYPTO_ARGUMENTS_BAD;
1045 	}
1046 
1047 	if (ret == CRYPTO_SUCCESS) {
1048 		mac->cd_length = digest_len;
1049 	} else {
1050 		mac->cd_length = 0;
1051 	}
1052 
1053 	bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1054 	kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1055 	ctx->cc_provider_private = NULL;
1056 
1057 	return (ret);
1058 }
1059 
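/*
 * Update the inner context of an HMAC computation with the supplied
 * data, dispatching on the data format; "ret" is set on failure.
 */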
1060 #define	SHA1_MAC_UPDATE(data, ctx, ret) {				\
1061 	switch (data->cd_format) {					\
1062 	case CRYPTO_DATA_RAW:						\
1063 		SHA1Update(&(ctx).hc_icontext,				\
1064 		    (uint8_t *)data->cd_raw.iov_base +			\
1065 		    data->cd_offset, data->cd_length);			\
1066 		break;							\
1067 	case CRYPTO_DATA_UIO:						\
1068 		ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
1069 		break;							\
1070 	case CRYPTO_DATA_MBLK:						\
1071 		ret = sha1_digest_update_mblk(&(ctx).hc_icontext,	\
1072 		    data);						\
1073 		break;							\
1074 	default:							\
1075 		ret = CRYPTO_ARGUMENTS_BAD;				\
1076 	}								\
1077 }
1078 
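/*
 * Atomic HMAC entry point.  Computes the MAC in a single call using an
 * HMAC context on the stack, which is zeroized before returning.
 */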
1079 /* ARGSUSED */
1080 static int
1081 sha1_mac_atomic(crypto_provider_handle_t provider,
1082     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1083     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1084     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1085 {
1086 	int ret = CRYPTO_SUCCESS;
1087 	uchar_t digest[SHA1_DIGEST_LENGTH];
1088 	sha1_hmac_ctx_t sha1_hmac_ctx;
1089 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
1090 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1091 
1092 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1093 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1094 		return (CRYPTO_MECHANISM_INVALID);
1095 
1096 	/* Add support for key by attributes (RFE 4706552) */
1097 	if (key->ck_format != CRYPTO_KEY_RAW)
1098 		return (CRYPTO_ARGUMENTS_BAD);
1099 
1100 	if (ctx_template != NULL) {
1101 		/* reuse context template */
1102 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1103 	} else {
1104 		/* no context template, initialize context */
1105 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1106 			/*
1107 			 * Hash the passed-in key to get a smaller key.
1108 			 * The inner context is used since it hasn't been
1109 			 * initialized yet.
1110 			 */
1111 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1112 			    key->ck_data, keylen_in_bytes, digest);
1113 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1114 			    SHA1_DIGEST_LENGTH);
1115 		} else {
1116 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1117 			    keylen_in_bytes);
1118 		}
1119 	}
1120 
1121 	/* get the mechanism parameters, if applicable */
1122 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1123 		if (mechanism->cm_param == NULL ||
1124 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1125 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1126 			goto bail;
1127 		}
1128 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1129 		if (digest_len > SHA1_DIGEST_LENGTH) {
1130 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1131 			goto bail;
1132 		}
1133 	}
1134 
1135 	/* do a SHA1 update of the inner context using the specified data */
1136 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1137 	if (ret != CRYPTO_SUCCESS)
1138 		/* the update failed, bail */
1139 		goto bail;
1140 
1141 	/*
1142 	 * Do a SHA1 final on the inner context.
1143 	 */
1144 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1145 
1146 	/*
1147 	 * Do a SHA1 update on the outer context, feeding the inner
1148 	 * digest as data.
1149 	 */
1150 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1151 
1152 	/*
1153 	 * Do a SHA1 final on the outer context, storing the computed
1154 	 * digest in the user's buffer.
1155 	 */
1156 	switch (mac->cd_format) {
1157 	case CRYPTO_DATA_RAW:
1158 		if (digest_len != SHA1_DIGEST_LENGTH) {
1159 			/*
1160 			 * The caller requested a short digest. Digest
1161 			 * into a scratch buffer and return to
1162 			 * the user only what was requested.
1163 			 */
1164 			SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1165 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1166 			    mac->cd_offset, digest_len);
1167 		} else {
1168 			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
1169 			    mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
1170 		}
1171 		break;
1172 	case CRYPTO_DATA_UIO:
1173 		ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
1174 		    digest_len, digest);
1175 		break;
1176 	case CRYPTO_DATA_MBLK:
1177 		ret = sha1_digest_final_mblk(&sha1_hmac_ctx.hc_ocontext, mac,
1178 		    digest_len, digest);
1179 		break;
1180 	default:
1181 		ret = CRYPTO_ARGUMENTS_BAD;
1182 	}
1183 
1184 	if (ret == CRYPTO_SUCCESS) {
1185 		mac->cd_length = digest_len;
1186 	} else {
1187 		mac->cd_length = 0;
1188 	}
1189 	/* Extra paranoia: zeroize the context on the stack */
1190 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1191 
1192 	return (ret);
1193 bail:
1194 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1195 	mac->cd_length = 0;
1196 	return (ret);
1197 }
1198 
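/*
 * Atomic HMAC verification entry point.  Recomputes the MAC over the
 * supplied data and compares it with the expected MAC provided by the
 * caller, without copying the computed MAC out.
 */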
1199 /* ARGSUSED */
1200 static int
1201 sha1_mac_verify_atomic(crypto_provider_handle_t provider,
1202     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1203     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1204     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1205 {
1206 	int ret = CRYPTO_SUCCESS;
1207 	uchar_t digest[SHA1_DIGEST_LENGTH];
1208 	sha1_hmac_ctx_t sha1_hmac_ctx;
1209 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
1210 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1211 
1212 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1213 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1214 		return (CRYPTO_MECHANISM_INVALID);
1215 
1216 	/* Add support for key by attributes (RFE 4706552) */
1217 	if (key->ck_format != CRYPTO_KEY_RAW)
1218 		return (CRYPTO_ARGUMENTS_BAD);
1219 
1220 	if (ctx_template != NULL) {
1221 		/* reuse context template */
1222 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1223 	} else {
1224 		/* no context template, initialize context */
1225 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1226 			/*
1227 			 * Hash the passed-in key to get a smaller key.
1228 			 * The inner context is used since it hasn't been
1229 			 * initialized yet.
1230 			 */
1231 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1232 			    key->ck_data, keylen_in_bytes, digest);
1233 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1234 			    SHA1_DIGEST_LENGTH);
1235 		} else {
1236 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1237 			    keylen_in_bytes);
1238 		}
1239 	}
1240 
1241 	/* get the mechanism parameters, if applicable */
1242 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1243 		if (mechanism->cm_param == NULL ||
1244 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1245 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1246 			goto bail;
1247 		}
1248 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1249 		if (digest_len > SHA1_DIGEST_LENGTH) {
1250 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1251 			goto bail;
1252 		}
1253 	}
1254 
1255 	if (mac->cd_length != digest_len) {
1256 		ret = CRYPTO_INVALID_MAC;
1257 		goto bail;
1258 	}
1259 
1260 	/* do a SHA1 update of the inner context using the specified data */
1261 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1262 	if (ret != CRYPTO_SUCCESS)
1263 		/* the update failed, bail */
1264 		goto bail;
1265 
1266 	/* do a SHA1 final on the inner context */
1267 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1268 
1269 	/*
1270 	 * Do a SHA1 update on the outer context, feeding the inner
1271 	 * digest as data.
1272 	 */
1273 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1274 
1275 	/*
1276 	 * Do a SHA1 final on the outer context, storing the computed
1277 	 * digest locally so it can be compared with the expected MAC.
1278 	 */
1279 	SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1280 
1281 	/*
1282 	 * Compare the computed digest against the expected digest passed
1283 	 * as argument.
1284 	 */
1285 
1286 	switch (mac->cd_format) {
1287 
1288 	case CRYPTO_DATA_RAW:
1289 		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
1290 		    mac->cd_offset, digest_len) != 0)
1291 			ret = CRYPTO_INVALID_MAC;
1292 		break;
1293 
1294 	case CRYPTO_DATA_UIO: {
1295 		off_t offset = mac->cd_offset;
1296 		uint_t vec_idx;
1297 		off_t scratch_offset = 0;
1298 		size_t length = digest_len;
1299 		size_t cur_len;
1300 
1301 		/* we support only kernel buffer */
1302 		/* we support only kernel buffers */
1303 			return (CRYPTO_ARGUMENTS_BAD);
1304 
1305 		/* jump to the first iovec containing the expected digest */
1306 		for (vec_idx = 0;
1307 		    vec_idx < mac->cd_uio->uio_iovcnt &&
1308 		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len;
1309 		    offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len);
1310 		if (vec_idx == mac->cd_uio->uio_iovcnt) {
1311 			/*
1312 			 * The caller specified an offset that is
1313 			 * larger than the total size of the buffers
1314 			 * it provided.
1315 			 */
1316 			ret = CRYPTO_DATA_LEN_RANGE;
1317 			break;
1318 		}
1319 
1320 		/* do the comparison of computed digest vs specified one */
1321 		while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
1322 			cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
1323 			    offset, length);
1324 
1325 			if (bcmp(digest + scratch_offset,
1326 			    mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
1327 			    cur_len) != 0) {
1328 				ret = CRYPTO_INVALID_MAC;
1329 				break;
1330 			}
1331 
1332 			length -= cur_len;
1333 			vec_idx++;
1334 			scratch_offset += cur_len;
1335 			offset = 0;
1336 		}
1337 		break;
1338 	}
1339 
1340 	case CRYPTO_DATA_MBLK: {
1341 		off_t offset = mac->cd_offset;
1342 		mblk_t *mp;
1343 		off_t scratch_offset = 0;
1344 		size_t length = digest_len;
1345 		size_t cur_len;
1346 
1347 		/* jump to the first mblk_t containing the expected digest */
1348 		for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
1349 		    offset -= MBLKL(mp), mp = mp->b_cont);
1350 		if (mp == NULL) {
1351 			/*
1352 			 * The caller specified an offset that is larger than
1353 			 * the total size of the buffers it provided.
1354 			 */
1355 			ret = CRYPTO_DATA_LEN_RANGE;
1356 			break;
1357 		}
1358 
1359 		while (mp != NULL && length > 0) {
1360 			cur_len = MIN(MBLKL(mp) - offset, length);
1361 			if (bcmp(digest + scratch_offset,
1362 			    mp->b_rptr + offset, cur_len) != 0) {
1363 				ret = CRYPTO_INVALID_MAC;
1364 				break;
1365 			}
1366 
1367 			length -= cur_len;
1368 			mp = mp->b_cont;
1369 			scratch_offset += cur_len;
1370 			offset = 0;
1371 		}
1372 		break;
1373 	}
1374 
1375 	default:
1376 		ret = CRYPTO_ARGUMENTS_BAD;
1377 	}
1378 
1379 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1380 	return (ret);
1381 bail:
1382 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1383 	mac->cd_length = 0;
1384 	return (ret);
1385 }
1386 
1387 /*
1388  * KCF software provider context management entry points.
1389  */
1390 
1391 /* ARGSUSED */
1392 static int
1393 sha1_create_ctx_template(crypto_provider_handle_t provider,
1394     crypto_mechanism_t *mechanism, crypto_key_t *key,
1395     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
1396     crypto_req_handle_t req)
1397 {
1398 	sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
1399 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1400 
1401 	if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
1402 	    (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
1403 		return (CRYPTO_MECHANISM_INVALID);
1404 	}
1405 
1406 	/* Add support for key by attributes (RFE 4706552) */
1407 	if (key->ck_format != CRYPTO_KEY_RAW)
1408 		return (CRYPTO_ARGUMENTS_BAD);
1409 
1410 	/*
1411 	 * Allocate and initialize SHA1 context.
1412 	 */
1413 	sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
1414 	    crypto_kmflag(req));
1415 	if (sha1_hmac_ctx_tmpl == NULL)
1416 		return (CRYPTO_HOST_MEMORY);
1417 
1418 	if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1419 		uchar_t digested_key[SHA1_DIGEST_LENGTH];
1420 
1421 		/*
1422 		 * Hash the passed-in key to get a smaller key.
1423 		 * The inner context is used since it hasn't been
1424 		 * initialized yet.
1425 		 */
1426 		PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
1427 		    key->ck_data, keylen_in_bytes, digested_key);
1428 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
1429 		    SHA1_DIGEST_LENGTH);
1430 	} else {
1431 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
1432 		    keylen_in_bytes);
1433 	}
1434 
1435 	sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
1436 	*ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
1437 	*ctx_template_size = sizeof (sha1_hmac_ctx_t);
1438 
1440 	return (CRYPTO_SUCCESS);
1441 }
1442 
1443 static int
1444 sha1_free_context(crypto_ctx_t *ctx)
1445 {
1446 	uint_t ctx_len;
1447 	sha1_mech_type_t mech_type;
1448 
1449 	if (ctx->cc_provider_private == NULL)
1450 		return (CRYPTO_SUCCESS);
1451 
1452 	/*
1453 	 * We have to free either SHA1 or SHA1-HMAC contexts, which
1454 	 * have different lengths.
1455 	 */
1456 
1457 	mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
1458 	if (mech_type == SHA1_MECH_INFO_TYPE)
1459 		ctx_len = sizeof (sha1_ctx_t);
1460 	else {
1461 		ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
1462 		    mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
1463 		ctx_len = sizeof (sha1_hmac_ctx_t);
1464 	}
1465 
1466 	bzero(ctx->cc_provider_private, ctx_len);
1467 	kmem_free(ctx->cc_provider_private, ctx_len);
1468 	ctx->cc_provider_private = NULL;
1469 
1470 	return (CRYPTO_SUCCESS);
1471 }
1472