xref: /illumos-gate/usr/src/uts/common/crypto/io/sha1_mod.c (revision 6b1abd46bd2c746d42947df930a0a3a632f81256)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/modctl.h>
28 #include <sys/cmn_err.h>
29 #include <sys/note.h>
30 #include <sys/crypto/common.h>
31 #include <sys/crypto/spi.h>
32 #include <sys/strsun.h>
33 #include <sys/systm.h>
34 #include <sys/sysmacros.h>
35 
36 #include <sys/sha1.h>
37 #include <sha1/sha1_impl.h>
38 
39 /*
40  * The sha1 module is created with two modlinkages:
41  * - a modlmisc that allows consumers to directly call the entry points
42  *   SHA1Init, SHA1Update, and SHA1Final.
43  * - a modlcrypto that allows the module to register with the Kernel
44  *   Cryptographic Framework (KCF) as a software provider for the SHA1
45  *   mechanisms.
46  */
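/*
 * Illustrative sketch (editorial addition, guarded out of the build): how a
 * kernel consumer that depends on the misc/sha1 modlmisc linkage might call
 * the exported entry points directly. The function and parameter names below
 * are hypothetical.
 */
#if 0
static void
example_direct_sha1(const void *buf, size_t len,
    uint8_t digest[SHA1_DIGEST_LENGTH])
{
	SHA1_CTX ctx;

	SHA1Init(&ctx);			/* set up the initial chaining state */
	SHA1Update(&ctx, buf, len);	/* absorb the message */
	SHA1Final(digest, &ctx);	/* produce the 20-byte digest */
}
#endif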
47 
48 static struct modlmisc modlmisc = {
49 	&mod_miscops,
50 	"SHA1 Message-Digest Algorithm"
51 };
52 
53 static struct modlcrypto modlcrypto = {
54 	&mod_cryptoops,
55 	"SHA1 Kernel SW Provider 1.1"
56 };
57 
58 static struct modlinkage modlinkage = {
59 	MODREV_1, &modlmisc, &modlcrypto, NULL
60 };
61 
62 
63 /*
64  * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
65  * by KCF to one of the entry points.
66  */
67 
68 #define	PROV_SHA1_CTX(ctx)	((sha1_ctx_t *)(ctx)->cc_provider_private)
69 #define	PROV_SHA1_HMAC_CTX(ctx)	((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)
70 
71 /* to extract the digest length passed as mechanism parameter */
72 #define	PROV_SHA1_GET_DIGEST_LEN(m, len) {				\
73 	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
74 		(len) = (uint32_t)*((ulong_t *)(void *)(m)->cm_param);	\
75 	else {								\
76 		ulong_t tmp_ulong;					\
77 		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
78 		(len) = (uint32_t)tmp_ulong;				\
79 	}								\
80 }
81 
82 #define	PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) {	\
83 	SHA1Init(ctx);					\
84 	SHA1Update(ctx, key, len);			\
85 	SHA1Final(digest, ctx);				\
86 }
87 
88 /*
89  * Mechanism info structure passed to KCF during registration.
90  */
91 static crypto_mech_info_t sha1_mech_info_tab[] = {
92 	/* SHA1 */
93 	{SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
94 	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
95 	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
96 	/* SHA1-HMAC */
97 	{SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
98 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
99 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
100 	    CRYPTO_KEYSIZE_UNIT_IN_BYTES},
101 	/* SHA1-HMAC GENERAL */
102 	{SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
103 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
104 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
105 	    CRYPTO_KEYSIZE_UNIT_IN_BYTES}
106 };
107 
108 static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
109 
110 static crypto_control_ops_t sha1_control_ops = {
111 	sha1_provider_status
112 };
113 
114 static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
115     crypto_req_handle_t);
116 static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
117     crypto_req_handle_t);
118 static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
119     crypto_req_handle_t);
120 static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
121     crypto_req_handle_t);
122 static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
123     crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
124     crypto_req_handle_t);
125 
126 static crypto_digest_ops_t sha1_digest_ops = {
127 	sha1_digest_init,
128 	sha1_digest,
129 	sha1_digest_update,
130 	NULL,
131 	sha1_digest_final,
132 	sha1_digest_atomic
133 };
134 
135 static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
136     crypto_spi_ctx_template_t, crypto_req_handle_t);
137 static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
138     crypto_req_handle_t);
139 static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
140 static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
141     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
142     crypto_spi_ctx_template_t, crypto_req_handle_t);
143 static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
144     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
145     crypto_spi_ctx_template_t, crypto_req_handle_t);
146 
147 static crypto_mac_ops_t sha1_mac_ops = {
148 	sha1_mac_init,
149 	NULL,
150 	sha1_mac_update,
151 	sha1_mac_final,
152 	sha1_mac_atomic,
153 	sha1_mac_verify_atomic
154 };
155 
156 static int sha1_create_ctx_template(crypto_provider_handle_t,
157     crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
158     size_t *, crypto_req_handle_t);
159 static int sha1_free_context(crypto_ctx_t *);
160 
161 static crypto_ctx_ops_t sha1_ctx_ops = {
162 	sha1_create_ctx_template,
163 	sha1_free_context
164 };
165 
166 static void sha1_POST(int *);
167 
168 static crypto_fips140_ops_t sha1_fips140_ops = {
169 	sha1_POST
170 };
171 
172 static crypto_ops_t sha1_crypto_ops = {
173 	&sha1_control_ops,
174 	&sha1_digest_ops,
175 	NULL,
176 	&sha1_mac_ops,
177 	NULL,
178 	NULL,
179 	NULL,
180 	NULL,
181 	NULL,
182 	NULL,
183 	NULL,
184 	NULL,
185 	NULL,
186 	&sha1_ctx_ops,
187 	NULL,
188 	NULL,
189 	&sha1_fips140_ops
190 };
191 
192 static crypto_provider_info_t sha1_prov_info = {
193 	CRYPTO_SPI_VERSION_4,
194 	"SHA1 Software Provider",
195 	CRYPTO_SW_PROVIDER,
196 	{&modlinkage},
197 	NULL,
198 	&sha1_crypto_ops,
199 	sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
200 	sha1_mech_info_tab
201 };
202 
203 static crypto_kcf_provider_handle_t sha1_prov_handle = NULL;
204 
205 int
206 _init(void)
207 {
208 	int ret;
209 
210 	if ((ret = mod_install(&modlinkage)) != 0)
211 		return (ret);
212 
213 	/*
214 	 * Register with KCF. If the registration fails, do not uninstall
215 	 * the module, since the functionality provided by misc/sha1 should
216 	 * still be available.
217 	 */
218 	(void) crypto_register_provider(&sha1_prov_info, &sha1_prov_handle);
219 
220 	return (0);
221 }
222 
223 int
224 _info(struct modinfo *modinfop)
225 {
226 	return (mod_info(&modlinkage, modinfop));
227 }
228 
229 /*
230  * KCF software provider control entry points.
231  */
232 /* ARGSUSED */
233 static void
234 sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
235 {
236 	*status = CRYPTO_PROVIDER_READY;
237 }
238 
239 /*
240  * KCF software provider digest entry points.
241  */
242 
243 static int
244 sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
245     crypto_req_handle_t req)
246 {
247 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
248 		return (CRYPTO_MECHANISM_INVALID);
249 
250 	/*
251 	 * Allocate and initialize SHA1 context.
252 	 */
253 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
254 	    crypto_kmflag(req));
255 	if (ctx->cc_provider_private == NULL)
256 		return (CRYPTO_HOST_MEMORY);
257 
258 	PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
259 	SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
260 
261 	return (CRYPTO_SUCCESS);
262 }
263 
264 /*
265  * Helper SHA1 digest update function for uio data.
266  */
267 static int
268 sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
269 {
270 	off_t offset = data->cd_offset;
271 	size_t length = data->cd_length;
272 	uint_t vec_idx;
273 	size_t cur_len;
274 
275 	/* we support only kernel buffers */
276 	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
277 		return (CRYPTO_ARGUMENTS_BAD);
278 
279 	/*
280 	 * Jump to the first iovec containing data to be
281 	 * digested.
282 	 */
283 	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
284 	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
285 	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
286 		;
287 	if (vec_idx == data->cd_uio->uio_iovcnt) {
288 		/*
289 		 * The caller specified an offset that is larger than the
290 		 * total size of the buffers it provided.
291 		 */
292 		return (CRYPTO_DATA_LEN_RANGE);
293 	}
294 
295 	/*
296 	 * Now do the digesting on the iovecs.
297 	 */
298 	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
299 		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
300 		    offset, length);
301 
302 		SHA1Update(sha1_ctx,
303 		    (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
304 		    cur_len);
305 
306 		length -= cur_len;
307 		vec_idx++;
308 		offset = 0;
309 	}
310 
311 	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
312 		/*
313 		 * The end of the specified iovecs was reached but the
314 		 * requested length could not be processed; i.e. the caller
315 		 * requested to digest more data than it provided.
316 		 */
317 		return (CRYPTO_DATA_LEN_RANGE);
318 	}
319 
320 	return (CRYPTO_SUCCESS);
321 }
322 
323 /*
324  * Helper SHA1 digest final function for uio data.
325  * digest_len is the length of the desired digest. If digest_len
326  * is smaller than the default SHA1 digest length, the caller
327  * must pass a scratch buffer, digest_scratch, which must
328  * be at least SHA1_DIGEST_LENGTH bytes.
329  */
330 static int
331 sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
332     ulong_t digest_len, uchar_t *digest_scratch)
333 {
334 	off_t offset = digest->cd_offset;
335 	uint_t vec_idx;
336 
337 	/* we support only kernel buffer */
338 	/* we support only kernel buffers */
339 		return (CRYPTO_ARGUMENTS_BAD);
340 
341 	/*
342 	 * Jump to the first iovec that will be used to store the
343 	 * digest to be returned.
344 	 */
345 	for (vec_idx = 0; vec_idx < digest->cd_uio->uio_iovcnt &&
346 	    offset >= digest->cd_uio->uio_iov[vec_idx].iov_len;
347 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
348 		;
349 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
350 		/*
351 		 * The caller specified an offset that is
352 		 * larger than the total size of the buffers
353 		 * it provided.
354 		 */
355 		return (CRYPTO_DATA_LEN_RANGE);
356 	}
357 
358 	if (offset + digest_len <=
359 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
360 		/*
361 		 * The computed SHA1 digest will fit in the current
362 		 * iovec.
363 		 */
364 		if (digest_len != SHA1_DIGEST_LENGTH) {
365 			/*
366 			 * The caller requested a short digest. Digest
367 			 * into a scratch buffer and return to
368 			 * the user only what was requested.
369 			 */
370 			SHA1Final(digest_scratch, sha1_ctx);
371 			bcopy(digest_scratch, (uchar_t *)digest->
372 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
373 			    digest_len);
374 		} else {
375 			SHA1Final((uchar_t *)digest->
376 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
377 			    sha1_ctx);
378 		}
379 	} else {
380 		/*
381 		 * The computed digest will cross one or more iovecs.
382 		 * This is bad performance-wise but we need to support it.
383 		 * Allocate a small scratch buffer on the stack and
384 		 * copy the digest piecemeal to the specified iovecs.
385 		 */
386 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
387 		off_t scratch_offset = 0;
388 		size_t length = digest_len;
389 		size_t cur_len;
390 
391 		SHA1Final(digest_tmp, sha1_ctx);
392 
393 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
394 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
395 			    offset, length);
396 			bcopy(digest_tmp + scratch_offset,
397 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
398 			    cur_len);
399 
400 			length -= cur_len;
401 			vec_idx++;
402 			scratch_offset += cur_len;
403 			offset = 0;
404 		}
405 
406 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
407 			/*
408 			 * The end of the specified iovecs was reached but
409 			 * the requested length could not be processed; i.e.
410 			 * the caller requested to digest more data than it
411 			 * provided.
412 			 */
413 			return (CRYPTO_DATA_LEN_RANGE);
414 		}
415 	}
416 
417 	return (CRYPTO_SUCCESS);
418 }
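/*
 * Illustrative sketch (editorial addition, guarded out of the build): a
 * caller of sha1_digest_final_uio() that requests a truncated digest must
 * supply a scratch buffer of at least SHA1_DIGEST_LENGTH bytes, as the block
 * comment before the helper requires. The names below are hypothetical.
 */
#if 0
static int
example_short_digest_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
    ulong_t requested_len)
{
	uchar_t scratch[SHA1_DIGEST_LENGTH];

	/* scratch is only consumed when requested_len < SHA1_DIGEST_LENGTH */
	return (sha1_digest_final_uio(sha1_ctx, digest, requested_len,
	    scratch));
}
#endif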
419 
420 /*
421  * Helper SHA1 digest update for mblk's.
422  */
423 static int
424 sha1_digest_update_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *data)
425 {
426 	off_t offset = data->cd_offset;
427 	size_t length = data->cd_length;
428 	mblk_t *mp;
429 	size_t cur_len;
430 
431 	/*
432 	 * Jump to the first mblk_t containing data to be digested.
433 	 */
434 	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
435 	    offset -= MBLKL(mp), mp = mp->b_cont)
436 		;
437 	if (mp == NULL) {
438 		/*
439 		 * The caller specified an offset that is larger than the
440 		 * total size of the buffers it provided.
441 		 */
442 		return (CRYPTO_DATA_LEN_RANGE);
443 	}
444 
445 	/*
446 	 * Now do the digesting on the mblk chain.
447 	 */
448 	while (mp != NULL && length > 0) {
449 		cur_len = MIN(MBLKL(mp) - offset, length);
450 		SHA1Update(sha1_ctx, mp->b_rptr + offset, cur_len);
451 		length -= cur_len;
452 		offset = 0;
453 		mp = mp->b_cont;
454 	}
455 
456 	if (mp == NULL && length > 0) {
457 		/*
458 		 * The end of the mblk chain was reached but the requested
459 		 * length could not be processed; i.e. the caller requested
460 		 * to digest more data than it provided.
461 		 */
462 		return (CRYPTO_DATA_LEN_RANGE);
463 	}
464 
465 	return (CRYPTO_SUCCESS);
466 }
467 
468 /*
469  * Helper SHA1 digest final for mblk's.
470  * digest_len is the length of the desired digest. If digest_len
471  * is smaller than the default SHA1 digest length, the caller
472  * must pass a scratch buffer, digest_scratch, which must
473  * be at least SHA1_DIGEST_LENGTH bytes.
474  */
475 static int
476 sha1_digest_final_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
477     ulong_t digest_len, uchar_t *digest_scratch)
478 {
479 	off_t offset = digest->cd_offset;
480 	mblk_t *mp;
481 
482 	/*
483 	 * Jump to the first mblk_t that will be used to store the digest.
484 	 */
485 	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
486 	    offset -= MBLKL(mp), mp = mp->b_cont)
487 		;
488 	if (mp == NULL) {
489 		/*
490 		 * The caller specified an offset that is larger than the
491 		 * total size of the buffers it provided.
492 		 */
493 		return (CRYPTO_DATA_LEN_RANGE);
494 	}
495 
496 	if (offset + digest_len <= MBLKL(mp)) {
497 		/*
498 		 * The computed SHA1 digest will fit in the current mblk.
499 		 * Do the SHA1Final() in-place.
500 		 */
501 		if (digest_len != SHA1_DIGEST_LENGTH) {
502 			/*
503 			 * The caller requested a short digest. Digest
504 			 * into a scratch buffer and return to
505 			 * the user only what was requested.
506 			 */
507 			SHA1Final(digest_scratch, sha1_ctx);
508 			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
509 		} else {
510 			SHA1Final(mp->b_rptr + offset, sha1_ctx);
511 		}
512 	} else {
513 		/*
514 		 * The computed digest will cross one or more mblks.
515 		 * This is bad performance-wise but we need to support it.
516 		 * Allocate a small scratch buffer on the stack and
517 		 * copy the digest piecemeal to the specified mblks.
518 		 */
519 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
520 		off_t scratch_offset = 0;
521 		size_t length = digest_len;
522 		size_t cur_len;
523 
524 		SHA1Final(digest_tmp, sha1_ctx);
525 
526 		while (mp != NULL && length > 0) {
527 			cur_len = MIN(MBLKL(mp) - offset, length);
528 			bcopy(digest_tmp + scratch_offset,
529 			    mp->b_rptr + offset, cur_len);
530 
531 			length -= cur_len;
532 			mp = mp->b_cont;
533 			scratch_offset += cur_len;
534 			offset = 0;
535 		}
536 
537 		if (mp == NULL && length > 0) {
538 			/*
539 			 * The end of the specified mblk chain was reached but
540 			 * the requested length could not be processed; i.e.
541 			 * the caller requested to digest more data than it
542 			 * provided.
543 			 */
544 			return (CRYPTO_DATA_LEN_RANGE);
545 		}
546 	}
547 
548 	return (CRYPTO_SUCCESS);
549 }
550 
551 /* ARGSUSED */
552 static int
553 sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
554     crypto_req_handle_t req)
555 {
556 	int ret = CRYPTO_SUCCESS;
557 
558 	ASSERT(ctx->cc_provider_private != NULL);
559 
560 	/*
561 	 * If the output buffer is too small, just return the required
562 	 * length; do not destroy the context.
563 	 */
564 	if ((digest->cd_length == 0) ||
565 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
566 		digest->cd_length = SHA1_DIGEST_LENGTH;
567 		return (CRYPTO_BUFFER_TOO_SMALL);
568 	}
569 
570 	/*
571 	 * Do the SHA1 update on the specified input data.
572 	 */
573 	switch (data->cd_format) {
574 	case CRYPTO_DATA_RAW:
575 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
576 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
577 		    data->cd_length);
578 		break;
579 	case CRYPTO_DATA_UIO:
580 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
581 		    data);
582 		break;
583 	case CRYPTO_DATA_MBLK:
584 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
585 		    data);
586 		break;
587 	default:
588 		ret = CRYPTO_ARGUMENTS_BAD;
589 	}
590 
591 	if (ret != CRYPTO_SUCCESS) {
592 		/* the update failed, free context and bail */
593 		kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
594 		ctx->cc_provider_private = NULL;
595 		digest->cd_length = 0;
596 		return (ret);
597 	}
598 
599 	/*
600 	 * Do a SHA1 final. This must be done separately since the digest
601 	 * data type can be different from the input data type.
602 	 */
603 	switch (digest->cd_format) {
604 	case CRYPTO_DATA_RAW:
605 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
606 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
607 		break;
608 	case CRYPTO_DATA_UIO:
609 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
610 		    digest, SHA1_DIGEST_LENGTH, NULL);
611 		break;
612 	case CRYPTO_DATA_MBLK:
613 		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
614 		    digest, SHA1_DIGEST_LENGTH, NULL);
615 		break;
616 	default:
617 		ret = CRYPTO_ARGUMENTS_BAD;
618 	}
619 
620 	/* all done, free context and return */
621 
622 	if (ret == CRYPTO_SUCCESS) {
623 		digest->cd_length = SHA1_DIGEST_LENGTH;
624 	} else {
625 		digest->cd_length = 0;
626 	}
627 
628 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
629 	ctx->cc_provider_private = NULL;
630 	return (ret);
631 }
632 
633 /* ARGSUSED */
634 static int
635 sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
636     crypto_req_handle_t req)
637 {
638 	int ret = CRYPTO_SUCCESS;
639 
640 	ASSERT(ctx->cc_provider_private != NULL);
641 
642 	/*
643 	 * Do the SHA1 update on the specified input data.
644 	 */
645 	switch (data->cd_format) {
646 	case CRYPTO_DATA_RAW:
647 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
648 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
649 		    data->cd_length);
650 		break;
651 	case CRYPTO_DATA_UIO:
652 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
653 		    data);
654 		break;
655 	case CRYPTO_DATA_MBLK:
656 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
657 		    data);
658 		break;
659 	default:
660 		ret = CRYPTO_ARGUMENTS_BAD;
661 	}
662 
663 	return (ret);
664 }
665 
666 /* ARGSUSED */
667 static int
668 sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
669     crypto_req_handle_t req)
670 {
671 	int ret = CRYPTO_SUCCESS;
672 
673 	ASSERT(ctx->cc_provider_private != NULL);
674 
675 	/*
676 	 * If the output buffer is too small, just return the required
677 	 * length; do not destroy the context.
678 	 */
679 	if ((digest->cd_length == 0) ||
680 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
681 		digest->cd_length = SHA1_DIGEST_LENGTH;
682 		return (CRYPTO_BUFFER_TOO_SMALL);
683 	}
684 
685 	/*
686 	 * Do a SHA1 final.
687 	 */
688 	switch (digest->cd_format) {
689 	case CRYPTO_DATA_RAW:
690 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
691 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
692 		break;
693 	case CRYPTO_DATA_UIO:
694 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
695 		    digest, SHA1_DIGEST_LENGTH, NULL);
696 		break;
697 	case CRYPTO_DATA_MBLK:
698 		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
699 		    digest, SHA1_DIGEST_LENGTH, NULL);
700 		break;
701 	default:
702 		ret = CRYPTO_ARGUMENTS_BAD;
703 	}
704 
705 	/* all done, free context and return */
706 
707 	if (ret == CRYPTO_SUCCESS) {
708 		digest->cd_length = SHA1_DIGEST_LENGTH;
709 	} else {
710 		digest->cd_length = 0;
711 	}
712 
713 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
714 	ctx->cc_provider_private = NULL;
715 
716 	return (ret);
717 }
718 
719 /* ARGSUSED */
720 static int
721 sha1_digest_atomic(crypto_provider_handle_t provider,
722     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
723     crypto_data_t *data, crypto_data_t *digest,
724     crypto_req_handle_t req)
725 {
726 	int ret = CRYPTO_SUCCESS;
727 	SHA1_CTX sha1_ctx;
728 
729 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
730 		return (CRYPTO_MECHANISM_INVALID);
731 
732 	/*
733 	 * Do the SHA1 init.
734 	 */
735 	SHA1Init(&sha1_ctx);
736 
737 	/*
738 	 * Do the SHA1 update on the specified input data.
739 	 */
740 	switch (data->cd_format) {
741 	case CRYPTO_DATA_RAW:
742 		SHA1Update(&sha1_ctx,
743 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
744 		    data->cd_length);
745 		break;
746 	case CRYPTO_DATA_UIO:
747 		ret = sha1_digest_update_uio(&sha1_ctx, data);
748 		break;
749 	case CRYPTO_DATA_MBLK:
750 		ret = sha1_digest_update_mblk(&sha1_ctx, data);
751 		break;
752 	default:
753 		ret = CRYPTO_ARGUMENTS_BAD;
754 	}
755 
756 	if (ret != CRYPTO_SUCCESS) {
757 		/* the update failed, bail */
758 		digest->cd_length = 0;
759 		return (ret);
760 	}
761 
762 	/*
763 	 * Do a SHA1 final. This must be done separately since the digest
764 	 * data type can be different from the input data type.
765 	 */
766 	switch (digest->cd_format) {
767 	case CRYPTO_DATA_RAW:
768 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
769 		    digest->cd_offset, &sha1_ctx);
770 		break;
771 	case CRYPTO_DATA_UIO:
772 		ret = sha1_digest_final_uio(&sha1_ctx, digest,
773 		    SHA1_DIGEST_LENGTH, NULL);
774 		break;
775 	case CRYPTO_DATA_MBLK:
776 		ret = sha1_digest_final_mblk(&sha1_ctx, digest,
777 		    SHA1_DIGEST_LENGTH, NULL);
778 		break;
779 	default:
780 		ret = CRYPTO_ARGUMENTS_BAD;
781 	}
782 
783 	if (ret == CRYPTO_SUCCESS) {
784 		digest->cd_length = SHA1_DIGEST_LENGTH;
785 	} else {
786 		digest->cd_length = 0;
787 	}
788 
789 	return (ret);
790 }
791 
792 /*
793  * KCF software provider mac entry points.
794  *
795  * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
796  *
797  * Init:
798  * The initialization routine initializes what we denote
799  * as the inner and outer contexts by doing
800  * - for inner context: SHA1(key XOR ipad)
801  * - for outer context: SHA1(key XOR opad)
802  *
803  * Update:
804  * Each subsequent SHA1 HMAC update will result in an
805  * update of the inner context with the specified data.
806  *
807  * Final:
808  * The SHA1 HMAC final will do a SHA1 final operation on the
809  * inner context, and the resulting digest will be used
810  * as the data for an update on the outer context. Last
811  * but not least, a SHA1 final on the outer context will
812  * be performed to obtain the SHA1 HMAC digest to return
813  * to the user.
814  */
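/*
 * Illustrative sketch (editorial addition, guarded out of the build): the
 * init/update/final flow described above, expressed with this file's own
 * helpers. The function and variable names are hypothetical, the key is
 * assumed to be at most SHA1_HMAC_BLOCK_SIZE bytes, and context-template
 * reuse is omitted.
 */
#if 0
static void
example_sha1_hmac(void *key, uint_t keylen_in_bytes, void *msg, size_t msglen,
    uint8_t mac[SHA1_DIGEST_LENGTH])
{
	sha1_hmac_ctx_t hctx;

	/* Init: inner ctx <- SHA1(key XOR ipad), outer <- SHA1(key XOR opad) */
	sha1_mac_init_ctx(&hctx, key, keylen_in_bytes);

	/* Update: absorb the message into the inner context */
	SHA1Update(&hctx.hc_icontext, msg, msglen);

	/* Final: the inner digest becomes data for the outer context */
	SHA1Final(mac, &hctx.hc_icontext);
	SHA1Update(&hctx.hc_ocontext, mac, SHA1_DIGEST_LENGTH);
	SHA1Final(mac, &hctx.hc_ocontext);

	/* zeroize the stack context, as the real entry points do */
	bzero(&hctx, sizeof (hctx));
}
#endif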
815 
816 /*
817  * Initialize a SHA1-HMAC context.
818  */
819 static void
820 sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
821 {
822 	uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
823 	uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
824 	uint_t i;
825 
826 	bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
827 	bzero(opad, SHA1_HMAC_BLOCK_SIZE);
828 
829 	bcopy(keyval, ipad, length_in_bytes);
830 	bcopy(keyval, opad, length_in_bytes);
831 
832 	/* XOR key with ipad (0x36) and opad (0x5c) */
833 	for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
834 		ipad[i] ^= 0x36363636;
835 		opad[i] ^= 0x5c5c5c5c;
836 	}
837 
838 	/* perform SHA1 on ipad */
839 	SHA1Init(&ctx->hc_icontext);
840 	SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
841 
842 	/* perform SHA1 on opad */
843 	SHA1Init(&ctx->hc_ocontext);
844 	SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
845 }
846 
847 /* KCF software provider mac init entry point (SHA1-HMAC). */
849 static int
850 sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
851     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
852     crypto_req_handle_t req)
853 {
854 	int ret = CRYPTO_SUCCESS;
855 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
856 
857 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
858 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
859 		return (CRYPTO_MECHANISM_INVALID);
860 
861 	/* Add support for key by attributes (RFE 4706552) */
862 	if (key->ck_format != CRYPTO_KEY_RAW)
863 		return (CRYPTO_ARGUMENTS_BAD);
864 
865 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
866 	    crypto_kmflag(req));
867 	if (ctx->cc_provider_private == NULL)
868 		return (CRYPTO_HOST_MEMORY);
869 
870 	if (ctx_template != NULL) {
871 		/* reuse context template */
872 		bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
873 		    sizeof (sha1_hmac_ctx_t));
874 	} else {
875 		/* no context template, compute context */
876 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
877 			uchar_t digested_key[SHA1_DIGEST_LENGTH];
878 			sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
879 
880 			/*
881 			 * Hash the passed-in key to get a smaller key.
882 			 * The inner context is used since it hasn't been
883 			 * initialized yet.
884 			 */
885 			PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
886 			    key->ck_data, keylen_in_bytes, digested_key);
887 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
888 			    digested_key, SHA1_DIGEST_LENGTH);
889 		} else {
890 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
891 			    key->ck_data, keylen_in_bytes);
892 		}
893 	}
894 
895 	/*
896 	 * Get the mechanism parameters, if applicable.
897 	 */
898 	PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
899 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
900 		if (mechanism->cm_param == NULL ||
901 		    mechanism->cm_param_len != sizeof (ulong_t))
902 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
903 		PROV_SHA1_GET_DIGEST_LEN(mechanism,
904 		    PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
905 		if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
906 		    SHA1_DIGEST_LENGTH)
907 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
908 	}
909 
910 	if (ret != CRYPTO_SUCCESS) {
911 		bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
912 		kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
913 		ctx->cc_provider_private = NULL;
914 	}
915 
916 	return (ret);
917 }
918 
919 /* ARGSUSED */
920 static int
921 sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
922 {
923 	int ret = CRYPTO_SUCCESS;
924 
925 	ASSERT(ctx->cc_provider_private != NULL);
926 
927 	/*
928 	 * Do a SHA1 update of the inner context using the specified
929 	 * data.
930 	 */
931 	switch (data->cd_format) {
932 	case CRYPTO_DATA_RAW:
933 		SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
934 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
935 		    data->cd_length);
936 		break;
937 	case CRYPTO_DATA_UIO:
938 		ret = sha1_digest_update_uio(
939 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
940 		break;
941 	case CRYPTO_DATA_MBLK:
942 		ret = sha1_digest_update_mblk(
943 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
944 		break;
945 	default:
946 		ret = CRYPTO_ARGUMENTS_BAD;
947 	}
948 
949 	return (ret);
950 }
951 
952 /* ARGSUSED */
953 static int
954 sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
955 {
956 	int ret = CRYPTO_SUCCESS;
957 	uchar_t digest[SHA1_DIGEST_LENGTH];
958 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
959 
960 	ASSERT(ctx->cc_provider_private != NULL);
961 
962 	if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
963 	    SHA1_HMAC_GEN_MECH_INFO_TYPE)
964 		digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
965 
966 	/*
967 	 * If the output buffer is too small, just return the required
968 	 * length; do not destroy the context.
969 	 */
970 	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
971 		mac->cd_length = digest_len;
972 		return (CRYPTO_BUFFER_TOO_SMALL);
973 	}
974 
975 	/*
976 	 * Do a SHA1 final on the inner context.
977 	 */
978 	SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);
979 
980 	/*
981 	 * Do a SHA1 update on the outer context, feeding the inner
982 	 * digest as data.
983 	 */
984 	SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
985 	    SHA1_DIGEST_LENGTH);
986 
987 	/*
988 	 * Do a SHA1 final on the outer context, storing the computed
989 	 * digest in the user's buffer.
990 	 */
991 	switch (mac->cd_format) {
992 	case CRYPTO_DATA_RAW:
993 		if (digest_len != SHA1_DIGEST_LENGTH) {
994 			/*
995 			 * The caller requested a short digest. Digest
996 			 * into a scratch buffer and return to
997 			 * the user only what was requested.
998 			 */
999 			SHA1Final(digest,
1000 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
1001 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1002 			    mac->cd_offset, digest_len);
1003 		} else {
1004 			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
1005 			    mac->cd_offset,
1006 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
1007 		}
1008 		break;
1009 	case CRYPTO_DATA_UIO:
1010 		ret = sha1_digest_final_uio(
1011 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1012 		    digest_len, digest);
1013 		break;
1014 	case CRYPTO_DATA_MBLK:
1015 		ret = sha1_digest_final_mblk(
1016 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1017 		    digest_len, digest);
1018 		break;
1019 	default:
1020 		ret = CRYPTO_ARGUMENTS_BAD;
1021 	}
1022 
1023 	if (ret == CRYPTO_SUCCESS) {
1024 		mac->cd_length = digest_len;
1025 	} else {
1026 		mac->cd_length = 0;
1027 	}
1028 
1029 	bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1030 	kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1031 	ctx->cc_provider_private = NULL;
1032 
1033 	return (ret);
1034 }
1035 
1036 #define	SHA1_MAC_UPDATE(data, ctx, ret) {				\
1037 	switch (data->cd_format) {					\
1038 	case CRYPTO_DATA_RAW:						\
1039 		SHA1Update(&(ctx).hc_icontext,				\
1040 		    (uint8_t *)data->cd_raw.iov_base +			\
1041 		    data->cd_offset, data->cd_length);			\
1042 		break;							\
1043 	case CRYPTO_DATA_UIO:						\
1044 		ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
1045 		break;							\
1046 	case CRYPTO_DATA_MBLK:						\
1047 		ret = sha1_digest_update_mblk(&(ctx).hc_icontext,	\
1048 		    data);						\
1049 		break;							\
1050 	default:							\
1051 		ret = CRYPTO_ARGUMENTS_BAD;				\
1052 	}								\
1053 }
1054 
1055 /* ARGSUSED */
1056 static int
1057 sha1_mac_atomic(crypto_provider_handle_t provider,
1058     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1059     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1060     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1061 {
1062 	int ret = CRYPTO_SUCCESS;
1063 	uchar_t digest[SHA1_DIGEST_LENGTH];
1064 	sha1_hmac_ctx_t sha1_hmac_ctx;
1065 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
1066 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1067 
1068 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1069 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1070 		return (CRYPTO_MECHANISM_INVALID);
1071 
1072 	/* Add support for key by attributes (RFE 4706552) */
1073 	if (key->ck_format != CRYPTO_KEY_RAW)
1074 		return (CRYPTO_ARGUMENTS_BAD);
1075 
1076 	if (ctx_template != NULL) {
1077 		/* reuse context template */
1078 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1079 	} else {
1080 		/* no context template, initialize context */
1081 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1082 			/*
1083 			 * Hash the passed-in key to get a smaller key.
1084 			 * The inner context is used since it hasn't been
1085 			 * initialized yet.
1086 			 */
1087 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1088 			    key->ck_data, keylen_in_bytes, digest);
1089 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1090 			    SHA1_DIGEST_LENGTH);
1091 		} else {
1092 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1093 			    keylen_in_bytes);
1094 		}
1095 	}
1096 
1097 	/* get the mechanism parameters, if applicable */
1098 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1099 		if (mechanism->cm_param == NULL ||
1100 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1101 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1102 			goto bail;
1103 		}
1104 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1105 		if (digest_len > SHA1_DIGEST_LENGTH) {
1106 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1107 			goto bail;
1108 		}
1109 	}
1110 
1111 	/* do a SHA1 update of the inner context using the specified data */
1112 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1113 	if (ret != CRYPTO_SUCCESS)
1114 		/* the update failed, bail */
1115 		goto bail;
1116 
1117 	/*
1118 	 * Do a SHA1 final on the inner context.
1119 	 */
1120 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1121 
1122 	/*
1123 	 * Do a SHA1 update on the outer context, feeding the inner
1124 	 * digest as data.
1125 	 */
1126 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1127 
1128 	/*
1129 	 * Do a SHA1 final on the outer context, storing the computed
1130 	 * digest in the user's buffer.
1131 	 */
1132 	switch (mac->cd_format) {
1133 	case CRYPTO_DATA_RAW:
1134 		if (digest_len != SHA1_DIGEST_LENGTH) {
1135 			/*
1136 			 * The caller requested a short digest. Digest
1137 			 * into a scratch buffer and return to
1138 			 * the user only what was requested.
1139 			 */
1140 			SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1141 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1142 			    mac->cd_offset, digest_len);
1143 		} else {
1144 			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
1145 			    mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
1146 		}
1147 		break;
1148 	case CRYPTO_DATA_UIO:
1149 		ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
1150 		    digest_len, digest);
1151 		break;
1152 	case CRYPTO_DATA_MBLK:
1153 		ret = sha1_digest_final_mblk(&sha1_hmac_ctx.hc_ocontext, mac,
1154 		    digest_len, digest);
1155 		break;
1156 	default:
1157 		ret = CRYPTO_ARGUMENTS_BAD;
1158 	}
1159 
1160 	if (ret == CRYPTO_SUCCESS) {
1161 		mac->cd_length = digest_len;
1162 	} else {
1163 		mac->cd_length = 0;
1164 	}
1165 	/* Extra paranoia: zeroize the context on the stack */
1166 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1167 
1168 	return (ret);
1169 bail:
1170 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1171 	mac->cd_length = 0;
1172 	return (ret);
1173 }
1174 
1175 /* ARGSUSED */
1176 static int
1177 sha1_mac_verify_atomic(crypto_provider_handle_t provider,
1178     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1179     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1180     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1181 {
1182 	int ret = CRYPTO_SUCCESS;
1183 	uchar_t digest[SHA1_DIGEST_LENGTH];
1184 	sha1_hmac_ctx_t sha1_hmac_ctx;
1185 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
1186 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1187 
1188 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1189 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1190 		return (CRYPTO_MECHANISM_INVALID);
1191 
1192 	/* Add support for key by attributes (RFE 4706552) */
1193 	if (key->ck_format != CRYPTO_KEY_RAW)
1194 		return (CRYPTO_ARGUMENTS_BAD);
1195 
1196 	if (ctx_template != NULL) {
1197 		/* reuse context template */
1198 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1199 	} else {
1200 		/* no context template, initialize context */
1201 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1202 			/*
1203 			 * Hash the passed-in key to get a smaller key.
1204 			 * The inner context is used since it hasn't been
1205 			 * initialized yet.
1206 			 */
1207 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1208 			    key->ck_data, keylen_in_bytes, digest);
1209 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1210 			    SHA1_DIGEST_LENGTH);
1211 		} else {
1212 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1213 			    keylen_in_bytes);
1214 		}
1215 	}
1216 
1217 	/* get the mechanism parameters, if applicable */
1218 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1219 		if (mechanism->cm_param == NULL ||
1220 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1221 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1222 			goto bail;
1223 		}
1224 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1225 		if (digest_len > SHA1_DIGEST_LENGTH) {
1226 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1227 			goto bail;
1228 		}
1229 	}
1230 
1231 	if (mac->cd_length != digest_len) {
1232 		ret = CRYPTO_INVALID_MAC;
1233 		goto bail;
1234 	}
1235 
1236 	/* do a SHA1 update of the inner context using the specified data */
1237 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1238 	if (ret != CRYPTO_SUCCESS)
1239 		/* the update failed, bail */
1240 		goto bail;
1241 
1242 	/* do a SHA1 final on the inner context */
1243 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1244 
1245 	/*
1246 	 * Do a SHA1 update on the outer context, feeding the inner
1247 	 * digest as data.
1248 	 */
1249 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1250 
1251 	/*
1252 	 * Do a SHA1 final on the outer context, storing the computed
1253 	 * digest in the user's buffer.
1254 	 */
1255 	SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1256 
1257 	/*
1258 	 * Compare the computed digest against the expected digest passed
1259 	 * as argument.
1260 	 */
1261 
1262 	switch (mac->cd_format) {
1263 
1264 	case CRYPTO_DATA_RAW:
1265 		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
1266 		    mac->cd_offset, digest_len) != 0)
1267 			ret = CRYPTO_INVALID_MAC;
1268 		break;
1269 
1270 	case CRYPTO_DATA_UIO: {
1271 		off_t offset = mac->cd_offset;
1272 		uint_t vec_idx;
1273 		off_t scratch_offset = 0;
1274 		size_t length = digest_len;
1275 		size_t cur_len;
1276 
1277 		/* we support only kernel buffers */
1278 		if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
1279 			return (CRYPTO_ARGUMENTS_BAD);
1280 
1281 		/* jump to the first iovec containing the expected digest */
1282 		for (vec_idx = 0;
1283 		    vec_idx < mac->cd_uio->uio_iovcnt &&
1284 		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len;
1285 		    offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len)
1286 			;
1287 		if (vec_idx == mac->cd_uio->uio_iovcnt) {
1288 			/*
1289 			 * The caller specified an offset that is
1290 			 * larger than the total size of the buffers
1291 			 * it provided.
1292 			 */
1293 			ret = CRYPTO_DATA_LEN_RANGE;
1294 			break;
1295 		}
1296 
1297 		/* do the comparison of computed digest vs specified one */
1298 		while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
1299 			cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
1300 			    offset, length);
1301 
1302 			if (bcmp(digest + scratch_offset,
1303 			    mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
1304 			    cur_len) != 0) {
1305 				ret = CRYPTO_INVALID_MAC;
1306 				break;
1307 			}
1308 
1309 			length -= cur_len;
1310 			vec_idx++;
1311 			scratch_offset += cur_len;
1312 			offset = 0;
1313 		}
1314 		break;
1315 	}
1316 
1317 	case CRYPTO_DATA_MBLK: {
1318 		off_t offset = mac->cd_offset;
1319 		mblk_t *mp;
1320 		off_t scratch_offset = 0;
1321 		size_t length = digest_len;
1322 		size_t cur_len;
1323 
1324 		/* jump to the first mblk_t containing the expected digest */
1325 		for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
1326 		    offset -= MBLKL(mp), mp = mp->b_cont)
1327 			;
1328 		if (mp == NULL) {
1329 			/*
1330 			 * The caller specified an offset that is larger than
1331 			 * the total size of the buffers it provided.
1332 			 */
1333 			ret = CRYPTO_DATA_LEN_RANGE;
1334 			break;
1335 		}
1336 
1337 		while (mp != NULL && length > 0) {
1338 			cur_len = MIN(MBLKL(mp) - offset, length);
1339 			if (bcmp(digest + scratch_offset,
1340 			    mp->b_rptr + offset, cur_len) != 0) {
1341 				ret = CRYPTO_INVALID_MAC;
1342 				break;
1343 			}
1344 
1345 			length -= cur_len;
1346 			mp = mp->b_cont;
1347 			scratch_offset += cur_len;
1348 			offset = 0;
1349 		}
1350 		break;
1351 	}
1352 
1353 	default:
1354 		ret = CRYPTO_ARGUMENTS_BAD;
1355 	}
1356 
1357 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1358 	return (ret);
1359 bail:
1360 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1361 	mac->cd_length = 0;
1362 	return (ret);
1363 }
1364 
1365 /*
1366  * KCF software provider context management entry points.
1367  */
1368 
1369 /* ARGSUSED */
1370 static int
1371 sha1_create_ctx_template(crypto_provider_handle_t provider,
1372     crypto_mechanism_t *mechanism, crypto_key_t *key,
1373     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
1374     crypto_req_handle_t req)
1375 {
1376 	sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
1377 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1378 
1379 	if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
1380 	    (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
1381 		return (CRYPTO_MECHANISM_INVALID);
1382 	}
1383 
1384 	/* Add support for key by attributes (RFE 4706552) */
1385 	if (key->ck_format != CRYPTO_KEY_RAW)
1386 		return (CRYPTO_ARGUMENTS_BAD);
1387 
1388 	/*
1389 	 * Allocate and initialize a SHA1-HMAC context template.
1390 	 */
1391 	sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
1392 	    crypto_kmflag(req));
1393 	if (sha1_hmac_ctx_tmpl == NULL)
1394 		return (CRYPTO_HOST_MEMORY);
1395 
1396 	if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1397 		uchar_t digested_key[SHA1_DIGEST_LENGTH];
1398 
1399 		/*
1400 		 * Hash the passed-in key to get a smaller key.
1401 		 * The inner context is used since it hasn't been
1402 		 * initialized yet.
1403 		 */
1404 		PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
1405 		    key->ck_data, keylen_in_bytes, digested_key);
1406 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
1407 		    SHA1_DIGEST_LENGTH);
1408 	} else {
1409 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
1410 		    keylen_in_bytes);
1411 	}
1412 
1413 	sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
1414 	*ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
1415 	*ctx_template_size = sizeof (sha1_hmac_ctx_t);
1416 
1418 	return (CRYPTO_SUCCESS);
1419 }
1420 
1421 static int
1422 sha1_free_context(crypto_ctx_t *ctx)
1423 {
1424 	uint_t ctx_len;
1425 	sha1_mech_type_t mech_type;
1426 
1427 	if (ctx->cc_provider_private == NULL)
1428 		return (CRYPTO_SUCCESS);
1429 
1430 	/*
1431 	 * We have to free either SHA1 or SHA1-HMAC contexts, which
1432 	 * have different lengths.
1433 	 */
1434 
1435 	mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
1436 	if (mech_type == SHA1_MECH_INFO_TYPE)
1437 		ctx_len = sizeof (sha1_ctx_t);
1438 	else {
1439 		ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
1440 		    mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
1441 		ctx_len = sizeof (sha1_hmac_ctx_t);
1442 	}
1443 
1444 	bzero(ctx->cc_provider_private, ctx_len);
1445 	kmem_free(ctx->cc_provider_private, ctx_len);
1446 	ctx->cc_provider_private = NULL;
1447 
1448 	return (CRYPTO_SUCCESS);
1449 }
1450 
1451 /*
1452  * SHA-1 Power-Up Self-Test
1453  */
1454 static void
1455 sha1_POST(int *rc)
1456 {
1457 	*rc = fips_sha1_post();
1458 }
1461