xref: /illumos-gate/usr/src/common/crypto/sha1/sha1.c (revision 65a89a64c60f3061bbe2381edaacc81660af9a95)
1 /*
2  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 #pragma ident	"%Z%%M%	%I%	%E% SMI"
7 
8 /*
9  * The basic framework for this code came from the reference
10  * implementation for MD5.  That implementation is Copyright (C)
11  * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
12  *
13  * License to copy and use this software is granted provided that it
14  * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
15  * Algorithm" in all material mentioning or referencing this software
16  * or this function.
17  *
18  * License is also granted to make and use derivative works provided
19  * that such works are identified as "derived from the RSA Data
20  * Security, Inc. MD5 Message-Digest Algorithm" in all material
21  * mentioning or referencing the derived work.
22  *
23  * RSA Data Security, Inc. makes no representations concerning either
24  * the merchantability of this software or the suitability of this
25  * software for any particular purpose. It is provided "as is"
26  * without express or implied warranty of any kind.
27  *
28  * These notices must be retained in any copies of any part of this
29  * documentation and/or software.
30  *
31  * NOTE: Cleaned-up and optimized, version of SHA1, based on the FIPS 180-1
32  * standard, available at http://www.itl.nist.gov/div897/pubs/fip180-1.htm
33  * Not as fast as one would like -- further optimizations are encouraged
34  * and appreciated.
35  */
36 
37 #include <sys/types.h>
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysmacros.h>
41 #include <sys/sha1.h>
42 #include <sys/sha1_consts.h>
43 
44 #ifdef _KERNEL
45 
46 #include <sys/modctl.h>
47 #include <sys/cmn_err.h>
48 #include <sys/note.h>
49 #include <sys/crypto/common.h>
50 #include <sys/crypto/spi.h>
51 #include <sys/strsun.h>
52 
53 /*
54  * The sha1 module is created with two modlinkages:
55  * - a modlmisc that allows consumers to directly call the entry points
56  *   SHA1Init, SHA1Update, and SHA1Final.
57  * - a modlcrypto that allows the module to register with the Kernel
58  *   Cryptographic Framework (KCF) as a software provider for the SHA1
59  *   mechanisms.
60  */
61 
62 #endif /* _KERNEL */
63 #ifndef	_KERNEL
64 #include <strings.h>
65 #include <stdlib.h>
66 #include <errno.h>
67 #include <sys/systeminfo.h>
68 #endif	/* !_KERNEL */
69 
70 static void Encode(uint8_t *, uint32_t *, size_t);
71 static void SHA1Transform(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t,
72     SHA1_CTX *, const uint8_t *);
73 
74 static uint8_t PADDING[64] = { 0x80, /* all zeros */ };
75 
/*
 * F, G, and H are the basic SHA1 functions.
 * F is the "choose" function (rounds  0-19),
 * G is the parity/xor function (rounds 20-39 and 60-79),
 * H is the majority function (rounds 40-59).
 *
 * Every macro parameter is fully parenthesized; the original F used
 * `(~b)', which mis-expands when the argument is itself an expression
 * of lower precedence than `~' (e.g. F(x + y, ...)).
 */
#define	F(b, c, d)	(((b) & (c)) | ((~(b)) & (d)))
#define	G(b, c, d)	((b) ^ (c) ^ (d))
#define	H(b, c, d)	(((b) & (c)) | ((b) & (d)) | ((c) & (d)))

/*
 * ROTATE_LEFT rotates x left n bits.
 */
#define	ROTATE_LEFT(x, n)	\
	(((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n))))
88 
89 #ifdef _KERNEL
90 
91 static struct modlmisc modlmisc = {
92 	&mod_miscops,
93 	"SHA1 Message-Digest Algorithm"
94 };
95 
96 static struct modlcrypto modlcrypto = {
97 	&mod_cryptoops,
98 	"SHA1 Kernel SW Provider %I%"
99 };
100 
101 static struct modlinkage modlinkage = {
102 	MODREV_1, &modlmisc, &modlcrypto, NULL
103 };
104 
105 /*
106  * CSPI information (entry points, provider info, etc.)
107  */
108 
109 typedef enum sha1_mech_type {
110 	SHA1_MECH_INFO_TYPE,		/* SUN_CKM_SHA1 */
111 	SHA1_HMAC_MECH_INFO_TYPE,	/* SUN_CKM_SHA1_HMAC */
112 	SHA1_HMAC_GEN_MECH_INFO_TYPE	/* SUN_CKM_SHA1_HMAC_GENERAL */
113 } sha1_mech_type_t;
114 
115 #define	SHA1_DIGEST_LENGTH	20	/* SHA1 digest length in bytes */
116 #define	SHA1_HMAC_BLOCK_SIZE	64	/* SHA1-HMAC block size */
117 #define	SHA1_HMAC_MIN_KEY_LEN	8	/* SHA1-HMAC min key length in bits */
118 #define	SHA1_HMAC_MAX_KEY_LEN	INT_MAX /* SHA1-HMAC max key length in bits */
119 #define	SHA1_HMAC_INTS_PER_BLOCK	(SHA1_HMAC_BLOCK_SIZE/sizeof (uint32_t))
120 
121 /*
122  * Context for SHA1 mechanism.
123  */
124 typedef struct sha1_ctx {
125 	sha1_mech_type_t	sc_mech_type;	/* type of context */
126 	SHA1_CTX		sc_sha1_ctx;	/* SHA1 context */
127 } sha1_ctx_t;
128 
129 /*
130  * Context for SHA1-HMAC and SHA1-HMAC-GENERAL mechanisms.
131  */
132 typedef struct sha1_hmac_ctx {
133 	sha1_mech_type_t	hc_mech_type;	/* type of context */
134 	uint32_t		hc_digest_len;	/* digest len in bytes */
135 	SHA1_CTX		hc_icontext;	/* inner SHA1 context */
136 	SHA1_CTX		hc_ocontext;	/* outer SHA1 context */
137 } sha1_hmac_ctx_t;
138 
139 /*
140  * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
141  * by KCF to one of the entry points.
142  */
143 
144 #define	PROV_SHA1_CTX(ctx)	((sha1_ctx_t *)(ctx)->cc_provider_private)
145 #define	PROV_SHA1_HMAC_CTX(ctx)	((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)
146 
/*
 * To extract the digest length passed as mechanism parameter.
 *
 * The aligned branch must use the macro parameter `(m)', not the
 * literal identifier `mechanism': the original expansion referenced
 * `mechanism->cm_param' and compiled only because every caller happened
 * to name its argument `mechanism'.
 */
#define	PROV_SHA1_GET_DIGEST_LEN(m, len) {				\
	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
		(len) = (uint32_t)*((ulong_t *)(m)->cm_param);		\
	else {								\
		ulong_t tmp_ulong;					\
		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
		(len) = (uint32_t)tmp_ulong;				\
	}								\
}
157 
158 #define	PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) {	\
159 	SHA1Init(ctx);					\
160 	SHA1Update(ctx, key, len);			\
161 	SHA1Final(digest, ctx);				\
162 }
163 
164 /*
165  * Mechanism info structure passed to KCF during registration.
166  */
167 static crypto_mech_info_t sha1_mech_info_tab[] = {
168 	/* SHA1 */
169 	{SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
170 	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
171 	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
172 	/* SHA1-HMAC */
173 	{SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
174 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
175 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
176 	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
177 	/* SHA1-HMAC GENERAL */
178 	{SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
179 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
180 	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
181 	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
182 };
183 
184 static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
185 
186 static crypto_control_ops_t sha1_control_ops = {
187 	sha1_provider_status
188 };
189 
190 static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
191     crypto_req_handle_t);
192 static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
193     crypto_req_handle_t);
194 static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
195     crypto_req_handle_t);
196 static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
197     crypto_req_handle_t);
198 static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
199     crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
200     crypto_req_handle_t);
201 
202 static crypto_digest_ops_t sha1_digest_ops = {
203 	sha1_digest_init,
204 	sha1_digest,
205 	sha1_digest_update,
206 	NULL,
207 	sha1_digest_final,
208 	sha1_digest_atomic
209 };
210 
211 static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
212     crypto_spi_ctx_template_t, crypto_req_handle_t);
213 static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
214     crypto_req_handle_t);
215 static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
216 static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
217     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
218     crypto_spi_ctx_template_t, crypto_req_handle_t);
219 static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
220     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
221     crypto_spi_ctx_template_t, crypto_req_handle_t);
222 
223 static crypto_mac_ops_t sha1_mac_ops = {
224 	sha1_mac_init,
225 	NULL,
226 	sha1_mac_update,
227 	sha1_mac_final,
228 	sha1_mac_atomic,
229 	sha1_mac_verify_atomic
230 };
231 
232 static int sha1_create_ctx_template(crypto_provider_handle_t,
233     crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
234     size_t *, crypto_req_handle_t);
235 static int sha1_free_context(crypto_ctx_t *);
236 
237 static crypto_ctx_ops_t sha1_ctx_ops = {
238 	sha1_create_ctx_template,
239 	sha1_free_context
240 };
241 
242 static crypto_ops_t sha1_crypto_ops = {
243 	&sha1_control_ops,
244 	&sha1_digest_ops,
245 	NULL,
246 	&sha1_mac_ops,
247 	NULL,
248 	NULL,
249 	NULL,
250 	NULL,
251 	NULL,
252 	NULL,
253 	NULL,
254 	NULL,
255 	NULL,
256 	&sha1_ctx_ops
257 };
258 
259 static crypto_provider_info_t sha1_prov_info = {
260 	CRYPTO_SPI_VERSION_1,
261 	"SHA1 Software Provider",
262 	CRYPTO_SW_PROVIDER,
263 	{&modlinkage},
264 	NULL,
265 	&sha1_crypto_ops,
266 	sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
267 	sha1_mech_info_tab
268 };
269 
270 static crypto_kcf_provider_handle_t sha1_prov_handle = NULL;
271 
/*
 * Module-load entry point.  Installs both linkages (modlmisc for the
 * direct SHA1Init/Update/Final consumers, modlcrypto for KCF) and then
 * registers as a KCF software provider.
 *
 * Note: a KCF registration failure is deliberately non-fatal -- this
 * function still returns 0 so the misc/sha1 entry points stay usable.
 */
int
_init()
{
	int ret;

	if ((ret = mod_install(&modlinkage)) != 0)
		return (ret);

	/*
	 * Register with KCF. If the registration fails, log an
	 * error but do not uninstall the module, since the functionality
	 * provided by misc/sha1 should still be available.
	 */
	if ((ret = crypto_register_provider(&sha1_prov_info,
	    &sha1_prov_handle)) != CRYPTO_SUCCESS)
		cmn_err(CE_WARN, "sha1 _init: "
		    "crypto_register_provider() failed (0x%x)", ret);

	return (0);
}
292 
/*
 * Module-information entry point; reports on both linkages.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
298 
299 #endif /* _KERNEL */
300 
/*
 * SHA1Init()
 *
 * purpose: initializes the sha1 context and begins an sha1 digest operation
 *   input: SHA1_CTX *	: the context to initialize.
 *  output: void
 */
308 
309 void
310 SHA1Init(SHA1_CTX *ctx)
311 {
312 	ctx->count[0] = ctx->count[1] = 0;
313 
314 	/*
315 	 * load magic initialization constants. Tell lint
316 	 * that these constants are unsigned by using U.
317 	 */
318 
319 	ctx->state[0] = 0x67452301U;
320 	ctx->state[1] = 0xefcdab89U;
321 	ctx->state[2] = 0x98badcfeU;
322 	ctx->state[3] = 0x10325476U;
323 	ctx->state[4] = 0xc3d2e1f0U;
324 }
325 
326 #ifdef VIS_SHA1
327 
328 
329 #ifdef _KERNEL
330 
331 #include <sys/regset.h>
332 #include <sys/vis.h>
333 #include <sys/fpu/fpusystm.h>
334 
335 /* the alignment for block stores to save fp registers */
336 #define	VIS_ALIGN	(64)
337 
338 extern int sha1_savefp(kfpu_t *, int);
339 extern void sha1_restorefp(kfpu_t *);
340 
341 uint32_t	vis_sha1_svfp_threshold = 128;
342 
343 #else /* !_KERNEL */
344 
345 static boolean_t checked_vis = B_FALSE;
346 static int usevis = 0;
347 
348 static int
349 havevis()
350 {
351 	char *buf = NULL;
352 	char *isa_token;
353 	char *lasts;
354 	int ret = 0;
355 	size_t bufsize = 255; /* UltraSPARC III needs 115 chars */
356 	int v9_isa_token, vis_isa_token, isa_token_num;
357 
358 	if (checked_vis) {
359 		return (usevis);
360 	}
361 
362 	if ((buf = malloc(bufsize)) == NULL) {
363 		return (0);
364 	}
365 
366 	if ((ret = sysinfo(SI_ISALIST, buf, bufsize)) == -1) {
367 		free(buf);
368 		return (0);
369 	} else if (ret > bufsize) {
370 		/* We lost some because our buffer was too small  */
371 		if ((buf = realloc(buf, bufsize = ret)) == NULL) {
372 			return (0);
373 		}
374 		if ((ret = sysinfo(SI_ISALIST, buf, bufsize)) == -1) {
375 			free(buf);
376 			return (0);
377 		}
378 	}
379 
380 	/*
381 	 * Check the relative posistions of sparcv9 & sparcv9+vis
382 	 * because they are listed in (best) performance order.
383 	 * For example: The Niagara chip reports it has VIS but the
384 	 * SHA1 code runs faster without this optimisation.
385 	 */
386 	isa_token = strtok_r(buf, " ", &lasts);
387 	v9_isa_token = vis_isa_token = -1;
388 	isa_token_num = 0;
389 	do {
390 		if (strcmp(isa_token, "sparcv9") == 0) {
391 			v9_isa_token = isa_token_num;
392 		} else if (strcmp(isa_token, "sparcv9+vis") == 0) {
393 			vis_isa_token = isa_token_num;
394 		}
395 		isa_token_num++;
396 	} while (isa_token = strtok_r(NULL, " ", &lasts));
397 
398 	if (vis_isa_token != -1 && vis_isa_token < v9_isa_token)
399 		usevis = 1;
400 	free(buf);
401 
402 	checked_vis = B_TRUE;
403 	return (usevis);
404 }
405 
406 #endif /* _KERNEL */
407 
408 /*
409  * VIS SHA-1 consts.
410  */
411 static uint64_t VIS[] = {
412 	0x8000000080000000ULL,
413 	0x0002000200020002ULL,
414 	0x5a8279996ed9eba1ULL,
415 	0x8f1bbcdcca62c1d6ULL,
416 	0x012389ab456789abULL};
417 
418 extern void SHA1TransformVIS(uint64_t *, uint64_t *, uint32_t *, uint64_t *);
419 
420 
421 /*
422  * SHA1Update()
423  *
424  * purpose: continues an sha1 digest operation, using the message block
425  *          to update the context.
426  *   input: SHA1_CTX *	: the context to update
427  *          uint8_t *	: the message block
428  *          uint32_t    : the length of the message block in bytes
429  *  output: void
430  */
431 
void
SHA1Update(SHA1_CTX *ctx, const uint8_t *input, uint32_t input_len)
{
	uint32_t i, buf_index, buf_len;
	/*
	 * X0 is scratch space for the VIS-computed message schedule;
	 * input64 is a 64-byte bounce buffer for blocks whose source
	 * is not suitably aligned for SHA1TransformVIS().
	 */
	uint64_t X0[40], input64[8];
#ifdef _KERNEL
	/* decided per call: VIS usable only if the FPU state was saved */
	int usevis = 0;
#endif /* _KERNEL */

	/* check for noop */
	if (input_len == 0)
		return;

	/* compute number of bytes mod 64 (bytes buffered from last call) */
	buf_index = (ctx->count[1] >> 3) & 0x3F;

	/*
	 * update number of bits: the 64-bit bit counter is split across
	 * count[0] (high word) and count[1] (low word).  carry out of the
	 * low word is detected by wraparound of the 32-bit addition, and
	 * the bits shifted out of the 32-bit product (input_len >> 29)
	 * are added to the high word.
	 */
	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
		ctx->count[0]++;

	ctx->count[0] += (input_len >> 29);

	/* bytes needed to fill the 64-byte internal buffer */
	buf_len = 64 - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {
#ifdef _KERNEL
		/* fp save area plus slop so it can be 64-byte aligned */
		uint8_t fpua[sizeof (kfpu_t) + GSR_SIZE + VIS_ALIGN];
		kfpu_t *fpu;

		/* total bytes that will go through the block transform */
		uint32_t len = (input_len + buf_index) & ~0x3f;
		int svfp_ok;

		fpu = (kfpu_t *)P2ROUNDUP((uintptr_t)fpua, 64);
		/*
		 * only pay the fp save/restore cost when there is
		 * enough data to amortize it (tunable threshold).
		 */
		svfp_ok = ((len >= vis_sha1_svfp_threshold) ? 1 : 0);
		usevis = fpu_exists && sha1_savefp(fpu, svfp_ok);
#else
		if (!checked_vis)
			usevis = havevis();
#endif /* _KERNEL */

		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and SHA1Transform() if
		 * buf_index != 0.  if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * SHA1Update().
		 */

		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
			if (usevis) {
				SHA1TransformVIS(X0,
				    (uint64_t *)ctx->buf_un.buf8,
				    &ctx->state[0], VIS);
			} else {
				SHA1Transform(ctx->state[0], ctx->state[1],
				    ctx->state[2], ctx->state[3],
				    ctx->state[4], ctx, ctx->buf_un.buf8);
			}
			i = buf_len;
		}

		/*
		 * VIS SHA-1: uses the VIS 1.0 instructions to accelerate
		 * SHA-1 processing. This is achieved by "offloading" the
		 * computation of the message schedule (MS) to the VIS units.
		 * This allows the VIS computation of the message schedule
		 * to be performed in parallel with the standard integer
		 * processing of the remainder of the SHA-1 computation.
		 * This can improve performance by up to around 1.37X,
		 * compared to an optimized integer-only implementation.
		 *
		 * The VIS implementation of SHA1Transform has a different API
		 * to the standard integer version:
		 *
		 * void SHA1TransformVIS(
		 *	 uint64_t *, // Pointer to MS for ith block
		 *	 uint64_t *, // Pointer to ith block of message data
		 *	 uint32_t *, // Pointer to SHA state i.e ctx->state
		 *	 uint64_t *, // Pointer to various VIS constants
		 * )
		 *
		 * Note: the message data must be 4-byte aligned.
		 *
		 * Function requires VIS 1.0 support.
		 *
		 * Handling is provided to deal with arbitrary byte alignment
		 * of the input data but the performance gains are reduced
		 * for alignments other than 4-bytes.
		 */
		if (usevis) {
			if (((uint64_t)(uintptr_t)(&input[i]) & 0x3)) {
				/*
				 * Main processing loop - input misaligned:
				 * bounce each block through input64 to
				 * satisfy SHA1TransformVIS()'s alignment
				 * requirement.
				 */
				for (; i + 63 < input_len; i += 64) {
				    bcopy(&input[i], input64, 64);
				    SHA1TransformVIS(X0, input64,
					&ctx->state[0], VIS);
				}
			} else {
				/*
				 * Main processing loop - input at least
				 * 4-byte aligned (the `& 0x3' test above),
				 * so it can be handed to SHA1TransformVIS
				 * directly.
				 */
				for (; i + 63 < input_len; i += 64) {
					SHA1TransformVIS(X0,
					    (uint64_t *)&input[i],
					    &ctx->state[0], VIS);
				}

			}
#ifdef _KERNEL
			sha1_restorefp(fpu);
#endif /* _KERNEL */
		} else {
			/* integer-only fallback, 64 bytes at a time */
			for (; i + 63 < input_len; i += 64) {
			    SHA1Transform(ctx->state[0], ctx->state[1],
				ctx->state[2], ctx->state[3], ctx->state[4],
				ctx, &input[i]);
			}
		}

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this case
		 * will be an expensive nop.
		 */

		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}
575 
576 #else /* VIS_SHA1 */
577 
void
SHA1Update(SHA1_CTX *ctx, const uint8_t *input, uint32_t input_len)
{
	uint32_t i, buf_index, buf_len;

	/* check for noop */
	if (input_len == 0)
		return;

	/* compute number of bytes mod 64 (bytes buffered from last call) */
	buf_index = (ctx->count[1] >> 3) & 0x3F;

	/*
	 * update number of bits: the 64-bit bit counter is split across
	 * count[0] (high word) and count[1] (low word).  carry out of the
	 * low word is detected by wraparound of the 32-bit addition, and
	 * the bits shifted out of the 32-bit product (input_len >> 29)
	 * are added to the high word.
	 */
	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
		ctx->count[0]++;

	ctx->count[0] += (input_len >> 29);

	/* bytes needed to fill the 64-byte internal buffer */
	buf_len = 64 - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {

		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and SHA1Transform() if
		 * buf_index != 0.  if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * SHA1Update().
		 */

		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);


			SHA1Transform(ctx->state[0], ctx->state[1],
			    ctx->state[2], ctx->state[3], ctx->state[4], ctx,
			    ctx->buf_un.buf8);

			i = buf_len;
		}

		/* consume the input directly, one 64-byte block at a time */
		for (; i + 63 < input_len; i += 64)
			SHA1Transform(ctx->state[0], ctx->state[1],
			    ctx->state[2], ctx->state[3], ctx->state[4],
			    ctx, &input[i]);

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this case
		 * will be an expensive nop.
		 */

		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}
645 
646 #endif /* VIS_SHA1 */
647 
/*
 * SHA1Final()
 *
 * purpose: ends an sha1 digest operation, finalizing the message digest.
 *          Note: the context is NOT cleared here; callers that need the
 *          context zeroed must do so themselves.
 *   input: uint8_t *	: a buffer to store the digest in
 *          SHA1_CTX *  : the context to finalize and save
 *  output: void
 */
657 
658 void
659 SHA1Final(uint8_t *digest, SHA1_CTX *ctx)
660 {
661 	uint8_t		bitcount_be[sizeof (ctx->count)];
662 	uint32_t	index = (ctx->count[1] >> 3) & 0x3f;
663 
664 	/* store bit count, big endian */
665 	Encode(bitcount_be, ctx->count, sizeof (bitcount_be));
666 
667 	/* pad out to 56 mod 64 */
668 	SHA1Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
669 
670 	/* append length (before padding) */
671 	SHA1Update(ctx, bitcount_be, sizeof (bitcount_be));
672 
673 	/* store state in digest */
674 	Encode(digest, ctx->state, sizeof (ctx->state));
675 }
676 
/*
 * sparc optimization:
 *
 * on the sparc, we can load big endian 32-bit data easily.  note that
 * special care must be taken to ensure the address is 32-bit aligned.
 * in the interest of speed, we don't check to make sure, since
 * careful programming can guarantee this for us.
 */

#if	defined(_BIG_ENDIAN)

#define	LOAD_BIG_32(addr)	(*(uint32_t *)(addr))

#else	/* little endian -- will work on big endian, but slowly */

/*
 * Each byte is promoted to uint32_t before shifting: left-shifting a
 * byte value that has been promoted to (signed) int by 24 can shift
 * into the sign bit, which is undefined behavior for signed integers.
 */
#define	LOAD_BIG_32(addr)	\
	(((uint32_t)(addr)[0] << 24) | ((uint32_t)(addr)[1] << 16) | \
	((uint32_t)(addr)[2] << 8) | (uint32_t)(addr)[3])
#endif
695 
696 /*
697  * sparc register window optimization:
698  *
699  * `a', `b', `c', `d', and `e' are passed into SHA1Transform
700  * explicitly since it increases the number of registers available to
701  * the compiler.  under this scheme, these variables can be held in
702  * %i0 - %i4, which leaves more local and out registers available.
703  */
704 
705 /*
706  * SHA1Transform()
707  *
708  * purpose: sha1 transformation -- updates the digest based on `block'
709  *   input: uint32_t	: bytes  1 -  4 of the digest
710  *          uint32_t	: bytes  5 -  8 of the digest
711  *          uint32_t	: bytes  9 - 12 of the digest
712  *          uint32_t	: bytes 12 - 16 of the digest
713  *          uint32_t	: bytes 16 - 20 of the digest
714  *          SHA1_CTX *	: the context to update
715  *          uint8_t [64]: the block to use to update the digest
716  *  output: void
717  */
718 
719 void
720 SHA1Transform(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e,
721     SHA1_CTX *ctx, const uint8_t blk[64])
722 {
723 	/*
724 	 * sparc optimization:
725 	 *
726 	 * while it is somewhat counter-intuitive, on sparc, it is
727 	 * more efficient to place all the constants used in this
728 	 * function in an array and load the values out of the array
729 	 * than to manually load the constants.  this is because
730 	 * setting a register to a 32-bit value takes two ops in most
731 	 * cases: a `sethi' and an `or', but loading a 32-bit value
732 	 * from memory only takes one `ld' (or `lduw' on v9).  while
733 	 * this increases memory usage, the compiler can find enough
734 	 * other things to do while waiting to keep the pipeline does
735 	 * not stall.  additionally, it is likely that many of these
736 	 * constants are cached so that later accesses do not even go
737 	 * out to the bus.
738 	 *
739 	 * this array is declared `static' to keep the compiler from
740 	 * having to bcopy() this array onto the stack frame of
741 	 * SHA1Transform() each time it is called -- which is
742 	 * unacceptably expensive.
743 	 *
744 	 * the `const' is to ensure that callers are good citizens and
745 	 * do not try to munge the array.  since these routines are
746 	 * going to be called from inside multithreaded kernelland,
747 	 * this is a good safety check. -- `sha1_consts' will end up in
748 	 * .rodata.
749 	 *
750 	 * unfortunately, loading from an array in this manner hurts
751 	 * performance under intel.  so, there is a macro,
752 	 * SHA1_CONST(), used in SHA1Transform(), that either expands to
753 	 * a reference to this array, or to the actual constant,
754 	 * depending on what platform this code is compiled for.
755 	 */
756 
757 #if	defined(__sparc)
758 	static const uint32_t sha1_consts[] = {
759 		SHA1_CONST_0,	SHA1_CONST_1,	SHA1_CONST_2,	SHA1_CONST_3,
760 	};
761 #endif
762 
763 	/*
764 	 * general optimization:
765 	 *
766 	 * use individual integers instead of using an array.  this is a
767 	 * win, although the amount it wins by seems to vary quite a bit.
768 	 */
769 
770 	uint32_t	w_0, w_1, w_2,  w_3,  w_4,  w_5,  w_6,  w_7;
771 	uint32_t	w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;
772 
773 	/*
774 	 * sparc optimization:
775 	 *
776 	 * if `block' is already aligned on a 4-byte boundary, use
777 	 * LOAD_BIG_32() directly.  otherwise, bcopy() into a
778 	 * buffer that *is* aligned on a 4-byte boundary and then do
779 	 * the LOAD_BIG_32() on that buffer.  benchmarks have shown
780 	 * that using the bcopy() is better than loading the bytes
781 	 * individually and doing the endian-swap by hand.
782 	 *
783 	 * even though it's quite tempting to assign to do:
784 	 *
785 	 * blk = bcopy(ctx->buf_un.buf32, blk, sizeof (ctx->buf_un.buf32));
786 	 *
787 	 * and only have one set of LOAD_BIG_32()'s, the compiler
788 	 * *does not* like that, so please resist the urge.
789 	 */
790 
791 #if	defined(__sparc)
792 	if ((uintptr_t)blk & 0x3) {		/* not 4-byte aligned? */
793 		bcopy(blk, ctx->buf_un.buf32,  sizeof (ctx->buf_un.buf32));
794 		w_15 = LOAD_BIG_32(ctx->buf_un.buf32 + 15);
795 		w_14 = LOAD_BIG_32(ctx->buf_un.buf32 + 14);
796 		w_13 = LOAD_BIG_32(ctx->buf_un.buf32 + 13);
797 		w_12 = LOAD_BIG_32(ctx->buf_un.buf32 + 12);
798 		w_11 = LOAD_BIG_32(ctx->buf_un.buf32 + 11);
799 		w_10 = LOAD_BIG_32(ctx->buf_un.buf32 + 10);
800 		w_9  = LOAD_BIG_32(ctx->buf_un.buf32 +  9);
801 		w_8  = LOAD_BIG_32(ctx->buf_un.buf32 +  8);
802 		w_7  = LOAD_BIG_32(ctx->buf_un.buf32 +  7);
803 		w_6  = LOAD_BIG_32(ctx->buf_un.buf32 +  6);
804 		w_5  = LOAD_BIG_32(ctx->buf_un.buf32 +  5);
805 		w_4  = LOAD_BIG_32(ctx->buf_un.buf32 +  4);
806 		w_3  = LOAD_BIG_32(ctx->buf_un.buf32 +  3);
807 		w_2  = LOAD_BIG_32(ctx->buf_un.buf32 +  2);
808 		w_1  = LOAD_BIG_32(ctx->buf_un.buf32 +  1);
809 		w_0  = LOAD_BIG_32(ctx->buf_un.buf32 +  0);
810 	} else {
811 		/*LINTED*/
812 		w_15 = LOAD_BIG_32(blk + 60);
813 		/*LINTED*/
814 		w_14 = LOAD_BIG_32(blk + 56);
815 		/*LINTED*/
816 		w_13 = LOAD_BIG_32(blk + 52);
817 		/*LINTED*/
818 		w_12 = LOAD_BIG_32(blk + 48);
819 		/*LINTED*/
820 		w_11 = LOAD_BIG_32(blk + 44);
821 		/*LINTED*/
822 		w_10 = LOAD_BIG_32(blk + 40);
823 		/*LINTED*/
824 		w_9  = LOAD_BIG_32(blk + 36);
825 		/*LINTED*/
826 		w_8  = LOAD_BIG_32(blk + 32);
827 		/*LINTED*/
828 		w_7  = LOAD_BIG_32(blk + 28);
829 		/*LINTED*/
830 		w_6  = LOAD_BIG_32(blk + 24);
831 		/*LINTED*/
832 		w_5  = LOAD_BIG_32(blk + 20);
833 		/*LINTED*/
834 		w_4  = LOAD_BIG_32(blk + 16);
835 		/*LINTED*/
836 		w_3  = LOAD_BIG_32(blk + 12);
837 		/*LINTED*/
838 		w_2  = LOAD_BIG_32(blk +  8);
839 		/*LINTED*/
840 		w_1  = LOAD_BIG_32(blk +  4);
841 		/*LINTED*/
842 		w_0  = LOAD_BIG_32(blk +  0);
843 	}
844 #else
845 	w_15 = LOAD_BIG_32(blk + 60);
846 	w_14 = LOAD_BIG_32(blk + 56);
847 	w_13 = LOAD_BIG_32(blk + 52);
848 	w_12 = LOAD_BIG_32(blk + 48);
849 	w_11 = LOAD_BIG_32(blk + 44);
850 	w_10 = LOAD_BIG_32(blk + 40);
851 	w_9  = LOAD_BIG_32(blk + 36);
852 	w_8  = LOAD_BIG_32(blk + 32);
853 	w_7  = LOAD_BIG_32(blk + 28);
854 	w_6  = LOAD_BIG_32(blk + 24);
855 	w_5  = LOAD_BIG_32(blk + 20);
856 	w_4  = LOAD_BIG_32(blk + 16);
857 	w_3  = LOAD_BIG_32(blk + 12);
858 	w_2  = LOAD_BIG_32(blk +  8);
859 	w_1  = LOAD_BIG_32(blk +  4);
860 	w_0  = LOAD_BIG_32(blk +  0);
861 #endif
862 	/*
863 	 * general optimization:
864 	 *
865 	 * even though this approach is described in the standard as
866 	 * being slower algorithmically, it is 30-40% faster than the
867 	 * "faster" version under SPARC, because this version has more
868 	 * of the constraints specified at compile-time and uses fewer
869 	 * variables (and therefore has better register utilization)
870 	 * than its "speedier" brother.  (i've tried both, trust me)
871 	 *
872 	 * for either method given in the spec, there is an "assignment"
873 	 * phase where the following takes place:
874 	 *
875 	 *	tmp = (main_computation);
876 	 *	e = d; d = c; c = rotate_left(b, 30); b = a; a = tmp;
877 	 *
878 	 * we can make the algorithm go faster by not doing this work,
879 	 * but just pretending that `d' is now `e', etc. this works
880 	 * really well and obviates the need for a temporary variable.
881 	 * however, we still explictly perform the rotate action,
882 	 * since it is cheaper on SPARC to do it once than to have to
883 	 * do it over and over again.
884 	 */
885 
886 	/* round 1 */
887 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_0 + SHA1_CONST(0); /* 0 */
888 	b = ROTATE_LEFT(b, 30);
889 
890 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_1 + SHA1_CONST(0); /* 1 */
891 	a = ROTATE_LEFT(a, 30);
892 
893 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_2 + SHA1_CONST(0); /* 2 */
894 	e = ROTATE_LEFT(e, 30);
895 
896 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_3 + SHA1_CONST(0); /* 3 */
897 	d = ROTATE_LEFT(d, 30);
898 
899 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_4 + SHA1_CONST(0); /* 4 */
900 	c = ROTATE_LEFT(c, 30);
901 
902 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_5 + SHA1_CONST(0); /* 5 */
903 	b = ROTATE_LEFT(b, 30);
904 
905 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_6 + SHA1_CONST(0); /* 6 */
906 	a = ROTATE_LEFT(a, 30);
907 
908 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_7 + SHA1_CONST(0); /* 7 */
909 	e = ROTATE_LEFT(e, 30);
910 
911 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_8 + SHA1_CONST(0); /* 8 */
912 	d = ROTATE_LEFT(d, 30);
913 
914 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_9 + SHA1_CONST(0); /* 9 */
915 	c = ROTATE_LEFT(c, 30);
916 
917 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_10 + SHA1_CONST(0); /* 10 */
918 	b = ROTATE_LEFT(b, 30);
919 
920 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_11 + SHA1_CONST(0); /* 11 */
921 	a = ROTATE_LEFT(a, 30);
922 
923 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_12 + SHA1_CONST(0); /* 12 */
924 	e = ROTATE_LEFT(e, 30);
925 
926 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_13 + SHA1_CONST(0); /* 13 */
927 	d = ROTATE_LEFT(d, 30);
928 
929 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_14 + SHA1_CONST(0); /* 14 */
930 	c = ROTATE_LEFT(c, 30);
931 
932 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_15 + SHA1_CONST(0); /* 15 */
933 	b = ROTATE_LEFT(b, 30);
934 
935 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 16 */
936 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_0 + SHA1_CONST(0);
937 	a = ROTATE_LEFT(a, 30);
938 
939 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 17 */
940 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_1 + SHA1_CONST(0);
941 	e = ROTATE_LEFT(e, 30);
942 
943 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 18 */
944 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_2 + SHA1_CONST(0);
945 	d = ROTATE_LEFT(d, 30);
946 
947 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 19 */
948 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_3 + SHA1_CONST(0);
949 	c = ROTATE_LEFT(c, 30);
950 
951 	/* round 2 */
952 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 20 */
953 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_4 + SHA1_CONST(1);
954 	b = ROTATE_LEFT(b, 30);
955 
956 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 21 */
957 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_5 + SHA1_CONST(1);
958 	a = ROTATE_LEFT(a, 30);
959 
960 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 22 */
961 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_6 + SHA1_CONST(1);
962 	e = ROTATE_LEFT(e, 30);
963 
964 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 23 */
965 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_7 + SHA1_CONST(1);
966 	d = ROTATE_LEFT(d, 30);
967 
968 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 24 */
969 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_8 + SHA1_CONST(1);
970 	c = ROTATE_LEFT(c, 30);
971 
972 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 25 */
973 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_9 + SHA1_CONST(1);
974 	b = ROTATE_LEFT(b, 30);
975 
976 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 26 */
977 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_10 + SHA1_CONST(1);
978 	a = ROTATE_LEFT(a, 30);
979 
980 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 27 */
981 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_11 + SHA1_CONST(1);
982 	e = ROTATE_LEFT(e, 30);
983 
984 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 28 */
985 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_12 + SHA1_CONST(1);
986 	d = ROTATE_LEFT(d, 30);
987 
988 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 29 */
989 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_13 + SHA1_CONST(1);
990 	c = ROTATE_LEFT(c, 30);
991 
992 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 30 */
993 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_14 + SHA1_CONST(1);
994 	b = ROTATE_LEFT(b, 30);
995 
996 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 31 */
997 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_15 + SHA1_CONST(1);
998 	a = ROTATE_LEFT(a, 30);
999 
1000 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 32 */
1001 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_0 + SHA1_CONST(1);
1002 	e = ROTATE_LEFT(e, 30);
1003 
1004 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 33 */
1005 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_1 + SHA1_CONST(1);
1006 	d = ROTATE_LEFT(d, 30);
1007 
1008 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 34 */
1009 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_2 + SHA1_CONST(1);
1010 	c = ROTATE_LEFT(c, 30);
1011 
1012 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 35 */
1013 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_3 + SHA1_CONST(1);
1014 	b = ROTATE_LEFT(b, 30);
1015 
1016 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 36 */
1017 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_4 + SHA1_CONST(1);
1018 	a = ROTATE_LEFT(a, 30);
1019 
1020 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 37 */
1021 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_5 + SHA1_CONST(1);
1022 	e = ROTATE_LEFT(e, 30);
1023 
1024 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 38 */
1025 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_6 + SHA1_CONST(1);
1026 	d = ROTATE_LEFT(d, 30);
1027 
1028 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 39 */
1029 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_7 + SHA1_CONST(1);
1030 	c = ROTATE_LEFT(c, 30);
1031 
1032 	/* round 3 */
1033 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 40 */
1034 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_8 + SHA1_CONST(2);
1035 	b = ROTATE_LEFT(b, 30);
1036 
1037 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 41 */
1038 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_9 + SHA1_CONST(2);
1039 	a = ROTATE_LEFT(a, 30);
1040 
1041 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 42 */
1042 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_10 + SHA1_CONST(2);
1043 	e = ROTATE_LEFT(e, 30);
1044 
1045 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 43 */
1046 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_11 + SHA1_CONST(2);
1047 	d = ROTATE_LEFT(d, 30);
1048 
1049 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 44 */
1050 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_12 + SHA1_CONST(2);
1051 	c = ROTATE_LEFT(c, 30);
1052 
1053 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 45 */
1054 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_13 + SHA1_CONST(2);
1055 	b = ROTATE_LEFT(b, 30);
1056 
1057 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 46 */
1058 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_14 + SHA1_CONST(2);
1059 	a = ROTATE_LEFT(a, 30);
1060 
1061 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 47 */
1062 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_15 + SHA1_CONST(2);
1063 	e = ROTATE_LEFT(e, 30);
1064 
1065 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 48 */
1066 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_0 + SHA1_CONST(2);
1067 	d = ROTATE_LEFT(d, 30);
1068 
1069 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 49 */
1070 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_1 + SHA1_CONST(2);
1071 	c = ROTATE_LEFT(c, 30);
1072 
1073 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 50 */
1074 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_2 + SHA1_CONST(2);
1075 	b = ROTATE_LEFT(b, 30);
1076 
1077 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 51 */
1078 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_3 + SHA1_CONST(2);
1079 	a = ROTATE_LEFT(a, 30);
1080 
1081 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 52 */
1082 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_4 + SHA1_CONST(2);
1083 	e = ROTATE_LEFT(e, 30);
1084 
1085 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 53 */
1086 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_5 + SHA1_CONST(2);
1087 	d = ROTATE_LEFT(d, 30);
1088 
1089 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 54 */
1090 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_6 + SHA1_CONST(2);
1091 	c = ROTATE_LEFT(c, 30);
1092 
1093 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 55 */
1094 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_7 + SHA1_CONST(2);
1095 	b = ROTATE_LEFT(b, 30);
1096 
1097 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 56 */
1098 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_8 + SHA1_CONST(2);
1099 	a = ROTATE_LEFT(a, 30);
1100 
1101 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 57 */
1102 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_9 + SHA1_CONST(2);
1103 	e = ROTATE_LEFT(e, 30);
1104 
1105 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 58 */
1106 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_10 + SHA1_CONST(2);
1107 	d = ROTATE_LEFT(d, 30);
1108 
1109 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 59 */
1110 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_11 + SHA1_CONST(2);
1111 	c = ROTATE_LEFT(c, 30);
1112 
1113 	/* round 4 */
1114 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 60 */
1115 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_12 + SHA1_CONST(3);
1116 	b = ROTATE_LEFT(b, 30);
1117 
1118 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 61 */
1119 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_13 + SHA1_CONST(3);
1120 	a = ROTATE_LEFT(a, 30);
1121 
1122 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 62 */
1123 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_14 + SHA1_CONST(3);
1124 	e = ROTATE_LEFT(e, 30);
1125 
1126 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 63 */
1127 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_15 + SHA1_CONST(3);
1128 	d = ROTATE_LEFT(d, 30);
1129 
1130 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 64 */
1131 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_0 + SHA1_CONST(3);
1132 	c = ROTATE_LEFT(c, 30);
1133 
1134 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 65 */
1135 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_1 + SHA1_CONST(3);
1136 	b = ROTATE_LEFT(b, 30);
1137 
1138 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 66 */
1139 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_2 + SHA1_CONST(3);
1140 	a = ROTATE_LEFT(a, 30);
1141 
1142 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 67 */
1143 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_3 + SHA1_CONST(3);
1144 	e = ROTATE_LEFT(e, 30);
1145 
1146 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 68 */
1147 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_4 + SHA1_CONST(3);
1148 	d = ROTATE_LEFT(d, 30);
1149 
1150 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 69 */
1151 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_5 + SHA1_CONST(3);
1152 	c = ROTATE_LEFT(c, 30);
1153 
1154 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 70 */
1155 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_6 + SHA1_CONST(3);
1156 	b = ROTATE_LEFT(b, 30);
1157 
1158 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 71 */
1159 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_7 + SHA1_CONST(3);
1160 	a = ROTATE_LEFT(a, 30);
1161 
1162 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 72 */
1163 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_8 + SHA1_CONST(3);
1164 	e = ROTATE_LEFT(e, 30);
1165 
1166 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 73 */
1167 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_9 + SHA1_CONST(3);
1168 	d = ROTATE_LEFT(d, 30);
1169 
1170 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 74 */
1171 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_10 + SHA1_CONST(3);
1172 	c = ROTATE_LEFT(c, 30);
1173 
1174 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 75 */
1175 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_11 + SHA1_CONST(3);
1176 	b = ROTATE_LEFT(b, 30);
1177 
1178 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 76 */
1179 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_12 + SHA1_CONST(3);
1180 	a = ROTATE_LEFT(a, 30);
1181 
1182 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 77 */
1183 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_13 + SHA1_CONST(3);
1184 	e = ROTATE_LEFT(e, 30);
1185 
1186 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 78 */
1187 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_14 + SHA1_CONST(3);
1188 	d = ROTATE_LEFT(d, 30);
1189 
1190 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 79 */
1191 
1192 	ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_15 +
1193 	    SHA1_CONST(3);
1194 	ctx->state[1] += b;
1195 	ctx->state[2] += ROTATE_LEFT(c, 30);
1196 	ctx->state[3] += d;
1197 	ctx->state[4] += e;
1198 
1199 	/* zeroize sensitive information */
1200 	w_0 = w_1 = w_2 = w_3 = w_4 = w_5 = w_6 = w_7 = w_8 = 0;
1201 	w_9 = w_10 = w_11 = w_12 = w_13 = w_14 = w_15 = 0;
1202 }
1203 
1204 /*
1205  * devpro compiler optimization:
1206  *
1207  * the compiler can generate better code if it knows that `input' and
1208  * `output' do not point to the same source.  there is no portable
1209  * way to tell the compiler this, but the sun compiler recognizes the
1210  * `_Restrict' keyword to indicate this condition.  use it if possible.
1211  */
1212 
1213 #ifdef	__RESTRICT
1214 #define	restrict	_Restrict
1215 #else
1216 #define	restrict	/* nothing */
1217 #endif
1218 
1219 /*
1220  * Encode()
1221  *
1222  * purpose: to convert a list of numbers from little endian to big endian
1223  *   input: uint8_t *	: place to store the converted big endian numbers
1224  *	    uint32_t *	: place to get numbers to convert from
1225  *          size_t	: the length of the input in bytes
1226  *  output: void
1227  */
1228 
1229 static void
1230 Encode(uint8_t *restrict output, uint32_t *restrict input, size_t len)
1231 {
1232 	size_t		i, j;
1233 
1234 #if	defined(__sparc)
1235 	if (IS_P2ALIGNED(output, sizeof (uint32_t))) {
1236 		for (i = 0, j = 0; j < len; i++, j += 4) {
1237 			/* LINTED: pointer alignment */
1238 			*((uint32_t *)(output + j)) = input[i];
1239 		}
1240 	} else {
1241 #endif	/* little endian -- will work on big endian, but slowly */
1242 		for (i = 0, j = 0; j < len; i++, j += 4) {
1243 			output[j]	= (input[i] >> 24) & 0xff;
1244 			output[j + 1]	= (input[i] >> 16) & 0xff;
1245 			output[j + 2]	= (input[i] >>  8) & 0xff;
1246 			output[j + 3]	= input[i] & 0xff;
1247 		}
1248 #if	defined(__sparc)
1249 	}
1250 #endif
1251 }
1252 
1253 
1254 #ifdef _KERNEL
1255 
1256 /*
1257  * KCF software provider control entry points.
1258  */
/*
 * KCF control entry point: report the status of this software
 * provider.  CRYPTO_PROVIDER_READY is stored unconditionally --
 * this provider is never busy or failed.
 */
/* ARGSUSED */
static void
sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}
1265 
1266 /*
1267  * KCF software provider digest entry points.
1268  */
1269 
1270 static int
1271 sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1272     crypto_req_handle_t req)
1273 {
1274 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
1275 		return (CRYPTO_MECHANISM_INVALID);
1276 
1277 	/*
1278 	 * Allocate and initialize SHA1 context.
1279 	 */
1280 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
1281 	    crypto_kmflag(req));
1282 	if (ctx->cc_provider_private == NULL)
1283 		return (CRYPTO_HOST_MEMORY);
1284 
1285 	PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
1286 	SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
1287 
1288 	return (CRYPTO_SUCCESS);
1289 }
1290 
1291 /*
1292  * Helper SHA1 digest update function for uio data.
1293  */
1294 static int
1295 sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
1296 {
1297 	off_t offset = data->cd_offset;
1298 	size_t length = data->cd_length;
1299 	uint_t vec_idx;
1300 	size_t cur_len;
1301 
1302 	/* we support only kernel buffer */
1303 	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
1304 		return (CRYPTO_ARGUMENTS_BAD);
1305 
1306 	/*
1307 	 * Jump to the first iovec containing data to be
1308 	 * digested.
1309 	 */
1310 	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
1311 	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
1312 	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len);
1313 	if (vec_idx == data->cd_uio->uio_iovcnt) {
1314 		/*
1315 		 * The caller specified an offset that is larger than the
1316 		 * total size of the buffers it provided.
1317 		 */
1318 		return (CRYPTO_DATA_LEN_RANGE);
1319 	}
1320 
1321 	/*
1322 	 * Now do the digesting on the iovecs.
1323 	 */
1324 	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
1325 		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
1326 		    offset, length);
1327 
1328 		SHA1Update(sha1_ctx,
1329 		    (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
1330 		    cur_len);
1331 
1332 		length -= cur_len;
1333 		vec_idx++;
1334 		offset = 0;
1335 	}
1336 
1337 	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
1338 		/*
1339 		 * The end of the specified iovec's was reached but
1340 		 * the length requested could not be processed, i.e.
1341 		 * The caller requested to digest more data than it provided.
1342 		 */
1343 		return (CRYPTO_DATA_LEN_RANGE);
1344 	}
1345 
1346 	return (CRYPTO_SUCCESS);
1347 }
1348 
1349 /*
1350  * Helper SHA1 digest final function for uio data.
1351  * digest_len is the length of the desired digest. If digest_len
1352  * is smaller than the default SHA1 digest length, the caller
1353  * must pass a scratch buffer, digest_scratch, which must
1354  * be at least SHA1_DIGEST_LENGTH bytes.
1355  */
1356 static int
1357 sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
1358     ulong_t digest_len, uchar_t *digest_scratch)
1359 {
1360 	off_t offset = digest->cd_offset;
1361 	uint_t vec_idx;
1362 
1363 	/* we support only kernel buffer */
1364 	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
1365 		return (CRYPTO_ARGUMENTS_BAD);
1366 
1367 	/*
1368 	 * Jump to the first iovec containing ptr to the digest to
1369 	 * be returned.
1370 	 */
1371 	for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
1372 	    vec_idx < digest->cd_uio->uio_iovcnt;
1373 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len);
1374 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
1375 		/*
1376 		 * The caller specified an offset that is
1377 		 * larger than the total size of the buffers
1378 		 * it provided.
1379 		 */
1380 		return (CRYPTO_DATA_LEN_RANGE);
1381 	}
1382 
1383 	if (offset + digest_len <=
1384 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
1385 		/*
1386 		 * The computed SHA1 digest will fit in the current
1387 		 * iovec.
1388 		 */
1389 		if (digest_len != SHA1_DIGEST_LENGTH) {
1390 			/*
1391 			 * The caller requested a short digest. Digest
1392 			 * into a scratch buffer and return to
1393 			 * the user only what was requested.
1394 			 */
1395 			SHA1Final(digest_scratch, sha1_ctx);
1396 			bcopy(digest_scratch, (uchar_t *)digest->
1397 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1398 			    digest_len);
1399 		} else {
1400 			SHA1Final((uchar_t *)digest->
1401 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1402 			    sha1_ctx);
1403 		}
1404 	} else {
1405 		/*
1406 		 * The computed digest will be crossing one or more iovec's.
1407 		 * This is bad performance-wise but we need to support it.
1408 		 * Allocate a small scratch buffer on the stack and
1409 		 * copy it piece meal to the specified digest iovec's.
1410 		 */
1411 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
1412 		off_t scratch_offset = 0;
1413 		size_t length = digest_len;
1414 		size_t cur_len;
1415 
1416 		SHA1Final(digest_tmp, sha1_ctx);
1417 
1418 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
1419 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
1420 			    offset, length);
1421 			bcopy(digest_tmp + scratch_offset,
1422 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
1423 			    cur_len);
1424 
1425 			length -= cur_len;
1426 			vec_idx++;
1427 			scratch_offset += cur_len;
1428 			offset = 0;
1429 		}
1430 
1431 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
1432 			/*
1433 			 * The end of the specified iovec's was reached but
1434 			 * the length requested could not be processed, i.e.
1435 			 * The caller requested to digest more data than it
1436 			 * provided.
1437 			 */
1438 			return (CRYPTO_DATA_LEN_RANGE);
1439 		}
1440 	}
1441 
1442 	return (CRYPTO_SUCCESS);
1443 }
1444 
1445 /*
1446  * Helper SHA1 digest update for mblk's.
1447  */
1448 static int
1449 sha1_digest_update_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *data)
1450 {
1451 	off_t offset = data->cd_offset;
1452 	size_t length = data->cd_length;
1453 	mblk_t *mp;
1454 	size_t cur_len;
1455 
1456 	/*
1457 	 * Jump to the first mblk_t containing data to be digested.
1458 	 */
1459 	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
1460 	    offset -= MBLKL(mp), mp = mp->b_cont);
1461 	if (mp == NULL) {
1462 		/*
1463 		 * The caller specified an offset that is larger than the
1464 		 * total size of the buffers it provided.
1465 		 */
1466 		return (CRYPTO_DATA_LEN_RANGE);
1467 	}
1468 
1469 	/*
1470 	 * Now do the digesting on the mblk chain.
1471 	 */
1472 	while (mp != NULL && length > 0) {
1473 		cur_len = MIN(MBLKL(mp) - offset, length);
1474 		SHA1Update(sha1_ctx, mp->b_rptr + offset, cur_len);
1475 		length -= cur_len;
1476 		offset = 0;
1477 		mp = mp->b_cont;
1478 	}
1479 
1480 	if (mp == NULL && length > 0) {
1481 		/*
1482 		 * The end of the mblk was reached but the length requested
1483 		 * could not be processed, i.e. The caller requested
1484 		 * to digest more data than it provided.
1485 		 */
1486 		return (CRYPTO_DATA_LEN_RANGE);
1487 	}
1488 
1489 	return (CRYPTO_SUCCESS);
1490 }
1491 
1492 /*
1493  * Helper SHA1 digest final for mblk's.
1494  * digest_len is the length of the desired digest. If digest_len
1495  * is smaller than the default SHA1 digest length, the caller
1496  * must pass a scratch buffer, digest_scratch, which must
1497  * be at least SHA1_DIGEST_LENGTH bytes.
1498  */
1499 static int
1500 sha1_digest_final_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
1501     ulong_t digest_len, uchar_t *digest_scratch)
1502 {
1503 	off_t offset = digest->cd_offset;
1504 	mblk_t *mp;
1505 
1506 	/*
1507 	 * Jump to the first mblk_t that will be used to store the digest.
1508 	 */
1509 	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
1510 	    offset -= MBLKL(mp), mp = mp->b_cont);
1511 	if (mp == NULL) {
1512 		/*
1513 		 * The caller specified an offset that is larger than the
1514 		 * total size of the buffers it provided.
1515 		 */
1516 		return (CRYPTO_DATA_LEN_RANGE);
1517 	}
1518 
1519 	if (offset + digest_len <= MBLKL(mp)) {
1520 		/*
1521 		 * The computed SHA1 digest will fit in the current mblk.
1522 		 * Do the SHA1Final() in-place.
1523 		 */
1524 		if (digest_len != SHA1_DIGEST_LENGTH) {
1525 			/*
1526 			 * The caller requested a short digest. Digest
1527 			 * into a scratch buffer and return to
1528 			 * the user only what was requested.
1529 			 */
1530 			SHA1Final(digest_scratch, sha1_ctx);
1531 			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
1532 		} else {
1533 			SHA1Final(mp->b_rptr + offset, sha1_ctx);
1534 		}
1535 	} else {
1536 		/*
1537 		 * The computed digest will be crossing one or more mblk's.
1538 		 * This is bad performance-wise but we need to support it.
1539 		 * Allocate a small scratch buffer on the stack and
1540 		 * copy it piece meal to the specified digest iovec's.
1541 		 */
1542 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
1543 		off_t scratch_offset = 0;
1544 		size_t length = digest_len;
1545 		size_t cur_len;
1546 
1547 		SHA1Final(digest_tmp, sha1_ctx);
1548 
1549 		while (mp != NULL && length > 0) {
1550 			cur_len = MIN(MBLKL(mp) - offset, length);
1551 			bcopy(digest_tmp + scratch_offset,
1552 			    mp->b_rptr + offset, cur_len);
1553 
1554 			length -= cur_len;
1555 			mp = mp->b_cont;
1556 			scratch_offset += cur_len;
1557 			offset = 0;
1558 		}
1559 
1560 		if (mp == NULL && length > 0) {
1561 			/*
1562 			 * The end of the specified mblk was reached but
1563 			 * the length requested could not be processed, i.e.
1564 			 * The caller requested to digest more data than it
1565 			 * provided.
1566 			 */
1567 			return (CRYPTO_DATA_LEN_RANGE);
1568 		}
1569 	}
1570 
1571 	return (CRYPTO_SUCCESS);
1572 }
1573 
/*
 * KCF entry point: single-part digest.  Absorbs all of `data', then
 * emits the SHA1 digest into `digest'.  The private context allocated
 * by sha1_digest_init() is freed on every path except the
 * buffer-too-small early return, which leaves it intact so the caller
 * can retry with a larger output buffer.
 */
/* ARGSUSED */
static int
sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do the SHA1 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, free context and bail */
		kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
		ctx->cc_provider_private = NULL;
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do a SHA1 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
	ctx->cc_provider_private = NULL;
	return (ret);
}
1655 
1656 /* ARGSUSED */
1657 static int
1658 sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
1659     crypto_req_handle_t req)
1660 {
1661 	int ret = CRYPTO_SUCCESS;
1662 
1663 	ASSERT(ctx->cc_provider_private != NULL);
1664 
1665 	/*
1666 	 * Do the SHA1 update on the specified input data.
1667 	 */
1668 	switch (data->cd_format) {
1669 	case CRYPTO_DATA_RAW:
1670 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1671 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1672 		    data->cd_length);
1673 		break;
1674 	case CRYPTO_DATA_UIO:
1675 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1676 		    data);
1677 		break;
1678 	case CRYPTO_DATA_MBLK:
1679 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1680 		    data);
1681 		break;
1682 	default:
1683 		ret = CRYPTO_ARGUMENTS_BAD;
1684 	}
1685 
1686 	return (ret);
1687 }
1688 
/*
 * KCF entry point: finish a multi-part SHA1 digest operation, emitting
 * the digest into `digest'.  The private context is freed on every
 * path except the buffer-too-small early return, which leaves it
 * intact so the caller can retry with a larger output buffer.
 */
/* ARGSUSED */
static int
sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do a SHA1 final.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}
1741 
1742 /* ARGSUSED */
1743 static int
1744 sha1_digest_atomic(crypto_provider_handle_t provider,
1745     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1746     crypto_data_t *data, crypto_data_t *digest,
1747     crypto_req_handle_t req)
1748 {
1749 	int ret = CRYPTO_SUCCESS;
1750 	SHA1_CTX sha1_ctx;
1751 
1752 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
1753 		return (CRYPTO_MECHANISM_INVALID);
1754 
1755 	/*
1756 	 * Do the SHA1 init.
1757 	 */
1758 	SHA1Init(&sha1_ctx);
1759 
1760 	/*
1761 	 * Do the SHA1 update on the specified input data.
1762 	 */
1763 	switch (data->cd_format) {
1764 	case CRYPTO_DATA_RAW:
1765 		SHA1Update(&sha1_ctx,
1766 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1767 		    data->cd_length);
1768 		break;
1769 	case CRYPTO_DATA_UIO:
1770 		ret = sha1_digest_update_uio(&sha1_ctx, data);
1771 		break;
1772 	case CRYPTO_DATA_MBLK:
1773 		ret = sha1_digest_update_mblk(&sha1_ctx, data);
1774 		break;
1775 	default:
1776 		ret = CRYPTO_ARGUMENTS_BAD;
1777 	}
1778 
1779 	if (ret != CRYPTO_SUCCESS) {
1780 		/* the update failed, bail */
1781 		digest->cd_length = 0;
1782 		return (ret);
1783 	}
1784 
1785 	/*
1786 	 * Do a SHA1 final, must be done separately since the digest
1787 	 * type can be different than the input data type.
1788 	 */
1789 	switch (digest->cd_format) {
1790 	case CRYPTO_DATA_RAW:
1791 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
1792 		    digest->cd_offset, &sha1_ctx);
1793 		break;
1794 	case CRYPTO_DATA_UIO:
1795 		ret = sha1_digest_final_uio(&sha1_ctx, digest,
1796 		    SHA1_DIGEST_LENGTH, NULL);
1797 		break;
1798 	case CRYPTO_DATA_MBLK:
1799 		ret = sha1_digest_final_mblk(&sha1_ctx, digest,
1800 		    SHA1_DIGEST_LENGTH, NULL);
1801 		break;
1802 	default:
1803 		ret = CRYPTO_ARGUMENTS_BAD;
1804 	}
1805 
1806 	if (ret == CRYPTO_SUCCESS) {
1807 		digest->cd_length = SHA1_DIGEST_LENGTH;
1808 	} else {
1809 		digest->cd_length = 0;
1810 	}
1811 
1812 	return (ret);
1813 }
1814 
1815 /*
1816  * KCF software provider mac entry points.
1817  *
1818  * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
1819  *
1820  * Init:
1821  * The initialization routine initializes what we denote
1822  * as the inner and outer contexts by doing
1823  * - for inner context: SHA1(key XOR ipad)
1824  * - for outer context: SHA1(key XOR opad)
1825  *
1826  * Update:
1827  * Each subsequent SHA1 HMAC update will result in an
1828  * update of the inner context with the specified data.
1829  *
1830  * Final:
1831  * The SHA1 HMAC final will do a SHA1 final operation on the
1832  * inner context, and the resulting digest will be used
1833  * as the data for an update on the outer context. Last
1834  * but not least, a SHA1 final on the outer context will
1835  * be performed to obtain the SHA1 HMAC digest to return
1836  * to the user.
1837  */
1838 
1839 /*
1840  * Initialize a SHA1-HMAC context.
1841  */
1842 static void
1843 sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
1844 {
1845 	uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
1846 	uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
1847 	uint_t i;
1848 
1849 	bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
1850 	bzero(opad, SHA1_HMAC_BLOCK_SIZE);
1851 
1852 	bcopy(keyval, ipad, length_in_bytes);
1853 	bcopy(keyval, opad, length_in_bytes);
1854 
1855 	/* XOR key with ipad (0x36) and opad (0x5c) */
1856 	for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
1857 		ipad[i] ^= 0x36363636;
1858 		opad[i] ^= 0x5c5c5c5c;
1859 	}
1860 
1861 	/* perform SHA1 on ipad */
1862 	SHA1Init(&ctx->hc_icontext);
1863 	SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
1864 
1865 	/* perform SHA1 on opad */
1866 	SHA1Init(&ctx->hc_ocontext);
1867 	SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
1868 }
1869 
1870 /*
1871  */
1872 static int
1873 sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1874     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
1875     crypto_req_handle_t req)
1876 {
1877 	int ret = CRYPTO_SUCCESS;
1878 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1879 
1880 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1881 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1882 		return (CRYPTO_MECHANISM_INVALID);
1883 
1884 	/* Add support for key by attributes (RFE 4706552) */
1885 	if (key->ck_format != CRYPTO_KEY_RAW)
1886 		return (CRYPTO_ARGUMENTS_BAD);
1887 
1888 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
1889 	    crypto_kmflag(req));
1890 	if (ctx->cc_provider_private == NULL)
1891 		return (CRYPTO_HOST_MEMORY);
1892 
1893 	if (ctx_template != NULL) {
1894 		/* reuse context template */
1895 		bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
1896 		    sizeof (sha1_hmac_ctx_t));
1897 	} else {
1898 		/* no context template, compute context */
1899 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1900 			uchar_t digested_key[SHA1_DIGEST_LENGTH];
1901 			sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
1902 
1903 			/*
1904 			 * Hash the passed-in key to get a smaller key.
1905 			 * The inner context is used since it hasn't been
1906 			 * initialized yet.
1907 			 */
1908 			PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
1909 			    key->ck_data, keylen_in_bytes, digested_key);
1910 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
1911 			    digested_key, SHA1_DIGEST_LENGTH);
1912 		} else {
1913 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
1914 			    key->ck_data, keylen_in_bytes);
1915 		}
1916 	}
1917 
1918 	/*
1919 	 * Get the mechanism parameters, if applicable.
1920 	 */
1921 	PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
1922 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1923 		if (mechanism->cm_param == NULL ||
1924 		    mechanism->cm_param_len != sizeof (ulong_t))
1925 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1926 		PROV_SHA1_GET_DIGEST_LEN(mechanism,
1927 		    PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
1928 		if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
1929 		    SHA1_DIGEST_LENGTH)
1930 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1931 	}
1932 
1933 	if (ret != CRYPTO_SUCCESS) {
1934 		bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1935 		kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1936 		ctx->cc_provider_private = NULL;
1937 	}
1938 
1939 	return (ret);
1940 }
1941 
1942 /* ARGSUSED */
1943 static int
1944 sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
1945 {
1946 	int ret = CRYPTO_SUCCESS;
1947 
1948 	ASSERT(ctx->cc_provider_private != NULL);
1949 
1950 	/*
1951 	 * Do a SHA1 update of the inner context using the specified
1952 	 * data.
1953 	 */
1954 	switch (data->cd_format) {
1955 	case CRYPTO_DATA_RAW:
1956 		SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
1957 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1958 		    data->cd_length);
1959 		break;
1960 	case CRYPTO_DATA_UIO:
1961 		ret = sha1_digest_update_uio(
1962 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
1963 		break;
1964 	case CRYPTO_DATA_MBLK:
1965 		ret = sha1_digest_update_mblk(
1966 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
1967 		break;
1968 	default:
1969 		ret = CRYPTO_ARGUMENTS_BAD;
1970 	}
1971 
1972 	return (ret);
1973 }
1974 
1975 /* ARGSUSED */
1976 static int
1977 sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
1978 {
1979 	int ret = CRYPTO_SUCCESS;
1980 	uchar_t digest[SHA1_DIGEST_LENGTH];
1981 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
1982 
1983 	ASSERT(ctx->cc_provider_private != NULL);
1984 
1985 	if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
1986 	    SHA1_HMAC_GEN_MECH_INFO_TYPE)
1987 		digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
1988 
1989 	/*
1990 	 * We need to just return the length needed to store the output.
1991 	 * We should not destroy the context for the following cases.
1992 	 */
1993 	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
1994 		mac->cd_length = digest_len;
1995 		return (CRYPTO_BUFFER_TOO_SMALL);
1996 	}
1997 
1998 	/*
1999 	 * Do a SHA1 final on the inner context.
2000 	 */
2001 	SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);
2002 
2003 	/*
2004 	 * Do a SHA1 update on the outer context, feeding the inner
2005 	 * digest as data.
2006 	 */
2007 	SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
2008 	    SHA1_DIGEST_LENGTH);
2009 
2010 	/*
2011 	 * Do a SHA1 final on the outer context, storing the computing
2012 	 * digest in the users buffer.
2013 	 */
2014 	switch (mac->cd_format) {
2015 	case CRYPTO_DATA_RAW:
2016 		if (digest_len != SHA1_DIGEST_LENGTH) {
2017 			/*
2018 			 * The caller requested a short digest. Digest
2019 			 * into a scratch buffer and return to
2020 			 * the user only what was requested.
2021 			 */
2022 			SHA1Final(digest,
2023 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
2024 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
2025 			    mac->cd_offset, digest_len);
2026 		} else {
2027 			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
2028 			    mac->cd_offset,
2029 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
2030 		}
2031 		break;
2032 	case CRYPTO_DATA_UIO:
2033 		ret = sha1_digest_final_uio(
2034 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
2035 		    digest_len, digest);
2036 		break;
2037 	case CRYPTO_DATA_MBLK:
2038 		ret = sha1_digest_final_mblk(
2039 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
2040 		    digest_len, digest);
2041 		break;
2042 	default:
2043 		ret = CRYPTO_ARGUMENTS_BAD;
2044 	}
2045 
2046 	if (ret == CRYPTO_SUCCESS) {
2047 		mac->cd_length = digest_len;
2048 	} else {
2049 		mac->cd_length = 0;
2050 	}
2051 
2052 	bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
2053 	kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
2054 	ctx->cc_provider_private = NULL;
2055 
2056 	return (ret);
2057 }
2058 
/*
 * Do a SHA1 update of the inner context of the sha1_hmac_ctx_t `ctx'
 * (passed by name, not by pointer) with the contents of crypto_data_t
 * `data', dispatching on the data format.  `ret' is set to
 * CRYPTO_ARGUMENTS_BAD for an unknown format, or to the return value
 * of the uio/mblk update helper.  Shared by the atomic mac entry
 * points below.
 */
#define	SHA1_MAC_UPDATE(data, ctx, ret) {				\
	switch (data->cd_format) {					\
	case CRYPTO_DATA_RAW:						\
		SHA1Update(&(ctx).hc_icontext,				\
		    (uint8_t *)data->cd_raw.iov_base +			\
		    data->cd_offset, data->cd_length);			\
		break;							\
	case CRYPTO_DATA_UIO:						\
		ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
		break;							\
	case CRYPTO_DATA_MBLK:						\
		ret = sha1_digest_update_mblk(&(ctx).hc_icontext,	\
		    data);						\
		break;							\
	default:							\
		ret = CRYPTO_ARGUMENTS_BAD;				\
	}								\
}
2077 
/*
 * KCF software provider mac entry point, atomic version: compute a
 * SHA1-HMAC over `data' in a single call, using an HMAC context built
 * on the stack (from `ctx_template' when supplied, otherwise from the
 * raw `key').  Handles both the fixed-length SHA1-HMAC mechanism and
 * the general-length variant, which truncates the digest to the
 * caller-specified length.  On success mac->cd_length is set to the
 * number of digest bytes produced; on failure it is set to 0.  The
 * stack context is zeroized on every exit path past initialization.
 */
/* ARGSUSED */
static int
sha1_mac_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA1_DIGEST_LENGTH];
	sha1_hmac_ctx_t sha1_hmac_ctx;
	uint32_t digest_len = SHA1_DIGEST_LENGTH;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/* Add support for key by attributes (RFE 4706552) */
	if (key->ck_format != CRYPTO_KEY_RAW)
		return (CRYPTO_ARGUMENTS_BAD);

	if (ctx_template != NULL) {
		/* reuse context template */
		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
	} else {
		/* no context template, initialize context */
		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
			    key->ck_data, keylen_in_bytes, digest);
			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
			    SHA1_DIGEST_LENGTH);
		} else {
			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
			    keylen_in_bytes);
		}
	}

	/* get the mechanism parameters, if applicable */
	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t)) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
		if (digest_len > SHA1_DIGEST_LENGTH) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
	}

	/* do a SHA1 update of the inner context using the specified data */
	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
	if (ret != CRYPTO_SUCCESS)
		/* the update failed, free context and bail */
		goto bail;

	/*
	 * Do a SHA1 final on the inner context.
	 */
	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);

	/*
	 * Do an SHA1 update on the outer context, feeding the inner
	 * digest as data.
	 */
	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);

	/*
	 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		if (digest_len != SHA1_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest_len);
		} else {
			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(&sha1_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		mac->cd_length = digest_len;
	} else {
		mac->cd_length = 0;
	}
	/* Extra paranoia: zeroize the context on the stack */
	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));

	return (ret);
bail:
	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
	mac->cd_length = 0;
	return (ret);
}
2197 
2198 /* ARGSUSED */
2199 static int
2200 sha1_mac_verify_atomic(crypto_provider_handle_t provider,
2201     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
2202     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
2203     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
2204 {
2205 	int ret = CRYPTO_SUCCESS;
2206 	uchar_t digest[SHA1_DIGEST_LENGTH];
2207 	sha1_hmac_ctx_t sha1_hmac_ctx;
2208 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
2209 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
2210 
2211 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
2212 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
2213 		return (CRYPTO_MECHANISM_INVALID);
2214 
2215 	/* Add support for key by attributes (RFE 4706552) */
2216 	if (key->ck_format != CRYPTO_KEY_RAW)
2217 		return (CRYPTO_ARGUMENTS_BAD);
2218 
2219 	if (ctx_template != NULL) {
2220 		/* reuse context template */
2221 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
2222 	} else {
2223 		/* no context template, initialize context */
2224 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
2225 			/*
2226 			 * Hash the passed-in key to get a smaller key.
2227 			 * The inner context is used since it hasn't been
2228 			 * initialized yet.
2229 			 */
2230 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
2231 			    key->ck_data, keylen_in_bytes, digest);
2232 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
2233 			    SHA1_DIGEST_LENGTH);
2234 		} else {
2235 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
2236 			    keylen_in_bytes);
2237 		}
2238 	}
2239 
2240 	/* get the mechanism parameters, if applicable */
2241 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
2242 		if (mechanism->cm_param == NULL ||
2243 		    mechanism->cm_param_len != sizeof (ulong_t)) {
2244 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
2245 			goto bail;
2246 		}
2247 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
2248 		if (digest_len > SHA1_DIGEST_LENGTH) {
2249 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
2250 			goto bail;
2251 		}
2252 	}
2253 
2254 	if (mac->cd_length != digest_len) {
2255 		ret = CRYPTO_INVALID_MAC;
2256 		goto bail;
2257 	}
2258 
2259 	/* do a SHA1 update of the inner context using the specified data */
2260 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
2261 	if (ret != CRYPTO_SUCCESS)
2262 		/* the update failed, free context and bail */
2263 		goto bail;
2264 
2265 	/* do a SHA1 final on the inner context */
2266 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
2267 
2268 	/*
2269 	 * Do an SHA1 update on the outer context, feeding the inner
2270 	 * digest as data.
2271 	 */
2272 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
2273 
2274 	/*
2275 	 * Do a SHA1 final on the outer context, storing the computed
2276 	 * digest in the users buffer.
2277 	 */
2278 	SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
2279 
2280 	/*
2281 	 * Compare the computed digest against the expected digest passed
2282 	 * as argument.
2283 	 */
2284 
2285 	switch (mac->cd_format) {
2286 
2287 	case CRYPTO_DATA_RAW:
2288 		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
2289 		    mac->cd_offset, digest_len) != 0)
2290 			ret = CRYPTO_INVALID_MAC;
2291 		break;
2292 
2293 	case CRYPTO_DATA_UIO: {
2294 		off_t offset = mac->cd_offset;
2295 		uint_t vec_idx;
2296 		off_t scratch_offset = 0;
2297 		size_t length = digest_len;
2298 		size_t cur_len;
2299 
2300 		/* we support only kernel buffer */
2301 		if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
2302 			return (CRYPTO_ARGUMENTS_BAD);
2303 
2304 		/* jump to the first iovec containing the expected digest */
2305 		for (vec_idx = 0;
2306 		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len &&
2307 		    vec_idx < mac->cd_uio->uio_iovcnt;
2308 		    offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len);
2309 		if (vec_idx == mac->cd_uio->uio_iovcnt) {
2310 			/*
2311 			 * The caller specified an offset that is
2312 			 * larger than the total size of the buffers
2313 			 * it provided.
2314 			 */
2315 			ret = CRYPTO_DATA_LEN_RANGE;
2316 			break;
2317 		}
2318 
2319 		/* do the comparison of computed digest vs specified one */
2320 		while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
2321 			cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
2322 			    offset, length);
2323 
2324 			if (bcmp(digest + scratch_offset,
2325 			    mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
2326 			    cur_len) != 0) {
2327 				ret = CRYPTO_INVALID_MAC;
2328 				break;
2329 			}
2330 
2331 			length -= cur_len;
2332 			vec_idx++;
2333 			scratch_offset += cur_len;
2334 			offset = 0;
2335 		}
2336 		break;
2337 	}
2338 
2339 	case CRYPTO_DATA_MBLK: {
2340 		off_t offset = mac->cd_offset;
2341 		mblk_t *mp;
2342 		off_t scratch_offset = 0;
2343 		size_t length = digest_len;
2344 		size_t cur_len;
2345 
2346 		/* jump to the first mblk_t containing the expected digest */
2347 		for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
2348 		    offset -= MBLKL(mp), mp = mp->b_cont);
2349 		if (mp == NULL) {
2350 			/*
2351 			 * The caller specified an offset that is larger than
2352 			 * the total size of the buffers it provided.
2353 			 */
2354 			ret = CRYPTO_DATA_LEN_RANGE;
2355 			break;
2356 		}
2357 
2358 		while (mp != NULL && length > 0) {
2359 			cur_len = MIN(MBLKL(mp) - offset, length);
2360 			if (bcmp(digest + scratch_offset,
2361 			    mp->b_rptr + offset, cur_len) != 0) {
2362 				ret = CRYPTO_INVALID_MAC;
2363 				break;
2364 			}
2365 
2366 			length -= cur_len;
2367 			mp = mp->b_cont;
2368 			scratch_offset += cur_len;
2369 			offset = 0;
2370 		}
2371 		break;
2372 	}
2373 
2374 	default:
2375 		ret = CRYPTO_ARGUMENTS_BAD;
2376 	}
2377 
2378 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
2379 	return (ret);
2380 bail:
2381 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
2382 	mac->cd_length = 0;
2383 	return (ret);
2384 }
2385 
2386 /*
2387  * KCF software provider context management entry points.
2388  */
2389 
2390 /* ARGSUSED */
2391 static int
2392 sha1_create_ctx_template(crypto_provider_handle_t provider,
2393     crypto_mechanism_t *mechanism, crypto_key_t *key,
2394     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
2395     crypto_req_handle_t req)
2396 {
2397 	sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
2398 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
2399 
2400 	if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
2401 	    (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
2402 		return (CRYPTO_MECHANISM_INVALID);
2403 	}
2404 
2405 	/* Add support for key by attributes (RFE 4706552) */
2406 	if (key->ck_format != CRYPTO_KEY_RAW)
2407 		return (CRYPTO_ARGUMENTS_BAD);
2408 
2409 	/*
2410 	 * Allocate and initialize SHA1 context.
2411 	 */
2412 	sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
2413 	    crypto_kmflag(req));
2414 	if (sha1_hmac_ctx_tmpl == NULL)
2415 		return (CRYPTO_HOST_MEMORY);
2416 
2417 	if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
2418 		uchar_t digested_key[SHA1_DIGEST_LENGTH];
2419 
2420 		/*
2421 		 * Hash the passed-in key to get a smaller key.
2422 		 * The inner context is used since it hasn't been
2423 		 * initialized yet.
2424 		 */
2425 		PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
2426 		    key->ck_data, keylen_in_bytes, digested_key);
2427 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
2428 		    SHA1_DIGEST_LENGTH);
2429 	} else {
2430 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
2431 		    keylen_in_bytes);
2432 	}
2433 
2434 	sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
2435 	*ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
2436 	*ctx_template_size = sizeof (sha1_hmac_ctx_t);
2437 
2438 
2439 	return (CRYPTO_SUCCESS);
2440 }
2441 
2442 static int
2443 sha1_free_context(crypto_ctx_t *ctx)
2444 {
2445 	uint_t ctx_len;
2446 	sha1_mech_type_t mech_type;
2447 
2448 	if (ctx->cc_provider_private == NULL)
2449 		return (CRYPTO_SUCCESS);
2450 
2451 	/*
2452 	 * We have to free either SHA1 or SHA1-HMAC contexts, which
2453 	 * have different lengths.
2454 	 */
2455 
2456 	mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
2457 	if (mech_type == SHA1_MECH_INFO_TYPE)
2458 		ctx_len = sizeof (sha1_ctx_t);
2459 	else {
2460 		ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
2461 		    mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
2462 		ctx_len = sizeof (sha1_hmac_ctx_t);
2463 	}
2464 
2465 	bzero(ctx->cc_provider_private, ctx_len);
2466 	kmem_free(ctx->cc_provider_private, ctx_len);
2467 	ctx->cc_provider_private = NULL;
2468 
2469 	return (CRYPTO_SUCCESS);
2470 }
2471 
2472 #endif /* _KERNEL */
2473