xref: /titanic_50/usr/src/common/crypto/sha1/sha1.c (revision 05fa0d51e3dcc60bf87a28d2fd544362e368a474)
1 /*
2  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 #pragma ident	"%Z%%M%	%I%	%E% SMI"
7 
8 /*
9  * The basic framework for this code came from the reference
10  * implementation for MD5.  That implementation is Copyright (C)
11  * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
12  *
13  * License to copy and use this software is granted provided that it
14  * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
15  * Algorithm" in all material mentioning or referencing this software
16  * or this function.
17  *
18  * License is also granted to make and use derivative works provided
19  * that such works are identified as "derived from the RSA Data
20  * Security, Inc. MD5 Message-Digest Algorithm" in all material
21  * mentioning or referencing the derived work.
22  *
23  * RSA Data Security, Inc. makes no representations concerning either
24  * the merchantability of this software or the suitability of this
25  * software for any particular purpose. It is provided "as is"
26  * without express or implied warranty of any kind.
27  *
28  * These notices must be retained in any copies of any part of this
29  * documentation and/or software.
30  *
31  * NOTE: Cleaned-up and optimized, version of SHA1, based on the FIPS 180-1
32  * standard, available at http://www.itl.nist.gov/div897/pubs/fip180-1.htm
33  * Not as fast as one would like -- further optimizations are encouraged
34  * and appreciated.
35  */
36 
37 #include <sys/types.h>
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysmacros.h>
41 #include <sys/sha1.h>
42 #include <sys/sha1_consts.h>
43 
44 #ifdef _KERNEL
45 
46 #include <sys/modctl.h>
47 #include <sys/cmn_err.h>
48 #include <sys/note.h>
49 #include <sys/crypto/common.h>
50 #include <sys/crypto/spi.h>
51 #include <sys/strsun.h>
52 
53 /*
54  * The sha1 module is created with two modlinkages:
55  * - a modlmisc that allows consumers to directly call the entry points
56  *   SHA1Init, SHA1Update, and SHA1Final.
57  * - a modlcrypto that allows the module to register with the Kernel
58  *   Cryptographic Framework (KCF) as a software provider for the SHA1
59  *   mechanisms.
60  */
61 
62 #endif /* _KERNEL */
63 #ifndef	_KERNEL
64 #include <strings.h>
65 #include <stdlib.h>
66 #include <errno.h>
67 #include <sys/systeminfo.h>
68 #endif	/* !_KERNEL */
69 
70 static void Encode(uint8_t *, uint32_t *, size_t);
71 static void SHA1Transform(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t,
72     SHA1_CTX *, const uint8_t *);
73 
/*
 * Padding for SHA1Final(): a single 0x80 marker byte followed by zeros,
 * per FIPS 180-1.  At most the first 64 bytes are ever consumed.
 */
static uint8_t PADDING[64] = { 0x80, /* all zeros */ };
75 
76 /*
77  * F, G, and H are the basic SHA1 functions.
78  */
79 #define	F(b, c, d)	(((b) & (c)) | ((~b) & (d)))
80 #define	G(b, c, d)	((b) ^ (c) ^ (d))
81 #define	H(b, c, d)	(((b) & (c)) | ((b) & (d)) | ((c) & (d)))
82 
83 /*
84  * ROTATE_LEFT rotates x left n bits.
85  */
86 #define	ROTATE_LEFT(x, n)	\
87 	(((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n))))
88 
89 #ifdef _KERNEL
90 
/*
 * Module linkage: the modlmisc exposes SHA1Init/SHA1Update/SHA1Final to
 * direct callers; the modlcrypto lets the module register with KCF as a
 * software provider (see the comment at the top of the _KERNEL section).
 */
static struct modlmisc modlmisc = {
	&mod_miscops,
	"SHA1 Message-Digest Algorithm"
};

static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	"SHA1 Kernel SW Provider %I%"
};

/* both linkages are published through a single modlinkage */
static struct modlinkage modlinkage = {
	MODREV_1, &modlmisc, &modlcrypto, NULL
};
104 
105 /*
106  * CSPI information (entry points, provider info, etc.)
107  */
108 
109 typedef enum sha1_mech_type {
110 	SHA1_MECH_INFO_TYPE,		/* SUN_CKM_SHA1 */
111 	SHA1_HMAC_MECH_INFO_TYPE,	/* SUN_CKM_SHA1_HMAC */
112 	SHA1_HMAC_GEN_MECH_INFO_TYPE	/* SUN_CKM_SHA1_HMAC_GENERAL */
113 } sha1_mech_type_t;
114 
115 #define	SHA1_DIGEST_LENGTH	20	/* SHA1 digest length in bytes */
116 #define	SHA1_HMAC_BLOCK_SIZE	64	/* SHA1-HMAC block size */
117 #define	SHA1_HMAC_MIN_KEY_LEN	8	/* SHA1-HMAC min key length in bits */
118 #define	SHA1_HMAC_MAX_KEY_LEN	INT_MAX /* SHA1-HMAC max key length in bits */
119 #define	SHA1_HMAC_INTS_PER_BLOCK	(SHA1_HMAC_BLOCK_SIZE/sizeof (uint32_t))
120 
121 /*
122  * Context for SHA1 mechanism.
123  */
124 typedef struct sha1_ctx {
125 	sha1_mech_type_t	sc_mech_type;	/* type of context */
126 	SHA1_CTX		sc_sha1_ctx;	/* SHA1 context */
127 } sha1_ctx_t;
128 
129 /*
130  * Context for SHA1-HMAC and SHA1-HMAC-GENERAL mechanisms.
131  */
132 typedef struct sha1_hmac_ctx {
133 	sha1_mech_type_t	hc_mech_type;	/* type of context */
134 	uint32_t		hc_digest_len;	/* digest len in bytes */
135 	SHA1_CTX		hc_icontext;	/* inner SHA1 context */
136 	SHA1_CTX		hc_ocontext;	/* outer SHA1 context */
137 } sha1_hmac_ctx_t;
138 
139 /*
140  * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
141  * by KCF to one of the entry points.
142  */
143 
144 #define	PROV_SHA1_CTX(ctx)	((sha1_ctx_t *)(ctx)->cc_provider_private)
145 #define	PROV_SHA1_HMAC_CTX(ctx)	((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)
146 
147 /* to extract the digest length passed as mechanism parameter */
148 #define	PROV_SHA1_GET_DIGEST_LEN(m, len) {				\
149 	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
150 		(len) = (uint32_t)*((ulong_t *)mechanism->cm_param);	\
151 	else {								\
152 		ulong_t tmp_ulong;					\
153 		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
154 		(len) = (uint32_t)tmp_ulong;				\
155 	}								\
156 }
157 
158 #define	PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) {	\
159 	SHA1Init(ctx);					\
160 	SHA1Update(ctx, key, len);			\
161 	SHA1Final(digest, ctx);				\
162 }
163 
164 /*
165  * Mechanism info structure passed to KCF during registration.
166  */
/*
 * Mechanism info structure passed to KCF during registration.
 * Each entry: mechanism name, mechanism number, supported function
 * groups, min/max key length, and keysize unit (bits) -- key lengths
 * are meaningful only for the HMAC mechanisms, hence 0/0 for plain SHA1.
 */
static crypto_mech_info_t sha1_mech_info_tab[] = {
	/* SHA1 (digest only, no key) */
	{SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
	/* SHA1-HMAC */
	{SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
	/* SHA1-HMAC GENERAL (caller-specified output length) */
	{SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
};
183 
184 static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
185 
186 static crypto_control_ops_t sha1_control_ops = {
187 	sha1_provider_status
188 };
189 
190 static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
191     crypto_req_handle_t);
192 static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
193     crypto_req_handle_t);
194 static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
195     crypto_req_handle_t);
196 static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
197     crypto_req_handle_t);
198 static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
199     crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
200     crypto_req_handle_t);
201 
/* digest entry points handed to KCF; the NULL slot is an unused op */
static crypto_digest_ops_t sha1_digest_ops = {
	sha1_digest_init,
	sha1_digest,
	sha1_digest_update,
	NULL,
	sha1_digest_final,
	sha1_digest_atomic
};
210 
211 static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
212     crypto_spi_ctx_template_t, crypto_req_handle_t);
213 static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
214     crypto_req_handle_t);
215 static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
216 static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
217     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
218     crypto_spi_ctx_template_t, crypto_req_handle_t);
219 static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
220     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
221     crypto_spi_ctx_template_t, crypto_req_handle_t);
222 
/* MAC entry points handed to KCF; the NULL slot is an unused op */
static crypto_mac_ops_t sha1_mac_ops = {
	sha1_mac_init,
	NULL,
	sha1_mac_update,
	sha1_mac_final,
	sha1_mac_atomic,
	sha1_mac_verify_atomic
};
231 
232 static int sha1_create_ctx_template(crypto_provider_handle_t,
233     crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
234     size_t *, crypto_req_handle_t);
235 static int sha1_free_context(crypto_ctx_t *);
236 
/* context management entry points (template creation and teardown) */
static crypto_ctx_ops_t sha1_ctx_ops = {
	sha1_create_ctx_template,
	sha1_free_context
};
241 
/*
 * Top-level op vector registered with KCF.  Only control, digest, MAC
 * and context ops are provided; every other entry point is NULL since
 * this provider supports no other operation classes.
 */
static crypto_ops_t sha1_crypto_ops = {
	&sha1_control_ops,
	&sha1_digest_ops,
	NULL,
	&sha1_mac_ops,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	&sha1_ctx_ops
};
258 
/*
 * Provider descriptor passed to crypto_register_provider() in _init():
 * SPI version, description, provider type (software), module linkage,
 * op vector, and the mechanism table with its entry count.
 */
static crypto_provider_info_t sha1_prov_info = {
	CRYPTO_SPI_VERSION_1,
	"SHA1 Software Provider",
	CRYPTO_SW_PROVIDER,
	{&modlinkage},
	NULL,
	&sha1_crypto_ops,
	sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
	sha1_mech_info_tab
};
269 
270 static crypto_kcf_provider_handle_t sha1_prov_handle = NULL;
271 
272 int
273 _init()
274 {
275 	int ret;
276 
277 	if ((ret = mod_install(&modlinkage)) != 0)
278 		return (ret);
279 
280 	/*
281 	 * Register with KCF. If the registration fails, log an
282 	 * error but do not uninstall the module, since the functionality
283 	 * provided by misc/sha1 should still be available.
284 	 */
285 	if ((ret = crypto_register_provider(&sha1_prov_info,
286 	    &sha1_prov_handle)) != CRYPTO_SUCCESS)
287 		cmn_err(CE_WARN, "sha1 _init: "
288 		    "crypto_register_provider() failed (0x%x)", ret);
289 
290 	return (0);
291 }
292 
/* Module information entry point; reports on the linkage via mod_info(). */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
298 
299 #endif /* _KERNEL */
300 
301 /*
302  * SHA1Init()
303  *
304  * purpose: initializes the sha1 context and begins and sha1 digest operation
305  *   input: SHA1_CTX *	: the context to initializes.
306  *  output: void
307  */
308 
309 void
310 SHA1Init(SHA1_CTX *ctx)
311 {
312 	ctx->count[0] = ctx->count[1] = 0;
313 
314 	/*
315 	 * load magic initialization constants. Tell lint
316 	 * that these constants are unsigned by using U.
317 	 */
318 
319 	ctx->state[0] = 0x67452301U;
320 	ctx->state[1] = 0xefcdab89U;
321 	ctx->state[2] = 0x98badcfeU;
322 	ctx->state[3] = 0x10325476U;
323 	ctx->state[4] = 0xc3d2e1f0U;
324 }
325 
326 #ifdef VIS_SHA1
327 
328 
329 #ifdef _KERNEL
330 
331 #include <sys/regset.h>
332 #include <sys/vis.h>
333 #include <sys/fpu/fpusystm.h>
334 
335 /* the alignment for block stores to save fp registers */
336 #define	VIS_ALIGN	(64)
337 
338 extern int sha1_savefp(kfpu_t *, int);
339 extern void sha1_restorefp(kfpu_t *);
340 
341 uint32_t	vis_sha1_svfp_threshold = 128;
342 
343 #else /* !_KERNEL */
344 
345 static boolean_t checked_vis = B_FALSE;
346 static int usevis = 0;
347 
348 static int
349 havevis()
350 {
351 	char *buf = NULL;
352 	char *isa_token;
353 	char *lasts;
354 	int ret = 0;
355 	size_t bufsize = 255; /* UltraSPARC III needs 115 chars */
356 	int v9_isa_token, vis_isa_token, isa_token_num;
357 
358 	if (checked_vis) {
359 		return (usevis);
360 	}
361 
362 	if ((buf = malloc(bufsize)) == NULL) {
363 		return (0);
364 	}
365 
366 	if ((ret = sysinfo(SI_ISALIST, buf, bufsize)) == -1) {
367 		free(buf);
368 		return (0);
369 	} else if (ret > bufsize) {
370 		/* We lost some because our buffer was too small  */
371 		if ((buf = realloc(buf, bufsize = ret)) == NULL) {
372 			return (0);
373 		}
374 		if ((ret = sysinfo(SI_ISALIST, buf, bufsize)) == -1) {
375 			free(buf);
376 			return (0);
377 		}
378 	}
379 
380 	/*
381 	 * Check the relative posistions of sparcv9 & sparcv9+vis
382 	 * because they are listed in (best) performance order.
383 	 * For example: The Niagara chip reports it has VIS but the
384 	 * SHA1 code runs faster without this optimisation.
385 	 */
386 	isa_token = strtok_r(buf, " ", &lasts);
387 	v9_isa_token = vis_isa_token = -1;
388 	isa_token_num = 0;
389 	do {
390 		if (strcmp(isa_token, "sparcv9") == 0) {
391 			v9_isa_token = isa_token_num;
392 		} else if (strcmp(isa_token, "sparcv9+vis") == 0) {
393 			vis_isa_token = isa_token_num;
394 		}
395 		isa_token_num++;
396 	} while (isa_token = strtok_r(NULL, " ", &lasts));
397 
398 	if (vis_isa_token != -1 && vis_isa_token < v9_isa_token)
399 		usevis = 1;
400 	free(buf);
401 
402 	checked_vis = B_TRUE;
403 	return (usevis);
404 }
405 
406 #endif /* _KERNEL */
407 
408 /*
409  * VIS SHA-1 consts.
410  */
/*
 * VIS SHA-1 consts, passed to SHA1TransformVIS().  The third and fourth
 * entries pack the four standard SHA-1 round constants (0x5a827999,
 * 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6) two per 64-bit word; the other
 * entries are presumably masks/permutation data used internally by the
 * VIS assembly -- see the SHA1TransformVIS implementation to confirm.
 */
static uint64_t VIS[] = {
	0x8000000080000000ULL,
	0x0002000200020002ULL,
	0x5a8279996ed9eba1ULL,
	0x8f1bbcdcca62c1d6ULL,
	0x012389ab456789abULL};
417 
418 extern void SHA1TransformVIS(uint64_t *, uint64_t *, uint32_t *, uint64_t *);
419 
420 
421 /*
422  * SHA1Update()
423  *
424  * purpose: continues an sha1 digest operation, using the message block
425  *          to update the context.
426  *   input: SHA1_CTX *	: the context to update
427  *          uint8_t *	: the message block
428  *          uint32_t    : the length of the message block in bytes
429  *  output: void
430  */
431 
void
SHA1Update(SHA1_CTX *ctx, const uint8_t *input, uint32_t input_len)
{
	uint32_t i, buf_index, buf_len;
	/*
	 * X0 is scratch space for the VIS-computed message schedule;
	 * input64 is a one-block bounce buffer used when the input is
	 * not 4-byte aligned for SHA1TransformVIS().
	 */
	uint64_t X0[40], input64[8];
#ifdef _KERNEL
	int usevis = 0;
#endif /* _KERNEL */

	/* check for noop */
	if (input_len == 0)
		return;

	/* compute number of bytes mod 64 */
	buf_index = (ctx->count[1] >> 3) & 0x3F;

	/* update number of bits (64-bit count kept in two 32-bit words) */
	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
		ctx->count[0]++;	/* low word wrapped: carry */

	ctx->count[0] += (input_len >> 29);	/* high bits of len * 8 */

	buf_len = 64 - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {
#ifdef _KERNEL
		/*
		 * Raw storage for the FPU save area; P2ROUNDUP below
		 * aligns it to a 64-byte boundary for block stores.
		 */
		uint8_t fpua[sizeof (kfpu_t) + GSR_SIZE + VIS_ALIGN];
		kfpu_t *fpu;

		/* number of whole 64-byte blocks this call will hash */
		uint32_t len = (input_len + buf_index) & ~0x3f;
		int svfp_ok;

		fpu = (kfpu_t *)P2ROUNDUP((uintptr_t)fpua, 64);
		/*
		 * Only bother saving FP state (a prerequisite for using
		 * VIS) when there is enough data to amortize the cost.
		 */
		svfp_ok = ((len >= vis_sha1_svfp_threshold) ? 1 : 0);
		usevis = fpu_exists && sha1_savefp(fpu, svfp_ok);
#else
		if (!checked_vis)
			usevis = havevis();
#endif /* _KERNEL */

		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and SHA1Transform() if
		 * buf_index != 0.  if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * SHA1Update().
		 */

		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
			if (usevis) {
				SHA1TransformVIS(X0,
				    (uint64_t *)ctx->buf_un.buf8,
				    &ctx->state[0], VIS);
			} else {
				SHA1Transform(ctx->state[0], ctx->state[1],
				    ctx->state[2], ctx->state[3],
				    ctx->state[4], ctx, ctx->buf_un.buf8);
			}
			i = buf_len;
		}

		/*
		 * VIS SHA-1: uses the VIS 1.0 instructions to accelerate
		 * SHA-1 processing. This is achieved by "offloading" the
		 * computation of the message schedule (MS) to the VIS units.
		 * This allows the VIS computation of the message schedule
		 * to be performed in parallel with the standard integer
		 * processing of the remainder of the SHA-1 computation.
		 * This improves performance by up to around 1.37X,
		 * compared to an optimized integer-only implementation.
		 *
		 * The VIS implementation of SHA1Transform has a different API
		 * to the standard integer version:
		 *
		 * void SHA1TransformVIS(
		 *	 uint64_t *, // Pointer to MS for ith block
		 *	 uint64_t *, // Pointer to ith block of message data
		 *	 uint32_t *, // Pointer to SHA state i.e ctx->state
		 *	 uint64_t *, // Pointer to various VIS constants
		 * )
		 *
		 * Note: the message data must be 4-byte aligned.
		 *
		 * Function requires VIS 1.0 support.
		 *
		 * Handling is provided to deal with arbitrary byte alignment
		 * of the input data but the performance gains are reduced
		 * for alignments other than 4-bytes.
		 */
		if (usevis) {
			if (((uint64_t)(uintptr_t)(&input[i]) & 0x3)) {
				/*
				 * Main processing loop - input misaligned:
				 * bounce each block through input64 to
				 * satisfy SHA1TransformVIS()'s alignment
				 * requirement.
				 */
				for (; i + 63 < input_len; i += 64) {
				    bcopy(&input[i], input64, 64);
				    SHA1TransformVIS(X0, input64,
					&ctx->state[0], VIS);
				}
			} else {
				/*
				 * Main processing loop - input 8-byte aligned
				 */
				for (; i + 63 < input_len; i += 64) {
					SHA1TransformVIS(X0,
					    (uint64_t *)&input[i],
					    &ctx->state[0], VIS);
				}

			}
#ifdef _KERNEL
			sha1_restorefp(fpu);
#endif /* _KERNEL */
		} else {
			/* integer-only fallback path */
			for (; i + 63 < input_len; i += 64) {
			    SHA1Transform(ctx->state[0], ctx->state[1],
				ctx->state[2], ctx->state[3], ctx->state[4],
				ctx, &input[i]);
			}
		}

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this case
		 * will be an expensive nop.
		 */

		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}
575 
576 #else /* VIS_SHA1 */
577 
578 void
579 SHA1Update(SHA1_CTX *ctx, const uint8_t *input, uint32_t input_len)
580 {
581 	uint32_t i, buf_index, buf_len;
582 
583 	/* check for noop */
584 	if (input_len == 0)
585 		return;
586 
587 	/* compute number of bytes mod 64 */
588 	buf_index = (ctx->count[1] >> 3) & 0x3F;
589 
590 	/* update number of bits */
591 	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
592 		ctx->count[0]++;
593 
594 	ctx->count[0] += (input_len >> 29);
595 
596 	buf_len = 64 - buf_index;
597 
598 	/* transform as many times as possible */
599 	i = 0;
600 	if (input_len >= buf_len) {
601 
602 		/*
603 		 * general optimization:
604 		 *
605 		 * only do initial bcopy() and SHA1Transform() if
606 		 * buf_index != 0.  if buf_index == 0, we're just
607 		 * wasting our time doing the bcopy() since there
608 		 * wasn't any data left over from a previous call to
609 		 * SHA1Update().
610 		 */
611 
612 		if (buf_index) {
613 			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
614 
615 
616 			SHA1Transform(ctx->state[0], ctx->state[1],
617 			    ctx->state[2], ctx->state[3], ctx->state[4], ctx,
618 			    ctx->buf_un.buf8);
619 
620 			i = buf_len;
621 		}
622 
623 		for (; i + 63 < input_len; i += 64)
624 			SHA1Transform(ctx->state[0], ctx->state[1],
625 			    ctx->state[2], ctx->state[3], ctx->state[4],
626 			    ctx, &input[i]);
627 
628 		/*
629 		 * general optimization:
630 		 *
631 		 * if i and input_len are the same, return now instead
632 		 * of calling bcopy(), since the bcopy() in this case
633 		 * will be an expensive nop.
634 		 */
635 
636 		if (input_len == i)
637 			return;
638 
639 		buf_index = 0;
640 	}
641 
642 	/* buffer remaining input */
643 	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
644 }
645 
646 #endif /* VIS_SHA1 */
647 
648 /*
649  * SHA1Final()
650  *
651  * purpose: ends an sha1 digest operation, finalizing the message digest and
652  *          zeroing the context.
653  *   input: uint8_t *	: a buffer to store the digest in
654  *          SHA1_CTX *  : the context to finalize, save, and zero
655  *  output: void
656  */
657 
658 void
659 SHA1Final(uint8_t *digest, SHA1_CTX *ctx)
660 {
661 	uint8_t		bitcount_be[sizeof (ctx->count)];
662 	uint32_t	index = (ctx->count[1] >> 3) & 0x3f;
663 
664 	/* store bit count, big endian */
665 	Encode(bitcount_be, ctx->count, sizeof (bitcount_be));
666 
667 	/* pad out to 56 mod 64 */
668 	SHA1Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
669 
670 	/* append length (before padding) */
671 	SHA1Update(ctx, bitcount_be, sizeof (bitcount_be));
672 
673 	/* store state in digest */
674 	Encode(digest, ctx->state, sizeof (ctx->state));
675 
676 	/* zeroize sensitive information */
677 	bzero(ctx, sizeof (*ctx));
678 }
679 
680 /*
681  * sparc optimization:
682  *
683  * on the sparc, we can load big endian 32-bit data easily.  note that
684  * special care must be taken to ensure the address is 32-bit aligned.
685  * in the interest of speed, we don't check to make sure, since
686  * careful programming can guarantee this for us.
687  */
688 
689 #if	defined(_BIG_ENDIAN)
690 
691 #define	LOAD_BIG_32(addr)	(*(uint32_t *)(addr))
692 
693 #else	/* little endian -- will work on big endian, but slowly */
694 
695 #define	LOAD_BIG_32(addr)	\
696 	(((addr)[0] << 24) | ((addr)[1] << 16) | ((addr)[2] << 8) | (addr)[3])
697 #endif
698 
699 /*
700  * sparc register window optimization:
701  *
702  * `a', `b', `c', `d', and `e' are passed into SHA1Transform
703  * explicitly since it increases the number of registers available to
704  * the compiler.  under this scheme, these variables can be held in
705  * %i0 - %i4, which leaves more local and out registers available.
706  */
707 
708 /*
709  * SHA1Transform()
710  *
711  * purpose: sha1 transformation -- updates the digest based on `block'
712  *   input: uint32_t	: bytes  1 -  4 of the digest
713  *          uint32_t	: bytes  5 -  8 of the digest
714  *          uint32_t	: bytes  9 - 12 of the digest
715  *          uint32_t	: bytes 12 - 16 of the digest
716  *          uint32_t	: bytes 16 - 20 of the digest
717  *          SHA1_CTX *	: the context to update
718  *          uint8_t [64]: the block to use to update the digest
719  *  output: void
720  */
721 
722 void
723 SHA1Transform(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e,
724     SHA1_CTX *ctx, const uint8_t blk[64])
725 {
726 	/*
727 	 * sparc optimization:
728 	 *
729 	 * while it is somewhat counter-intuitive, on sparc, it is
730 	 * more efficient to place all the constants used in this
731 	 * function in an array and load the values out of the array
732 	 * than to manually load the constants.  this is because
733 	 * setting a register to a 32-bit value takes two ops in most
734 	 * cases: a `sethi' and an `or', but loading a 32-bit value
735 	 * from memory only takes one `ld' (or `lduw' on v9).  while
736 	 * this increases memory usage, the compiler can find enough
737 	 * other things to do while waiting to keep the pipeline does
738 	 * not stall.  additionally, it is likely that many of these
739 	 * constants are cached so that later accesses do not even go
740 	 * out to the bus.
741 	 *
742 	 * this array is declared `static' to keep the compiler from
743 	 * having to bcopy() this array onto the stack frame of
744 	 * SHA1Transform() each time it is called -- which is
745 	 * unacceptably expensive.
746 	 *
747 	 * the `const' is to ensure that callers are good citizens and
748 	 * do not try to munge the array.  since these routines are
749 	 * going to be called from inside multithreaded kernelland,
750 	 * this is a good safety check. -- `sha1_consts' will end up in
751 	 * .rodata.
752 	 *
753 	 * unfortunately, loading from an array in this manner hurts
754 	 * performance under intel.  so, there is a macro,
755 	 * SHA1_CONST(), used in SHA1Transform(), that either expands to
756 	 * a reference to this array, or to the actual constant,
757 	 * depending on what platform this code is compiled for.
758 	 */
759 
760 #if	defined(__sparc)
761 	static const uint32_t sha1_consts[] = {
762 		SHA1_CONST_0,	SHA1_CONST_1,	SHA1_CONST_2,	SHA1_CONST_3,
763 	};
764 #endif
765 
766 	/*
767 	 * general optimization:
768 	 *
769 	 * use individual integers instead of using an array.  this is a
770 	 * win, although the amount it wins by seems to vary quite a bit.
771 	 */
772 
773 	uint32_t	w_0, w_1, w_2,  w_3,  w_4,  w_5,  w_6,  w_7;
774 	uint32_t	w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;
775 
776 	/*
777 	 * sparc optimization:
778 	 *
779 	 * if `block' is already aligned on a 4-byte boundary, use
780 	 * LOAD_BIG_32() directly.  otherwise, bcopy() into a
781 	 * buffer that *is* aligned on a 4-byte boundary and then do
782 	 * the LOAD_BIG_32() on that buffer.  benchmarks have shown
783 	 * that using the bcopy() is better than loading the bytes
784 	 * individually and doing the endian-swap by hand.
785 	 *
786 	 * even though it's quite tempting to assign to do:
787 	 *
788 	 * blk = bcopy(ctx->buf_un.buf32, blk, sizeof (ctx->buf_un.buf32));
789 	 *
790 	 * and only have one set of LOAD_BIG_32()'s, the compiler
791 	 * *does not* like that, so please resist the urge.
792 	 */
793 
794 #if	defined(__sparc)
795 	if ((uintptr_t)blk & 0x3) {		/* not 4-byte aligned? */
796 		bcopy(blk, ctx->buf_un.buf32,  sizeof (ctx->buf_un.buf32));
797 		w_15 = LOAD_BIG_32(ctx->buf_un.buf32 + 15);
798 		w_14 = LOAD_BIG_32(ctx->buf_un.buf32 + 14);
799 		w_13 = LOAD_BIG_32(ctx->buf_un.buf32 + 13);
800 		w_12 = LOAD_BIG_32(ctx->buf_un.buf32 + 12);
801 		w_11 = LOAD_BIG_32(ctx->buf_un.buf32 + 11);
802 		w_10 = LOAD_BIG_32(ctx->buf_un.buf32 + 10);
803 		w_9  = LOAD_BIG_32(ctx->buf_un.buf32 +  9);
804 		w_8  = LOAD_BIG_32(ctx->buf_un.buf32 +  8);
805 		w_7  = LOAD_BIG_32(ctx->buf_un.buf32 +  7);
806 		w_6  = LOAD_BIG_32(ctx->buf_un.buf32 +  6);
807 		w_5  = LOAD_BIG_32(ctx->buf_un.buf32 +  5);
808 		w_4  = LOAD_BIG_32(ctx->buf_un.buf32 +  4);
809 		w_3  = LOAD_BIG_32(ctx->buf_un.buf32 +  3);
810 		w_2  = LOAD_BIG_32(ctx->buf_un.buf32 +  2);
811 		w_1  = LOAD_BIG_32(ctx->buf_un.buf32 +  1);
812 		w_0  = LOAD_BIG_32(ctx->buf_un.buf32 +  0);
813 	} else {
814 		/*LINTED*/
815 		w_15 = LOAD_BIG_32(blk + 60);
816 		/*LINTED*/
817 		w_14 = LOAD_BIG_32(blk + 56);
818 		/*LINTED*/
819 		w_13 = LOAD_BIG_32(blk + 52);
820 		/*LINTED*/
821 		w_12 = LOAD_BIG_32(blk + 48);
822 		/*LINTED*/
823 		w_11 = LOAD_BIG_32(blk + 44);
824 		/*LINTED*/
825 		w_10 = LOAD_BIG_32(blk + 40);
826 		/*LINTED*/
827 		w_9  = LOAD_BIG_32(blk + 36);
828 		/*LINTED*/
829 		w_8  = LOAD_BIG_32(blk + 32);
830 		/*LINTED*/
831 		w_7  = LOAD_BIG_32(blk + 28);
832 		/*LINTED*/
833 		w_6  = LOAD_BIG_32(blk + 24);
834 		/*LINTED*/
835 		w_5  = LOAD_BIG_32(blk + 20);
836 		/*LINTED*/
837 		w_4  = LOAD_BIG_32(blk + 16);
838 		/*LINTED*/
839 		w_3  = LOAD_BIG_32(blk + 12);
840 		/*LINTED*/
841 		w_2  = LOAD_BIG_32(blk +  8);
842 		/*LINTED*/
843 		w_1  = LOAD_BIG_32(blk +  4);
844 		/*LINTED*/
845 		w_0  = LOAD_BIG_32(blk +  0);
846 	}
847 #else
848 	w_15 = LOAD_BIG_32(blk + 60);
849 	w_14 = LOAD_BIG_32(blk + 56);
850 	w_13 = LOAD_BIG_32(blk + 52);
851 	w_12 = LOAD_BIG_32(blk + 48);
852 	w_11 = LOAD_BIG_32(blk + 44);
853 	w_10 = LOAD_BIG_32(blk + 40);
854 	w_9  = LOAD_BIG_32(blk + 36);
855 	w_8  = LOAD_BIG_32(blk + 32);
856 	w_7  = LOAD_BIG_32(blk + 28);
857 	w_6  = LOAD_BIG_32(blk + 24);
858 	w_5  = LOAD_BIG_32(blk + 20);
859 	w_4  = LOAD_BIG_32(blk + 16);
860 	w_3  = LOAD_BIG_32(blk + 12);
861 	w_2  = LOAD_BIG_32(blk +  8);
862 	w_1  = LOAD_BIG_32(blk +  4);
863 	w_0  = LOAD_BIG_32(blk +  0);
864 #endif
865 	/*
866 	 * general optimization:
867 	 *
868 	 * even though this approach is described in the standard as
869 	 * being slower algorithmically, it is 30-40% faster than the
870 	 * "faster" version under SPARC, because this version has more
871 	 * of the constraints specified at compile-time and uses fewer
872 	 * variables (and therefore has better register utilization)
873 	 * than its "speedier" brother.  (i've tried both, trust me)
874 	 *
875 	 * for either method given in the spec, there is an "assignment"
876 	 * phase where the following takes place:
877 	 *
878 	 *	tmp = (main_computation);
879 	 *	e = d; d = c; c = rotate_left(b, 30); b = a; a = tmp;
880 	 *
881 	 * we can make the algorithm go faster by not doing this work,
882 	 * but just pretending that `d' is now `e', etc. this works
883 	 * really well and obviates the need for a temporary variable.
884 	 * however, we still explictly perform the rotate action,
885 	 * since it is cheaper on SPARC to do it once than to have to
886 	 * do it over and over again.
887 	 */
888 
889 	/* round 1 */
890 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_0 + SHA1_CONST(0); /* 0 */
891 	b = ROTATE_LEFT(b, 30);
892 
893 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_1 + SHA1_CONST(0); /* 1 */
894 	a = ROTATE_LEFT(a, 30);
895 
896 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_2 + SHA1_CONST(0); /* 2 */
897 	e = ROTATE_LEFT(e, 30);
898 
899 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_3 + SHA1_CONST(0); /* 3 */
900 	d = ROTATE_LEFT(d, 30);
901 
902 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_4 + SHA1_CONST(0); /* 4 */
903 	c = ROTATE_LEFT(c, 30);
904 
905 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_5 + SHA1_CONST(0); /* 5 */
906 	b = ROTATE_LEFT(b, 30);
907 
908 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_6 + SHA1_CONST(0); /* 6 */
909 	a = ROTATE_LEFT(a, 30);
910 
911 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_7 + SHA1_CONST(0); /* 7 */
912 	e = ROTATE_LEFT(e, 30);
913 
914 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_8 + SHA1_CONST(0); /* 8 */
915 	d = ROTATE_LEFT(d, 30);
916 
917 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_9 + SHA1_CONST(0); /* 9 */
918 	c = ROTATE_LEFT(c, 30);
919 
920 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_10 + SHA1_CONST(0); /* 10 */
921 	b = ROTATE_LEFT(b, 30);
922 
923 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_11 + SHA1_CONST(0); /* 11 */
924 	a = ROTATE_LEFT(a, 30);
925 
926 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_12 + SHA1_CONST(0); /* 12 */
927 	e = ROTATE_LEFT(e, 30);
928 
929 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_13 + SHA1_CONST(0); /* 13 */
930 	d = ROTATE_LEFT(d, 30);
931 
932 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_14 + SHA1_CONST(0); /* 14 */
933 	c = ROTATE_LEFT(c, 30);
934 
935 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_15 + SHA1_CONST(0); /* 15 */
936 	b = ROTATE_LEFT(b, 30);
937 
938 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 16 */
939 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_0 + SHA1_CONST(0);
940 	a = ROTATE_LEFT(a, 30);
941 
942 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 17 */
943 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_1 + SHA1_CONST(0);
944 	e = ROTATE_LEFT(e, 30);
945 
946 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 18 */
947 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_2 + SHA1_CONST(0);
948 	d = ROTATE_LEFT(d, 30);
949 
950 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 19 */
951 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_3 + SHA1_CONST(0);
952 	c = ROTATE_LEFT(c, 30);
953 
954 	/* round 2 */
955 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 20 */
956 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_4 + SHA1_CONST(1);
957 	b = ROTATE_LEFT(b, 30);
958 
959 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 21 */
960 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_5 + SHA1_CONST(1);
961 	a = ROTATE_LEFT(a, 30);
962 
963 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 22 */
964 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_6 + SHA1_CONST(1);
965 	e = ROTATE_LEFT(e, 30);
966 
967 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 23 */
968 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_7 + SHA1_CONST(1);
969 	d = ROTATE_LEFT(d, 30);
970 
971 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 24 */
972 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_8 + SHA1_CONST(1);
973 	c = ROTATE_LEFT(c, 30);
974 
975 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 25 */
976 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_9 + SHA1_CONST(1);
977 	b = ROTATE_LEFT(b, 30);
978 
979 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 26 */
980 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_10 + SHA1_CONST(1);
981 	a = ROTATE_LEFT(a, 30);
982 
983 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 27 */
984 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_11 + SHA1_CONST(1);
985 	e = ROTATE_LEFT(e, 30);
986 
987 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 28 */
988 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_12 + SHA1_CONST(1);
989 	d = ROTATE_LEFT(d, 30);
990 
991 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 29 */
992 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_13 + SHA1_CONST(1);
993 	c = ROTATE_LEFT(c, 30);
994 
995 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 30 */
996 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_14 + SHA1_CONST(1);
997 	b = ROTATE_LEFT(b, 30);
998 
999 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 31 */
1000 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_15 + SHA1_CONST(1);
1001 	a = ROTATE_LEFT(a, 30);
1002 
1003 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 32 */
1004 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_0 + SHA1_CONST(1);
1005 	e = ROTATE_LEFT(e, 30);
1006 
1007 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 33 */
1008 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_1 + SHA1_CONST(1);
1009 	d = ROTATE_LEFT(d, 30);
1010 
1011 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 34 */
1012 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_2 + SHA1_CONST(1);
1013 	c = ROTATE_LEFT(c, 30);
1014 
1015 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 35 */
1016 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_3 + SHA1_CONST(1);
1017 	b = ROTATE_LEFT(b, 30);
1018 
1019 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 36 */
1020 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_4 + SHA1_CONST(1);
1021 	a = ROTATE_LEFT(a, 30);
1022 
1023 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 37 */
1024 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_5 + SHA1_CONST(1);
1025 	e = ROTATE_LEFT(e, 30);
1026 
1027 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 38 */
1028 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_6 + SHA1_CONST(1);
1029 	d = ROTATE_LEFT(d, 30);
1030 
1031 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 39 */
1032 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_7 + SHA1_CONST(1);
1033 	c = ROTATE_LEFT(c, 30);
1034 
1035 	/* round 3 */
1036 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 40 */
1037 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_8 + SHA1_CONST(2);
1038 	b = ROTATE_LEFT(b, 30);
1039 
1040 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 41 */
1041 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_9 + SHA1_CONST(2);
1042 	a = ROTATE_LEFT(a, 30);
1043 
1044 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 42 */
1045 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_10 + SHA1_CONST(2);
1046 	e = ROTATE_LEFT(e, 30);
1047 
1048 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 43 */
1049 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_11 + SHA1_CONST(2);
1050 	d = ROTATE_LEFT(d, 30);
1051 
1052 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 44 */
1053 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_12 + SHA1_CONST(2);
1054 	c = ROTATE_LEFT(c, 30);
1055 
1056 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 45 */
1057 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_13 + SHA1_CONST(2);
1058 	b = ROTATE_LEFT(b, 30);
1059 
1060 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 46 */
1061 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_14 + SHA1_CONST(2);
1062 	a = ROTATE_LEFT(a, 30);
1063 
1064 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 47 */
1065 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_15 + SHA1_CONST(2);
1066 	e = ROTATE_LEFT(e, 30);
1067 
1068 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 48 */
1069 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_0 + SHA1_CONST(2);
1070 	d = ROTATE_LEFT(d, 30);
1071 
1072 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 49 */
1073 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_1 + SHA1_CONST(2);
1074 	c = ROTATE_LEFT(c, 30);
1075 
1076 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 50 */
1077 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_2 + SHA1_CONST(2);
1078 	b = ROTATE_LEFT(b, 30);
1079 
1080 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 51 */
1081 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_3 + SHA1_CONST(2);
1082 	a = ROTATE_LEFT(a, 30);
1083 
1084 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 52 */
1085 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_4 + SHA1_CONST(2);
1086 	e = ROTATE_LEFT(e, 30);
1087 
1088 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 53 */
1089 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_5 + SHA1_CONST(2);
1090 	d = ROTATE_LEFT(d, 30);
1091 
1092 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 54 */
1093 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_6 + SHA1_CONST(2);
1094 	c = ROTATE_LEFT(c, 30);
1095 
1096 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 55 */
1097 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_7 + SHA1_CONST(2);
1098 	b = ROTATE_LEFT(b, 30);
1099 
1100 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 56 */
1101 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_8 + SHA1_CONST(2);
1102 	a = ROTATE_LEFT(a, 30);
1103 
1104 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 57 */
1105 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_9 + SHA1_CONST(2);
1106 	e = ROTATE_LEFT(e, 30);
1107 
1108 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 58 */
1109 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_10 + SHA1_CONST(2);
1110 	d = ROTATE_LEFT(d, 30);
1111 
1112 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 59 */
1113 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_11 + SHA1_CONST(2);
1114 	c = ROTATE_LEFT(c, 30);
1115 
1116 	/* round 4 */
1117 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 60 */
1118 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_12 + SHA1_CONST(3);
1119 	b = ROTATE_LEFT(b, 30);
1120 
1121 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 61 */
1122 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_13 + SHA1_CONST(3);
1123 	a = ROTATE_LEFT(a, 30);
1124 
1125 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 62 */
1126 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_14 + SHA1_CONST(3);
1127 	e = ROTATE_LEFT(e, 30);
1128 
1129 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 63 */
1130 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_15 + SHA1_CONST(3);
1131 	d = ROTATE_LEFT(d, 30);
1132 
1133 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 64 */
1134 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_0 + SHA1_CONST(3);
1135 	c = ROTATE_LEFT(c, 30);
1136 
1137 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 65 */
1138 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_1 + SHA1_CONST(3);
1139 	b = ROTATE_LEFT(b, 30);
1140 
1141 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 66 */
1142 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_2 + SHA1_CONST(3);
1143 	a = ROTATE_LEFT(a, 30);
1144 
1145 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 67 */
1146 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_3 + SHA1_CONST(3);
1147 	e = ROTATE_LEFT(e, 30);
1148 
1149 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 68 */
1150 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_4 + SHA1_CONST(3);
1151 	d = ROTATE_LEFT(d, 30);
1152 
1153 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 69 */
1154 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_5 + SHA1_CONST(3);
1155 	c = ROTATE_LEFT(c, 30);
1156 
1157 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 70 */
1158 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_6 + SHA1_CONST(3);
1159 	b = ROTATE_LEFT(b, 30);
1160 
1161 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 71 */
1162 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_7 + SHA1_CONST(3);
1163 	a = ROTATE_LEFT(a, 30);
1164 
1165 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 72 */
1166 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_8 + SHA1_CONST(3);
1167 	e = ROTATE_LEFT(e, 30);
1168 
1169 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 73 */
1170 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_9 + SHA1_CONST(3);
1171 	d = ROTATE_LEFT(d, 30);
1172 
1173 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 74 */
1174 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_10 + SHA1_CONST(3);
1175 	c = ROTATE_LEFT(c, 30);
1176 
1177 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 75 */
1178 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_11 + SHA1_CONST(3);
1179 	b = ROTATE_LEFT(b, 30);
1180 
1181 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 76 */
1182 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_12 + SHA1_CONST(3);
1183 	a = ROTATE_LEFT(a, 30);
1184 
1185 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 77 */
1186 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_13 + SHA1_CONST(3);
1187 	e = ROTATE_LEFT(e, 30);
1188 
1189 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 78 */
1190 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_14 + SHA1_CONST(3);
1191 	d = ROTATE_LEFT(d, 30);
1192 
1193 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 79 */
1194 
1195 	ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_15 +
1196 	    SHA1_CONST(3);
1197 	ctx->state[1] += b;
1198 	ctx->state[2] += ROTATE_LEFT(c, 30);
1199 	ctx->state[3] += d;
1200 	ctx->state[4] += e;
1201 
1202 	/* zeroize sensitive information */
1203 	w_0 = w_1 = w_2 = w_3 = w_4 = w_5 = w_6 = w_7 = w_8 = 0;
1204 	w_9 = w_10 = w_11 = w_12 = w_13 = w_14 = w_15 = 0;
1205 }
1206 
1207 /*
1208  * devpro compiler optimization:
1209  *
1210  * the compiler can generate better code if it knows that `input' and
1211  * `output' do not point to the same source.  there is no portable
1212  * way to tell the compiler this, but the sun compiler recognizes the
1213  * `_Restrict' keyword to indicate this condition.  use it if possible.
1214  */
1215 
1216 #ifdef	__RESTRICT
1217 #define	restrict	_Restrict
1218 #else
1219 #define	restrict	/* nothing */
1220 #endif
1221 
1222 /*
1223  * Encode()
1224  *
1225  * purpose: to convert a list of numbers from little endian to big endian
1226  *   input: uint8_t *	: place to store the converted big endian numbers
1227  *	    uint32_t *	: place to get numbers to convert from
1228  *          size_t	: the length of the input in bytes
1229  *  output: void
1230  */
1231 
static void
Encode(uint8_t *restrict output, uint32_t *restrict input, size_t len)
{
	size_t		i, j;

#if	defined(__sparc)
	if (IS_P2ALIGNED(output, sizeof (uint32_t))) {
		for (i = 0, j = 0; j < len; i++, j += 4) {
			/* LINTED: pointer alignment */
			*((uint32_t *)(output + j)) = input[i];
		}
	} else {
#endif	/* little endian -- will work on big endian, but slowly */
		/* Serialize each word most-significant byte first. */
		for (i = 0, j = 0; j < len; i++, j += 4) {
			uint32_t w = input[i];

			output[j]	= (uint8_t)(w >> 24);
			output[j + 1]	= (uint8_t)(w >> 16);
			output[j + 2]	= (uint8_t)(w >> 8);
			output[j + 3]	= (uint8_t)w;
		}
#if	defined(__sparc)
	}
#endif
}
1255 
1256 
1257 #ifdef _KERNEL
1258 
1259 /*
1260  * KCF software provider control entry points.
1261  */
/* ARGSUSED */
/*
 * Report the operational status of this software provider to KCF.
 * Unconditionally reports the provider as ready.
 */
static void
sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}
1268 
1269 /*
1270  * KCF software provider digest entry points.
1271  */
1272 
1273 static int
1274 sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1275     crypto_req_handle_t req)
1276 {
1277 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
1278 		return (CRYPTO_MECHANISM_INVALID);
1279 
1280 	/*
1281 	 * Allocate and initialize SHA1 context.
1282 	 */
1283 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
1284 	    crypto_kmflag(req));
1285 	if (ctx->cc_provider_private == NULL)
1286 		return (CRYPTO_HOST_MEMORY);
1287 
1288 	PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
1289 	SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
1290 
1291 	return (CRYPTO_SUCCESS);
1292 }
1293 
1294 /*
1295  * Helper SHA1 digest update function for uio data.
1296  */
static int
sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
{
	off_t offset = data->cd_offset;
	size_t length = data->cd_length;
	uint_t vec_idx;
	size_t cur_len;

	/* we support only kernel buffer */
	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Jump to the first iovec containing data to be
	 * digested.  The loop body is empty; it only advances
	 * vec_idx while consuming the leading `offset' bytes.
	 * Note the bound on vec_idx is checked before uio_iov[vec_idx]
	 * is dereferenced.
	 */
	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len);
	if (vec_idx == data->cd_uio->uio_iovcnt) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now do the digesting on the iovecs.  `offset' is non-zero
	 * only for the first iovec processed.
	 */
	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
		    offset, length);

		SHA1Update(sha1_ctx,
		    (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
		    cur_len);

		length -= cur_len;
		vec_idx++;
		offset = 0;
	}

	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
		/*
		 * The end of the specified iovec's was reached but
		 * the length requested could not be processed, i.e.
		 * The caller requested to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	return (CRYPTO_SUCCESS);
}
1351 
1352 /*
1353  * Helper SHA1 digest final function for uio data.
1354  * digest_len is the length of the desired digest. If digest_len
1355  * is smaller than the default SHA1 digest length, the caller
1356  * must pass a scratch buffer, digest_scratch, which must
1357  * be at least SHA1_DIGEST_LENGTH bytes.
1358  */
1359 static int
1360 sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
1361     ulong_t digest_len, uchar_t *digest_scratch)
1362 {
1363 	off_t offset = digest->cd_offset;
1364 	uint_t vec_idx;
1365 
1366 	/* we support only kernel buffer */
1367 	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
1368 		return (CRYPTO_ARGUMENTS_BAD);
1369 
1370 	/*
1371 	 * Jump to the first iovec containing ptr to the digest to
1372 	 * be returned.
1373 	 */
1374 	for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
1375 	    vec_idx < digest->cd_uio->uio_iovcnt;
1376 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len);
1377 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
1378 		/*
1379 		 * The caller specified an offset that is
1380 		 * larger than the total size of the buffers
1381 		 * it provided.
1382 		 */
1383 		return (CRYPTO_DATA_LEN_RANGE);
1384 	}
1385 
1386 	if (offset + digest_len <=
1387 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
1388 		/*
1389 		 * The computed SHA1 digest will fit in the current
1390 		 * iovec.
1391 		 */
1392 		if (digest_len != SHA1_DIGEST_LENGTH) {
1393 			/*
1394 			 * The caller requested a short digest. Digest
1395 			 * into a scratch buffer and return to
1396 			 * the user only what was requested.
1397 			 */
1398 			SHA1Final(digest_scratch, sha1_ctx);
1399 			bcopy(digest_scratch, (uchar_t *)digest->
1400 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1401 			    digest_len);
1402 		} else {
1403 			SHA1Final((uchar_t *)digest->
1404 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1405 			    sha1_ctx);
1406 		}
1407 	} else {
1408 		/*
1409 		 * The computed digest will be crossing one or more iovec's.
1410 		 * This is bad performance-wise but we need to support it.
1411 		 * Allocate a small scratch buffer on the stack and
1412 		 * copy it piece meal to the specified digest iovec's.
1413 		 */
1414 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
1415 		off_t scratch_offset = 0;
1416 		size_t length = digest_len;
1417 		size_t cur_len;
1418 
1419 		SHA1Final(digest_tmp, sha1_ctx);
1420 
1421 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
1422 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
1423 			    offset, length);
1424 			bcopy(digest_tmp + scratch_offset,
1425 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
1426 			    cur_len);
1427 
1428 			length -= cur_len;
1429 			vec_idx++;
1430 			scratch_offset += cur_len;
1431 			offset = 0;
1432 		}
1433 
1434 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
1435 			/*
1436 			 * The end of the specified iovec's was reached but
1437 			 * the length requested could not be processed, i.e.
1438 			 * The caller requested to digest more data than it
1439 			 * provided.
1440 			 */
1441 			return (CRYPTO_DATA_LEN_RANGE);
1442 		}
1443 	}
1444 
1445 	return (CRYPTO_SUCCESS);
1446 }
1447 
1448 /*
1449  * Helper SHA1 digest update for mblk's.
1450  */
static int
sha1_digest_update_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *data)
{
	off_t offset = data->cd_offset;
	size_t length = data->cd_length;
	mblk_t *mp;
	size_t cur_len;

	/*
	 * Jump to the first mblk_t containing data to be digested.
	 * Empty loop body: it only walks the b_cont chain while
	 * consuming the leading `offset' bytes.
	 */
	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont);
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now do the digesting on the mblk chain.  `offset' is
	 * non-zero only for the first mblk processed.
	 */
	while (mp != NULL && length > 0) {
		cur_len = MIN(MBLKL(mp) - offset, length);
		SHA1Update(sha1_ctx, mp->b_rptr + offset, cur_len);
		length -= cur_len;
		offset = 0;
		mp = mp->b_cont;
	}

	if (mp == NULL && length > 0) {
		/*
		 * The end of the mblk was reached but the length requested
		 * could not be processed, i.e. The caller requested
		 * to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	return (CRYPTO_SUCCESS);
}
1494 
1495 /*
1496  * Helper SHA1 digest final for mblk's.
1497  * digest_len is the length of the desired digest. If digest_len
1498  * is smaller than the default SHA1 digest length, the caller
1499  * must pass a scratch buffer, digest_scratch, which must
1500  * be at least SHA1_DIGEST_LENGTH bytes.
1501  */
static int
sha1_digest_final_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
    ulong_t digest_len, uchar_t *digest_scratch)
{
	off_t offset = digest->cd_offset;
	mblk_t *mp;

	/*
	 * Jump to the first mblk_t that will be used to store the digest.
	 * Empty loop body: it only walks the b_cont chain while
	 * consuming the leading `offset' bytes.
	 */
	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont);
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	if (offset + digest_len <= MBLKL(mp)) {
		/*
		 * The computed SHA1 digest will fit in the current mblk.
		 * Do the SHA1Final() in-place.
		 */
		if (digest_len != SHA1_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA1Final(digest_scratch, sha1_ctx);
			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
		} else {
			SHA1Final(mp->b_rptr + offset, sha1_ctx);
		}
	} else {
		/*
		 * The computed digest will be crossing one or more mblk's.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piece meal to the specified digest mblk's.
		 */
		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		SHA1Final(digest_tmp, sha1_ctx);

		while (mp != NULL && length > 0) {
			cur_len = MIN(MBLKL(mp) - offset, length);
			bcopy(digest_tmp + scratch_offset,
			    mp->b_rptr + offset, cur_len);

			length -= cur_len;
			mp = mp->b_cont;
			scratch_offset += cur_len;
			offset = 0;
		}

		if (mp == NULL && length > 0) {
			/*
			 * The end of the specified mblk was reached but
			 * the length requested could not be processed, i.e.
			 * The caller requested to digest more data than it
			 * provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}
1576 
/* ARGSUSED */
/*
 * Single-part digest entry point: update with all of `data', then
 * finalize into `digest'.  On any failure after the length check,
 * the per-request context is freed before returning.
 */
static int
sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do the SHA1 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, free context and bail */
		kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
		ctx->cc_provider_private = NULL;
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do a SHA1 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
	ctx->cc_provider_private = NULL;
	return (ret);
}
1658 
1659 /* ARGSUSED */
1660 static int
1661 sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
1662     crypto_req_handle_t req)
1663 {
1664 	int ret = CRYPTO_SUCCESS;
1665 
1666 	ASSERT(ctx->cc_provider_private != NULL);
1667 
1668 	/*
1669 	 * Do the SHA1 update on the specified input data.
1670 	 */
1671 	switch (data->cd_format) {
1672 	case CRYPTO_DATA_RAW:
1673 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1674 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1675 		    data->cd_length);
1676 		break;
1677 	case CRYPTO_DATA_UIO:
1678 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1679 		    data);
1680 		break;
1681 	case CRYPTO_DATA_MBLK:
1682 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1683 		    data);
1684 		break;
1685 	default:
1686 		ret = CRYPTO_ARGUMENTS_BAD;
1687 	}
1688 
1689 	return (ret);
1690 }
1691 
/* ARGSUSED */
/*
 * Multi-part digest final entry point: produce the digest and free
 * the per-request context (unless the supplied buffer is too small,
 * in which case the context survives so the caller can retry).
 */
static int
sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do a SHA1 final.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
		    digest, SHA1_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}
1744 
/* ARGSUSED */
/*
 * Atomic (single-call) digest entry point: init + update + final in
 * one shot.  Uses a stack-allocated SHA1_CTX, so there is no
 * per-request context to allocate or free.
 */
static int
sha1_digest_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	SHA1_CTX sha1_ctx;

	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/*
	 * Do the SHA1 init.
	 */
	SHA1Init(&sha1_ctx);

	/*
	 * Do the SHA1 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Update(&sha1_ctx,
		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_update_uio(&sha1_ctx, data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_update_mblk(&sha1_ctx, data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, bail */
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do a SHA1 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &sha1_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(&sha1_ctx, digest,
		    SHA1_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(&sha1_ctx, digest,
		    SHA1_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = SHA1_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	return (ret);
}
1817 
1818 /*
1819  * KCF software provider mac entry points.
1820  *
1821  * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
1822  *
1823  * Init:
1824  * The initialization routine initializes what we denote
1825  * as the inner and outer contexts by doing
1826  * - for inner context: SHA1(key XOR ipad)
1827  * - for outer context: SHA1(key XOR opad)
1828  *
1829  * Update:
1830  * Each subsequent SHA1 HMAC update will result in an
1831  * update of the inner context with the specified data.
1832  *
1833  * Final:
1834  * The SHA1 HMAC final will do a SHA1 final operation on the
1835  * inner context, and the resulting digest will be used
1836  * as the data for an update on the outer context. Last
1837  * but not least, a SHA1 final on the outer context will
1838  * be performed to obtain the SHA1 HMAC digest to return
1839  * to the user.
1840  */
1841 
1842 /*
1843  * Initialize a SHA1-HMAC context.
1844  */
1845 static void
1846 sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
1847 {
1848 	uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
1849 	uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
1850 	uint_t i;
1851 
1852 	bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
1853 	bzero(opad, SHA1_HMAC_BLOCK_SIZE);
1854 
1855 	bcopy(keyval, ipad, length_in_bytes);
1856 	bcopy(keyval, opad, length_in_bytes);
1857 
1858 	/* XOR key with ipad (0x36) and opad (0x5c) */
1859 	for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
1860 		ipad[i] ^= 0x36363636;
1861 		opad[i] ^= 0x5c5c5c5c;
1862 	}
1863 
1864 	/* perform SHA1 on ipad */
1865 	SHA1Init(&ctx->hc_icontext);
1866 	SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
1867 
1868 	/* perform SHA1 on opad */
1869 	SHA1Init(&ctx->hc_ocontext);
1870 	SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
1871 }
1872 
1873 /*
1874  */
1875 static int
1876 sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1877     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
1878     crypto_req_handle_t req)
1879 {
1880 	int ret = CRYPTO_SUCCESS;
1881 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1882 
1883 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1884 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1885 		return (CRYPTO_MECHANISM_INVALID);
1886 
1887 	/* Add support for key by attributes (RFE 4706552) */
1888 	if (key->ck_format != CRYPTO_KEY_RAW)
1889 		return (CRYPTO_ARGUMENTS_BAD);
1890 
1891 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
1892 	    crypto_kmflag(req));
1893 	if (ctx->cc_provider_private == NULL)
1894 		return (CRYPTO_HOST_MEMORY);
1895 
1896 	if (ctx_template != NULL) {
1897 		/* reuse context template */
1898 		bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
1899 		    sizeof (sha1_hmac_ctx_t));
1900 	} else {
1901 		/* no context template, compute context */
1902 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1903 			uchar_t digested_key[SHA1_DIGEST_LENGTH];
1904 			sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
1905 
1906 			/*
1907 			 * Hash the passed-in key to get a smaller key.
1908 			 * The inner context is used since it hasn't been
1909 			 * initialized yet.
1910 			 */
1911 			PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
1912 			    key->ck_data, keylen_in_bytes, digested_key);
1913 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
1914 			    digested_key, SHA1_DIGEST_LENGTH);
1915 		} else {
1916 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
1917 			    key->ck_data, keylen_in_bytes);
1918 		}
1919 	}
1920 
1921 	/*
1922 	 * Get the mechanism parameters, if applicable.
1923 	 */
1924 	PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
1925 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1926 		if (mechanism->cm_param == NULL ||
1927 		    mechanism->cm_param_len != sizeof (ulong_t))
1928 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1929 		PROV_SHA1_GET_DIGEST_LEN(mechanism,
1930 		    PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
1931 		if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
1932 		    SHA1_DIGEST_LENGTH)
1933 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1934 	}
1935 
1936 	if (ret != CRYPTO_SUCCESS) {
1937 		bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1938 		kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1939 		ctx->cc_provider_private = NULL;
1940 	}
1941 
1942 	return (ret);
1943 }
1944 
1945 /* ARGSUSED */
1946 static int
1947 sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
1948 {
1949 	int ret = CRYPTO_SUCCESS;
1950 
1951 	ASSERT(ctx->cc_provider_private != NULL);
1952 
1953 	/*
1954 	 * Do a SHA1 update of the inner context using the specified
1955 	 * data.
1956 	 */
1957 	switch (data->cd_format) {
1958 	case CRYPTO_DATA_RAW:
1959 		SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
1960 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1961 		    data->cd_length);
1962 		break;
1963 	case CRYPTO_DATA_UIO:
1964 		ret = sha1_digest_update_uio(
1965 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
1966 		break;
1967 	case CRYPTO_DATA_MBLK:
1968 		ret = sha1_digest_update_mblk(
1969 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
1970 		break;
1971 	default:
1972 		ret = CRYPTO_ARGUMENTS_BAD;
1973 	}
1974 
1975 	return (ret);
1976 }
1977 
1978 /* ARGSUSED */
1979 static int
1980 sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
1981 {
1982 	int ret = CRYPTO_SUCCESS;
1983 	uchar_t digest[SHA1_DIGEST_LENGTH];
1984 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
1985 
1986 	ASSERT(ctx->cc_provider_private != NULL);
1987 
1988 	if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
1989 	    SHA1_HMAC_GEN_MECH_INFO_TYPE)
1990 		digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
1991 
1992 	/*
1993 	 * We need to just return the length needed to store the output.
1994 	 * We should not destroy the context for the following cases.
1995 	 */
1996 	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
1997 		mac->cd_length = digest_len;
1998 		return (CRYPTO_BUFFER_TOO_SMALL);
1999 	}
2000 
2001 	/*
2002 	 * Do a SHA1 final on the inner context.
2003 	 */
2004 	SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);
2005 
2006 	/*
2007 	 * Do a SHA1 update on the outer context, feeding the inner
2008 	 * digest as data.
2009 	 */
2010 	SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
2011 	    SHA1_DIGEST_LENGTH);
2012 
2013 	/*
2014 	 * Do a SHA1 final on the outer context, storing the computing
2015 	 * digest in the users buffer.
2016 	 */
2017 	switch (mac->cd_format) {
2018 	case CRYPTO_DATA_RAW:
2019 		if (digest_len != SHA1_DIGEST_LENGTH) {
2020 			/*
2021 			 * The caller requested a short digest. Digest
2022 			 * into a scratch buffer and return to
2023 			 * the user only what was requested.
2024 			 */
2025 			SHA1Final(digest,
2026 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
2027 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
2028 			    mac->cd_offset, digest_len);
2029 		} else {
2030 			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
2031 			    mac->cd_offset,
2032 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
2033 		}
2034 		break;
2035 	case CRYPTO_DATA_UIO:
2036 		ret = sha1_digest_final_uio(
2037 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
2038 		    digest_len, digest);
2039 		break;
2040 	case CRYPTO_DATA_MBLK:
2041 		ret = sha1_digest_final_mblk(
2042 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
2043 		    digest_len, digest);
2044 		break;
2045 	default:
2046 		ret = CRYPTO_ARGUMENTS_BAD;
2047 	}
2048 
2049 	if (ret == CRYPTO_SUCCESS) {
2050 		mac->cd_length = digest_len;
2051 	} else {
2052 		mac->cd_length = 0;
2053 	}
2054 
2055 	bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
2056 	kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
2057 	ctx->cc_provider_private = NULL;
2058 
2059 	return (ret);
2060 }
2061 
/*
 * Feed `data' (raw, uio, or mblk) to the inner SHA1 context of the
 * on-stack HMAC context `ctx', setting `ret' on failure.  Wrapped in
 * do { } while (0) so the expansion is a single statement: the bare
 * compound-block form left a stray empty statement after the invoking
 * semicolon and would break inside an unbraced if/else (CERT PRE10-C).
 */
#define	SHA1_MAC_UPDATE(data, ctx, ret) do {				\
	switch (data->cd_format) {					\
	case CRYPTO_DATA_RAW:						\
		SHA1Update(&(ctx).hc_icontext,				\
		    (uint8_t *)data->cd_raw.iov_base +			\
		    data->cd_offset, data->cd_length);			\
		break;							\
	case CRYPTO_DATA_UIO:						\
		ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
		break;							\
	case CRYPTO_DATA_MBLK:						\
		ret = sha1_digest_update_mblk(&(ctx).hc_icontext,	\
		    data);						\
		break;							\
	default:							\
		ret = CRYPTO_ARGUMENTS_BAD;				\
	}								\
} while (0)
2080 
/*
 * KCF single-part (atomic) MAC entry point for the SHA1-HMAC
 * mechanisms.  Computes HMAC-SHA1 of `data' under `key' in one call,
 * writing the (possibly truncated, for the general-length mechanism)
 * digest into `mac'.  An on-stack HMAC context is used and zeroized
 * on every exit path so no key material is left on the stack.
 */
/* ARGSUSED */
static int
sha1_mac_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA1_DIGEST_LENGTH];
	sha1_hmac_ctx_t sha1_hmac_ctx;
	uint32_t digest_len = SHA1_DIGEST_LENGTH;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	/* only the two SHA1-HMAC mechanisms are handled here */
	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/* Add support for key by attributes (RFE 4706552) */
	if (key->ck_format != CRYPTO_KEY_RAW)
		return (CRYPTO_ARGUMENTS_BAD);

	if (ctx_template != NULL) {
		/* reuse context template (precomputed ipad/opad state) */
		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
	} else {
		/* no context template, initialize context */
		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
			    key->ck_data, keylen_in_bytes, digest);
			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
			    SHA1_DIGEST_LENGTH);
		} else {
			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
			    keylen_in_bytes);
		}
	}

	/* get the mechanism parameters, if applicable */
	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
		/* general-length mechanism carries the digest length param */
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t)) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
		if (digest_len > SHA1_DIGEST_LENGTH) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
	}

	/* do a SHA1 update of the inner context using the specified data */
	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
	if (ret != CRYPTO_SUCCESS)
		/* the update failed, free context and bail */
		goto bail;

	/*
	 * Do a SHA1 final on the inner context.
	 */
	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);

	/*
	 * Do an SHA1 update on the outer context, feeding the inner
	 * digest as data.
	 */
	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);

	/*
	 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		if (digest_len != SHA1_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest_len);
		} else {
			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(&sha1_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		mac->cd_length = digest_len;
	} else {
		mac->cd_length = 0;
	}
	/* Extra paranoia: zeroize the context on the stack */
	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));

	return (ret);
bail:
	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
	mac->cd_length = 0;
	return (ret);
}
2200 
2201 /* ARGSUSED */
2202 static int
2203 sha1_mac_verify_atomic(crypto_provider_handle_t provider,
2204     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
2205     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
2206     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
2207 {
2208 	int ret = CRYPTO_SUCCESS;
2209 	uchar_t digest[SHA1_DIGEST_LENGTH];
2210 	sha1_hmac_ctx_t sha1_hmac_ctx;
2211 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
2212 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
2213 
2214 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
2215 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
2216 		return (CRYPTO_MECHANISM_INVALID);
2217 
2218 	/* Add support for key by attributes (RFE 4706552) */
2219 	if (key->ck_format != CRYPTO_KEY_RAW)
2220 		return (CRYPTO_ARGUMENTS_BAD);
2221 
2222 	if (ctx_template != NULL) {
2223 		/* reuse context template */
2224 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
2225 	} else {
2226 		/* no context template, initialize context */
2227 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
2228 			/*
2229 			 * Hash the passed-in key to get a smaller key.
2230 			 * The inner context is used since it hasn't been
2231 			 * initialized yet.
2232 			 */
2233 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
2234 			    key->ck_data, keylen_in_bytes, digest);
2235 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
2236 			    SHA1_DIGEST_LENGTH);
2237 		} else {
2238 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
2239 			    keylen_in_bytes);
2240 		}
2241 	}
2242 
2243 	/* get the mechanism parameters, if applicable */
2244 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
2245 		if (mechanism->cm_param == NULL ||
2246 		    mechanism->cm_param_len != sizeof (ulong_t)) {
2247 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
2248 			goto bail;
2249 		}
2250 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
2251 		if (digest_len > SHA1_DIGEST_LENGTH) {
2252 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
2253 			goto bail;
2254 		}
2255 	}
2256 
2257 	if (mac->cd_length != digest_len) {
2258 		ret = CRYPTO_INVALID_MAC;
2259 		goto bail;
2260 	}
2261 
2262 	/* do a SHA1 update of the inner context using the specified data */
2263 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
2264 	if (ret != CRYPTO_SUCCESS)
2265 		/* the update failed, free context and bail */
2266 		goto bail;
2267 
2268 	/* do a SHA1 final on the inner context */
2269 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
2270 
2271 	/*
2272 	 * Do an SHA1 update on the outer context, feeding the inner
2273 	 * digest as data.
2274 	 */
2275 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
2276 
2277 	/*
2278 	 * Do a SHA1 final on the outer context, storing the computed
2279 	 * digest in the users buffer.
2280 	 */
2281 	SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
2282 
2283 	/*
2284 	 * Compare the computed digest against the expected digest passed
2285 	 * as argument.
2286 	 */
2287 
2288 	switch (mac->cd_format) {
2289 
2290 	case CRYPTO_DATA_RAW:
2291 		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
2292 		    mac->cd_offset, digest_len) != 0)
2293 			ret = CRYPTO_INVALID_MAC;
2294 		break;
2295 
2296 	case CRYPTO_DATA_UIO: {
2297 		off_t offset = mac->cd_offset;
2298 		uint_t vec_idx;
2299 		off_t scratch_offset = 0;
2300 		size_t length = digest_len;
2301 		size_t cur_len;
2302 
2303 		/* we support only kernel buffer */
2304 		if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
2305 			return (CRYPTO_ARGUMENTS_BAD);
2306 
2307 		/* jump to the first iovec containing the expected digest */
2308 		for (vec_idx = 0;
2309 		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len &&
2310 		    vec_idx < mac->cd_uio->uio_iovcnt;
2311 		    offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len);
2312 		if (vec_idx == mac->cd_uio->uio_iovcnt) {
2313 			/*
2314 			 * The caller specified an offset that is
2315 			 * larger than the total size of the buffers
2316 			 * it provided.
2317 			 */
2318 			ret = CRYPTO_DATA_LEN_RANGE;
2319 			break;
2320 		}
2321 
2322 		/* do the comparison of computed digest vs specified one */
2323 		while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
2324 			cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
2325 			    offset, length);
2326 
2327 			if (bcmp(digest + scratch_offset,
2328 			    mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
2329 			    cur_len) != 0) {
2330 				ret = CRYPTO_INVALID_MAC;
2331 				break;
2332 			}
2333 
2334 			length -= cur_len;
2335 			vec_idx++;
2336 			scratch_offset += cur_len;
2337 			offset = 0;
2338 		}
2339 		break;
2340 	}
2341 
2342 	case CRYPTO_DATA_MBLK: {
2343 		off_t offset = mac->cd_offset;
2344 		mblk_t *mp;
2345 		off_t scratch_offset = 0;
2346 		size_t length = digest_len;
2347 		size_t cur_len;
2348 
2349 		/* jump to the first mblk_t containing the expected digest */
2350 		for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
2351 		    offset -= MBLKL(mp), mp = mp->b_cont);
2352 		if (mp == NULL) {
2353 			/*
2354 			 * The caller specified an offset that is larger than
2355 			 * the total size of the buffers it provided.
2356 			 */
2357 			ret = CRYPTO_DATA_LEN_RANGE;
2358 			break;
2359 		}
2360 
2361 		while (mp != NULL && length > 0) {
2362 			cur_len = MIN(MBLKL(mp) - offset, length);
2363 			if (bcmp(digest + scratch_offset,
2364 			    mp->b_rptr + offset, cur_len) != 0) {
2365 				ret = CRYPTO_INVALID_MAC;
2366 				break;
2367 			}
2368 
2369 			length -= cur_len;
2370 			mp = mp->b_cont;
2371 			scratch_offset += cur_len;
2372 			offset = 0;
2373 		}
2374 		break;
2375 	}
2376 
2377 	default:
2378 		ret = CRYPTO_ARGUMENTS_BAD;
2379 	}
2380 
2381 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
2382 	return (ret);
2383 bail:
2384 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
2385 	mac->cd_length = 0;
2386 	return (ret);
2387 }
2388 
2389 /*
2390  * KCF software provider context management entry points.
2391  */
2392 
2393 /* ARGSUSED */
2394 static int
2395 sha1_create_ctx_template(crypto_provider_handle_t provider,
2396     crypto_mechanism_t *mechanism, crypto_key_t *key,
2397     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
2398     crypto_req_handle_t req)
2399 {
2400 	sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
2401 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
2402 
2403 	if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
2404 	    (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
2405 		return (CRYPTO_MECHANISM_INVALID);
2406 	}
2407 
2408 	/* Add support for key by attributes (RFE 4706552) */
2409 	if (key->ck_format != CRYPTO_KEY_RAW)
2410 		return (CRYPTO_ARGUMENTS_BAD);
2411 
2412 	/*
2413 	 * Allocate and initialize SHA1 context.
2414 	 */
2415 	sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
2416 	    crypto_kmflag(req));
2417 	if (sha1_hmac_ctx_tmpl == NULL)
2418 		return (CRYPTO_HOST_MEMORY);
2419 
2420 	if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
2421 		uchar_t digested_key[SHA1_DIGEST_LENGTH];
2422 
2423 		/*
2424 		 * Hash the passed-in key to get a smaller key.
2425 		 * The inner context is used since it hasn't been
2426 		 * initialized yet.
2427 		 */
2428 		PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
2429 		    key->ck_data, keylen_in_bytes, digested_key);
2430 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
2431 		    SHA1_DIGEST_LENGTH);
2432 	} else {
2433 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
2434 		    keylen_in_bytes);
2435 	}
2436 
2437 	sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
2438 	*ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
2439 	*ctx_template_size = sizeof (sha1_hmac_ctx_t);
2440 
2441 
2442 	return (CRYPTO_SUCCESS);
2443 }
2444 
2445 static int
2446 sha1_free_context(crypto_ctx_t *ctx)
2447 {
2448 	uint_t ctx_len;
2449 	sha1_mech_type_t mech_type;
2450 
2451 	if (ctx->cc_provider_private == NULL)
2452 		return (CRYPTO_SUCCESS);
2453 
2454 	/*
2455 	 * We have to free either SHA1 or SHA1-HMAC contexts, which
2456 	 * have different lengths.
2457 	 */
2458 
2459 	mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
2460 	if (mech_type == SHA1_MECH_INFO_TYPE)
2461 		ctx_len = sizeof (sha1_ctx_t);
2462 	else {
2463 		ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
2464 		    mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
2465 		ctx_len = sizeof (sha1_hmac_ctx_t);
2466 	}
2467 
2468 	bzero(ctx->cc_provider_private, ctx_len);
2469 	kmem_free(ctx->cc_provider_private, ctx_len);
2470 	ctx->cc_provider_private = NULL;
2471 
2472 	return (CRYPTO_SUCCESS);
2473 }
2474 
2475 #endif /* _KERNEL */
2476