xref: /titanic_50/usr/src/common/crypto/sha1/sha1.c (revision 02e56f3f1bfc8d9977bafb8cb5202f576dcded27)
1 /*
2  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 #pragma ident	"%Z%%M%	%I%	%E% SMI"
7 
8 /*
9  * The basic framework for this code came from the reference
10  * implementation for MD5.  That implementation is Copyright (C)
11  * 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved.
12  *
13  * License to copy and use this software is granted provided that it
14  * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
15  * Algorithm" in all material mentioning or referencing this software
16  * or this function.
17  *
18  * License is also granted to make and use derivative works provided
19  * that such works are identified as "derived from the RSA Data
20  * Security, Inc. MD5 Message-Digest Algorithm" in all material
21  * mentioning or referencing the derived work.
22  *
23  * RSA Data Security, Inc. makes no representations concerning either
24  * the merchantability of this software or the suitability of this
25  * software for any particular purpose. It is provided "as is"
26  * without express or implied warranty of any kind.
27  *
28  * These notices must be retained in any copies of any part of this
29  * documentation and/or software.
30  *
31  * NOTE: Cleaned-up and optimized, version of SHA1, based on the FIPS 180-1
32  * standard, available at http://www.itl.nist.gov/div897/pubs/fip180-1.htm
33  * Not as fast as one would like -- further optimizations are encouraged
34  * and appreciated.
35  */
36 
37 #include <sys/types.h>
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysmacros.h>
41 #include <sys/sha1.h>
42 #include <sys/sha1_consts.h>
43 
44 #ifdef _KERNEL
45 
46 #include <sys/modctl.h>
47 #include <sys/cmn_err.h>
48 #include <sys/note.h>
49 #include <sys/crypto/common.h>
50 #include <sys/crypto/spi.h>
51 #include <sys/strsun.h>
52 
53 /*
54  * The sha1 module is created with two modlinkages:
55  * - a modlmisc that allows consumers to directly call the entry points
56  *   SHA1Init, SHA1Update, and SHA1Final.
57  * - a modlcrypto that allows the module to register with the Kernel
58  *   Cryptographic Framework (KCF) as a software provider for the SHA1
59  *   mechanisms.
60  */
61 
62 #endif /* _KERNEL */
63 #ifndef	_KERNEL
64 #include <strings.h>
65 #include <stdlib.h>
66 #include <errno.h>
67 #include <sys/systeminfo.h>
68 #endif	/* !_KERNEL */
69 
70 static void Encode(uint8_t *, uint32_t *, size_t);
71 static void SHA1Transform(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t,
72     SHA1_CTX *, const uint8_t *);
73 
74 static uint8_t PADDING[64] = { 0x80, /* all zeros */ };
75 
/*
 * F, G, and H are the basic SHA1 round functions (FIPS 180-1 terms:
 * F is "choose", G is "parity", H is "majority").
 *
 * Fix: F previously expanded `~b' without parenthesizing the argument,
 * so an expression argument (e.g. F(x ^ y, c, d)) would have bound the
 * `~' to only part of it.  All arguments are now fully parenthesized.
 */
#define	F(b, c, d)	(((b) & (c)) | ((~(b)) & (d)))
#define	G(b, c, d)	((b) ^ (c) ^ (d))
#define	H(b, c, d)	(((b) & (c)) | ((b) & (d)) | ((c) & (d)))

/*
 * ROTATE_LEFT rotates x left n bits (callers must keep 0 < n < the
 * bit-width of x; a shift by the full width would be undefined).
 */
#define	ROTATE_LEFT(x, n)	\
	(((x) << (n)) | ((x) >> ((sizeof (x) * NBBY)-(n))))
88 
89 #ifdef _KERNEL
90 
/*
 * Linkage that exports SHA1Init/SHA1Update/SHA1Final to direct
 * (non-KCF) kernel consumers of misc/sha1.
 */
static struct modlmisc modlmisc = {
	&mod_miscops,
	"SHA1 Message-Digest Algorithm"
};

/* Linkage used to register with the Kernel Cryptographic Framework. */
static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	"SHA1 Kernel SW Provider %I%"
};

/* Both linkages are installed together by mod_install() in _init(). */
static struct modlinkage modlinkage = {
	MODREV_1, &modlmisc, &modlcrypto, NULL
};
104 
/*
 * CSPI information (entry points, provider info, etc.)
 */

/* Mechanisms implemented by this provider; order matches sha1_mech_info_tab. */
typedef enum sha1_mech_type {
	SHA1_MECH_INFO_TYPE,		/* SUN_CKM_SHA1 */
	SHA1_HMAC_MECH_INFO_TYPE,	/* SUN_CKM_SHA1_HMAC */
	SHA1_HMAC_GEN_MECH_INFO_TYPE	/* SUN_CKM_SHA1_HMAC_GENERAL */
} sha1_mech_type_t;

#define	SHA1_DIGEST_LENGTH	20	/* SHA1 digest length in bytes */
#define	SHA1_HMAC_BLOCK_SIZE	64	/* SHA1-HMAC block size */
#define	SHA1_HMAC_MIN_KEY_LEN	8	/* SHA1-HMAC min key length in bits */
#define	SHA1_HMAC_MAX_KEY_LEN	INT_MAX /* SHA1-HMAC max key length in bits */
/* number of 32-bit words in one SHA1-HMAC block */
#define	SHA1_HMAC_INTS_PER_BLOCK	(SHA1_HMAC_BLOCK_SIZE/sizeof (uint32_t))
120 
/*
 * Context for SHA1 mechanism.
 */
typedef struct sha1_ctx {
	sha1_mech_type_t	sc_mech_type;	/* type of context */
	SHA1_CTX		sc_sha1_ctx;	/* SHA1 context */
} sha1_ctx_t;

/*
 * Context for SHA1-HMAC and SHA1-HMAC-GENERAL mechanisms.
 *
 * NOTE(review): both context structs begin with the mechanism type,
 * presumably so the type can be read before the full layout is known
 * -- confirm against the entry-point code that dispatches on it.
 */
typedef struct sha1_hmac_ctx {
	sha1_mech_type_t	hc_mech_type;	/* type of context */
	uint32_t		hc_digest_len;	/* digest len in bytes */
	SHA1_CTX		hc_icontext;	/* inner SHA1 context */
	SHA1_CTX		hc_ocontext;	/* outer SHA1 context */
} sha1_hmac_ctx_t;
138 
/*
 * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
 * by KCF to one of the entry points.
 */

#define	PROV_SHA1_CTX(ctx)	((sha1_ctx_t *)(ctx)->cc_provider_private)
#define	PROV_SHA1_HMAC_CTX(ctx)	((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)

/*
 * Extract the digest length passed as mechanism parameter.  A
 * misaligned parameter is copied to an aligned temporary first.
 *
 * Fixes: the aligned branch previously read `mechanism->cm_param'
 * instead of `(m)->cm_param', silently depending on every caller
 * naming its argument `mechanism'.  Both multi-statement macros are
 * now wrapped in do { } while (0) so they expand to one statement
 * and compose safely with if/else.
 */
#define	PROV_SHA1_GET_DIGEST_LEN(m, len) do {				\
	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
		(len) = (uint32_t)*((ulong_t *)(m)->cm_param);		\
	else {								\
		ulong_t tmp_ulong;					\
		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
		(len) = (uint32_t)tmp_ulong;				\
	}								\
} while (0)

/* One-shot SHA1 digest of `key' (len bytes) into `digest', reusing ctx. */
#define	PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) do {	\
	SHA1Init(ctx);						\
	SHA1Update(ctx, key, len);				\
	SHA1Final(digest, ctx);					\
} while (0)
163 
/*
 * Mechanism info structure passed to KCF during registration.
 * Key sizes are expressed in bits (CRYPTO_KEYSIZE_UNIT_IN_BITS);
 * the plain digest mechanism takes no key, hence min/max of 0.
 */
static crypto_mech_info_t sha1_mech_info_tab[] = {
	/* SHA1 */
	{SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
	/* SHA1-HMAC */
	{SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
	/* SHA1-HMAC GENERAL */
	{SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
	    SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
};
183 
/* Provider status callback (KCF control ops). */
static void sha1_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t sha1_control_ops = {
	sha1_provider_status
};

/* Digest (plain SHA1) entry points. */
static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_req_handle_t);
static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);

static crypto_digest_ops_t sha1_digest_ops = {
	sha1_digest_init,
	sha1_digest,
	sha1_digest_update,
	NULL,			/* optional op not supported */
	sha1_digest_final,
	sha1_digest_atomic
};
210 
/* MAC (SHA1-HMAC / SHA1-HMAC-GENERAL) entry points. */
static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_mac_ops_t sha1_mac_ops = {
	sha1_mac_init,
	NULL,			/* optional op not supported */
	sha1_mac_update,
	sha1_mac_final,
	sha1_mac_atomic,
	sha1_mac_verify_atomic
};
231 
/* Context-template management entry points. */
static int sha1_create_ctx_template(crypto_provider_handle_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
    size_t *, crypto_req_handle_t);
static int sha1_free_context(crypto_ctx_t *);

static crypto_ctx_ops_t sha1_ctx_ops = {
	sha1_create_ctx_template,
	sha1_free_context
};

/*
 * Full ops vector handed to KCF.  Only the control, digest, mac, and
 * ctx op groups are implemented; every other group is left NULL.
 */
static crypto_ops_t sha1_crypto_ops = {
	&sha1_control_ops,
	&sha1_digest_ops,
	NULL,
	&sha1_mac_ops,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	&sha1_ctx_ops
};
258 
/* Provider description registered with KCF by _init(). */
static crypto_provider_info_t sha1_prov_info = {
	CRYPTO_SPI_VERSION_1,
	"SHA1 Software Provider",
	CRYPTO_SW_PROVIDER,
	{&modlinkage},
	NULL,
	&sha1_crypto_ops,
	sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
	sha1_mech_info_tab
};

/* Handle from crypto_register_provider(); NULL until registration succeeds. */
static crypto_kcf_provider_handle_t sha1_prov_handle = NULL;
271 
272 int
273 _init()
274 {
275 	int ret;
276 
277 	if ((ret = mod_install(&modlinkage)) != 0)
278 		return (ret);
279 
280 	/*
281 	 * Register with KCF. If the registration fails, log an
282 	 * error but do not uninstall the module, since the functionality
283 	 * provided by misc/sha1 should still be available.
284 	 */
285 	if ((ret = crypto_register_provider(&sha1_prov_info,
286 	    &sha1_prov_handle)) != CRYPTO_SUCCESS)
287 		cmn_err(CE_WARN, "sha1 _init: "
288 		    "crypto_register_provider() failed (0x%x)", ret);
289 
290 	return (0);
291 }
292 
/*
 * Module information entry point; delegates to the module framework.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
298 
299 #endif /* _KERNEL */
300 
301 /*
302  * SHA1Init()
303  *
 * purpose: initializes the sha1 context and begins an sha1 digest operation
 *   input: SHA1_CTX *	: the context to initialize.
306  *  output: void
307  */
308 
309 void
310 SHA1Init(SHA1_CTX *ctx)
311 {
312 	ctx->count[0] = ctx->count[1] = 0;
313 
314 	/*
315 	 * load magic initialization constants. Tell lint
316 	 * that these constants are unsigned by using U.
317 	 */
318 
319 	ctx->state[0] = 0x67452301U;
320 	ctx->state[1] = 0xefcdab89U;
321 	ctx->state[2] = 0x98badcfeU;
322 	ctx->state[3] = 0x10325476U;
323 	ctx->state[4] = 0xc3d2e1f0U;
324 }
325 
326 #ifdef VIS_SHA1
327 
328 static int usevis = 0;
329 
330 #ifdef _KERNEL
331 
332 #include <sys/regset.h>
333 #include <sys/vis.h>
334 
335 /* the alignment for block stores to save fp registers */
336 #define	VIS_ALIGN	(64)
337 
338 extern int sha1_savefp(kfpu_t *, int);
339 extern void sha1_restorefp(kfpu_t *);
340 
341 uint32_t	vis_sha1_svfp_threshold = 128;
342 
343 #else /* !_KERNEL */
344 
345 static boolean_t checked_vis = B_FALSE;
346 
347 static int
348 havevis()
349 {
350 	char *buf = NULL;
351 	char *isa_token;
352 	char *lasts;
353 	int ret = 0;
354 	size_t bufsize = 255; /* UltraSPARC III needs 115 chars */
355 	int v9_isa_token, vis_isa_token, isa_token_num;
356 
357 	if (checked_vis) {
358 		return (usevis);
359 	}
360 
361 	if ((buf = malloc(bufsize)) == NULL) {
362 		return (0);
363 	}
364 
365 	if ((ret = sysinfo(SI_ISALIST, buf, bufsize)) == -1) {
366 		free(buf);
367 		return (0);
368 	} else if (ret > bufsize) {
369 		/* We lost some because our buffer was too small  */
370 		if ((buf = realloc(buf, bufsize = ret)) == NULL) {
371 			return (0);
372 		}
373 		if ((ret = sysinfo(SI_ISALIST, buf, bufsize)) == -1) {
374 			free(buf);
375 			return (0);
376 		}
377 	}
378 
379 	/*
380 	 * Check the relative posistions of sparcv9 & sparcv9+vis
381 	 * because they are listed in (best) performance order.
382 	 * For example: The Niagara chip reports it has VIS but the
383 	 * SHA1 code runs faster without this optimisation.
384 	 */
385 	isa_token = strtok_r(buf, " ", &lasts);
386 	v9_isa_token = vis_isa_token = -1;
387 	isa_token_num = 0;
388 	do {
389 		if (strcmp(isa_token, "sparcv9") == 0) {
390 			v9_isa_token = isa_token_num;
391 		} else if (strcmp(isa_token, "sparcv9+vis") == 0) {
392 			vis_isa_token = isa_token_num;
393 		}
394 		isa_token_num++;
395 	} while (isa_token = strtok_r(NULL, " ", &lasts));
396 
397 	if (vis_isa_token != -1 && vis_isa_token < v9_isa_token)
398 		usevis = 1;
399 	free(buf);
400 
401 	checked_vis = B_TRUE;
402 	return (usevis);
403 }
404 
405 #endif /* _KERNEL */
406 
/*
 * VIS SHA-1 consts: packed constants consumed by SHA1TransformVIS().
 */
static uint64_t VIS[] = {
	0x8000000080000000ULL,
	0x0002000200020002ULL,
	0x5a8279996ed9eba1ULL,	/* SHA-1 round constants K0/K1, packed */
	0x8f1bbcdcca62c1d6ULL,	/* SHA-1 round constants K2/K3, packed */
	0x012389ab456789abULL};

/* external (VIS 1.0) routine; see the API comment in SHA1Update() below */
extern void SHA1TransformVIS(uint64_t *, uint64_t *, uint32_t *, uint64_t *);
418 
419 
420 /*
421  * SHA1Update()
422  *
423  * purpose: continues an sha1 digest operation, using the message block
424  *          to update the context.
425  *   input: SHA1_CTX *	: the context to update
426  *          uint8_t *	: the message block
427  *          uint32_t    : the length of the message block in bytes
428  *  output: void
429  */
430 
/*
 * VIS-accelerated variant: feeds complete 64-byte blocks either to the
 * standard integer SHA1Transform() or to SHA1TransformVIS() when the
 * VIS units are usable (see the large comment below for the rationale).
 */
void
SHA1Update(SHA1_CTX *ctx, const uint8_t *input, uint32_t input_len)
{
	uint32_t i, buf_index, buf_len;

	/* X0: VIS message-schedule scratch; input64: aligned bounce buffer */
	uint64_t X0[40], input64[8];

	/* check for noop */
	if (input_len == 0)
		return;

	/* compute number of bytes mod 64 */
	buf_index = (ctx->count[1] >> 3) & 0x3F;

	/* update number of bits (64-bit counter split across count[0,1]) */
	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
		ctx->count[0]++;

	/* add the bits that overflow the low 32-bit word */
	ctx->count[0] += (input_len >> 29);

	buf_len = 64 - buf_index;

	/* transform as many times as possible */
	i = 0;
	if (input_len >= buf_len) {
#ifdef _KERNEL
		/* scratch for the fp-register save area, VIS_ALIGN aligned */
		uint8_t fpua[sizeof (kfpu_t) + GSR_SIZE + VIS_ALIGN];
		kfpu_t *fpu;

		uint32_t len = (input_len + buf_index) & ~0x3f;
		int svfp_ok;

		fpu = (kfpu_t *)P2ROUNDUP((uintptr_t)fpua, 64);
		/* only pay the fp save/restore cost for large inputs */
		svfp_ok = ((len >= vis_sha1_svfp_threshold) ? 1 : 0);
		usevis = sha1_savefp(fpu, svfp_ok);
#else
		if (!checked_vis)
			usevis = havevis();
#endif /* _KERNEL */

		/*
		 * general optimization:
		 *
		 * only do initial bcopy() and SHA1Transform() if
		 * buf_index != 0.  if buf_index == 0, we're just
		 * wasting our time doing the bcopy() since there
		 * wasn't any data left over from a previous call to
		 * SHA1Update().
		 */

		if (buf_index) {
			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
			if (usevis) {
				SHA1TransformVIS(X0,
				    (uint64_t *)ctx->buf_un.buf8,
				    &ctx->state[0], VIS);
			} else {
				SHA1Transform(ctx->state[0], ctx->state[1],
				    ctx->state[2], ctx->state[3],
				    ctx->state[4], ctx, ctx->buf_un.buf8);
			}
			i = buf_len;
		}

		/*
		 * VIS SHA-1: uses the VIS 1.0 instructions to accelerate
		 * SHA-1 processing. This is achieved by "offloading" the
		 * computation of the message schedule (MS) to the VIS units.
		 * This allows the VIS computation of the message schedule
		 * to be performed in parallel with the standard integer
		 * processing of the remainder of the SHA-1 computation.
		 * This improves performance by up to around 1.37X, compared
		 * to an optimized integer-only implementation.
		 *
		 * The VIS implementation of SHA1Transform has a different API
		 * to the standard integer version:
		 *
		 * void SHA1TransformVIS(
		 *	 uint64_t *, // Pointer to MS for ith block
		 *	 uint64_t *, // Pointer to ith block of message data
		 *	 uint32_t *, // Pointer to SHA state i.e ctx->state
		 *	 uint64_t *, // Pointer to various VIS constants
		 * )
		 *
		 * Note: the message data must be 4-byte aligned.
		 *
		 * Function requires VIS 1.0 support.
		 *
		 * Handling is provided to deal with arbitrary byte alignment
		 * of the input data but the performance gains are reduced
		 * for alignments other than 4-bytes.
		 */
		if (usevis) {
			if (((uint64_t)(uintptr_t)(&input[i]) & 0x3)) {
				/*
				 * Main processing loop - input misaligned
				 */
				for (; i + 63 < input_len; i += 64) {
				    bcopy(&input[i], input64, 64);
				    SHA1TransformVIS(X0, input64,
					&ctx->state[0], VIS);
				}
			} else {
				/*
				 * Main processing loop - input 8-byte aligned
				 */
				for (; i + 63 < input_len; i += 64) {
					SHA1TransformVIS(X0,
					    (uint64_t *)&input[i],
					    &ctx->state[0], VIS);
				}

			}
#ifdef _KERNEL
			sha1_restorefp(fpu);
#endif /* _KERNEL */
		} else {
			for (; i + 63 < input_len; i += 64) {
			    SHA1Transform(ctx->state[0], ctx->state[1],
				ctx->state[2], ctx->state[3], ctx->state[4],
				ctx, &input[i]);
			}
		}

		/*
		 * general optimization:
		 *
		 * if i and input_len are the same, return now instead
		 * of calling bcopy(), since the bcopy() in this case
		 * will be an expensive nop.
		 */

		if (input_len == i)
			return;

		buf_index = 0;
	}

	/* buffer remaining input */
	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
}
572 
573 #else /* VIS_SHA1 */
574 
575 void
576 SHA1Update(SHA1_CTX *ctx, const uint8_t *input, uint32_t input_len)
577 {
578 	uint32_t i, buf_index, buf_len;
579 
580 	/* check for noop */
581 	if (input_len == 0)
582 		return;
583 
584 	/* compute number of bytes mod 64 */
585 	buf_index = (ctx->count[1] >> 3) & 0x3F;
586 
587 	/* update number of bits */
588 	if ((ctx->count[1] += (input_len << 3)) < (input_len << 3))
589 		ctx->count[0]++;
590 
591 	ctx->count[0] += (input_len >> 29);
592 
593 	buf_len = 64 - buf_index;
594 
595 	/* transform as many times as possible */
596 	i = 0;
597 	if (input_len >= buf_len) {
598 
599 		/*
600 		 * general optimization:
601 		 *
602 		 * only do initial bcopy() and SHA1Transform() if
603 		 * buf_index != 0.  if buf_index == 0, we're just
604 		 * wasting our time doing the bcopy() since there
605 		 * wasn't any data left over from a previous call to
606 		 * SHA1Update().
607 		 */
608 
609 		if (buf_index) {
610 			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
611 
612 
613 			SHA1Transform(ctx->state[0], ctx->state[1],
614 			    ctx->state[2], ctx->state[3], ctx->state[4], ctx,
615 			    ctx->buf_un.buf8);
616 
617 			i = buf_len;
618 		}
619 
620 		for (; i + 63 < input_len; i += 64)
621 			SHA1Transform(ctx->state[0], ctx->state[1],
622 			    ctx->state[2], ctx->state[3], ctx->state[4],
623 			    ctx, &input[i]);
624 
625 		/*
626 		 * general optimization:
627 		 *
628 		 * if i and input_len are the same, return now instead
629 		 * of calling bcopy(), since the bcopy() in this case
630 		 * will be an expensive nop.
631 		 */
632 
633 		if (input_len == i)
634 			return;
635 
636 		buf_index = 0;
637 	}
638 
639 	/* buffer remaining input */
640 	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
641 }
642 
643 #endif /* VIS_SHA1 */
644 
645 /*
646  * SHA1Final()
647  *
 * purpose: ends an sha1 digest operation, finalizing the message digest
 *          (note: despite the inherited MD5 wording, the context is NOT
 *          zeroed here; callers must clear it themselves if required)
 *   input: uint8_t *	: a buffer to store the digest in
 *          SHA1_CTX *  : the context to finalize and save
652  *  output: void
653  */
654 
655 void
656 SHA1Final(uint8_t *digest, SHA1_CTX *ctx)
657 {
658 	uint8_t		bitcount_be[sizeof (ctx->count)];
659 	uint32_t	index = (ctx->count[1] >> 3) & 0x3f;
660 
661 	/* store bit count, big endian */
662 	Encode(bitcount_be, ctx->count, sizeof (bitcount_be));
663 
664 	/* pad out to 56 mod 64 */
665 	SHA1Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
666 
667 	/* append length (before padding) */
668 	SHA1Update(ctx, bitcount_be, sizeof (bitcount_be));
669 
670 	/* store state in digest */
671 	Encode(digest, ctx->state, sizeof (ctx->state));
672 }
673 
674 /*
675  * sparc optimization:
676  *
677  * on the sparc, we can load big endian 32-bit data easily.  note that
678  * special care must be taken to ensure the address is 32-bit aligned.
679  * in the interest of speed, we don't check to make sure, since
680  * careful programming can guarantee this for us.
681  */
682 
#if	defined(_BIG_ENDIAN)

#define	LOAD_BIG_32(addr)	(*(uint32_t *)(addr))

#else	/* little endian -- will work on big endian, but slowly */

/*
 * Fix: cast each byte to uint32_t before shifting.  The previous form
 * left-shifted a promoted (signed) int by 24, which is undefined
 * behavior whenever the top byte is >= 0x80.
 */
#define	LOAD_BIG_32(addr)	\
	(((uint32_t)(addr)[0] << 24) | ((uint32_t)(addr)[1] << 16) | \
	((uint32_t)(addr)[2] << 8) | (uint32_t)(addr)[3])
#endif
692 
693 /*
694  * sparc register window optimization:
695  *
696  * `a', `b', `c', `d', and `e' are passed into SHA1Transform
697  * explicitly since it increases the number of registers available to
698  * the compiler.  under this scheme, these variables can be held in
699  * %i0 - %i4, which leaves more local and out registers available.
700  */
701 
702 /*
703  * SHA1Transform()
704  *
705  * purpose: sha1 transformation -- updates the digest based on `block'
 *   input: uint32_t	: bytes  1 -  4 of the digest
 *          uint32_t	: bytes  5 -  8 of the digest
 *          uint32_t	: bytes  9 - 12 of the digest
 *          uint32_t	: bytes 13 - 16 of the digest
 *          uint32_t	: bytes 17 - 20 of the digest
711  *          SHA1_CTX *	: the context to update
712  *          uint8_t [64]: the block to use to update the digest
713  *  output: void
714  */
715 
716 void
717 SHA1Transform(uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e,
718     SHA1_CTX *ctx, const uint8_t blk[64])
719 {
720 	/*
721 	 * sparc optimization:
722 	 *
723 	 * while it is somewhat counter-intuitive, on sparc, it is
724 	 * more efficient to place all the constants used in this
725 	 * function in an array and load the values out of the array
726 	 * than to manually load the constants.  this is because
727 	 * setting a register to a 32-bit value takes two ops in most
728 	 * cases: a `sethi' and an `or', but loading a 32-bit value
729 	 * from memory only takes one `ld' (or `lduw' on v9).  while
	 * this increases memory usage, the compiler can find enough
	 * other things to do while waiting so that the pipeline does
	 * not stall.  additionally, it is likely that many of these
733 	 * constants are cached so that later accesses do not even go
734 	 * out to the bus.
735 	 *
736 	 * this array is declared `static' to keep the compiler from
737 	 * having to bcopy() this array onto the stack frame of
738 	 * SHA1Transform() each time it is called -- which is
739 	 * unacceptably expensive.
740 	 *
741 	 * the `const' is to ensure that callers are good citizens and
742 	 * do not try to munge the array.  since these routines are
743 	 * going to be called from inside multithreaded kernelland,
744 	 * this is a good safety check. -- `sha1_consts' will end up in
745 	 * .rodata.
746 	 *
747 	 * unfortunately, loading from an array in this manner hurts
748 	 * performance under intel.  so, there is a macro,
749 	 * SHA1_CONST(), used in SHA1Transform(), that either expands to
750 	 * a reference to this array, or to the actual constant,
751 	 * depending on what platform this code is compiled for.
752 	 */
753 
754 #if	defined(__sparc)
755 	static const uint32_t sha1_consts[] = {
756 		SHA1_CONST_0,	SHA1_CONST_1,	SHA1_CONST_2,	SHA1_CONST_3,
757 	};
758 #endif
759 
760 	/*
761 	 * general optimization:
762 	 *
763 	 * use individual integers instead of using an array.  this is a
764 	 * win, although the amount it wins by seems to vary quite a bit.
765 	 */
766 
767 	uint32_t	w_0, w_1, w_2,  w_3,  w_4,  w_5,  w_6,  w_7;
768 	uint32_t	w_8, w_9, w_10, w_11, w_12, w_13, w_14, w_15;
769 
770 	/*
771 	 * sparc optimization:
772 	 *
773 	 * if `block' is already aligned on a 4-byte boundary, use
774 	 * LOAD_BIG_32() directly.  otherwise, bcopy() into a
775 	 * buffer that *is* aligned on a 4-byte boundary and then do
776 	 * the LOAD_BIG_32() on that buffer.  benchmarks have shown
777 	 * that using the bcopy() is better than loading the bytes
778 	 * individually and doing the endian-swap by hand.
779 	 *
780 	 * even though it's quite tempting to assign to do:
781 	 *
782 	 * blk = bcopy(ctx->buf_un.buf32, blk, sizeof (ctx->buf_un.buf32));
783 	 *
784 	 * and only have one set of LOAD_BIG_32()'s, the compiler
785 	 * *does not* like that, so please resist the urge.
786 	 */
787 
788 #if	defined(__sparc)
789 	if ((uintptr_t)blk & 0x3) {		/* not 4-byte aligned? */
790 		bcopy(blk, ctx->buf_un.buf32,  sizeof (ctx->buf_un.buf32));
791 		w_15 = LOAD_BIG_32(ctx->buf_un.buf32 + 15);
792 		w_14 = LOAD_BIG_32(ctx->buf_un.buf32 + 14);
793 		w_13 = LOAD_BIG_32(ctx->buf_un.buf32 + 13);
794 		w_12 = LOAD_BIG_32(ctx->buf_un.buf32 + 12);
795 		w_11 = LOAD_BIG_32(ctx->buf_un.buf32 + 11);
796 		w_10 = LOAD_BIG_32(ctx->buf_un.buf32 + 10);
797 		w_9  = LOAD_BIG_32(ctx->buf_un.buf32 +  9);
798 		w_8  = LOAD_BIG_32(ctx->buf_un.buf32 +  8);
799 		w_7  = LOAD_BIG_32(ctx->buf_un.buf32 +  7);
800 		w_6  = LOAD_BIG_32(ctx->buf_un.buf32 +  6);
801 		w_5  = LOAD_BIG_32(ctx->buf_un.buf32 +  5);
802 		w_4  = LOAD_BIG_32(ctx->buf_un.buf32 +  4);
803 		w_3  = LOAD_BIG_32(ctx->buf_un.buf32 +  3);
804 		w_2  = LOAD_BIG_32(ctx->buf_un.buf32 +  2);
805 		w_1  = LOAD_BIG_32(ctx->buf_un.buf32 +  1);
806 		w_0  = LOAD_BIG_32(ctx->buf_un.buf32 +  0);
807 	} else {
808 		/*LINTED*/
809 		w_15 = LOAD_BIG_32(blk + 60);
810 		/*LINTED*/
811 		w_14 = LOAD_BIG_32(blk + 56);
812 		/*LINTED*/
813 		w_13 = LOAD_BIG_32(blk + 52);
814 		/*LINTED*/
815 		w_12 = LOAD_BIG_32(blk + 48);
816 		/*LINTED*/
817 		w_11 = LOAD_BIG_32(blk + 44);
818 		/*LINTED*/
819 		w_10 = LOAD_BIG_32(blk + 40);
820 		/*LINTED*/
821 		w_9  = LOAD_BIG_32(blk + 36);
822 		/*LINTED*/
823 		w_8  = LOAD_BIG_32(blk + 32);
824 		/*LINTED*/
825 		w_7  = LOAD_BIG_32(blk + 28);
826 		/*LINTED*/
827 		w_6  = LOAD_BIG_32(blk + 24);
828 		/*LINTED*/
829 		w_5  = LOAD_BIG_32(blk + 20);
830 		/*LINTED*/
831 		w_4  = LOAD_BIG_32(blk + 16);
832 		/*LINTED*/
833 		w_3  = LOAD_BIG_32(blk + 12);
834 		/*LINTED*/
835 		w_2  = LOAD_BIG_32(blk +  8);
836 		/*LINTED*/
837 		w_1  = LOAD_BIG_32(blk +  4);
838 		/*LINTED*/
839 		w_0  = LOAD_BIG_32(blk +  0);
840 	}
841 #else
842 	w_15 = LOAD_BIG_32(blk + 60);
843 	w_14 = LOAD_BIG_32(blk + 56);
844 	w_13 = LOAD_BIG_32(blk + 52);
845 	w_12 = LOAD_BIG_32(blk + 48);
846 	w_11 = LOAD_BIG_32(blk + 44);
847 	w_10 = LOAD_BIG_32(blk + 40);
848 	w_9  = LOAD_BIG_32(blk + 36);
849 	w_8  = LOAD_BIG_32(blk + 32);
850 	w_7  = LOAD_BIG_32(blk + 28);
851 	w_6  = LOAD_BIG_32(blk + 24);
852 	w_5  = LOAD_BIG_32(blk + 20);
853 	w_4  = LOAD_BIG_32(blk + 16);
854 	w_3  = LOAD_BIG_32(blk + 12);
855 	w_2  = LOAD_BIG_32(blk +  8);
856 	w_1  = LOAD_BIG_32(blk +  4);
857 	w_0  = LOAD_BIG_32(blk +  0);
858 #endif
859 	/*
860 	 * general optimization:
861 	 *
862 	 * even though this approach is described in the standard as
863 	 * being slower algorithmically, it is 30-40% faster than the
864 	 * "faster" version under SPARC, because this version has more
865 	 * of the constraints specified at compile-time and uses fewer
866 	 * variables (and therefore has better register utilization)
867 	 * than its "speedier" brother.  (i've tried both, trust me)
868 	 *
869 	 * for either method given in the spec, there is an "assignment"
870 	 * phase where the following takes place:
871 	 *
872 	 *	tmp = (main_computation);
873 	 *	e = d; d = c; c = rotate_left(b, 30); b = a; a = tmp;
874 	 *
875 	 * we can make the algorithm go faster by not doing this work,
876 	 * but just pretending that `d' is now `e', etc. this works
877 	 * really well and obviates the need for a temporary variable.
	 * however, we still explicitly perform the rotate action,
879 	 * since it is cheaper on SPARC to do it once than to have to
880 	 * do it over and over again.
881 	 */
882 
883 	/* round 1 */
884 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_0 + SHA1_CONST(0); /* 0 */
885 	b = ROTATE_LEFT(b, 30);
886 
887 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_1 + SHA1_CONST(0); /* 1 */
888 	a = ROTATE_LEFT(a, 30);
889 
890 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_2 + SHA1_CONST(0); /* 2 */
891 	e = ROTATE_LEFT(e, 30);
892 
893 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_3 + SHA1_CONST(0); /* 3 */
894 	d = ROTATE_LEFT(d, 30);
895 
896 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_4 + SHA1_CONST(0); /* 4 */
897 	c = ROTATE_LEFT(c, 30);
898 
899 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_5 + SHA1_CONST(0); /* 5 */
900 	b = ROTATE_LEFT(b, 30);
901 
902 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_6 + SHA1_CONST(0); /* 6 */
903 	a = ROTATE_LEFT(a, 30);
904 
905 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_7 + SHA1_CONST(0); /* 7 */
906 	e = ROTATE_LEFT(e, 30);
907 
908 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_8 + SHA1_CONST(0); /* 8 */
909 	d = ROTATE_LEFT(d, 30);
910 
911 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_9 + SHA1_CONST(0); /* 9 */
912 	c = ROTATE_LEFT(c, 30);
913 
914 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_10 + SHA1_CONST(0); /* 10 */
915 	b = ROTATE_LEFT(b, 30);
916 
917 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_11 + SHA1_CONST(0); /* 11 */
918 	a = ROTATE_LEFT(a, 30);
919 
920 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_12 + SHA1_CONST(0); /* 12 */
921 	e = ROTATE_LEFT(e, 30);
922 
923 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_13 + SHA1_CONST(0); /* 13 */
924 	d = ROTATE_LEFT(d, 30);
925 
926 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_14 + SHA1_CONST(0); /* 14 */
927 	c = ROTATE_LEFT(c, 30);
928 
929 	e = ROTATE_LEFT(a, 5) + F(b, c, d) + e + w_15 + SHA1_CONST(0); /* 15 */
930 	b = ROTATE_LEFT(b, 30);
931 
932 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 16 */
933 	d = ROTATE_LEFT(e, 5) + F(a, b, c) + d + w_0 + SHA1_CONST(0);
934 	a = ROTATE_LEFT(a, 30);
935 
936 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 17 */
937 	c = ROTATE_LEFT(d, 5) + F(e, a, b) + c + w_1 + SHA1_CONST(0);
938 	e = ROTATE_LEFT(e, 30);
939 
940 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 18 */
941 	b = ROTATE_LEFT(c, 5) + F(d, e, a) + b + w_2 + SHA1_CONST(0);
942 	d = ROTATE_LEFT(d, 30);
943 
944 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 19 */
945 	a = ROTATE_LEFT(b, 5) + F(c, d, e) + a + w_3 + SHA1_CONST(0);
946 	c = ROTATE_LEFT(c, 30);
947 
948 	/* round 2 */
949 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 20 */
950 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_4 + SHA1_CONST(1);
951 	b = ROTATE_LEFT(b, 30);
952 
953 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 21 */
954 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_5 + SHA1_CONST(1);
955 	a = ROTATE_LEFT(a, 30);
956 
957 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 22 */
958 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_6 + SHA1_CONST(1);
959 	e = ROTATE_LEFT(e, 30);
960 
961 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 23 */
962 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_7 + SHA1_CONST(1);
963 	d = ROTATE_LEFT(d, 30);
964 
965 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 24 */
966 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_8 + SHA1_CONST(1);
967 	c = ROTATE_LEFT(c, 30);
968 
969 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 25 */
970 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_9 + SHA1_CONST(1);
971 	b = ROTATE_LEFT(b, 30);
972 
973 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 26 */
974 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_10 + SHA1_CONST(1);
975 	a = ROTATE_LEFT(a, 30);
976 
977 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 27 */
978 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_11 + SHA1_CONST(1);
979 	e = ROTATE_LEFT(e, 30);
980 
981 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 28 */
982 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_12 + SHA1_CONST(1);
983 	d = ROTATE_LEFT(d, 30);
984 
985 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 29 */
986 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_13 + SHA1_CONST(1);
987 	c = ROTATE_LEFT(c, 30);
988 
989 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 30 */
990 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_14 + SHA1_CONST(1);
991 	b = ROTATE_LEFT(b, 30);
992 
993 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 31 */
994 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_15 + SHA1_CONST(1);
995 	a = ROTATE_LEFT(a, 30);
996 
997 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 32 */
998 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_0 + SHA1_CONST(1);
999 	e = ROTATE_LEFT(e, 30);
1000 
1001 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 33 */
1002 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_1 + SHA1_CONST(1);
1003 	d = ROTATE_LEFT(d, 30);
1004 
1005 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 34 */
1006 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_2 + SHA1_CONST(1);
1007 	c = ROTATE_LEFT(c, 30);
1008 
1009 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 35 */
1010 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_3 + SHA1_CONST(1);
1011 	b = ROTATE_LEFT(b, 30);
1012 
1013 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 36 */
1014 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_4 + SHA1_CONST(1);
1015 	a = ROTATE_LEFT(a, 30);
1016 
1017 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 37 */
1018 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_5 + SHA1_CONST(1);
1019 	e = ROTATE_LEFT(e, 30);
1020 
1021 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 38 */
1022 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_6 + SHA1_CONST(1);
1023 	d = ROTATE_LEFT(d, 30);
1024 
1025 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 39 */
1026 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_7 + SHA1_CONST(1);
1027 	c = ROTATE_LEFT(c, 30);
1028 
1029 	/* round 3 */
1030 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 40 */
1031 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_8 + SHA1_CONST(2);
1032 	b = ROTATE_LEFT(b, 30);
1033 
1034 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 41 */
1035 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_9 + SHA1_CONST(2);
1036 	a = ROTATE_LEFT(a, 30);
1037 
1038 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 42 */
1039 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_10 + SHA1_CONST(2);
1040 	e = ROTATE_LEFT(e, 30);
1041 
1042 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 43 */
1043 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_11 + SHA1_CONST(2);
1044 	d = ROTATE_LEFT(d, 30);
1045 
1046 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 44 */
1047 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_12 + SHA1_CONST(2);
1048 	c = ROTATE_LEFT(c, 30);
1049 
1050 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 45 */
1051 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_13 + SHA1_CONST(2);
1052 	b = ROTATE_LEFT(b, 30);
1053 
1054 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 46 */
1055 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_14 + SHA1_CONST(2);
1056 	a = ROTATE_LEFT(a, 30);
1057 
1058 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 47 */
1059 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_15 + SHA1_CONST(2);
1060 	e = ROTATE_LEFT(e, 30);
1061 
1062 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 48 */
1063 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_0 + SHA1_CONST(2);
1064 	d = ROTATE_LEFT(d, 30);
1065 
1066 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 49 */
1067 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_1 + SHA1_CONST(2);
1068 	c = ROTATE_LEFT(c, 30);
1069 
1070 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 50 */
1071 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_2 + SHA1_CONST(2);
1072 	b = ROTATE_LEFT(b, 30);
1073 
1074 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 51 */
1075 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_3 + SHA1_CONST(2);
1076 	a = ROTATE_LEFT(a, 30);
1077 
1078 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 52 */
1079 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_4 + SHA1_CONST(2);
1080 	e = ROTATE_LEFT(e, 30);
1081 
1082 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 53 */
1083 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_5 + SHA1_CONST(2);
1084 	d = ROTATE_LEFT(d, 30);
1085 
1086 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 54 */
1087 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_6 + SHA1_CONST(2);
1088 	c = ROTATE_LEFT(c, 30);
1089 
1090 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 55 */
1091 	e = ROTATE_LEFT(a, 5) + H(b, c, d) + e + w_7 + SHA1_CONST(2);
1092 	b = ROTATE_LEFT(b, 30);
1093 
1094 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 56 */
1095 	d = ROTATE_LEFT(e, 5) + H(a, b, c) + d + w_8 + SHA1_CONST(2);
1096 	a = ROTATE_LEFT(a, 30);
1097 
1098 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 57 */
1099 	c = ROTATE_LEFT(d, 5) + H(e, a, b) + c + w_9 + SHA1_CONST(2);
1100 	e = ROTATE_LEFT(e, 30);
1101 
1102 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 58 */
1103 	b = ROTATE_LEFT(c, 5) + H(d, e, a) + b + w_10 + SHA1_CONST(2);
1104 	d = ROTATE_LEFT(d, 30);
1105 
1106 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 59 */
1107 	a = ROTATE_LEFT(b, 5) + H(c, d, e) + a + w_11 + SHA1_CONST(2);
1108 	c = ROTATE_LEFT(c, 30);
1109 
1110 	/* round 4 */
1111 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 60 */
1112 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_12 + SHA1_CONST(3);
1113 	b = ROTATE_LEFT(b, 30);
1114 
1115 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 61 */
1116 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_13 + SHA1_CONST(3);
1117 	a = ROTATE_LEFT(a, 30);
1118 
1119 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 62 */
1120 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_14 + SHA1_CONST(3);
1121 	e = ROTATE_LEFT(e, 30);
1122 
1123 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 63 */
1124 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_15 + SHA1_CONST(3);
1125 	d = ROTATE_LEFT(d, 30);
1126 
1127 	w_0 = ROTATE_LEFT((w_13 ^ w_8 ^ w_2 ^ w_0), 1);		/* 64 */
1128 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_0 + SHA1_CONST(3);
1129 	c = ROTATE_LEFT(c, 30);
1130 
1131 	w_1 = ROTATE_LEFT((w_14 ^ w_9 ^ w_3 ^ w_1), 1);		/* 65 */
1132 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_1 + SHA1_CONST(3);
1133 	b = ROTATE_LEFT(b, 30);
1134 
1135 	w_2 = ROTATE_LEFT((w_15 ^ w_10 ^ w_4 ^ w_2), 1);	/* 66 */
1136 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_2 + SHA1_CONST(3);
1137 	a = ROTATE_LEFT(a, 30);
1138 
1139 	w_3 = ROTATE_LEFT((w_0 ^ w_11 ^ w_5 ^ w_3), 1);		/* 67 */
1140 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_3 + SHA1_CONST(3);
1141 	e = ROTATE_LEFT(e, 30);
1142 
1143 	w_4 = ROTATE_LEFT((w_1 ^ w_12 ^ w_6 ^ w_4), 1);		/* 68 */
1144 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_4 + SHA1_CONST(3);
1145 	d = ROTATE_LEFT(d, 30);
1146 
1147 	w_5 = ROTATE_LEFT((w_2 ^ w_13 ^ w_7 ^ w_5), 1);		/* 69 */
1148 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_5 + SHA1_CONST(3);
1149 	c = ROTATE_LEFT(c, 30);
1150 
1151 	w_6 = ROTATE_LEFT((w_3 ^ w_14 ^ w_8 ^ w_6), 1);		/* 70 */
1152 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_6 + SHA1_CONST(3);
1153 	b = ROTATE_LEFT(b, 30);
1154 
1155 	w_7 = ROTATE_LEFT((w_4 ^ w_15 ^ w_9 ^ w_7), 1);		/* 71 */
1156 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_7 + SHA1_CONST(3);
1157 	a = ROTATE_LEFT(a, 30);
1158 
1159 	w_8 = ROTATE_LEFT((w_5 ^ w_0 ^ w_10 ^ w_8), 1);		/* 72 */
1160 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_8 + SHA1_CONST(3);
1161 	e = ROTATE_LEFT(e, 30);
1162 
1163 	w_9 = ROTATE_LEFT((w_6 ^ w_1 ^ w_11 ^ w_9), 1);		/* 73 */
1164 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_9 + SHA1_CONST(3);
1165 	d = ROTATE_LEFT(d, 30);
1166 
1167 	w_10 = ROTATE_LEFT((w_7 ^ w_2 ^ w_12 ^ w_10), 1);	/* 74 */
1168 	a = ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_10 + SHA1_CONST(3);
1169 	c = ROTATE_LEFT(c, 30);
1170 
1171 	w_11 = ROTATE_LEFT((w_8 ^ w_3 ^ w_13 ^ w_11), 1);	/* 75 */
1172 	e = ROTATE_LEFT(a, 5) + G(b, c, d) + e + w_11 + SHA1_CONST(3);
1173 	b = ROTATE_LEFT(b, 30);
1174 
1175 	w_12 = ROTATE_LEFT((w_9 ^ w_4 ^ w_14 ^ w_12), 1);	/* 76 */
1176 	d = ROTATE_LEFT(e, 5) + G(a, b, c) + d + w_12 + SHA1_CONST(3);
1177 	a = ROTATE_LEFT(a, 30);
1178 
1179 	w_13 = ROTATE_LEFT((w_10 ^ w_5 ^ w_15 ^ w_13), 1);	/* 77 */
1180 	c = ROTATE_LEFT(d, 5) + G(e, a, b) + c + w_13 + SHA1_CONST(3);
1181 	e = ROTATE_LEFT(e, 30);
1182 
1183 	w_14 = ROTATE_LEFT((w_11 ^ w_6 ^ w_0 ^ w_14), 1);	/* 78 */
1184 	b = ROTATE_LEFT(c, 5) + G(d, e, a) + b + w_14 + SHA1_CONST(3);
1185 	d = ROTATE_LEFT(d, 30);
1186 
1187 	w_15 = ROTATE_LEFT((w_12 ^ w_7 ^ w_1 ^ w_15), 1);	/* 79 */
1188 
1189 	ctx->state[0] += ROTATE_LEFT(b, 5) + G(c, d, e) + a + w_15 +
1190 	    SHA1_CONST(3);
1191 	ctx->state[1] += b;
1192 	ctx->state[2] += ROTATE_LEFT(c, 30);
1193 	ctx->state[3] += d;
1194 	ctx->state[4] += e;
1195 
1196 	/* zeroize sensitive information */
1197 	w_0 = w_1 = w_2 = w_3 = w_4 = w_5 = w_6 = w_7 = w_8 = 0;
1198 	w_9 = w_10 = w_11 = w_12 = w_13 = w_14 = w_15 = 0;
1199 }
1200 
1201 /*
1202  * devpro compiler optimization:
1203  *
1204  * the compiler can generate better code if it knows that `input' and
1205  * `output' do not point to the same source.  there is no portable
1206  * way to tell the compiler this, but the sun compiler recognizes the
1207  * `_Restrict' keyword to indicate this condition.  use it if possible.
1208  */
1209 
1210 #ifdef	__RESTRICT
1211 #define	restrict	_Restrict
1212 #else
1213 #define	restrict	/* nothing */
1214 #endif
1215 
1216 /*
1217  * Encode()
1218  *
1219  * purpose: to convert a list of numbers from little endian to big endian
1220  *   input: uint8_t *	: place to store the converted big endian numbers
1221  *	    uint32_t *	: place to get numbers to convert from
1222  *          size_t	: the length of the input in bytes
1223  *  output: void
1224  */
1225 
static void
Encode(uint8_t *restrict output, uint32_t *restrict input, size_t len)
{
	size_t		word, byte;

#if	defined(__sparc)
	if (IS_P2ALIGNED(output, sizeof (uint32_t))) {
		/* output is word aligned; store each word directly */
		for (word = 0, byte = 0; byte < len; word++, byte += 4) {
			/* LINTED: pointer alignment */
			*((uint32_t *)(output + byte)) = input[word];
		}
	} else {
#endif	/* little endian -- will work on big endian, but slowly */
		/* portable path: emit each word most-significant byte first */
		for (word = 0, byte = 0; byte < len; word++, byte += 4) {
			uint32_t w = input[word];

			output[byte]	 = (uint8_t)(w >> 24);
			output[byte + 1] = (uint8_t)(w >> 16);
			output[byte + 2] = (uint8_t)(w >> 8);
			output[byte + 3] = (uint8_t)w;
		}
#if	defined(__sparc)
	}
#endif
}
1249 
1250 
1251 #ifdef _KERNEL
1252 
1253 /*
1254  * KCF software provider control entry points.
1255  */
/* ARGSUSED */
static void
sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	/*
	 * This software provider has no hardware or external state to
	 * probe, so it unconditionally reports READY to the framework.
	 */
	*status = CRYPTO_PROVIDER_READY;
}
1262 
1263 /*
1264  * KCF software provider digest entry points.
1265  */
1266 
1267 static int
1268 sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1269     crypto_req_handle_t req)
1270 {
1271 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
1272 		return (CRYPTO_MECHANISM_INVALID);
1273 
1274 	/*
1275 	 * Allocate and initialize SHA1 context.
1276 	 */
1277 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
1278 	    crypto_kmflag(req));
1279 	if (ctx->cc_provider_private == NULL)
1280 		return (CRYPTO_HOST_MEMORY);
1281 
1282 	PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
1283 	SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
1284 
1285 	return (CRYPTO_SUCCESS);
1286 }
1287 
1288 /*
1289  * Helper SHA1 digest update function for uio data.
1290  */
1291 static int
1292 sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
1293 {
1294 	off_t offset = data->cd_offset;
1295 	size_t length = data->cd_length;
1296 	uint_t vec_idx;
1297 	size_t cur_len;
1298 
1299 	/* we support only kernel buffer */
1300 	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
1301 		return (CRYPTO_ARGUMENTS_BAD);
1302 
1303 	/*
1304 	 * Jump to the first iovec containing data to be
1305 	 * digested.
1306 	 */
1307 	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
1308 	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
1309 	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len);
1310 	if (vec_idx == data->cd_uio->uio_iovcnt) {
1311 		/*
1312 		 * The caller specified an offset that is larger than the
1313 		 * total size of the buffers it provided.
1314 		 */
1315 		return (CRYPTO_DATA_LEN_RANGE);
1316 	}
1317 
1318 	/*
1319 	 * Now do the digesting on the iovecs.
1320 	 */
1321 	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
1322 		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
1323 		    offset, length);
1324 
1325 		SHA1Update(sha1_ctx,
1326 		    (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
1327 		    cur_len);
1328 
1329 		length -= cur_len;
1330 		vec_idx++;
1331 		offset = 0;
1332 	}
1333 
1334 	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
1335 		/*
1336 		 * The end of the specified iovec's was reached but
1337 		 * the length requested could not be processed, i.e.
1338 		 * The caller requested to digest more data than it provided.
1339 		 */
1340 		return (CRYPTO_DATA_LEN_RANGE);
1341 	}
1342 
1343 	return (CRYPTO_SUCCESS);
1344 }
1345 
1346 /*
1347  * Helper SHA1 digest final function for uio data.
1348  * digest_len is the length of the desired digest. If digest_len
1349  * is smaller than the default SHA1 digest length, the caller
1350  * must pass a scratch buffer, digest_scratch, which must
1351  * be at least SHA1_DIGEST_LENGTH bytes.
1352  */
1353 static int
1354 sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
1355     ulong_t digest_len, uchar_t *digest_scratch)
1356 {
1357 	off_t offset = digest->cd_offset;
1358 	uint_t vec_idx;
1359 
1360 	/* we support only kernel buffer */
1361 	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
1362 		return (CRYPTO_ARGUMENTS_BAD);
1363 
1364 	/*
1365 	 * Jump to the first iovec containing ptr to the digest to
1366 	 * be returned.
1367 	 */
1368 	for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
1369 	    vec_idx < digest->cd_uio->uio_iovcnt;
1370 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len);
1371 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
1372 		/*
1373 		 * The caller specified an offset that is
1374 		 * larger than the total size of the buffers
1375 		 * it provided.
1376 		 */
1377 		return (CRYPTO_DATA_LEN_RANGE);
1378 	}
1379 
1380 	if (offset + digest_len <=
1381 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
1382 		/*
1383 		 * The computed SHA1 digest will fit in the current
1384 		 * iovec.
1385 		 */
1386 		if (digest_len != SHA1_DIGEST_LENGTH) {
1387 			/*
1388 			 * The caller requested a short digest. Digest
1389 			 * into a scratch buffer and return to
1390 			 * the user only what was requested.
1391 			 */
1392 			SHA1Final(digest_scratch, sha1_ctx);
1393 			bcopy(digest_scratch, (uchar_t *)digest->
1394 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1395 			    digest_len);
1396 		} else {
1397 			SHA1Final((uchar_t *)digest->
1398 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1399 			    sha1_ctx);
1400 		}
1401 	} else {
1402 		/*
1403 		 * The computed digest will be crossing one or more iovec's.
1404 		 * This is bad performance-wise but we need to support it.
1405 		 * Allocate a small scratch buffer on the stack and
1406 		 * copy it piece meal to the specified digest iovec's.
1407 		 */
1408 		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
1409 		off_t scratch_offset = 0;
1410 		size_t length = digest_len;
1411 		size_t cur_len;
1412 
1413 		SHA1Final(digest_tmp, sha1_ctx);
1414 
1415 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
1416 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
1417 			    offset, length);
1418 			bcopy(digest_tmp + scratch_offset,
1419 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
1420 			    cur_len);
1421 
1422 			length -= cur_len;
1423 			vec_idx++;
1424 			scratch_offset += cur_len;
1425 			offset = 0;
1426 		}
1427 
1428 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
1429 			/*
1430 			 * The end of the specified iovec's was reached but
1431 			 * the length requested could not be processed, i.e.
1432 			 * The caller requested to digest more data than it
1433 			 * provided.
1434 			 */
1435 			return (CRYPTO_DATA_LEN_RANGE);
1436 		}
1437 	}
1438 
1439 	return (CRYPTO_SUCCESS);
1440 }
1441 
1442 /*
1443  * Helper SHA1 digest update for mblk's.
1444  */
1445 static int
1446 sha1_digest_update_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *data)
1447 {
1448 	off_t offset = data->cd_offset;
1449 	size_t length = data->cd_length;
1450 	mblk_t *mp;
1451 	size_t cur_len;
1452 
1453 	/*
1454 	 * Jump to the first mblk_t containing data to be digested.
1455 	 */
1456 	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
1457 	    offset -= MBLKL(mp), mp = mp->b_cont);
1458 	if (mp == NULL) {
1459 		/*
1460 		 * The caller specified an offset that is larger than the
1461 		 * total size of the buffers it provided.
1462 		 */
1463 		return (CRYPTO_DATA_LEN_RANGE);
1464 	}
1465 
1466 	/*
1467 	 * Now do the digesting on the mblk chain.
1468 	 */
1469 	while (mp != NULL && length > 0) {
1470 		cur_len = MIN(MBLKL(mp) - offset, length);
1471 		SHA1Update(sha1_ctx, mp->b_rptr + offset, cur_len);
1472 		length -= cur_len;
1473 		offset = 0;
1474 		mp = mp->b_cont;
1475 	}
1476 
1477 	if (mp == NULL && length > 0) {
1478 		/*
1479 		 * The end of the mblk was reached but the length requested
1480 		 * could not be processed, i.e. The caller requested
1481 		 * to digest more data than it provided.
1482 		 */
1483 		return (CRYPTO_DATA_LEN_RANGE);
1484 	}
1485 
1486 	return (CRYPTO_SUCCESS);
1487 }
1488 
1489 /*
1490  * Helper SHA1 digest final for mblk's.
1491  * digest_len is the length of the desired digest. If digest_len
1492  * is smaller than the default SHA1 digest length, the caller
1493  * must pass a scratch buffer, digest_scratch, which must
1494  * be at least SHA1_DIGEST_LENGTH bytes.
1495  */
static int
sha1_digest_final_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
    ulong_t digest_len, uchar_t *digest_scratch)
{
	off_t offset = digest->cd_offset;
	mblk_t *mp;

	/*
	 * Jump to the first mblk_t that will be used to store the digest.
	 * (Empty-bodied loop: the work happens in the condition and
	 * increment expressions.)
	 */
	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont);
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	if (offset + digest_len <= MBLKL(mp)) {
		/*
		 * The computed SHA1 digest will fit in the current mblk.
		 * Do the SHA1Final() in-place.
		 */
		if (digest_len != SHA1_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA1Final(digest_scratch, sha1_ctx);
			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
		} else {
			/* full-length digest: finalize directly in place */
			SHA1Final(mp->b_rptr + offset, sha1_ctx);
		}
	} else {
		/*
		 * The computed digest will be crossing one or more mblk's.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piece meal to the specified digest mblk's.
		 */
		uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		SHA1Final(digest_tmp, sha1_ctx);

		while (mp != NULL && length > 0) {
			cur_len = MIN(MBLKL(mp) - offset, length);
			bcopy(digest_tmp + scratch_offset,
			    mp->b_rptr + offset, cur_len);

			length -= cur_len;
			mp = mp->b_cont;
			scratch_offset += cur_len;
			offset = 0;
		}

		if (mp == NULL && length > 0) {
			/*
			 * The end of the specified mblk was reached but
			 * the length requested could not be processed, i.e.
			 * The caller requested to digest more data than it
			 * provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}
1570 
1571 /* ARGSUSED */
1572 static int
1573 sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
1574     crypto_req_handle_t req)
1575 {
1576 	int ret = CRYPTO_SUCCESS;
1577 
1578 	ASSERT(ctx->cc_provider_private != NULL);
1579 
1580 	/*
1581 	 * We need to just return the length needed to store the output.
1582 	 * We should not destroy the context for the following cases.
1583 	 */
1584 	if ((digest->cd_length == 0) ||
1585 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
1586 		digest->cd_length = SHA1_DIGEST_LENGTH;
1587 		return (CRYPTO_BUFFER_TOO_SMALL);
1588 	}
1589 
1590 	/*
1591 	 * Do the SHA1 update on the specified input data.
1592 	 */
1593 	switch (data->cd_format) {
1594 	case CRYPTO_DATA_RAW:
1595 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1596 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1597 		    data->cd_length);
1598 		break;
1599 	case CRYPTO_DATA_UIO:
1600 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1601 		    data);
1602 		break;
1603 	case CRYPTO_DATA_MBLK:
1604 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1605 		    data);
1606 		break;
1607 	default:
1608 		ret = CRYPTO_ARGUMENTS_BAD;
1609 	}
1610 
1611 	if (ret != CRYPTO_SUCCESS) {
1612 		/* the update failed, free context and bail */
1613 		kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
1614 		ctx->cc_provider_private = NULL;
1615 		digest->cd_length = 0;
1616 		return (ret);
1617 	}
1618 
1619 	/*
1620 	 * Do a SHA1 final, must be done separately since the digest
1621 	 * type can be different than the input data type.
1622 	 */
1623 	switch (digest->cd_format) {
1624 	case CRYPTO_DATA_RAW:
1625 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
1626 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
1627 		break;
1628 	case CRYPTO_DATA_UIO:
1629 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1630 		    digest, SHA1_DIGEST_LENGTH, NULL);
1631 		break;
1632 	case CRYPTO_DATA_MBLK:
1633 		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1634 		    digest, SHA1_DIGEST_LENGTH, NULL);
1635 		break;
1636 	default:
1637 		ret = CRYPTO_ARGUMENTS_BAD;
1638 	}
1639 
1640 	/* all done, free context and return */
1641 
1642 	if (ret == CRYPTO_SUCCESS) {
1643 		digest->cd_length = SHA1_DIGEST_LENGTH;
1644 	} else {
1645 		digest->cd_length = 0;
1646 	}
1647 
1648 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
1649 	ctx->cc_provider_private = NULL;
1650 	return (ret);
1651 }
1652 
1653 /* ARGSUSED */
1654 static int
1655 sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
1656     crypto_req_handle_t req)
1657 {
1658 	int ret = CRYPTO_SUCCESS;
1659 
1660 	ASSERT(ctx->cc_provider_private != NULL);
1661 
1662 	/*
1663 	 * Do the SHA1 update on the specified input data.
1664 	 */
1665 	switch (data->cd_format) {
1666 	case CRYPTO_DATA_RAW:
1667 		SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1668 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1669 		    data->cd_length);
1670 		break;
1671 	case CRYPTO_DATA_UIO:
1672 		ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1673 		    data);
1674 		break;
1675 	case CRYPTO_DATA_MBLK:
1676 		ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1677 		    data);
1678 		break;
1679 	default:
1680 		ret = CRYPTO_ARGUMENTS_BAD;
1681 	}
1682 
1683 	return (ret);
1684 }
1685 
1686 /* ARGSUSED */
1687 static int
1688 sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
1689     crypto_req_handle_t req)
1690 {
1691 	int ret = CRYPTO_SUCCESS;
1692 
1693 	ASSERT(ctx->cc_provider_private != NULL);
1694 
1695 	/*
1696 	 * We need to just return the length needed to store the output.
1697 	 * We should not destroy the context for the following cases.
1698 	 */
1699 	if ((digest->cd_length == 0) ||
1700 	    (digest->cd_length < SHA1_DIGEST_LENGTH)) {
1701 		digest->cd_length = SHA1_DIGEST_LENGTH;
1702 		return (CRYPTO_BUFFER_TOO_SMALL);
1703 	}
1704 
1705 	/*
1706 	 * Do a SHA1 final.
1707 	 */
1708 	switch (digest->cd_format) {
1709 	case CRYPTO_DATA_RAW:
1710 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
1711 		    digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
1712 		break;
1713 	case CRYPTO_DATA_UIO:
1714 		ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1715 		    digest, SHA1_DIGEST_LENGTH, NULL);
1716 		break;
1717 	case CRYPTO_DATA_MBLK:
1718 		ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
1719 		    digest, SHA1_DIGEST_LENGTH, NULL);
1720 		break;
1721 	default:
1722 		ret = CRYPTO_ARGUMENTS_BAD;
1723 	}
1724 
1725 	/* all done, free context and return */
1726 
1727 	if (ret == CRYPTO_SUCCESS) {
1728 		digest->cd_length = SHA1_DIGEST_LENGTH;
1729 	} else {
1730 		digest->cd_length = 0;
1731 	}
1732 
1733 	kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
1734 	ctx->cc_provider_private = NULL;
1735 
1736 	return (ret);
1737 }
1738 
1739 /* ARGSUSED */
1740 static int
1741 sha1_digest_atomic(crypto_provider_handle_t provider,
1742     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1743     crypto_data_t *data, crypto_data_t *digest,
1744     crypto_req_handle_t req)
1745 {
1746 	int ret = CRYPTO_SUCCESS;
1747 	SHA1_CTX sha1_ctx;
1748 
1749 	if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
1750 		return (CRYPTO_MECHANISM_INVALID);
1751 
1752 	/*
1753 	 * Do the SHA1 init.
1754 	 */
1755 	SHA1Init(&sha1_ctx);
1756 
1757 	/*
1758 	 * Do the SHA1 update on the specified input data.
1759 	 */
1760 	switch (data->cd_format) {
1761 	case CRYPTO_DATA_RAW:
1762 		SHA1Update(&sha1_ctx,
1763 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1764 		    data->cd_length);
1765 		break;
1766 	case CRYPTO_DATA_UIO:
1767 		ret = sha1_digest_update_uio(&sha1_ctx, data);
1768 		break;
1769 	case CRYPTO_DATA_MBLK:
1770 		ret = sha1_digest_update_mblk(&sha1_ctx, data);
1771 		break;
1772 	default:
1773 		ret = CRYPTO_ARGUMENTS_BAD;
1774 	}
1775 
1776 	if (ret != CRYPTO_SUCCESS) {
1777 		/* the update failed, bail */
1778 		digest->cd_length = 0;
1779 		return (ret);
1780 	}
1781 
1782 	/*
1783 	 * Do a SHA1 final, must be done separately since the digest
1784 	 * type can be different than the input data type.
1785 	 */
1786 	switch (digest->cd_format) {
1787 	case CRYPTO_DATA_RAW:
1788 		SHA1Final((unsigned char *)digest->cd_raw.iov_base +
1789 		    digest->cd_offset, &sha1_ctx);
1790 		break;
1791 	case CRYPTO_DATA_UIO:
1792 		ret = sha1_digest_final_uio(&sha1_ctx, digest,
1793 		    SHA1_DIGEST_LENGTH, NULL);
1794 		break;
1795 	case CRYPTO_DATA_MBLK:
1796 		ret = sha1_digest_final_mblk(&sha1_ctx, digest,
1797 		    SHA1_DIGEST_LENGTH, NULL);
1798 		break;
1799 	default:
1800 		ret = CRYPTO_ARGUMENTS_BAD;
1801 	}
1802 
1803 	if (ret == CRYPTO_SUCCESS) {
1804 		digest->cd_length = SHA1_DIGEST_LENGTH;
1805 	} else {
1806 		digest->cd_length = 0;
1807 	}
1808 
1809 	return (ret);
1810 }
1811 
1812 /*
1813  * KCF software provider mac entry points.
1814  *
1815  * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
1816  *
1817  * Init:
1818  * The initialization routine initializes what we denote
1819  * as the inner and outer contexts by doing
1820  * - for inner context: SHA1(key XOR ipad)
1821  * - for outer context: SHA1(key XOR opad)
1822  *
1823  * Update:
1824  * Each subsequent SHA1 HMAC update will result in an
1825  * update of the inner context with the specified data.
1826  *
1827  * Final:
1828  * The SHA1 HMAC final will do a SHA1 final operation on the
1829  * inner context, and the resulting digest will be used
1830  * as the data for an update on the outer context. Last
1831  * but not least, a SHA1 final on the outer context will
1832  * be performed to obtain the SHA1 HMAC digest to return
1833  * to the user.
1834  */
1835 
1836 /*
1837  * Initialize a SHA1-HMAC context.
1838  */
1839 static void
1840 sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
1841 {
1842 	uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
1843 	uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
1844 	uint_t i;
1845 
1846 	bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
1847 	bzero(opad, SHA1_HMAC_BLOCK_SIZE);
1848 
1849 	bcopy(keyval, ipad, length_in_bytes);
1850 	bcopy(keyval, opad, length_in_bytes);
1851 
1852 	/* XOR key with ipad (0x36) and opad (0x5c) */
1853 	for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
1854 		ipad[i] ^= 0x36363636;
1855 		opad[i] ^= 0x5c5c5c5c;
1856 	}
1857 
1858 	/* perform SHA1 on ipad */
1859 	SHA1Init(&ctx->hc_icontext);
1860 	SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
1861 
1862 	/* perform SHA1 on opad */
1863 	SHA1Init(&ctx->hc_ocontext);
1864 	SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
1865 }
1866 
1867 /*
1868  */
1869 static int
1870 sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1871     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
1872     crypto_req_handle_t req)
1873 {
1874 	int ret = CRYPTO_SUCCESS;
1875 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1876 
1877 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1878 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1879 		return (CRYPTO_MECHANISM_INVALID);
1880 
1881 	/* Add support for key by attributes (RFE 4706552) */
1882 	if (key->ck_format != CRYPTO_KEY_RAW)
1883 		return (CRYPTO_ARGUMENTS_BAD);
1884 
1885 	ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
1886 	    crypto_kmflag(req));
1887 	if (ctx->cc_provider_private == NULL)
1888 		return (CRYPTO_HOST_MEMORY);
1889 
1890 	if (ctx_template != NULL) {
1891 		/* reuse context template */
1892 		bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
1893 		    sizeof (sha1_hmac_ctx_t));
1894 	} else {
1895 		/* no context template, compute context */
1896 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1897 			uchar_t digested_key[SHA1_DIGEST_LENGTH];
1898 			sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
1899 
1900 			/*
1901 			 * Hash the passed-in key to get a smaller key.
1902 			 * The inner context is used since it hasn't been
1903 			 * initialized yet.
1904 			 */
1905 			PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
1906 			    key->ck_data, keylen_in_bytes, digested_key);
1907 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
1908 			    digested_key, SHA1_DIGEST_LENGTH);
1909 		} else {
1910 			sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
1911 			    key->ck_data, keylen_in_bytes);
1912 		}
1913 	}
1914 
1915 	/*
1916 	 * Get the mechanism parameters, if applicable.
1917 	 */
1918 	PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
1919 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1920 		if (mechanism->cm_param == NULL ||
1921 		    mechanism->cm_param_len != sizeof (ulong_t))
1922 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1923 		PROV_SHA1_GET_DIGEST_LEN(mechanism,
1924 		    PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
1925 		if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
1926 		    SHA1_DIGEST_LENGTH)
1927 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1928 	}
1929 
1930 	if (ret != CRYPTO_SUCCESS) {
1931 		bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1932 		kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1933 		ctx->cc_provider_private = NULL;
1934 	}
1935 
1936 	return (ret);
1937 }
1938 
1939 /* ARGSUSED */
1940 static int
1941 sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
1942 {
1943 	int ret = CRYPTO_SUCCESS;
1944 
1945 	ASSERT(ctx->cc_provider_private != NULL);
1946 
1947 	/*
1948 	 * Do a SHA1 update of the inner context using the specified
1949 	 * data.
1950 	 */
1951 	switch (data->cd_format) {
1952 	case CRYPTO_DATA_RAW:
1953 		SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
1954 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1955 		    data->cd_length);
1956 		break;
1957 	case CRYPTO_DATA_UIO:
1958 		ret = sha1_digest_update_uio(
1959 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
1960 		break;
1961 	case CRYPTO_DATA_MBLK:
1962 		ret = sha1_digest_update_mblk(
1963 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
1964 		break;
1965 	default:
1966 		ret = CRYPTO_ARGUMENTS_BAD;
1967 	}
1968 
1969 	return (ret);
1970 }
1971 
1972 /* ARGSUSED */
1973 static int
1974 sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
1975 {
1976 	int ret = CRYPTO_SUCCESS;
1977 	uchar_t digest[SHA1_DIGEST_LENGTH];
1978 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
1979 
1980 	ASSERT(ctx->cc_provider_private != NULL);
1981 
1982 	if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
1983 	    SHA1_HMAC_GEN_MECH_INFO_TYPE)
1984 		digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
1985 
1986 	/*
1987 	 * We need to just return the length needed to store the output.
1988 	 * We should not destroy the context for the following cases.
1989 	 */
1990 	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
1991 		mac->cd_length = digest_len;
1992 		return (CRYPTO_BUFFER_TOO_SMALL);
1993 	}
1994 
1995 	/*
1996 	 * Do a SHA1 final on the inner context.
1997 	 */
1998 	SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);
1999 
2000 	/*
2001 	 * Do a SHA1 update on the outer context, feeding the inner
2002 	 * digest as data.
2003 	 */
2004 	SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
2005 	    SHA1_DIGEST_LENGTH);
2006 
2007 	/*
2008 	 * Do a SHA1 final on the outer context, storing the computing
2009 	 * digest in the users buffer.
2010 	 */
2011 	switch (mac->cd_format) {
2012 	case CRYPTO_DATA_RAW:
2013 		if (digest_len != SHA1_DIGEST_LENGTH) {
2014 			/*
2015 			 * The caller requested a short digest. Digest
2016 			 * into a scratch buffer and return to
2017 			 * the user only what was requested.
2018 			 */
2019 			SHA1Final(digest,
2020 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
2021 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
2022 			    mac->cd_offset, digest_len);
2023 		} else {
2024 			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
2025 			    mac->cd_offset,
2026 			    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
2027 		}
2028 		break;
2029 	case CRYPTO_DATA_UIO:
2030 		ret = sha1_digest_final_uio(
2031 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
2032 		    digest_len, digest);
2033 		break;
2034 	case CRYPTO_DATA_MBLK:
2035 		ret = sha1_digest_final_mblk(
2036 		    &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
2037 		    digest_len, digest);
2038 		break;
2039 	default:
2040 		ret = CRYPTO_ARGUMENTS_BAD;
2041 	}
2042 
2043 	if (ret == CRYPTO_SUCCESS) {
2044 		mac->cd_length = digest_len;
2045 	} else {
2046 		mac->cd_length = 0;
2047 	}
2048 
2049 	bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
2050 	kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
2051 	ctx->cc_provider_private = NULL;
2052 
2053 	return (ret);
2054 }
2055 
/*
 * Do a SHA1 update of the inner context of the HMAC context (ctx)
 * using the data described by (data), which may be raw-, uio-, or
 * mblk-based.  For the uio and mblk cases (ret) is set to the helper's
 * return value; the raw case leaves (ret) unchanged.  An unknown
 * cd_format sets (ret) to CRYPTO_ARGUMENTS_BAD.
 */
#define	SHA1_MAC_UPDATE(data, ctx, ret) {				\
	switch (data->cd_format) {					\
	case CRYPTO_DATA_RAW:						\
		SHA1Update(&(ctx).hc_icontext,				\
		    (uint8_t *)data->cd_raw.iov_base +			\
		    data->cd_offset, data->cd_length);			\
		break;							\
	case CRYPTO_DATA_UIO:						\
		ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
		break;							\
	case CRYPTO_DATA_MBLK:						\
		ret = sha1_digest_update_mblk(&(ctx).hc_icontext,	\
		    data);						\
		break;							\
	default:							\
		ret = CRYPTO_ARGUMENTS_BAD;				\
	}								\
}
2074 
/*
 * KCF software provider atomic mac entry point: compute the SHA1-HMAC
 * of (data) with (key) in a single call, without a persistent context.
 * The HMAC context lives on the stack and is zeroized on every exit
 * path.  Returns CRYPTO_SUCCESS and sets mac->cd_length on success;
 * on failure returns a CRYPTO_ error and sets mac->cd_length to 0.
 */
/* ARGSUSED */
static int
sha1_mac_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	/* scratch buffer: holds the digested key, then the inner digest */
	uchar_t digest[SHA1_DIGEST_LENGTH];
	sha1_hmac_ctx_t sha1_hmac_ctx;
	uint32_t digest_len = SHA1_DIGEST_LENGTH;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/* Add support for key by attributes (RFE 4706552) */
	if (key->ck_format != CRYPTO_KEY_RAW)
		return (CRYPTO_ARGUMENTS_BAD);

	if (ctx_template != NULL) {
		/* reuse context template */
		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
	} else {
		/* no context template, initialize context */
		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
			    key->ck_data, keylen_in_bytes, digest);
			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
			    SHA1_DIGEST_LENGTH);
		} else {
			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
			    keylen_in_bytes);
		}
	}

	/* get the mechanism parameters, if applicable */
	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t)) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
		/* the general-length mechanism carries the MAC length */
		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
		if (digest_len > SHA1_DIGEST_LENGTH) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
	}

	/* do a SHA1 update of the inner context using the specified data */
	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
	if (ret != CRYPTO_SUCCESS)
		/* the update failed, free context and bail */
		goto bail;

	/*
	 * Do a SHA1 final on the inner context.
	 */
	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);

	/*
	 * Do an SHA1 update on the outer context, feeding the inner
	 * digest as data.
	 */
	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);

	/*
	 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		if (digest_len != SHA1_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest_len);
		} else {
			SHA1Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	case CRYPTO_DATA_MBLK:
		ret = sha1_digest_final_mblk(&sha1_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		mac->cd_length = digest_len;
	} else {
		mac->cd_length = 0;
	}
	/* Extra paranoia: zeroize the context on the stack */
	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));

	return (ret);
bail:
	/* scrub the key material held in the stack context before leaving */
	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
	mac->cd_length = 0;
	return (ret);
}
2194 
2195 /* ARGSUSED */
2196 static int
2197 sha1_mac_verify_atomic(crypto_provider_handle_t provider,
2198     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
2199     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
2200     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
2201 {
2202 	int ret = CRYPTO_SUCCESS;
2203 	uchar_t digest[SHA1_DIGEST_LENGTH];
2204 	sha1_hmac_ctx_t sha1_hmac_ctx;
2205 	uint32_t digest_len = SHA1_DIGEST_LENGTH;
2206 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
2207 
2208 	if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
2209 	    mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
2210 		return (CRYPTO_MECHANISM_INVALID);
2211 
2212 	/* Add support for key by attributes (RFE 4706552) */
2213 	if (key->ck_format != CRYPTO_KEY_RAW)
2214 		return (CRYPTO_ARGUMENTS_BAD);
2215 
2216 	if (ctx_template != NULL) {
2217 		/* reuse context template */
2218 		bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
2219 	} else {
2220 		/* no context template, initialize context */
2221 		if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
2222 			/*
2223 			 * Hash the passed-in key to get a smaller key.
2224 			 * The inner context is used since it hasn't been
2225 			 * initialized yet.
2226 			 */
2227 			PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
2228 			    key->ck_data, keylen_in_bytes, digest);
2229 			sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
2230 			    SHA1_DIGEST_LENGTH);
2231 		} else {
2232 			sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
2233 			    keylen_in_bytes);
2234 		}
2235 	}
2236 
2237 	/* get the mechanism parameters, if applicable */
2238 	if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
2239 		if (mechanism->cm_param == NULL ||
2240 		    mechanism->cm_param_len != sizeof (ulong_t)) {
2241 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
2242 			goto bail;
2243 		}
2244 		PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
2245 		if (digest_len > SHA1_DIGEST_LENGTH) {
2246 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
2247 			goto bail;
2248 		}
2249 	}
2250 
2251 	if (mac->cd_length != digest_len) {
2252 		ret = CRYPTO_INVALID_MAC;
2253 		goto bail;
2254 	}
2255 
2256 	/* do a SHA1 update of the inner context using the specified data */
2257 	SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
2258 	if (ret != CRYPTO_SUCCESS)
2259 		/* the update failed, free context and bail */
2260 		goto bail;
2261 
2262 	/* do a SHA1 final on the inner context */
2263 	SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
2264 
2265 	/*
2266 	 * Do an SHA1 update on the outer context, feeding the inner
2267 	 * digest as data.
2268 	 */
2269 	SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
2270 
2271 	/*
2272 	 * Do a SHA1 final on the outer context, storing the computed
2273 	 * digest in the users buffer.
2274 	 */
2275 	SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
2276 
2277 	/*
2278 	 * Compare the computed digest against the expected digest passed
2279 	 * as argument.
2280 	 */
2281 
2282 	switch (mac->cd_format) {
2283 
2284 	case CRYPTO_DATA_RAW:
2285 		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
2286 		    mac->cd_offset, digest_len) != 0)
2287 			ret = CRYPTO_INVALID_MAC;
2288 		break;
2289 
2290 	case CRYPTO_DATA_UIO: {
2291 		off_t offset = mac->cd_offset;
2292 		uint_t vec_idx;
2293 		off_t scratch_offset = 0;
2294 		size_t length = digest_len;
2295 		size_t cur_len;
2296 
2297 		/* we support only kernel buffer */
2298 		if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
2299 			return (CRYPTO_ARGUMENTS_BAD);
2300 
2301 		/* jump to the first iovec containing the expected digest */
2302 		for (vec_idx = 0;
2303 		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len &&
2304 		    vec_idx < mac->cd_uio->uio_iovcnt;
2305 		    offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len);
2306 		if (vec_idx == mac->cd_uio->uio_iovcnt) {
2307 			/*
2308 			 * The caller specified an offset that is
2309 			 * larger than the total size of the buffers
2310 			 * it provided.
2311 			 */
2312 			ret = CRYPTO_DATA_LEN_RANGE;
2313 			break;
2314 		}
2315 
2316 		/* do the comparison of computed digest vs specified one */
2317 		while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
2318 			cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
2319 			    offset, length);
2320 
2321 			if (bcmp(digest + scratch_offset,
2322 			    mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
2323 			    cur_len) != 0) {
2324 				ret = CRYPTO_INVALID_MAC;
2325 				break;
2326 			}
2327 
2328 			length -= cur_len;
2329 			vec_idx++;
2330 			scratch_offset += cur_len;
2331 			offset = 0;
2332 		}
2333 		break;
2334 	}
2335 
2336 	case CRYPTO_DATA_MBLK: {
2337 		off_t offset = mac->cd_offset;
2338 		mblk_t *mp;
2339 		off_t scratch_offset = 0;
2340 		size_t length = digest_len;
2341 		size_t cur_len;
2342 
2343 		/* jump to the first mblk_t containing the expected digest */
2344 		for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
2345 		    offset -= MBLKL(mp), mp = mp->b_cont);
2346 		if (mp == NULL) {
2347 			/*
2348 			 * The caller specified an offset that is larger than
2349 			 * the total size of the buffers it provided.
2350 			 */
2351 			ret = CRYPTO_DATA_LEN_RANGE;
2352 			break;
2353 		}
2354 
2355 		while (mp != NULL && length > 0) {
2356 			cur_len = MIN(MBLKL(mp) - offset, length);
2357 			if (bcmp(digest + scratch_offset,
2358 			    mp->b_rptr + offset, cur_len) != 0) {
2359 				ret = CRYPTO_INVALID_MAC;
2360 				break;
2361 			}
2362 
2363 			length -= cur_len;
2364 			mp = mp->b_cont;
2365 			scratch_offset += cur_len;
2366 			offset = 0;
2367 		}
2368 		break;
2369 	}
2370 
2371 	default:
2372 		ret = CRYPTO_ARGUMENTS_BAD;
2373 	}
2374 
2375 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
2376 	return (ret);
2377 bail:
2378 	bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
2379 	mac->cd_length = 0;
2380 	return (ret);
2381 }
2382 
2383 /*
2384  * KCF software provider context management entry points.
2385  */
2386 
2387 /* ARGSUSED */
2388 static int
2389 sha1_create_ctx_template(crypto_provider_handle_t provider,
2390     crypto_mechanism_t *mechanism, crypto_key_t *key,
2391     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
2392     crypto_req_handle_t req)
2393 {
2394 	sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
2395 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
2396 
2397 	if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
2398 	    (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
2399 		return (CRYPTO_MECHANISM_INVALID);
2400 	}
2401 
2402 	/* Add support for key by attributes (RFE 4706552) */
2403 	if (key->ck_format != CRYPTO_KEY_RAW)
2404 		return (CRYPTO_ARGUMENTS_BAD);
2405 
2406 	/*
2407 	 * Allocate and initialize SHA1 context.
2408 	 */
2409 	sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
2410 	    crypto_kmflag(req));
2411 	if (sha1_hmac_ctx_tmpl == NULL)
2412 		return (CRYPTO_HOST_MEMORY);
2413 
2414 	if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
2415 		uchar_t digested_key[SHA1_DIGEST_LENGTH];
2416 
2417 		/*
2418 		 * Hash the passed-in key to get a smaller key.
2419 		 * The inner context is used since it hasn't been
2420 		 * initialized yet.
2421 		 */
2422 		PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
2423 		    key->ck_data, keylen_in_bytes, digested_key);
2424 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
2425 		    SHA1_DIGEST_LENGTH);
2426 	} else {
2427 		sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
2428 		    keylen_in_bytes);
2429 	}
2430 
2431 	sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
2432 	*ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
2433 	*ctx_template_size = sizeof (sha1_hmac_ctx_t);
2434 
2435 
2436 	return (CRYPTO_SUCCESS);
2437 }
2438 
2439 static int
2440 sha1_free_context(crypto_ctx_t *ctx)
2441 {
2442 	uint_t ctx_len;
2443 	sha1_mech_type_t mech_type;
2444 
2445 	if (ctx->cc_provider_private == NULL)
2446 		return (CRYPTO_SUCCESS);
2447 
2448 	/*
2449 	 * We have to free either SHA1 or SHA1-HMAC contexts, which
2450 	 * have different lengths.
2451 	 */
2452 
2453 	mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
2454 	if (mech_type == SHA1_MECH_INFO_TYPE)
2455 		ctx_len = sizeof (sha1_ctx_t);
2456 	else {
2457 		ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
2458 		    mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
2459 		ctx_len = sizeof (sha1_hmac_ctx_t);
2460 	}
2461 
2462 	bzero(ctx->cc_provider_private, ctx_len);
2463 	kmem_free(ctx->cc_provider_private, ctx_len);
2464 	ctx->cc_provider_private = NULL;
2465 
2466 	return (CRYPTO_SUCCESS);
2467 }
2468 
2469 #endif /* _KERNEL */
2470