xref: /titanic_51/usr/src/common/crypto/md5/md5.c (revision aeb0348ba68ad95563cead4a53d0c70e207cf130)
1 /*
2  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Cleaned-up and optimized version of MD5, based on the reference
8  * implementation provided in RFC 1321.  See RSA Copyright information
9  * below.
10  *
11  * NOTE:  All compiler data was gathered with SC4.2, and verified with SC5.x,
12  *	  as used to build Solaris 2.7.  Hopefully the compiler behavior won't
13  *	  change for the worse in subsequent Solaris builds.
14  */
15 
16 #pragma ident	"%Z%%M%	%I%	%E% SMI"
17 
18 /*
19  * MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm
20  */
21 
22 /*
23  * Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
24  * rights reserved.
25  *
26  * License to copy and use this software is granted provided that it
27  * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
28  * Algorithm" in all material mentioning or referencing this software
29  * or this function.
30  *
31  * License is also granted to make and use derivative works provided
32  * that such works are identified as "derived from the RSA Data
33  * Security, Inc. MD5 Message-Digest Algorithm" in all material
34  * mentioning or referencing the derived work.
35  *
36  * RSA Data Security, Inc. makes no representations concerning either
37  * the merchantability of this software or the suitability of this
38  * software for any particular purpose. It is provided "as is"
39  * without express or implied warranty of any kind.
40  *
41  * These notices must be retained in any copies of any part of this
42  * documentation and/or software.
43  */
44 
45 #include <sys/types.h>
46 #include <sys/md5.h>
47 #include <sys/md5_consts.h>	/* MD5_CONST() optimization */
48 #if	!defined(_KERNEL) || defined(_BOOT)
49 #include <strings.h>
50 #endif /* !_KERNEL || _BOOT */
51 
52 #if	defined(_KERNEL) && !defined(_BOOT)
53 
54 /*
55  * In the kernel, the md5 module is created with two modlinkages:
56  * - a modlmisc that allows consumers to directly call the entry points
57  *   MD5Init, MD5Update, and MD5Final.
58  * - a modlcrypto that allows the module to register with the Kernel
59  *   Cryptographic Framework (KCF) as a software provider for the MD5
60  *   mechanisms.
61  */
62 
63 #include <sys/systm.h>
64 #include <sys/modctl.h>
65 #include <sys/cmn_err.h>
66 #include <sys/ddi.h>
67 #include <sys/crypto/common.h>
68 #include <sys/crypto/spi.h>
69 #include <sys/sysmacros.h>
70 #include <sys/strsun.h>
71 #include <sys/note.h>
72 
73 extern struct mod_ops mod_miscops;
74 extern struct mod_ops mod_cryptoops;
75 
76 /*
77  * Module linkage information for the kernel.
78  */
79 
80 static struct modlmisc modlmisc = {
81 	&mod_miscops,
82 	"MD5 Message-Digest Algorithm"
83 };
84 
85 static struct modlcrypto modlcrypto = {
86 	&mod_cryptoops,
87 	"MD5 Kernel SW Provider %I%"
88 };
89 
90 static struct modlinkage modlinkage = {
91 	MODREV_1,
92 	(void *)&modlmisc,
93 	(void *)&modlcrypto,
94 	NULL
95 };
96 
97 /*
98  * CSPI information (entry points, provider info, etc.)
99  */
100 
101 typedef enum md5_mech_type {
102 	MD5_MECH_INFO_TYPE,		/* SUN_CKM_MD5 */
103 	MD5_HMAC_MECH_INFO_TYPE,	/* SUN_CKM_MD5_HMAC */
104 	MD5_HMAC_GEN_MECH_INFO_TYPE	/* SUN_CKM_MD5_HMAC_GENERAL */
105 } md5_mech_type_t;
106 
107 #define	MD5_DIGEST_LENGTH	16	/* MD5 digest length in bytes */
108 #define	MD5_HMAC_BLOCK_SIZE	64	/* MD5 block size */
109 #define	MD5_HMAC_MIN_KEY_LEN	8	/* MD5-HMAC min key length in bits */
110 #define	MD5_HMAC_MAX_KEY_LEN	INT_MAX	/* MD5-HMAC max key length in bits */
111 #define	MD5_HMAC_INTS_PER_BLOCK	(MD5_HMAC_BLOCK_SIZE/sizeof (uint32_t))
112 
113 /*
114  * Context for MD5 mechanism.
115  */
116 typedef struct md5_ctx {
117 	md5_mech_type_t		mc_mech_type;	/* type of context */
118 	MD5_CTX			mc_md5_ctx;	/* MD5 context */
119 } md5_ctx_t;
120 
121 /*
122  * Context for MD5-HMAC and MD5-HMAC-GENERAL mechanisms.
123  */
124 typedef struct md5_hmac_ctx {
125 	md5_mech_type_t		hc_mech_type;	/* type of context */
126 	uint32_t		hc_digest_len;	/* digest len in bytes */
127 	MD5_CTX			hc_icontext;	/* inner MD5 context */
128 	MD5_CTX			hc_ocontext;	/* outer MD5 context */
129 } md5_hmac_ctx_t;
130 
131 /*
132  * Macros to access the MD5 or MD5-HMAC contexts from a context passed
133  * by KCF to one of the entry points.
134  */
135 
136 #define	PROV_MD5_CTX(ctx)	((md5_ctx_t *)(ctx)->cc_provider_private)
137 #define	PROV_MD5_HMAC_CTX(ctx)	((md5_hmac_ctx_t *)(ctx)->cc_provider_private)
138 
139 /* to extract the digest length passed as mechanism parameter */
140 #define	PROV_MD5_GET_DIGEST_LEN(m, len) {				\
141 	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
142 		(len) = (uint32_t)*((ulong_t *)(m)->cm_param);		\
143 	else {								\
144 		ulong_t tmp_ulong;					\
145 		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
146 		(len) = (uint32_t)tmp_ulong;				\
147 	}								\
148 }
149 
150 #define	PROV_MD5_DIGEST_KEY(ctx, key, len, digest) {	\
151 	MD5Init(ctx);					\
152 	MD5Update(ctx, key, len);			\
153 	MD5Final(digest, ctx);				\
154 }
155 
156 /*
157  * Mechanism info structure passed to KCF during registration.
158  */
159 static crypto_mech_info_t md5_mech_info_tab[] = {
160 	/* MD5 */
161 	{SUN_CKM_MD5, MD5_MECH_INFO_TYPE,
162 	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
163 	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
164 	/* MD5-HMAC */
165 	{SUN_CKM_MD5_HMAC, MD5_HMAC_MECH_INFO_TYPE,
166 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
167 	    MD5_HMAC_MIN_KEY_LEN, MD5_HMAC_MAX_KEY_LEN,
168 	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
169 	/* MD5-HMAC GENERAL */
170 	{SUN_CKM_MD5_HMAC_GENERAL, MD5_HMAC_GEN_MECH_INFO_TYPE,
171 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
172 	    MD5_HMAC_MIN_KEY_LEN, MD5_HMAC_MAX_KEY_LEN,
173 	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
174 };
175 
176 static void md5_provider_status(crypto_provider_handle_t, uint_t *);
177 
178 static crypto_control_ops_t md5_control_ops = {
179 	md5_provider_status
180 };
181 
182 static int md5_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
183     crypto_req_handle_t);
184 static int md5_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
185     crypto_req_handle_t);
186 static int md5_digest_update(crypto_ctx_t *, crypto_data_t *,
187     crypto_req_handle_t);
188 static int md5_digest_final(crypto_ctx_t *, crypto_data_t *,
189     crypto_req_handle_t);
190 static int md5_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
191     crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
192     crypto_req_handle_t);
193 
194 static crypto_digest_ops_t md5_digest_ops = {
195 	md5_digest_init,
196 	md5_digest,
197 	md5_digest_update,
198 	NULL,
199 	md5_digest_final,
200 	md5_digest_atomic
201 };
202 
203 static int md5_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
204     crypto_spi_ctx_template_t, crypto_req_handle_t);
205 static int md5_mac_update(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
206 static int md5_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
207 static int md5_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
208     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
209     crypto_spi_ctx_template_t, crypto_req_handle_t);
210 static int md5_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
211     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
212     crypto_spi_ctx_template_t, crypto_req_handle_t);
213 
214 static crypto_mac_ops_t md5_mac_ops = {
215 	md5_mac_init,
216 	NULL,
217 	md5_mac_update,
218 	md5_mac_final,
219 	md5_mac_atomic,
220 	md5_mac_verify_atomic
221 };
222 
223 static int md5_create_ctx_template(crypto_provider_handle_t,
224     crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
225     size_t *, crypto_req_handle_t);
226 static int md5_free_context(crypto_ctx_t *);
227 
228 static crypto_ctx_ops_t md5_ctx_ops = {
229 	md5_create_ctx_template,
230 	md5_free_context
231 };
232 
233 static crypto_ops_t md5_crypto_ops = {
234 	&md5_control_ops,
235 	&md5_digest_ops,
236 	NULL,
237 	&md5_mac_ops,
238 	NULL,
239 	NULL,
240 	NULL,
241 	NULL,
242 	NULL,
243 	NULL,
244 	NULL,
245 	NULL,
246 	NULL,
247 	&md5_ctx_ops
248 };
249 
250 static crypto_provider_info_t md5_prov_info = {
251 	CRYPTO_SPI_VERSION_1,
252 	"MD5 Software Provider",
253 	CRYPTO_SW_PROVIDER,
254 	{&modlinkage},
255 	NULL,
256 	&md5_crypto_ops,
257 	sizeof (md5_mech_info_tab)/sizeof (crypto_mech_info_t),
258 	md5_mech_info_tab
259 };
260 
261 static crypto_kcf_provider_handle_t md5_prov_handle = NULL;
262 
263 int
264 _init(void)
265 {
266 	int ret;
267 
268 	if ((ret = mod_install(&modlinkage)) != 0)
269 		return (ret);
270 
271 	/*
272 	 * Register with KCF. If the registration fails, log an
273 	 * error but do not uninstall the module, since the functionality
274 	 * provided by misc/md5 should still be available.
275 	 */
276 	if ((ret = crypto_register_provider(&md5_prov_info,
277 	    &md5_prov_handle)) != CRYPTO_SUCCESS)
278 		cmn_err(CE_WARN, "md5 _init: "
279 		    "crypto_register_provider() failed (0x%x)", ret);
280 
281 	return (0);
282 }
283 
284 int
285 _fini(void)
286 {
287 	int ret;
288 
289 	/*
290 	 * Unregister from KCF if previous registration succeeded.
291 	 */
292 	if (md5_prov_handle != NULL) {
293 		if ((ret = crypto_unregister_provider(md5_prov_handle)) !=
294 		    CRYPTO_SUCCESS) {
295 			cmn_err(CE_WARN, "md5 _fini: "
296 			    "crypto_unregister_provider() failed (0x%x)", ret);
297 			return (EBUSY);
298 		}
299 		md5_prov_handle = NULL;
300 	}
301 
302 	return (mod_remove(&modlinkage));
303 }
304 
305 int
306 _info(struct modinfo *modinfop)
307 {
308 	return (mod_info(&modlinkage, modinfop));
309 }
310 #endif	/* _KERNEL && !_BOOT */
311 
312 static void Encode(uint8_t *, uint32_t *, size_t);
313 static void MD5Transform(uint32_t, uint32_t, uint32_t, uint32_t, MD5_CTX *,
314     const uint8_t [64]);
315 
316 static uint8_t PADDING[64] = { 0x80, /* all zeros */ };
317 
318 /*
319  * F, G, H and I are the basic MD5 functions.
320  */
321 #define	F(b, c, d)	(((b) & (c)) | ((~b) & (d)))
322 #define	G(b, c, d)	(((b) & (d)) | ((c) & (~d)))
323 #define	H(b, c, d)	((b) ^ (c) ^ (d))
324 #define	I(b, c, d)	((c) ^ ((b) | (~d)))
325 
326 /*
327  * ROTATE_LEFT rotates x left n bits.
328  */
329 #define	ROTATE_LEFT(x, n)	\
330 	(((x) << (n)) | ((x) >> ((sizeof (x) << 3) - (n))))
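/*
 * For a uint32_t x, (sizeof (x) << 3) is 32, so e.g. ROTATE_LEFT(x, 7)
 * evaluates to ((x << 7) | (x >> 25)) -- a plain 32-bit left rotate.
 */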
331 
332 /*
333  * FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
334  * Rotation is separate from addition to prevent recomputation.
335  */
336 
337 #define	FF(a, b, c, d, x, s, ac) { \
338 	(a) += F((b), (c), (d)) + (x) + ((unsigned long long)(ac)); \
339 	(a) = ROTATE_LEFT((a), (s)); \
340 	(a) += (b); \
341 	}
342 
343 #define	GG(a, b, c, d, x, s, ac) { \
344 	(a) += G((b), (c), (d)) + (x) + ((unsigned long long)(ac)); \
345 	(a) = ROTATE_LEFT((a), (s)); \
346 	(a) += (b); \
347 	}
348 
349 #define	HH(a, b, c, d, x, s, ac) { \
350 	(a) += H((b), (c), (d)) + (x) + ((unsigned long long)(ac)); \
351 	(a) = ROTATE_LEFT((a), (s)); \
352 	(a) += (b); \
353 	}
354 
355 #define	II(a, b, c, d, x, s, ac) { \
356 	(a) += I((b), (c), (d)) + (x) + ((unsigned long long)(ac)); \
357 	(a) = ROTATE_LEFT((a), (s)); \
358 	(a) += (b); \
359 	}
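/*
 * Each of these expands to one MD5 round step of the general form
 *
 *	a = b + ROTATE_LEFT(a + F(b, c, d) + x_k + T_i, s)
 *
 * where T_i is the i-th sine-derived constant from RFC 1321 (supplied by
 * MD5_CONST_e()/MD5_CONST_o() below) and s is the per-round rotate amount
 * passed as MD5_SHIFT_* (presumably 7, 12, 17 and 22 in round 1, matching
 * S11..S14 in RFC 1321).
 */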
360 
361 /*
362  * Loading 32-bit constants on a RISC is expensive since it involves both a
363  * `sethi' and an `or'.  thus, we instead have the compiler generate `ld's to
364  * load the constants from an array called `md5_consts'.  however, on intel
365  * (and other CISC processors), it is cheaper to load the constant
366  * directly.  thus, the c code in MD5Transform() uses the macro MD5_CONST()
367  * which either expands to a constant or an array reference, depending on the
368  * architecture the code is being compiled for.
369  *
370  * Right now, i386 and amd64 are the CISC exceptions.
371  * If we get another CISC ISA, we'll have to change the ifdef.
372  */
373 
374 /*
375  * Using the %asi register to achieve little endian loads - register
376  * is set using an inline template.
377  *
378  * Saves a few arithmetic ops as can now use an immediate offset with the
379  * lduwa instructions.
380  */
381 
382 extern void set_little(uint32_t);
383 extern uint32_t get_little(void);
384 
385 #if defined(__i386) || defined(__amd64)
386 
387 #define	MD5_CONST(x)		(MD5_CONST_ ## x)
388 #define	MD5_CONST_e(x)		MD5_CONST(x)
389 #define	MD5_CONST_o(x)		MD5_CONST(x)
390 
391 #else
392 /*
393  * sparc/RISC optimization:
394  *
395  * while it is somewhat counter-intuitive, on sparc (and presumably other RISC
396  * machines), it is more efficient to place all the constants used in this
397  * function in an array and load the values out of the array than to manually
398  * load the constants.  this is because setting a register to a 32-bit value
399  * takes two ops in most cases: a `sethi' and an `or', but loading a 32-bit
400  * value from memory only takes one `ld' (or `lduw' on v9).  while this
401  * increases memory usage, the compiler can find enough other things to do
402  * while waiting so that the pipeline does not stall.  additionally, it is
403  * likely that many of these constants are cached so that later accesses do
404  * not even go out to the bus.
405  *
406  * this array is declared `static' to keep the compiler from having to
407  * bcopy() this array onto the stack frame of MD5Transform() each time it is
408  * called -- which is unacceptably expensive.
409  *
410  * the `const' is to ensure that callers are good citizens and do not try to
411  * munge the array.  since these routines are going to be called from inside
412  * multithreaded kernelland, this is a good safety check.  as a bonus, the
413  * `const' means the array will end up in .rodata.
414  *
415  * unfortunately, loading from an array in this manner hurts performance under
416  * intel (and presumably other CISC machines).  so, there is a macro,
417  * MD5_CONST(), used in MD5Transform(), that either expands to a reference to
418  * this array, or to the actual constant, depending on what platform this code
419  * is compiled for.
420  */
421 
422 #ifdef sun4v
423 
424 /*
425  * Going to load these consts in 8B chunks, so need to enforce 8B alignment
426  */
427 
428 /* CSTYLED */
429 #pragma align 64 (md5_consts)
430 
431 #endif /* sun4v */
432 
433 static const uint32_t md5_consts[] = {
434 	MD5_CONST_0,	MD5_CONST_1,	MD5_CONST_2,	MD5_CONST_3,
435 	MD5_CONST_4,	MD5_CONST_5,	MD5_CONST_6,	MD5_CONST_7,
436 	MD5_CONST_8,	MD5_CONST_9,	MD5_CONST_10,	MD5_CONST_11,
437 	MD5_CONST_12,	MD5_CONST_13,	MD5_CONST_14,	MD5_CONST_15,
438 	MD5_CONST_16,	MD5_CONST_17,	MD5_CONST_18,	MD5_CONST_19,
439 	MD5_CONST_20,	MD5_CONST_21,	MD5_CONST_22,	MD5_CONST_23,
440 	MD5_CONST_24,	MD5_CONST_25,	MD5_CONST_26,	MD5_CONST_27,
441 	MD5_CONST_28,	MD5_CONST_29,	MD5_CONST_30,	MD5_CONST_31,
442 	MD5_CONST_32,	MD5_CONST_33,	MD5_CONST_34,	MD5_CONST_35,
443 	MD5_CONST_36,	MD5_CONST_37,	MD5_CONST_38,	MD5_CONST_39,
444 	MD5_CONST_40,	MD5_CONST_41,	MD5_CONST_42,	MD5_CONST_43,
445 	MD5_CONST_44,	MD5_CONST_45,	MD5_CONST_46,	MD5_CONST_47,
446 	MD5_CONST_48,	MD5_CONST_49,	MD5_CONST_50,	MD5_CONST_51,
447 	MD5_CONST_52,	MD5_CONST_53,	MD5_CONST_54,	MD5_CONST_55,
448 	MD5_CONST_56,	MD5_CONST_57,	MD5_CONST_58,	MD5_CONST_59,
449 	MD5_CONST_60,	MD5_CONST_61,	MD5_CONST_62,	MD5_CONST_63
450 };
451 
452 
453 #ifdef sun4v
454 /*
455  * To reduce the number of loads, load consts in 64-bit
456  * chunks and then split.
457  *
458  * No need to mask the upper 32 bits, as we are only interested in the
459  * low 32 bits (this saves an & operation and means that this
460  * optimization doesn't increase the icount).
461  */
462 #define	MD5_CONST_e(x)		(md5_consts64[x/2] >> 32)
463 #define	MD5_CONST_o(x)		(md5_consts64[x/2])
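/*
 * Illustrative example: sun4v is big-endian, so md5_consts64[1] holds
 * md5_consts[2] in its upper 32 bits and md5_consts[3] in its lower 32
 * bits.  Thus MD5_CONST_e(2) == md5_consts[2], and MD5_CONST_o(3) yields
 * a 64-bit value whose low 32 bits are md5_consts[3]; the high bits are
 * discarded by the 32-bit assignment in the round macros.
 */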
464 
465 #else
466 
467 #define	MD5_CONST_e(x)		(md5_consts[x])
468 #define	MD5_CONST_o(x)		(md5_consts[x])
469 
470 #endif /* sun4v */
471 
472 #endif
473 
474 /*
475  * MD5Init()
476  *
477  * purpose: initializes the md5 context and begins an md5 digest operation
478  *   input: MD5_CTX *	: the context to initialize.
479  *  output: void
480  */
481 
482 void
483 MD5Init(MD5_CTX *ctx)
484 {
485 	ctx->count[0] = ctx->count[1] = 0;
486 
487 	/* load magic initialization constants */
488 	ctx->state[0] = MD5_INIT_CONST_1;
489 	ctx->state[1] = MD5_INIT_CONST_2;
490 	ctx->state[2] = MD5_INIT_CONST_3;
491 	ctx->state[3] = MD5_INIT_CONST_4;
492 }
493 
494 /*
495  * MD5Update()
496  *
497  * purpose: continues an md5 digest operation, using the message block
498  *          to update the context.
499  *   input: MD5_CTX *	: the context to update
500  *          uint8_t *	: the message block
501  *          uint32_t    : the length of the message block in bytes
502  *  output: void
503  *
504  * MD5 crunches in 64-byte blocks.  All numeric constants here are related to
505  * that property of MD5.
506  */
507 
508 void
509 MD5Update(MD5_CTX *ctx, const void *inpp, unsigned int input_len)
510 {
511 	uint32_t		i, buf_index, buf_len;
512 #ifdef	sun4v
513 	uint32_t		old_asi;
514 #endif	/* sun4v */
515 	const unsigned char 	*input = (const unsigned char *)inpp;
516 
517 	/* compute (number of bytes computed so far) mod 64 */
518 	buf_index = (ctx->count[0] >> 3) & 0x3F;
519 
520 	/* update number of bits hashed into this MD5 computation so far */
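	/*
	 * ctx->count is a 64-bit bit count kept in two uint32_t words: the
	 * comparison below detects carry out of the low word, and
	 * (input_len >> 29) adds the high-order bits of input_len * 8.
	 */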
521 	if ((ctx->count[0] += (input_len << 3)) < (input_len << 3))
522 		ctx->count[1]++;
523 	ctx->count[1] += (input_len >> 29);
524 
525 	buf_len = 64 - buf_index;
526 
527 	/* transform as many times as possible */
528 	i = 0;
529 	if (input_len >= buf_len) {
530 
531 		/*
532 		 * general optimization:
533 		 *
534 		 * only do initial bcopy() and MD5Transform() if
535 		 * buf_index != 0.  if buf_index == 0, we're just
536 		 * wasting our time doing the bcopy() since there
537 		 * wasn't any data left over from a previous call to
538 		 * MD5Update().
539 		 */
540 
541 #ifdef sun4v
542 		/*
543 		 * For N1 use %asi register. However, costly to repeatedly set
544 		 * in MD5Transform. Therefore, set once here.
545 		 * The old value is restored below, once the transforms are done.
546 		 */
547 		old_asi = get_little();
548 		set_little(0x88);
549 #endif /* sun4v */
550 
551 		if (buf_index) {
552 			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
553 
554 			MD5Transform(ctx->state[0], ctx->state[1],
555 			    ctx->state[2], ctx->state[3], ctx,
556 			    ctx->buf_un.buf8);
557 
558 			i = buf_len;
559 		}
560 
561 		for (; i + 63 < input_len; i += 64)
562 			MD5Transform(ctx->state[0], ctx->state[1],
563 			    ctx->state[2], ctx->state[3], ctx, &input[i]);
564 
565 
566 #ifdef sun4v
567 		/*
568 		 * Restore old %ASI value
569 		 */
570 		set_little(old_asi);
571 #endif /* sun4v */
572 
573 		/*
574 		 * general optimization:
575 		 *
576 		 * if i and input_len are the same, return now instead
577 		 * of calling bcopy(), since the bcopy() in this
578 		 * case will be an expensive nop.
579 		 */
580 
581 		if (input_len == i)
582 			return;
583 
584 		buf_index = 0;
585 	}
586 
587 	/* buffer remaining input */
588 	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
589 }
590 
591 /*
592  * MD5Final()
593  *
594  * purpose: ends an md5 digest operation, finalizing the message digest and
595  *          zeroing the context.
596  *   input: uint8_t *	: a buffer to store the digest in
597  *          MD5_CTX *   : the context to finalize, save, and zero
598  *          MD5_CTX *   : the context to finalize and save
599  */
600 
601 void
602 MD5Final(unsigned char *digest, MD5_CTX *ctx)
603 {
604 	uint8_t		bitcount_le[sizeof (ctx->count)];
605 	uint32_t	index = (ctx->count[0] >> 3) & 0x3f;
606 
607 	/* store bit count, little endian */
608 	Encode(bitcount_le, ctx->count, sizeof (bitcount_le));
609 
610 	/* pad out to 56 mod 64 */
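	/*
	 * Illustrative arithmetic: if index == 20, 56 - 20 == 36 bytes of
	 * PADDING are appended; if index == 60, 120 - 60 == 60 bytes are,
	 * wrapping into the next 64-byte block.  Either way the data ends
	 * up at 56 mod 64, leaving exactly 8 bytes for the bit count below.
	 */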
611 	MD5Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
612 
613 	/* append length (before padding) */
614 	MD5Update(ctx, bitcount_le, sizeof (bitcount_le));
615 
616 	/* store state in digest */
617 	Encode(digest, ctx->state, sizeof (ctx->state));
618 }
619 
620 #ifndef	_KERNEL
621 
622 void
623 md5_calc(unsigned char *output, unsigned char *input, unsigned int inlen)
624 {
625 	MD5_CTX context;
626 
627 	MD5Init(&context);
628 	MD5Update(&context, input, inlen);
629 	MD5Final(output, &context);
630 }
631 
632 #endif	/* !_KERNEL */
633 
634 /*
635  * Little-endian optimization:  I don't need to do any weirdness.   On
636  * some little-endian boxen, I'll have to do alignment checks, but I can do
637  * that below.
638  */
639 
640 #ifdef _LITTLE_ENDIAN
641 
642 #if !defined(__i386) && !defined(__amd64)
643 /*
644  * i386 and amd64 don't require aligned 4-byte loads.  The symbol
645  * _MD5_CHECK_ALIGNMENT indicates below whether the MD5Transform function
646  * requires alignment checking.
647  */
648 #define	_MD5_CHECK_ALIGNMENT
649 #endif /* !__i386 && !__amd64 */
650 
651 #define	LOAD_LITTLE_32(addr)	(*(uint32_t *)(addr))
652 
653 /*
654  * sparc v9/v8plus optimization:
655  *
656  * on the sparc v9/v8plus, we can load data little endian.  however, since
657  * the compiler doesn't have direct support for little endian, we
658  * link to an assembly-language routine `load_little_32' to do
659  * the magic.  note that special care must be taken to ensure the
660  * address is 32-bit aligned -- in the interest of speed, we don't
661  * check to make sure, since careful programming can guarantee this
662  * for us.
663  */
664 
665 #elif	defined(sun4u)
666 
667 /* Define alignment check because we can 4-byte load as little endian. */
668 #define	_MD5_CHECK_ALIGNMENT
669 
670 extern  uint32_t load_little_32(uint32_t *);
671 #define	LOAD_LITTLE_32(addr)    load_little_32((uint32_t *)(addr))
672 
673 #ifdef sun4v
674 
675 /*
676  * For N1 want to minimize number of arithmetic operations. This is best
677  * achieved by using the %asi register to specify ASI for the lduwa operations.
678  * Also, have a separate inline template for each word, so can utilize the
679  * immediate offset in lduwa, without relying on the compiler to do the right
680  * thing.
681  *
682  * Moving to 64-bit loads might also be beneficial.
683  */
684 
685 extern	uint32_t load_little_32_0(uint32_t *);
686 extern	uint32_t load_little_32_1(uint32_t *);
687 extern	uint32_t load_little_32_2(uint32_t *);
688 extern	uint32_t load_little_32_3(uint32_t *);
689 extern	uint32_t load_little_32_4(uint32_t *);
690 extern	uint32_t load_little_32_5(uint32_t *);
691 extern	uint32_t load_little_32_6(uint32_t *);
692 extern	uint32_t load_little_32_7(uint32_t *);
693 extern	uint32_t load_little_32_8(uint32_t *);
694 extern	uint32_t load_little_32_9(uint32_t *);
695 extern	uint32_t load_little_32_a(uint32_t *);
696 extern	uint32_t load_little_32_b(uint32_t *);
697 extern	uint32_t load_little_32_c(uint32_t *);
698 extern	uint32_t load_little_32_d(uint32_t *);
699 extern	uint32_t load_little_32_e(uint32_t *);
700 extern	uint32_t load_little_32_f(uint32_t *);
701 #define	LOAD_LITTLE_32_0(addr)	load_little_32_0((uint32_t *)(addr))
702 #define	LOAD_LITTLE_32_1(addr)	load_little_32_1((uint32_t *)(addr))
703 #define	LOAD_LITTLE_32_2(addr)	load_little_32_2((uint32_t *)(addr))
704 #define	LOAD_LITTLE_32_3(addr)	load_little_32_3((uint32_t *)(addr))
705 #define	LOAD_LITTLE_32_4(addr)	load_little_32_4((uint32_t *)(addr))
706 #define	LOAD_LITTLE_32_5(addr)	load_little_32_5((uint32_t *)(addr))
707 #define	LOAD_LITTLE_32_6(addr)	load_little_32_6((uint32_t *)(addr))
708 #define	LOAD_LITTLE_32_7(addr)	load_little_32_7((uint32_t *)(addr))
709 #define	LOAD_LITTLE_32_8(addr)	load_little_32_8((uint32_t *)(addr))
710 #define	LOAD_LITTLE_32_9(addr)	load_little_32_9((uint32_t *)(addr))
711 #define	LOAD_LITTLE_32_a(addr)	load_little_32_a((uint32_t *)(addr))
712 #define	LOAD_LITTLE_32_b(addr)	load_little_32_b((uint32_t *)(addr))
713 #define	LOAD_LITTLE_32_c(addr)	load_little_32_c((uint32_t *)(addr))
714 #define	LOAD_LITTLE_32_d(addr)	load_little_32_d((uint32_t *)(addr))
715 #define	LOAD_LITTLE_32_e(addr)	load_little_32_e((uint32_t *)(addr))
716 #define	LOAD_LITTLE_32_f(addr)	load_little_32_f((uint32_t *)(addr))
717 #endif /* sun4v */
718 
719 /* Placate lint */
720 #if	defined(__lint)
721 uint32_t
722 load_little_32(uint32_t *addr)
723 {
724 	return (*addr);
725 }
726 #endif
727 
728 #else	/* big endian -- will work on little endian, but slowly */
729 
730 /* Since we do byte operations, we don't have to check for alignment. */
731 #define	LOAD_LITTLE_32(addr)	\
732 	((addr)[0] | ((addr)[1] << 8) | ((addr)[2] << 16) | ((addr)[3] << 24))
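/*
 * For example, the bytes { 0x01, 0x02, 0x03, 0x04 } at `addr' assemble to
 * 0x04030201, the same value a true little-endian load would produce.
 */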
733 #endif
734 
735 /*
736  * sparc register window optimization:
737  *
738  * `a', `b', `c', and `d' are passed into MD5Transform explicitly
739  * since it increases the number of registers available to the
740  * compiler.  under this scheme, these variables can be held in
741  * %i0 - %i3, which leaves more local and out registers available.
742  */
743 
744 /*
745  * MD5Transform()
746  *
747  * purpose: md5 transformation -- updates the digest based on `block'
748  *   input: uint32_t	: bytes  1 -  4 of the digest
749  *          uint32_t	: bytes  5 -  8 of the digest
750  *          uint32_t	: bytes  9 - 12 of the digest
751  *          uint32_t	: bytes 13 - 16 of the digest
752  *          MD5_CTX *   : the context to update
753  *          uint8_t [64]: the block to use to update the digest
754  *  output: void
755  */
756 
757 static void
758 MD5Transform(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
759     MD5_CTX *ctx, const uint8_t block[64])
760 {
761 	/*
762 	 * general optimization:
763 	 *
764 	 * use individual integers instead of using an array.  this is a
765 	 * win, although the amount it wins by seems to vary quite a bit.
766 	 */
767 
768 	register uint32_t	x_0, x_1, x_2,  x_3,  x_4,  x_5,  x_6,  x_7;
769 	register uint32_t	x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15;
770 #ifdef sun4v
771 	unsigned long long 	*md5_consts64;
772 
773 	md5_consts64 = (unsigned long long *) md5_consts;
774 #endif	/* sun4v */
775 
776 	/*
777 	 * general optimization:
778 	 *
779 	 * the compiler (at least SC4.2/5.x) generates better code if
780 	 * variable use is localized.  in this case, swapping the integers in
781 	 * this order allows `x_0 'to be swapped nearest to its first use in
782 	 * FF(), and likewise for `x_1' and up.  note that the compiler
783 	 * prefers this to doing each swap right before the FF() that
784 	 * uses it.
785 	 */
786 
787 	/*
788 	 * sparc v9/v8plus optimization:
789 	 *
790 	 * if `block' is already aligned on a 4-byte boundary, use the
791 	 * optimized load_little_32() directly.  otherwise, bcopy()
792 	 * into a buffer that *is* aligned on a 4-byte boundary and
793 	 * then do the load_little_32() on that buffer.  benchmarks
794 	 * have shown that using the bcopy() is better than loading
795 	 * the bytes individually and doing the endian-swap by hand.
796 	 *
797 	 * even though it's quite tempting to unconditionally do:
798 	 *
799 	 * bcopy(block, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
800 	 *
801 	 * and only have one set of LOAD_LITTLE_32()'s, the compiler (at least
802 	 * SC4.2/5.x) *does not* like that, so please resist the urge.
803 	 */
804 
805 #ifdef _MD5_CHECK_ALIGNMENT
806 	if ((uintptr_t)block & 0x3) {		/* not 4-byte aligned? */
807 		bcopy(block, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
808 
809 #ifdef sun4v
810 		x_15 = LOAD_LITTLE_32_f(ctx->buf_un.buf32);
811 		x_14 = LOAD_LITTLE_32_e(ctx->buf_un.buf32);
812 		x_13 = LOAD_LITTLE_32_d(ctx->buf_un.buf32);
813 		x_12 = LOAD_LITTLE_32_c(ctx->buf_un.buf32);
814 		x_11 = LOAD_LITTLE_32_b(ctx->buf_un.buf32);
815 		x_10 = LOAD_LITTLE_32_a(ctx->buf_un.buf32);
816 		x_9  = LOAD_LITTLE_32_9(ctx->buf_un.buf32);
817 		x_8  = LOAD_LITTLE_32_8(ctx->buf_un.buf32);
818 		x_7  = LOAD_LITTLE_32_7(ctx->buf_un.buf32);
819 		x_6  = LOAD_LITTLE_32_6(ctx->buf_un.buf32);
820 		x_5  = LOAD_LITTLE_32_5(ctx->buf_un.buf32);
821 		x_4  = LOAD_LITTLE_32_4(ctx->buf_un.buf32);
822 		x_3  = LOAD_LITTLE_32_3(ctx->buf_un.buf32);
823 		x_2  = LOAD_LITTLE_32_2(ctx->buf_un.buf32);
824 		x_1  = LOAD_LITTLE_32_1(ctx->buf_un.buf32);
825 		x_0  = LOAD_LITTLE_32_0(ctx->buf_un.buf32);
826 #else
827 		x_15 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 15);
828 		x_14 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 14);
829 		x_13 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 13);
830 		x_12 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 12);
831 		x_11 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 11);
832 		x_10 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 10);
833 		x_9  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  9);
834 		x_8  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  8);
835 		x_7  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  7);
836 		x_6  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  6);
837 		x_5  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  5);
838 		x_4  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  4);
839 		x_3  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  3);
840 		x_2  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  2);
841 		x_1  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  1);
842 		x_0  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  0);
843 #endif /* sun4v */
844 	} else
845 #endif
846 	{
847 
848 #ifdef sun4v
849 		x_15 = LOAD_LITTLE_32_f(block);
850 		x_14 = LOAD_LITTLE_32_e(block);
851 		x_13 = LOAD_LITTLE_32_d(block);
852 		x_12 = LOAD_LITTLE_32_c(block);
853 		x_11 = LOAD_LITTLE_32_b(block);
854 		x_10 = LOAD_LITTLE_32_a(block);
855 		x_9  = LOAD_LITTLE_32_9(block);
856 		x_8  = LOAD_LITTLE_32_8(block);
857 		x_7  = LOAD_LITTLE_32_7(block);
858 		x_6  = LOAD_LITTLE_32_6(block);
859 		x_5  = LOAD_LITTLE_32_5(block);
860 		x_4  = LOAD_LITTLE_32_4(block);
861 		x_3  = LOAD_LITTLE_32_3(block);
862 		x_2  = LOAD_LITTLE_32_2(block);
863 		x_1  = LOAD_LITTLE_32_1(block);
864 		x_0  = LOAD_LITTLE_32_0(block);
865 #else
866 		x_15 = LOAD_LITTLE_32(block + 60);
867 		x_14 = LOAD_LITTLE_32(block + 56);
868 		x_13 = LOAD_LITTLE_32(block + 52);
869 		x_12 = LOAD_LITTLE_32(block + 48);
870 		x_11 = LOAD_LITTLE_32(block + 44);
871 		x_10 = LOAD_LITTLE_32(block + 40);
872 		x_9  = LOAD_LITTLE_32(block + 36);
873 		x_8  = LOAD_LITTLE_32(block + 32);
874 		x_7  = LOAD_LITTLE_32(block + 28);
875 		x_6  = LOAD_LITTLE_32(block + 24);
876 		x_5  = LOAD_LITTLE_32(block + 20);
877 		x_4  = LOAD_LITTLE_32(block + 16);
878 		x_3  = LOAD_LITTLE_32(block + 12);
879 		x_2  = LOAD_LITTLE_32(block +  8);
880 		x_1  = LOAD_LITTLE_32(block +  4);
881 		x_0  = LOAD_LITTLE_32(block +  0);
882 #endif /* sun4v */
883 	}
884 
885 	/* round 1 */
886 	FF(a, b, c, d, 	x_0, MD5_SHIFT_11, MD5_CONST_e(0));  /* 1 */
887 	FF(d, a, b, c, 	x_1, MD5_SHIFT_12, MD5_CONST_o(1));  /* 2 */
888 	FF(c, d, a, b, 	x_2, MD5_SHIFT_13, MD5_CONST_e(2));  /* 3 */
889 	FF(b, c, d, a, 	x_3, MD5_SHIFT_14, MD5_CONST_o(3));  /* 4 */
890 	FF(a, b, c, d, 	x_4, MD5_SHIFT_11, MD5_CONST_e(4));  /* 5 */
891 	FF(d, a, b, c, 	x_5, MD5_SHIFT_12, MD5_CONST_o(5));  /* 6 */
892 	FF(c, d, a, b, 	x_6, MD5_SHIFT_13, MD5_CONST_e(6));  /* 7 */
893 	FF(b, c, d, a, 	x_7, MD5_SHIFT_14, MD5_CONST_o(7));  /* 8 */
894 	FF(a, b, c, d, 	x_8, MD5_SHIFT_11, MD5_CONST_e(8));  /* 9 */
895 	FF(d, a, b, c, 	x_9, MD5_SHIFT_12, MD5_CONST_o(9));  /* 10 */
896 	FF(c, d, a, b, x_10, MD5_SHIFT_13, MD5_CONST_e(10)); /* 11 */
897 	FF(b, c, d, a, x_11, MD5_SHIFT_14, MD5_CONST_o(11)); /* 12 */
898 	FF(a, b, c, d, x_12, MD5_SHIFT_11, MD5_CONST_e(12)); /* 13 */
899 	FF(d, a, b, c, x_13, MD5_SHIFT_12, MD5_CONST_o(13)); /* 14 */
900 	FF(c, d, a, b, x_14, MD5_SHIFT_13, MD5_CONST_e(14)); /* 15 */
901 	FF(b, c, d, a, x_15, MD5_SHIFT_14, MD5_CONST_o(15)); /* 16 */
902 
903 	/* round 2 */
904 	GG(a, b, c, d,  x_1, MD5_SHIFT_21, MD5_CONST_e(16)); /* 17 */
905 	GG(d, a, b, c,  x_6, MD5_SHIFT_22, MD5_CONST_o(17)); /* 18 */
906 	GG(c, d, a, b, x_11, MD5_SHIFT_23, MD5_CONST_e(18)); /* 19 */
907 	GG(b, c, d, a,  x_0, MD5_SHIFT_24, MD5_CONST_o(19)); /* 20 */
908 	GG(a, b, c, d,  x_5, MD5_SHIFT_21, MD5_CONST_e(20)); /* 21 */
909 	GG(d, a, b, c, x_10, MD5_SHIFT_22, MD5_CONST_o(21)); /* 22 */
910 	GG(c, d, a, b, x_15, MD5_SHIFT_23, MD5_CONST_e(22)); /* 23 */
911 	GG(b, c, d, a,  x_4, MD5_SHIFT_24, MD5_CONST_o(23)); /* 24 */
912 	GG(a, b, c, d,  x_9, MD5_SHIFT_21, MD5_CONST_e(24)); /* 25 */
913 	GG(d, a, b, c, x_14, MD5_SHIFT_22, MD5_CONST_o(25)); /* 26 */
914 	GG(c, d, a, b,  x_3, MD5_SHIFT_23, MD5_CONST_e(26)); /* 27 */
915 	GG(b, c, d, a,  x_8, MD5_SHIFT_24, MD5_CONST_o(27)); /* 28 */
916 	GG(a, b, c, d, x_13, MD5_SHIFT_21, MD5_CONST_e(28)); /* 29 */
917 	GG(d, a, b, c,  x_2, MD5_SHIFT_22, MD5_CONST_o(29)); /* 30 */
918 	GG(c, d, a, b,  x_7, MD5_SHIFT_23, MD5_CONST_e(30)); /* 31 */
919 	GG(b, c, d, a, x_12, MD5_SHIFT_24, MD5_CONST_o(31)); /* 32 */
920 
921 	/* round 3 */
922 	HH(a, b, c, d,  x_5, MD5_SHIFT_31, MD5_CONST_e(32)); /* 33 */
923 	HH(d, a, b, c,  x_8, MD5_SHIFT_32, MD5_CONST_o(33)); /* 34 */
924 	HH(c, d, a, b, x_11, MD5_SHIFT_33, MD5_CONST_e(34)); /* 35 */
925 	HH(b, c, d, a, x_14, MD5_SHIFT_34, MD5_CONST_o(35)); /* 36 */
926 	HH(a, b, c, d,  x_1, MD5_SHIFT_31, MD5_CONST_e(36)); /* 37 */
927 	HH(d, a, b, c,  x_4, MD5_SHIFT_32, MD5_CONST_o(37)); /* 38 */
928 	HH(c, d, a, b,  x_7, MD5_SHIFT_33, MD5_CONST_e(38)); /* 39 */
929 	HH(b, c, d, a, x_10, MD5_SHIFT_34, MD5_CONST_o(39)); /* 40 */
930 	HH(a, b, c, d, x_13, MD5_SHIFT_31, MD5_CONST_e(40)); /* 41 */
931 	HH(d, a, b, c,  x_0, MD5_SHIFT_32, MD5_CONST_o(41)); /* 42 */
932 	HH(c, d, a, b,  x_3, MD5_SHIFT_33, MD5_CONST_e(42)); /* 43 */
933 	HH(b, c, d, a,  x_6, MD5_SHIFT_34, MD5_CONST_o(43)); /* 44 */
934 	HH(a, b, c, d,  x_9, MD5_SHIFT_31, MD5_CONST_e(44)); /* 45 */
935 	HH(d, a, b, c, x_12, MD5_SHIFT_32, MD5_CONST_o(45)); /* 46 */
936 	HH(c, d, a, b, x_15, MD5_SHIFT_33, MD5_CONST_e(46)); /* 47 */
937 	HH(b, c, d, a,  x_2, MD5_SHIFT_34, MD5_CONST_o(47)); /* 48 */
938 
939 	/* round 4 */
940 	II(a, b, c, d,  x_0, MD5_SHIFT_41, MD5_CONST_e(48)); /* 49 */
941 	II(d, a, b, c,  x_7, MD5_SHIFT_42, MD5_CONST_o(49)); /* 50 */
942 	II(c, d, a, b, x_14, MD5_SHIFT_43, MD5_CONST_e(50)); /* 51 */
943 	II(b, c, d, a,  x_5, MD5_SHIFT_44, MD5_CONST_o(51)); /* 52 */
944 	II(a, b, c, d, x_12, MD5_SHIFT_41, MD5_CONST_e(52)); /* 53 */
945 	II(d, a, b, c,  x_3, MD5_SHIFT_42, MD5_CONST_o(53)); /* 54 */
946 	II(c, d, a, b, x_10, MD5_SHIFT_43, MD5_CONST_e(54)); /* 55 */
947 	II(b, c, d, a,  x_1, MD5_SHIFT_44, MD5_CONST_o(55)); /* 56 */
948 	II(a, b, c, d,  x_8, MD5_SHIFT_41, MD5_CONST_e(56)); /* 57 */
949 	II(d, a, b, c, x_15, MD5_SHIFT_42, MD5_CONST_o(57)); /* 58 */
950 	II(c, d, a, b,  x_6, MD5_SHIFT_43, MD5_CONST_e(58)); /* 59 */
951 	II(b, c, d, a, x_13, MD5_SHIFT_44, MD5_CONST_o(59)); /* 60 */
952 	II(a, b, c, d,  x_4, MD5_SHIFT_41, MD5_CONST_e(60)); /* 61 */
953 	II(d, a, b, c, x_11, MD5_SHIFT_42, MD5_CONST_o(61)); /* 62 */
954 	II(c, d, a, b,  x_2, MD5_SHIFT_43, MD5_CONST_e(62)); /* 63 */
955 	II(b, c, d, a,  x_9, MD5_SHIFT_44, MD5_CONST_o(63)); /* 64 */
956 
957 	ctx->state[0] += a;
958 	ctx->state[1] += b;
959 	ctx->state[2] += c;
960 	ctx->state[3] += d;
961 
962 	/*
963 	 * zeroize sensitive information -- compiler will optimize
964 	 * this out if everything is kept in registers
965 	 */
966 
967 	x_0 = x_1  = x_2  = x_3  = x_4  = x_5  = x_6  = x_7 = x_8 = 0;
968 	x_9 = x_10 = x_11 = x_12 = x_13 = x_14 = x_15 = 0;
969 }
970 
971 /*
972  * devpro compiler optimization:
973  *
974  * the compiler can generate better code if it knows that `input' and
975  * `output' do not point to overlapping memory.  there is no portable
976  * way to tell the compiler this, but the devpro compiler recognizes the
977  * `_Restrict' keyword to indicate this condition.  use it if possible.
978  */
979 
980 #if defined(__RESTRICT) && !defined(__GNUC__)
981 #define	restrict	_Restrict
982 #else
983 #define	restrict	/* nothing */
984 #endif
985 
986 /*
987  * Encode()
988  *
989  * purpose: to convert an array of 32-bit words into little-endian byte order
990  *   input: uint8_t *	: place to store the converted little endian numbers
991  *	    uint32_t *	: place to get numbers to convert from
992  *          size_t	: the length of the input in bytes
993  *  output: void
994  */
995 
996 static void
997 Encode(uint8_t *restrict output, uint32_t *restrict input, size_t input_len)
998 {
999 	size_t		i, j;
1000 
1001 	for (i = 0, j = 0; j < input_len; i++, j += sizeof (uint32_t)) {
1002 
1003 #ifdef _LITTLE_ENDIAN
1004 
1005 #ifdef _MD5_CHECK_ALIGNMENT
1006 		if ((uintptr_t)output & 0x3)	/* Not 4-byte aligned */
1007 			bcopy(input + i, output + j, 4);
1008 		else *(uint32_t *)(output + j) = input[i];
1009 #else
1010 		*(uint32_t *)(output + j) = input[i];
1011 #endif /* _MD5_CHECK_ALIGNMENT */
1012 
1013 #else	/* big endian -- will work on little endian, but slowly */
1014 
1015 		output[j] = input[i] & 0xff;
1016 		output[j + 1] = (input[i] >> 8)  & 0xff;
1017 		output[j + 2] = (input[i] >> 16) & 0xff;
1018 		output[j + 3] = (input[i] >> 24) & 0xff;
1019 #endif
1020 	}
1021 }
1022 
1023 #if	defined(_KERNEL) && !defined(_BOOT)
1024 
1025 /*
1026  * KCF software provider control entry points.
1027  */
1028 /* ARGSUSED */
1029 static void
1030 md5_provider_status(crypto_provider_handle_t provider, uint_t *status)
1031 {
1032 	*status = CRYPTO_PROVIDER_READY;
1033 }
1034 
1035 /*
1036  * KCF software provider digest entry points.
1037  */
1038 
1039 static int
1040 md5_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1041     crypto_req_handle_t req)
1042 {
1043 	if (mechanism->cm_type != MD5_MECH_INFO_TYPE)
1044 		return (CRYPTO_MECHANISM_INVALID);
1045 
1046 	/*
1047 	 * Allocate and initialize MD5 context.
1048 	 */
1049 	ctx->cc_provider_private = kmem_alloc(sizeof (md5_ctx_t),
1050 	    crypto_kmflag(req));
1051 	if (ctx->cc_provider_private == NULL)
1052 		return (CRYPTO_HOST_MEMORY);
1053 
1054 	PROV_MD5_CTX(ctx)->mc_mech_type = MD5_MECH_INFO_TYPE;
1055 	MD5Init(&PROV_MD5_CTX(ctx)->mc_md5_ctx);
1056 
1057 	return (CRYPTO_SUCCESS);
1058 }
1059 
1060 /*
1061  * Helper MD5 digest update function for uio data.
1062  */
1063 static int
1064 md5_digest_update_uio(MD5_CTX *md5_ctx, crypto_data_t *data)
1065 {
1066 	off_t offset = data->cd_offset;
1067 	size_t length = data->cd_length;
1068 	uint_t vec_idx;
1069 	size_t cur_len;
1070 
1071 	/* we support only kernel buffer */
1072 	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
1073 		return (CRYPTO_ARGUMENTS_BAD);
1074 
1075 	/*
1076 	 * Jump to the first iovec containing data to be
1077 	 * digested.
1078 	 */
1079 	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
1080 	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
1081 	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len);
1082 	if (vec_idx == data->cd_uio->uio_iovcnt) {
1083 		/*
1084 		 * The caller specified an offset that is larger than the
1085 		 * total size of the buffers it provided.
1086 		 */
1087 		return (CRYPTO_DATA_LEN_RANGE);
1088 	}
1089 
1090 	/*
1091 	 * Now do the digesting on the iovecs.
1092 	 */
1093 	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
1094 		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
1095 		    offset, length);
1096 
1097 		MD5Update(md5_ctx, data->cd_uio->uio_iov[vec_idx].iov_base +
1098 		    offset, cur_len);
1099 
1100 		length -= cur_len;
1101 		vec_idx++;
1102 		offset = 0;
1103 	}
1104 
1105 	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
1106 		/*
1107 		 * The end of the specified iovec's was reached but
1108 		 * the length requested could not be processed, i.e.,
1109 		 * the caller requested to digest more data than it provided.
1110 		 */
1111 		return (CRYPTO_DATA_LEN_RANGE);
1112 	}
1113 
1114 	return (CRYPTO_SUCCESS);
1115 }
1116 
1117 /*
1118  * Helper MD5 digest final function for uio data.
1119  * digest_len is the length of the desired digest. If digest_len
1120  * is smaller than the default MD5 digest length, the caller
1121  * must pass a scratch buffer, digest_scratch, which must
1122  * be at least MD5_DIGEST_LENGTH bytes.
1123  */
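/*
 * Note: the full-length callers in this file (e.g. md5_digest()) pass
 * digest_len == MD5_DIGEST_LENGTH and a NULL digest_scratch; that is safe
 * because the scratch buffer is only used when a short digest is requested.
 */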
1124 static int
1125 md5_digest_final_uio(MD5_CTX *md5_ctx, crypto_data_t *digest,
1126     ulong_t digest_len, uchar_t *digest_scratch)
1127 {
1128 	off_t offset = digest->cd_offset;
1129 	uint_t vec_idx;
1130 
1131 	/* we support only kernel buffer */
1132 	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
1133 		return (CRYPTO_ARGUMENTS_BAD);
1134 
1135 	/*
1136 	 * Jump to the first iovec containing ptr to the digest to
1137 	 * be returned.
1138 	 */
1139 	for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
1140 	    vec_idx < digest->cd_uio->uio_iovcnt;
1141 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len);
1142 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
1143 		/*
1144 		 * The caller specified an offset that is
1145 		 * larger than the total size of the buffers
1146 		 * it provided.
1147 		 */
1148 		return (CRYPTO_DATA_LEN_RANGE);
1149 	}
1150 
1151 	if (offset + digest_len <=
1152 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
1153 		/*
1154 		 * The computed MD5 digest will fit in the current
1155 		 * iovec.
1156 		 */
1157 		if (digest_len != MD5_DIGEST_LENGTH) {
1158 			/*
1159 			 * The caller requested a short digest. Digest
1160 			 * into a scratch buffer and return to
1161 			 * the user only what was requested.
1162 			 */
1163 			MD5Final(digest_scratch, md5_ctx);
1164 			bcopy(digest_scratch, (uchar_t *)digest->
1165 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1166 			    digest_len);
1167 		} else {
1168 			MD5Final((uchar_t *)digest->
1169 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1170 			    md5_ctx);
1171 		}
1172 	} else {
1173 		/*
1174 		 * The computed digest will be crossing one or more iovec's.
1175 		 * This is bad performance-wise but we need to support it.
1176 		 * Allocate a small scratch buffer on the stack and
1177 		 * copy it piecemeal to the specified digest iovec's.
1178 		 */
1179 		uchar_t digest_tmp[MD5_DIGEST_LENGTH];
1180 		off_t scratch_offset = 0;
1181 		size_t length = digest_len;
1182 		size_t cur_len;
1183 
1184 		MD5Final(digest_tmp, md5_ctx);
1185 
1186 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
1187 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
1188 			    offset, length);
1189 			bcopy(digest_tmp + scratch_offset,
1190 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
1191 			    cur_len);
1192 
1193 			length -= cur_len;
1194 			vec_idx++;
1195 			scratch_offset += cur_len;
1196 			offset = 0;
1197 		}
1198 
1199 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
1200 			/*
1201 			 * the length requested could not be processed, i.e.,
1202 			 * the caller requested to digest more data than it
1203 			 * The caller requested to digest more data than it
1204 			 * provided.
1205 			 */
1206 			return (CRYPTO_DATA_LEN_RANGE);
1207 		}
1208 	}
1209 
1210 	return (CRYPTO_SUCCESS);
1211 }
1212 
1213 /*
1214  * Helper MD5 digest update for mblk's.
1215  */
1216 static int
1217 md5_digest_update_mblk(MD5_CTX *md5_ctx, crypto_data_t *data)
1218 {
1219 	off_t offset = data->cd_offset;
1220 	size_t length = data->cd_length;
1221 	mblk_t *mp;
1222 	size_t cur_len;
1223 
1224 	/*
1225 	 * Jump to the first mblk_t containing data to be digested.
1226 	 */
1227 	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
1228 	    offset -= MBLKL(mp), mp = mp->b_cont);
1229 	if (mp == NULL) {
1230 		/*
1231 		 * The caller specified an offset that is larger than the
1232 		 * total size of the buffers it provided.
1233 		 */
1234 		return (CRYPTO_DATA_LEN_RANGE);
1235 	}
1236 
1237 	/*
1238 	 * Now do the digesting on the mblk chain.
1239 	 */
1240 	while (mp != NULL && length > 0) {
1241 		cur_len = MIN(MBLKL(mp) - offset, length);
1242 		MD5Update(md5_ctx, mp->b_rptr + offset, cur_len);
1243 		length -= cur_len;
1244 		offset = 0;
1245 		mp = mp->b_cont;
1246 	}
1247 
1248 	if (mp == NULL && length > 0) {
1249 		/*
1250 		 * could not be processed, i.e., the caller requested
1251 		 * could not be processed, i.e. The caller requested
1252 		 * to digest more data than it provided.
1253 		 */
1254 		return (CRYPTO_DATA_LEN_RANGE);
1255 	}
1256 
1257 	return (CRYPTO_SUCCESS);
1258 }
1259 
1260 /*
1261  * Helper MD5 digest final for mblk's.
1262  * digest_len is the length of the desired digest. If digest_len
1263  * is smaller than the default MD5 digest length, the caller
1264  * must pass a scratch buffer, digest_scratch, which must
1265  * be at least MD5_DIGEST_LENGTH bytes.
1266  */
1267 static int
1268 md5_digest_final_mblk(MD5_CTX *md5_ctx, crypto_data_t *digest,
1269     ulong_t digest_len, uchar_t *digest_scratch)
1270 {
1271 	off_t offset = digest->cd_offset;
1272 	mblk_t *mp;
1273 
1274 	/*
1275 	 * Jump to the first mblk_t that will be used to store the digest.
1276 	 */
1277 	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
1278 	    offset -= MBLKL(mp), mp = mp->b_cont);
1279 	if (mp == NULL) {
1280 		/*
1281 		 * The caller specified an offset that is larger than the
1282 		 * total size of the buffers it provided.
1283 		 */
1284 		return (CRYPTO_DATA_LEN_RANGE);
1285 	}
1286 
1287 	if (offset + digest_len <= MBLKL(mp)) {
1288 		/*
1289 		 * The computed MD5 digest will fit in the current mblk.
1290 		 * Do the MD5Final() in-place.
1291 		 */
1292 		if (digest_len != MD5_DIGEST_LENGTH) {
1293 			/*
1294 			 * The caller requested a short digest. Digest
1295 			 * into a scratch buffer and return to
1296 			 * the user only what was requested.
1297 			 */
1298 			MD5Final(digest_scratch, md5_ctx);
1299 			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
1300 		} else {
1301 			MD5Final(mp->b_rptr + offset, md5_ctx);
1302 		}
1303 	} else {
1304 		/*
1305 		 * The computed digest will be crossing one or more mblk's.
1306 		 * This is bad performance-wise but we need to support it.
1307 		 * Allocate a small scratch buffer on the stack and
1308 		 * copy it piecemeal to the specified digest mblk's.
1309 		 */
1310 		uchar_t digest_tmp[MD5_DIGEST_LENGTH];
1311 		off_t scratch_offset = 0;
1312 		size_t length = digest_len;
1313 		size_t cur_len;
1314 
1315 		MD5Final(digest_tmp, md5_ctx);
1316 
1317 		while (mp != NULL && length > 0) {
1318 			cur_len = MIN(MBLKL(mp) - offset, length);
1319 			bcopy(digest_tmp + scratch_offset,
1320 			    mp->b_rptr + offset, cur_len);
1321 
1322 			length -= cur_len;
1323 			mp = mp->b_cont;
1324 			scratch_offset += cur_len;
1325 			offset = 0;
1326 		}
1327 
1328 		if (mp == NULL && length > 0) {
1329 			/*
1330 			 * the length requested could not be processed, i.e.,
1331 			 * the caller requested to digest more data than it
1332 			 * The caller requested to digest more data than it
1333 			 * provided.
1334 			 */
1335 			return (CRYPTO_DATA_LEN_RANGE);
1336 		}
1337 	}
1338 
1339 	return (CRYPTO_SUCCESS);
1340 }
1341 
1342 /* ARGSUSED */
1343 static int
1344 md5_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
1345     crypto_req_handle_t req)
1346 {
1347 	int ret = CRYPTO_SUCCESS;
1348 
1349 	ASSERT(ctx->cc_provider_private != NULL);
1350 
1351 	/*
1352 	 * If the output buffer is too small, just return the length needed
1353 	 * to store the output; do not destroy the context in that case.
1354 	 */
1355 	if ((digest->cd_length == 0) ||
1356 	    (digest->cd_length < MD5_DIGEST_LENGTH)) {
1357 		digest->cd_length = MD5_DIGEST_LENGTH;
1358 		return (CRYPTO_BUFFER_TOO_SMALL);
1359 	}
1360 
1361 	/*
1362 	 * Do the MD5 update on the specified input data.
1363 	 */
1364 	switch (data->cd_format) {
1365 	case CRYPTO_DATA_RAW:
1366 		MD5Update(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1367 		    data->cd_raw.iov_base + data->cd_offset,
1368 		    data->cd_length);
1369 		break;
1370 	case CRYPTO_DATA_UIO:
1371 		ret = md5_digest_update_uio(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1372 		    data);
1373 		break;
1374 	case CRYPTO_DATA_MBLK:
1375 		ret = md5_digest_update_mblk(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1376 		    data);
1377 		break;
1378 	default:
1379 		ret = CRYPTO_ARGUMENTS_BAD;
1380 	}
1381 
1382 	if (ret != CRYPTO_SUCCESS) {
1383 		/* the update failed, free context and bail */
1384 		kmem_free(ctx->cc_provider_private, sizeof (md5_ctx_t));
1385 		ctx->cc_provider_private = NULL;
1386 		digest->cd_length = 0;
1387 		return (ret);
1388 	}
1389 
1390 	/*
1391 	 * Do an MD5 final; this must be done separately since the digest
1392 	 * type can differ from the input data type.
1393 	 */
1394 	switch (digest->cd_format) {
1395 	case CRYPTO_DATA_RAW:
1396 		MD5Final((unsigned char *)digest->cd_raw.iov_base +
1397 		    digest->cd_offset, &PROV_MD5_CTX(ctx)->mc_md5_ctx);
1398 		break;
1399 	case CRYPTO_DATA_UIO:
1400 		ret = md5_digest_final_uio(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1401 		    digest, MD5_DIGEST_LENGTH, NULL);
1402 		break;
1403 	case CRYPTO_DATA_MBLK:
1404 		ret = md5_digest_final_mblk(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1405 		    digest, MD5_DIGEST_LENGTH, NULL);
1406 		break;
1407 	default:
1408 		ret = CRYPTO_ARGUMENTS_BAD;
1409 	}
1410 
1411 	/* all done, free context and return */
1412 
1413 	if (ret == CRYPTO_SUCCESS) {
1414 		digest->cd_length = MD5_DIGEST_LENGTH;
1415 	} else {
1416 		digest->cd_length = 0;
1417 	}
1418 
1419 	kmem_free(ctx->cc_provider_private, sizeof (md5_ctx_t));
1420 	ctx->cc_provider_private = NULL;
1421 	return (ret);
1422 }
1423 
1424 /* ARGSUSED */
1425 static int
1426 md5_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
1427     crypto_req_handle_t req)
1428 {
1429 	int ret = CRYPTO_SUCCESS;
1430 
1431 	ASSERT(ctx->cc_provider_private != NULL);
1432 
1433 	/*
1434 	 * Do the MD5 update on the specified input data.
1435 	 */
1436 	switch (data->cd_format) {
1437 	case CRYPTO_DATA_RAW:
1438 		MD5Update(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1439 		    data->cd_raw.iov_base + data->cd_offset,
1440 		    data->cd_length);
1441 		break;
1442 	case CRYPTO_DATA_UIO:
1443 		ret = md5_digest_update_uio(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1444 		    data);
1445 		break;
1446 	case CRYPTO_DATA_MBLK:
1447 		ret = md5_digest_update_mblk(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1448 		    data);
1449 		break;
1450 	default:
1451 		ret = CRYPTO_ARGUMENTS_BAD;
1452 	}
1453 
1454 	return (ret);
1455 }
1456 
1457 /* ARGSUSED */
1458 static int
1459 md5_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
1460     crypto_req_handle_t req)
1461 {
1462 	int ret = CRYPTO_SUCCESS;
1463 
1464 	ASSERT(ctx->cc_provider_private != NULL);
1465 
1466 	/*
1467 	 * If the output buffer is too small, just return the length needed
1468 	 * to store the output; do not destroy the context in that case.
1469 	 */
1470 	if ((digest->cd_length == 0) ||
1471 	    (digest->cd_length < MD5_DIGEST_LENGTH)) {
1472 		digest->cd_length = MD5_DIGEST_LENGTH;
1473 		return (CRYPTO_BUFFER_TOO_SMALL);
1474 	}
1475 
1476 	/*
1477 	 * Do an MD5 final.
1478 	 */
1479 	switch (digest->cd_format) {
1480 	case CRYPTO_DATA_RAW:
1481 		MD5Final((unsigned char *)digest->cd_raw.iov_base +
1482 		    digest->cd_offset, &PROV_MD5_CTX(ctx)->mc_md5_ctx);
1483 		break;
1484 	case CRYPTO_DATA_UIO:
1485 		ret = md5_digest_final_uio(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1486 		    digest, MD5_DIGEST_LENGTH, NULL);
1487 		break;
1488 	case CRYPTO_DATA_MBLK:
1489 		ret = md5_digest_final_mblk(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1490 		    digest, MD5_DIGEST_LENGTH, NULL);
1491 		break;
1492 	default:
1493 		ret = CRYPTO_ARGUMENTS_BAD;
1494 	}
1495 
1496 	/* all done, free context and return */
1497 
1498 	if (ret == CRYPTO_SUCCESS) {
1499 		digest->cd_length = MD5_DIGEST_LENGTH;
1500 	} else {
1501 		digest->cd_length = 0;
1502 	}
1503 
1504 	kmem_free(ctx->cc_provider_private, sizeof (md5_ctx_t));
1505 	ctx->cc_provider_private = NULL;
1506 
1507 	return (ret);
1508 }
1509 
1510 /* ARGSUSED */
1511 static int
1512 md5_digest_atomic(crypto_provider_handle_t provider,
1513     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1514     crypto_data_t *data, crypto_data_t *digest,
1515     crypto_req_handle_t req)
1516 {
1517 	int ret = CRYPTO_SUCCESS;
1518 	MD5_CTX md5_ctx;
1519 
1520 	if (mechanism->cm_type != MD5_MECH_INFO_TYPE)
1521 		return (CRYPTO_MECHANISM_INVALID);
1522 
1523 	/*
1524 	 * Do the MD5 init.
1525 	 */
1526 	MD5Init(&md5_ctx);
1527 
1528 	/*
1529 	 * Do the MD5 update on the specified input data.
1530 	 */
1531 	switch (data->cd_format) {
1532 	case CRYPTO_DATA_RAW:
1533 		MD5Update(&md5_ctx, data->cd_raw.iov_base + data->cd_offset,
1534 		    data->cd_length);
1535 		break;
1536 	case CRYPTO_DATA_UIO:
1537 		ret = md5_digest_update_uio(&md5_ctx, data);
1538 		break;
1539 	case CRYPTO_DATA_MBLK:
1540 		ret = md5_digest_update_mblk(&md5_ctx, data);
1541 		break;
1542 	default:
1543 		ret = CRYPTO_ARGUMENTS_BAD;
1544 	}
1545 
1546 	if (ret != CRYPTO_SUCCESS) {
1547 		/* the update failed, bail */
1548 		digest->cd_length = 0;
1549 		return (ret);
1550 	}
1551 
1552 	/*
1553 	 * Do an MD5 final; this must be done separately since the digest
1554 	 * type can differ from the input data type.
1555 	 */
1556 	switch (digest->cd_format) {
1557 	case CRYPTO_DATA_RAW:
1558 		MD5Final((unsigned char *)digest->cd_raw.iov_base +
1559 		    digest->cd_offset, &md5_ctx);
1560 		break;
1561 	case CRYPTO_DATA_UIO:
1562 		ret = md5_digest_final_uio(&md5_ctx, digest,
1563 		    MD5_DIGEST_LENGTH, NULL);
1564 		break;
1565 	case CRYPTO_DATA_MBLK:
1566 		ret = md5_digest_final_mblk(&md5_ctx, digest,
1567 		    MD5_DIGEST_LENGTH, NULL);
1568 		break;
1569 	default:
1570 		ret = CRYPTO_ARGUMENTS_BAD;
1571 	}
1572 
1573 	if (ret == CRYPTO_SUCCESS) {
1574 		digest->cd_length = MD5_DIGEST_LENGTH;
1575 	} else {
1576 		digest->cd_length = 0;
1577 	}
1578 
1579 	return (ret);
1580 }
1581 
1582 /*
1583  * KCF software provider mac entry points.
1584  *
1585  * MD5 HMAC is: MD5(key XOR opad, MD5(key XOR ipad, text))
1586  *
1587  * Init:
1588  * The initialization routine initializes what we denote
1589  * as the inner and outer contexts by doing
1590  * - for inner context: MD5(key XOR ipad)
1591  * - for outer context: MD5(key XOR opad)
1592  *
1593  * Update:
1594  * Each subsequent MD5 HMAC update will result in an
1595  * update of the inner context with the specified data.
1596  *
1597  * Final:
1598  * The MD5 HMAC final will do a MD5 final operation on the
1599  * inner context, and the resulting digest will be used
1600  * as the data for an update on the outer context. Last
1601  * but not least, an MD5 final on the outer context will
1602  * be performed to obtain the MD5 HMAC digest to return
1603  * to the user.
1604  */
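
/*
 * Key handling (see md5_mac_init() and md5_mac_init_ctx() below): a key
 * longer than the 64-byte MD5 block is first reduced by digesting it with
 * MD5, and the (possibly reduced) key is then zero-padded to 64 bytes
 * before being XORed with ipad and opad, as prescribed by RFC 2104.
 */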
1605 
1606 /*
1607  * Initialize an MD5-HMAC context.
1608  */
1609 static void
1610 md5_mac_init_ctx(md5_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
1611 {
1612 	uint32_t ipad[MD5_HMAC_INTS_PER_BLOCK];
1613 	uint32_t opad[MD5_HMAC_INTS_PER_BLOCK];
1614 	uint_t i;
1615 
1616 	bzero(ipad, MD5_HMAC_BLOCK_SIZE);
1617 	bzero(opad, MD5_HMAC_BLOCK_SIZE);
1618 
1619 	bcopy(keyval, ipad, length_in_bytes);
1620 	bcopy(keyval, opad, length_in_bytes);
1621 
1622 	/* XOR key with ipad (0x36) and opad (0x5c) */
1623 	for (i = 0; i < MD5_HMAC_INTS_PER_BLOCK; i++) {
1624 		ipad[i] ^= 0x36363636;
1625 		opad[i] ^= 0x5c5c5c5c;
1626 	}
1627 
1628 	/* perform MD5 on ipad */
1629 	MD5Init(&ctx->hc_icontext);
1630 	MD5Update(&ctx->hc_icontext, ipad, MD5_HMAC_BLOCK_SIZE);
1631 
1632 	/* perform MD5 on opad */
1633 	MD5Init(&ctx->hc_ocontext);
1634 	MD5Update(&ctx->hc_ocontext, opad, MD5_HMAC_BLOCK_SIZE);
1635 }
1636 
1637 /*
1638  * Initializes a multi-part MAC operation.
1639  */
1640 static int
1641 md5_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1642     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
1643     crypto_req_handle_t req)
1644 {
1645 	int ret = CRYPTO_SUCCESS;
1646 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1647 
1648 	if (mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE &&
1649 	    mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE)
1650 		return (CRYPTO_MECHANISM_INVALID);
1651 
1652 	/* Add support for key by attributes (RFE 4706552) */
1653 	if (key->ck_format != CRYPTO_KEY_RAW)
1654 		return (CRYPTO_ARGUMENTS_BAD);
1655 
1656 	ctx->cc_provider_private = kmem_alloc(sizeof (md5_hmac_ctx_t),
1657 	    crypto_kmflag(req));
1658 	if (ctx->cc_provider_private == NULL)
1659 		return (CRYPTO_HOST_MEMORY);
1660 
1661 	if (ctx_template != NULL) {
1662 		/* reuse context template */
1663 		bcopy(ctx_template, PROV_MD5_HMAC_CTX(ctx),
1664 		    sizeof (md5_hmac_ctx_t));
1665 	} else {
1666 		/* no context template, compute context */
1667 		if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
1668 			uchar_t digested_key[MD5_DIGEST_LENGTH];
1669 			md5_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
1670 
1671 			/*
1672 			 * Hash the passed-in key to get a smaller key.
1673 			 * The inner context is used since it hasn't been
1674 			 * initialized yet.
1675 			 */
1676 			PROV_MD5_DIGEST_KEY(&hmac_ctx->hc_icontext,
1677 			    key->ck_data, keylen_in_bytes, digested_key);
1678 			md5_mac_init_ctx(PROV_MD5_HMAC_CTX(ctx),
1679 			    digested_key, MD5_DIGEST_LENGTH);
1680 		} else {
1681 			md5_mac_init_ctx(PROV_MD5_HMAC_CTX(ctx),
1682 			    key->ck_data, keylen_in_bytes);
1683 		}
1684 	}
1685 
1686 	/* Get the mechanism parameters, if applicable. */
1687 	PROV_MD5_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
1688 	if (mechanism->cm_type == MD5_HMAC_GEN_MECH_INFO_TYPE) {
1689 		/* only read the digest length if the parameter is valid */
1690 		if (mechanism->cm_param == NULL ||
1691 		    mechanism->cm_param_len != sizeof (ulong_t))
1692 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1693 		else
1694 			PROV_MD5_GET_DIGEST_LEN(mechanism,
1695 			    PROV_MD5_HMAC_CTX(ctx)->hc_digest_len);
1696 		if (ret == CRYPTO_SUCCESS &&
1697 		    PROV_MD5_HMAC_CTX(ctx)->hc_digest_len > MD5_DIGEST_LENGTH)
1698 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1699 	}
1700 
1701 	if (ret != CRYPTO_SUCCESS) {
1702 		bzero(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
1703 		kmem_free(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
1704 		ctx->cc_provider_private = NULL;
1705 	}
1706 
1707 	return (ret);
1708 }
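
/*
 * Illustrative sketch only -- not referenced by the provider.  It
 * restates the RFC 2104 key pre-processing done in md5_mac_init()
 * above using the raw MD5 API: a key longer than one MD5 block is
 * first digested down to MD5_DIGEST_LENGTH bytes (the provider uses
 * the PROV_MD5_DIGEST_KEY macro for this), while shorter keys are
 * passed through unchanged and zero-padded by md5_mac_init_ctx().
 */
static void
md5_hmac_key_sketch(md5_hmac_ctx_t *hmac_ctx, uchar_t *key,
    uint_t keylen_in_bytes)
{
	uchar_t digested_key[MD5_DIGEST_LENGTH];
	MD5_CTX md5_ctx;

	if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
		/* key is longer than one block, use MD5(key) instead */
		MD5Init(&md5_ctx);
		MD5Update(&md5_ctx, key, keylen_in_bytes);
		MD5Final(digested_key, &md5_ctx);
		md5_mac_init_ctx(hmac_ctx, digested_key, MD5_DIGEST_LENGTH);
	} else {
		md5_mac_init_ctx(hmac_ctx, key, keylen_in_bytes);
	}
}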
1709 
1710 
1711 /* ARGSUSED */
1712 static int
1713 md5_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
1714 {
1715 	int ret = CRYPTO_SUCCESS;
1716 
1717 	ASSERT(ctx->cc_provider_private != NULL);
1718 
1719 	/*
1720 	 * Do an MD5 update of the inner context using the specified
1721 	 * data.
1722 	 */
1723 	switch (data->cd_format) {
1724 	case CRYPTO_DATA_RAW:
1725 		MD5Update(&PROV_MD5_HMAC_CTX(ctx)->hc_icontext,
1726 		    data->cd_raw.iov_base + data->cd_offset,
1727 		    data->cd_length);
1728 		break;
1729 	case CRYPTO_DATA_UIO:
1730 		ret = md5_digest_update_uio(
1731 		    &PROV_MD5_HMAC_CTX(ctx)->hc_icontext, data);
1732 		break;
1733 	case CRYPTO_DATA_MBLK:
1734 		ret = md5_digest_update_mblk(
1735 		    &PROV_MD5_HMAC_CTX(ctx)->hc_icontext, data);
1736 		break;
1737 	default:
1738 		ret = CRYPTO_ARGUMENTS_BAD;
1739 	}
1740 
1741 	return (ret);
1742 }
1743 
1744 /* ARGSUSED */
1745 static int
1746 md5_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
1747 {
1748 	int ret = CRYPTO_SUCCESS;
1749 	uchar_t digest[MD5_DIGEST_LENGTH];
1750 	uint32_t digest_len = MD5_DIGEST_LENGTH;
1751 
1752 	ASSERT(ctx->cc_provider_private != NULL);
1753 
1754 	if (PROV_MD5_HMAC_CTX(ctx)->hc_mech_type == MD5_HMAC_GEN_MECH_INFO_TYPE)
1755 	    digest_len = PROV_MD5_HMAC_CTX(ctx)->hc_digest_len;
1756 
1757 	/*
1758 	 * If the output buffer is too small, just return the length
1759 	 * needed to store the digest and leave the context intact.
1760 	 */
1761 	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
1762 		mac->cd_length = digest_len;
1763 		return (CRYPTO_BUFFER_TOO_SMALL);
1764 	}
1765 
1766 	/*
1767 	 * Do an MD5 final on the inner context.
1768 	 */
1769 	MD5Final(digest, &PROV_MD5_HMAC_CTX(ctx)->hc_icontext);
1770 
1771 	/*
1772 	 * Do an MD5 update on the outer context, feeding the inner
1773 	 * digest as data.
1774 	 */
1775 	MD5Update(&PROV_MD5_HMAC_CTX(ctx)->hc_ocontext, digest,
1776 	    MD5_DIGEST_LENGTH);
1777 
1778 	/*
1779 	 * Do an MD5 final on the outer context, storing the computed
1780 	 * digest in the user's buffer.
1781 	 */
1782 	switch (mac->cd_format) {
1783 	case CRYPTO_DATA_RAW:
1784 		if (digest_len != MD5_DIGEST_LENGTH) {
1785 			/*
1786 			 * The caller requested a short digest. Digest
1787 			 * into a scratch buffer and return to
1788 			 * the user only what was requested.
1789 			 */
1790 			MD5Final(digest,
1791 			    &PROV_MD5_HMAC_CTX(ctx)->hc_ocontext);
1792 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1793 			    mac->cd_offset, digest_len);
1794 		} else {
1795 			MD5Final((unsigned char *)mac->cd_raw.iov_base +
1796 			    mac->cd_offset,
1797 			    &PROV_MD5_HMAC_CTX(ctx)->hc_ocontext);
1798 		}
1799 		break;
1800 	case CRYPTO_DATA_UIO:
1801 		ret = md5_digest_final_uio(
1802 		    &PROV_MD5_HMAC_CTX(ctx)->hc_ocontext, mac,
1803 		    digest_len, digest);
1804 		break;
1805 	case CRYPTO_DATA_MBLK:
1806 		ret = md5_digest_final_mblk(
1807 		    &PROV_MD5_HMAC_CTX(ctx)->hc_ocontext, mac,
1808 		    digest_len, digest);
1809 		break;
1810 	default:
1811 		ret = CRYPTO_ARGUMENTS_BAD;
1812 	}
1813 
1814 	if (ret == CRYPTO_SUCCESS) {
1815 		mac->cd_length = digest_len;
1816 	} else {
1817 		mac->cd_length = 0;
1818 	}
1819 
1820 	bzero(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
1821 	kmem_free(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
1822 	ctx->cc_provider_private = NULL;
1823 
1824 	return (ret);
1825 }
1826 
1827 #define	MD5_MAC_UPDATE(data, ctx, ret) {				\
1828 	switch (data->cd_format) {					\
1829 	case CRYPTO_DATA_RAW:						\
1830 		MD5Update(&(ctx).hc_icontext,				\
1831 		    data->cd_raw.iov_base + data->cd_offset,		\
1832 		    data->cd_length);					\
1833 		break;							\
1834 	case CRYPTO_DATA_UIO:						\
1835 		ret = md5_digest_update_uio(&(ctx).hc_icontext, data);	\
1836 		break;							\
1837 	case CRYPTO_DATA_MBLK:						\
1838 		ret = md5_digest_update_mblk(&(ctx).hc_icontext,	\
1839 		    data);						\
1840 		break;							\
1841 	default:							\
1842 		ret = CRYPTO_ARGUMENTS_BAD;				\
1843 	}								\
1844 }
1845 
1846 
1847 /* ARGSUSED */
1848 static int
1849 md5_mac_atomic(crypto_provider_handle_t provider,
1850     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1851     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1852     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1853 {
1854 	int ret = CRYPTO_SUCCESS;
1855 	uchar_t digest[MD5_DIGEST_LENGTH];
1856 	md5_hmac_ctx_t md5_hmac_ctx;
1857 	uint32_t digest_len = MD5_DIGEST_LENGTH;
1858 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1859 
1860 	if (mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE &&
1861 	    mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE)
1862 		return (CRYPTO_MECHANISM_INVALID);
1863 
1864 	/* Add support for key by attributes (RFE 4706552) */
1865 	if (key->ck_format != CRYPTO_KEY_RAW)
1866 		return (CRYPTO_ARGUMENTS_BAD);
1867 
1868 	if (ctx_template != NULL) {
1869 		/* reuse context template */
1870 		bcopy(ctx_template, &md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
1871 	} else {
1872 		/* no context template, compute context */
1873 		if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
1874 			/*
1875 			 * Hash the passed-in key to get a smaller key.
1876 			 * The inner context is used since it hasn't been
1877 			 * initialized yet.
1878 			 */
1879 			PROV_MD5_DIGEST_KEY(&md5_hmac_ctx.hc_icontext,
1880 			    key->ck_data, keylen_in_bytes, digest);
1881 			md5_mac_init_ctx(&md5_hmac_ctx, digest,
1882 			    MD5_DIGEST_LENGTH);
1883 		} else {
1884 			md5_mac_init_ctx(&md5_hmac_ctx, key->ck_data,
1885 			    keylen_in_bytes);
1886 		}
1887 	}
1888 
1889 	/*
1890 	 * Get the mechanism parameters, if applicable.
1891 	 */
1892 	if (mechanism->cm_type == MD5_HMAC_GEN_MECH_INFO_TYPE) {
1893 		if (mechanism->cm_param == NULL ||
1894 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1895 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1896 			goto bail;
1897 		}
1898 		PROV_MD5_GET_DIGEST_LEN(mechanism, digest_len);
1899 		if (digest_len > MD5_DIGEST_LENGTH) {
1900 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1901 			goto bail;
1902 		}
1903 	}
1904 
1905 	/* do an MD5 update of the inner context using the specified data */
1906 	MD5_MAC_UPDATE(data, md5_hmac_ctx, ret);
1907 	if (ret != CRYPTO_SUCCESS)
1908 		/* the update failed, zeroize the context and bail */
1909 		goto bail;
1910 
1911 	/* do an MD5 final on the inner context */
1912 	MD5Final(digest, &md5_hmac_ctx.hc_icontext);
1913 
1914 	/*
1915 	 * Do an MD5 update on the outer context, feeding the inner
1916 	 * digest as data.
1917 	 */
1918 	MD5Update(&md5_hmac_ctx.hc_ocontext, digest, MD5_DIGEST_LENGTH);
1919 
1920 	/*
1921 	 * Do an MD5 final on the outer context, storing the computed
1922 	 * digest in the users buffer.
1923 	 * digest in the user's buffer.
1924 	switch (mac->cd_format) {
1925 	case CRYPTO_DATA_RAW:
1926 		if (digest_len != MD5_DIGEST_LENGTH) {
1927 			/*
1928 			 * The caller requested a short digest. Digest
1929 			 * into a scratch buffer and return to
1930 			 * the user only what was requested.
1931 			 */
1932 			MD5Final(digest, &md5_hmac_ctx.hc_ocontext);
1933 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1934 			    mac->cd_offset, digest_len);
1935 		} else {
1936 			MD5Final((unsigned char *)mac->cd_raw.iov_base +
1937 			    mac->cd_offset, &md5_hmac_ctx.hc_ocontext);
1938 		}
1939 		break;
1940 	case CRYPTO_DATA_UIO:
1941 		ret = md5_digest_final_uio(&md5_hmac_ctx.hc_ocontext, mac,
1942 		    digest_len, digest);
1943 		break;
1944 	case CRYPTO_DATA_MBLK:
1945 		ret = md5_digest_final_mblk(&md5_hmac_ctx.hc_ocontext, mac,
1946 		    digest_len, digest);
1947 		break;
1948 	default:
1949 		ret = CRYPTO_ARGUMENTS_BAD;
1950 	}
1951 
1952 	if (ret == CRYPTO_SUCCESS) {
1953 		mac->cd_length = digest_len;
1954 	} else {
1955 		mac->cd_length = 0;
1956 	}
1957 	/* Extra paranoia: zeroizing the local context on the stack */
1958 	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
1959 
1960 	return (ret);
1961 bail:
1962 	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
1963 	mac->cd_length = 0;
1964 	return (ret);
1965 }
1966 
1967 /* ARGSUSED */
1968 static int
1969 md5_mac_verify_atomic(crypto_provider_handle_t provider,
1970     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1971     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1972     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1973 {
1974 	int ret = CRYPTO_SUCCESS;
1975 	uchar_t digest[MD5_DIGEST_LENGTH];
1976 	md5_hmac_ctx_t md5_hmac_ctx;
1977 	uint32_t digest_len = MD5_DIGEST_LENGTH;
1978 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1979 
1980 	if (mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE &&
1981 	    mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE)
1982 		return (CRYPTO_MECHANISM_INVALID);
1983 
1984 	/* Add support for key by attributes (RFE 4706552) */
1985 	if (key->ck_format != CRYPTO_KEY_RAW)
1986 		return (CRYPTO_ARGUMENTS_BAD);
1987 
1988 	if (ctx_template != NULL) {
1989 		/* reuse context template */
1990 		bcopy(ctx_template, &md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
1991 	} else {
1992 		/* no context template, compute context */
1993 		if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
1994 			/*
1995 			 * Hash the passed-in key to get a smaller key.
1996 			 * The inner context is used since it hasn't been
1997 			 * initialized yet.
1998 			 */
1999 			PROV_MD5_DIGEST_KEY(&md5_hmac_ctx.hc_icontext,
2000 			    key->ck_data, keylen_in_bytes, digest);
2001 			md5_mac_init_ctx(&md5_hmac_ctx, digest,
2002 			    MD5_DIGEST_LENGTH);
2003 		} else {
2004 			md5_mac_init_ctx(&md5_hmac_ctx, key->ck_data,
2005 			    keylen_in_bytes);
2006 		}
2007 	}
2008 
2009 	/*
2010 	 * Get the mechanism parameters, if applicable.
2011 	 */
2012 	if (mechanism->cm_type == MD5_HMAC_GEN_MECH_INFO_TYPE) {
2013 		if (mechanism->cm_param == NULL ||
2014 		    mechanism->cm_param_len != sizeof (ulong_t)) {
2015 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
2016 			goto bail;
2017 		}
2018 		PROV_MD5_GET_DIGEST_LEN(mechanism, digest_len);
2019 		if (digest_len > MD5_DIGEST_LENGTH) {
2020 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
2021 			goto bail;
2022 		}
2023 	}
2024 
2025 	if (mac->cd_length != digest_len) {
2026 		ret = CRYPTO_INVALID_MAC;
2027 		goto bail;
2028 	}
2029 
2030 	/* do an MD5 update of the inner context using the specified data */
2031 	MD5_MAC_UPDATE(data, md5_hmac_ctx, ret);
2032 	if (ret != CRYPTO_SUCCESS)
2033 		/* the update failed, zeroize the context and bail */
2034 		goto bail;
2035 
2036 	/* do an MD5 final on the inner context */
2037 	MD5Final(digest, &md5_hmac_ctx.hc_icontext);
2038 
2039 	/*
2040 	 * Do an MD5 update on the outer context, feeding the inner
2041 	 * digest as data.
2042 	 */
2043 	MD5Update(&md5_hmac_ctx.hc_ocontext, digest, MD5_DIGEST_LENGTH);
2044 
2045 	/*
2046 	 * Do an MD5 final on the outer context, storing the computed
2047 	 * digest in the local digest buffer.
2048 	 */
2049 	MD5Final(digest, &md5_hmac_ctx.hc_ocontext);
2050 
2051 	/*
2052 	 * Compare the computed digest against the expected digest passed
2053 	 * as argument.
2054 	 */
2055 	switch (mac->cd_format) {
2056 
2057 	case CRYPTO_DATA_RAW:
2058 		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
2059 		    mac->cd_offset, digest_len) != 0)
2060 			ret = CRYPTO_INVALID_MAC;
2061 		break;
2062 
2063 	case CRYPTO_DATA_UIO: {
2064 		off_t offset = mac->cd_offset;
2065 		uint_t vec_idx;
2066 		off_t scratch_offset = 0;
2067 		size_t length = digest_len;
2068 		size_t cur_len;
2069 
2070 		/* we support only kernel buffer */
2071 		/* we support only kernel buffers */
2072 			return (CRYPTO_ARGUMENTS_BAD);
2073 
2074 		/* jump to the first iovec containing the expected digest */
2075 		for (vec_idx = 0;
2076 		    vec_idx < mac->cd_uio->uio_iovcnt &&
2077 		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len;
2078 		    offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len);
2079 		if (vec_idx == mac->cd_uio->uio_iovcnt) {
2080 			/*
2081 			 * The caller specified an offset that is
2082 			 * larger than the total size of the buffers
2083 			 * it provided.
2084 			 */
2085 			ret = CRYPTO_DATA_LEN_RANGE;
2086 			break;
2087 		}
2088 
2089 		/* do the comparison of computed digest vs specified one */
2090 		while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
2091 			cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
2092 			    offset, length);
2093 
2094 			if (bcmp(digest + scratch_offset,
2095 			    mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
2096 			    cur_len) != 0) {
2097 				ret = CRYPTO_INVALID_MAC;
2098 				break;
2099 			}
2100 
2101 			length -= cur_len;
2102 			vec_idx++;
2103 			scratch_offset += cur_len;
2104 			offset = 0;
2105 		}
2106 		break;
2107 	}
2108 
2109 	case CRYPTO_DATA_MBLK: {
2110 		off_t offset = mac->cd_offset;
2111 		mblk_t *mp;
2112 		off_t scratch_offset = 0;
2113 		size_t length = digest_len;
2114 		size_t cur_len;
2115 
2116 		/* jump to the first mblk_t containing the expected digest */
2117 		for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
2118 		    offset -= MBLKL(mp), mp = mp->b_cont);
2119 		if (mp == NULL) {
2120 			/*
2121 			 * The caller specified an offset that is larger than
2122 			 * the total size of the buffers it provided.
2123 			 */
2124 			ret = CRYPTO_DATA_LEN_RANGE;
2125 			break;
2126 		}
2127 
2128 		while (mp != NULL && length > 0) {
2129 			cur_len = MIN(MBLKL(mp) - offset, length);
2130 			if (bcmp(digest + scratch_offset,
2131 			    mp->b_rptr + offset, cur_len) != 0) {
2132 				ret = CRYPTO_INVALID_MAC;
2133 				break;
2134 			}
2135 
2136 			length -= cur_len;
2137 			mp = mp->b_cont;
2138 			scratch_offset += cur_len;
2139 			offset = 0;
2140 		}
2141 		break;
2142 	}
2143 
2144 	default:
2145 		ret = CRYPTO_ARGUMENTS_BAD;
2146 	}
2147 
2148 	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
2149 	return (ret);
2150 bail:
2151 	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
2152 	mac->cd_length = 0;
2153 	return (ret);
2154 }
2155 
2156 /*
2157  * KCF software provider context management entry points.
2158  */
2159 
2160 /* ARGSUSED */
2161 static int
2162 md5_create_ctx_template(crypto_provider_handle_t provider,
2163     crypto_mechanism_t *mechanism, crypto_key_t *key,
2164     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
2165     crypto_req_handle_t req)
2166 {
2167 	md5_hmac_ctx_t *md5_hmac_ctx_tmpl;
2168 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
2169 
2170 	if ((mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE) &&
2171 	    (mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE))
2172 		return (CRYPTO_MECHANISM_INVALID);
2173 
2174 	/* Add support for key by attributes (RFE 4706552) */
2175 	if (key->ck_format != CRYPTO_KEY_RAW)
2176 		return (CRYPTO_ARGUMENTS_BAD);
2177 
2178 	/*
2179 	 * Allocate and initialize MD5 context.
2180 	 */
2181 	md5_hmac_ctx_tmpl = kmem_alloc(sizeof (md5_hmac_ctx_t),
2182 	    crypto_kmflag(req));
2183 	if (md5_hmac_ctx_tmpl == NULL)
2184 		return (CRYPTO_HOST_MEMORY);
2185 
2186 	if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
2187 		uchar_t digested_key[MD5_DIGEST_LENGTH];
2188 
2189 		/*
2190 		 * Hash the passed-in key to get a smaller key.
2191 		 * The inner context is used since it hasn't been
2192 		 * initialized yet.
2193 		 */
2194 		PROV_MD5_DIGEST_KEY(&md5_hmac_ctx_tmpl->hc_icontext,
2195 		    key->ck_data, keylen_in_bytes, digested_key);
2196 		md5_mac_init_ctx(md5_hmac_ctx_tmpl, digested_key,
2197 		    MD5_DIGEST_LENGTH);
2198 	} else {
2199 		md5_mac_init_ctx(md5_hmac_ctx_tmpl, key->ck_data,
2200 		    keylen_in_bytes);
2201 	}
2202 
2203 	md5_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
2204 	*ctx_template = (crypto_spi_ctx_template_t)md5_hmac_ctx_tmpl;
2205 	*ctx_template_size = sizeof (md5_hmac_ctx_t);
2206 
2207 	return (CRYPTO_SUCCESS);
2208 }
2209 
2210 static int
2211 md5_free_context(crypto_ctx_t *ctx)
2212 {
2213 	uint_t ctx_len;
2214 	md5_mech_type_t mech_type;
2215 
2216 	if (ctx->cc_provider_private == NULL)
2217 		return (CRYPTO_SUCCESS);
2218 
2219 	/*
2220 	 * We have to free either MD5 or MD5-HMAC contexts, which
2221 	 * have different lengths.
2222 	 */
2223 
2224 	mech_type = PROV_MD5_CTX(ctx)->mc_mech_type;
2225 	if (mech_type == MD5_MECH_INFO_TYPE)
2226 		ctx_len = sizeof (md5_ctx_t);
2227 	else {
2228 		ASSERT(mech_type == MD5_HMAC_MECH_INFO_TYPE ||
2229 		    mech_type == MD5_HMAC_GEN_MECH_INFO_TYPE);
2230 		ctx_len = sizeof (md5_hmac_ctx_t);
2231 	}
2232 
2233 	bzero(ctx->cc_provider_private, ctx_len);
2234 	kmem_free(ctx->cc_provider_private, ctx_len);
2235 	ctx->cc_provider_private = NULL;
2236 
2237 	return (CRYPTO_SUCCESS);
2238 }
2239 
2240 #endif	/* _KERNEL && !_BOOT */
2241