xref: /illumos-gate/usr/src/common/crypto/md5/md5.c (revision 814a60b13c0ad90e5d2edfd29a7a84bbf416cc1a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Cleaned-up and optimized version of MD5, based on the reference
29  * implementation provided in RFC 1321.  See RSA Copyright information
30  * below.
31  *
32  * NOTE:  All compiler data was gathered with SC4.2, and verified with SC5.x,
33  *	  as used to build Solaris 2.7.  Hopefully the compiler behavior won't
34  *	  change for the worse in subsequent Solaris builds.
35  */
36 
37 #pragma ident	"%Z%%M%	%I%	%E% SMI"
38 
39 /*
40  * MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm
41  */
42 
43 /*
44  * Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
45  * rights reserved.
46  *
47  * License to copy and use this software is granted provided that it
48  * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
49  * Algorithm" in all material mentioning or referencing this software
50  * or this function.
51  *
52  * License is also granted to make and use derivative works provided
53  * that such works are identified as "derived from the RSA Data
54  * Security, Inc. MD5 Message-Digest Algorithm" in all material
55  * mentioning or referencing the derived work.
56  *
57  * RSA Data Security, Inc. makes no representations concerning either
58  * the merchantability of this software or the suitability of this
59  * software for any particular purpose. It is provided "as is"
60  * without express or implied warranty of any kind.
61  *
62  * These notices must be retained in any copies of any part of this
63  * documentation and/or software.
64  */
65 
66 #include <sys/types.h>
67 #include <sys/md5.h>
68 #include <sys/md5_consts.h>	/* MD5_CONST() optimization */
69 #if	!defined(_KERNEL) || defined(_BOOT)
70 #include <strings.h>
71 #endif /* !_KERNEL || _BOOT */
72 
73 #if	defined(_KERNEL) && !defined(_BOOT)
74 
75 /*
76  * In kernel module, the md5 module is created with two modlinkages:
77  * - a modlmisc that allows consumers to directly call the entry points
78  *   MD5Init, MD5Update, and MD5Final.
79  * - a modlcrypto that allows the module to register with the Kernel
80  *   Cryptographic Framework (KCF) as a software provider for the MD5
81  *   mechanisms.
82  */
83 
84 #include <sys/systm.h>
85 #include <sys/modctl.h>
86 #include <sys/cmn_err.h>
87 #include <sys/ddi.h>
88 #include <sys/crypto/common.h>
89 #include <sys/crypto/spi.h>
90 #include <sys/sysmacros.h>
91 #include <sys/strsun.h>
92 #include <sys/note.h>
93 
94 extern struct mod_ops mod_miscops;
95 extern struct mod_ops mod_cryptoops;
96 
97 /*
98  * Module linkage information for the kernel.
99  */
100 
101 static struct modlmisc modlmisc = {
102 	&mod_miscops,
103 	"MD5 Message-Digest Algorithm"
104 };
105 
106 static struct modlcrypto modlcrypto = {
107 	&mod_cryptoops,
108 	"MD5 Kernel SW Provider %I%"
109 };
110 
111 static struct modlinkage modlinkage = {
112 	MODREV_1,
113 	(void *)&modlmisc,
114 	(void *)&modlcrypto,
115 	NULL
116 };
117 
118 /*
119  * CSPI information (entry points, provider info, etc.)
120  */
121 
122 typedef enum md5_mech_type {
123 	MD5_MECH_INFO_TYPE,		/* SUN_CKM_MD5 */
124 	MD5_HMAC_MECH_INFO_TYPE,	/* SUN_CKM_MD5_HMAC */
125 	MD5_HMAC_GEN_MECH_INFO_TYPE	/* SUN_CKM_MD5_HMAC_GENERAL */
126 } md5_mech_type_t;
127 
128 #define	MD5_DIGEST_LENGTH	16	/* MD5 digest length in bytes */
129 #define	MD5_HMAC_BLOCK_SIZE	64	/* MD5 block size */
130 #define	MD5_HMAC_MIN_KEY_LEN	8	/* MD5-HMAC min key length in bits */
131 #define	MD5_HMAC_MAX_KEY_LEN	INT_MAX	/* MD5-HMAC max key length in bits */
132 #define	MD5_HMAC_INTS_PER_BLOCK	(MD5_HMAC_BLOCK_SIZE/sizeof (uint32_t))
133 
134 /*
135  * Context for MD5 mechanism.
136  */
137 typedef struct md5_ctx {
138 	md5_mech_type_t		mc_mech_type;	/* type of context */
139 	MD5_CTX			mc_md5_ctx;	/* MD5 context */
140 } md5_ctx_t;
141 
142 /*
143  * Context for MD5-HMAC and MD5-HMAC-GENERAL mechanisms.
144  */
145 typedef struct md5_hmac_ctx {
146 	md5_mech_type_t		hc_mech_type;	/* type of context */
147 	uint32_t		hc_digest_len;	/* digest len in bytes */
148 	MD5_CTX			hc_icontext;	/* inner MD5 context */
149 	MD5_CTX			hc_ocontext;	/* outer MD5 context */
150 } md5_hmac_ctx_t;
151 
152 /*
153  * Macros to access the MD5 or MD5-HMAC contexts from a context passed
154  * by KCF to one of the entry points.
155  */
156 
157 #define	PROV_MD5_CTX(ctx)	((md5_ctx_t *)(ctx)->cc_provider_private)
158 #define	PROV_MD5_HMAC_CTX(ctx)	((md5_hmac_ctx_t *)(ctx)->cc_provider_private)
159 /* to extract the digest length passed as mechanism parameter */
160 
161 #define	PROV_MD5_GET_DIGEST_LEN(m, len) {				\
162 	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
163 		(len) = (uint32_t)*((ulong_t *)(m)->cm_param);	\
164 	else {								\
165 		ulong_t tmp_ulong;					\
166 		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
167 		(len) = (uint32_t)tmp_ulong;				\
168 	}								\
169 }
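
/*
 * Note on the bcopy() fallback above: cm_param is a caller-supplied
 * buffer and is not guaranteed to be naturally aligned for a ulong_t;
 * on strict-alignment platforms (e.g. SPARC) an unaligned dereference
 * would fault, so the parameter is copied out byte-wise instead.
 */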
170 
171 #define	PROV_MD5_DIGEST_KEY(ctx, key, len, digest) {	\
172 	MD5Init(ctx);					\
173 	MD5Update(ctx, key, len);			\
174 	MD5Final(digest, ctx);				\
175 }
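
/*
 * PROV_MD5_DIGEST_KEY() is used by the MAC initialization code below to
 * shrink keys longer than the 64-byte HMAC block size; RFC 2104 calls
 * for such keys to be replaced by their digest before padding.
 */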
176 
177 /*
178  * Mechanism info structure passed to KCF during registration.
179  */
180 static crypto_mech_info_t md5_mech_info_tab[] = {
181 	/* MD5 */
182 	{SUN_CKM_MD5, MD5_MECH_INFO_TYPE,
183 	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
184 	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
185 	/* MD5-HMAC */
186 	{SUN_CKM_MD5_HMAC, MD5_HMAC_MECH_INFO_TYPE,
187 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
188 	    MD5_HMAC_MIN_KEY_LEN, MD5_HMAC_MAX_KEY_LEN,
189 	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
190 	/* MD5-HMAC GENERAL */
191 	{SUN_CKM_MD5_HMAC_GENERAL, MD5_HMAC_GEN_MECH_INFO_TYPE,
192 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
193 	    MD5_HMAC_MIN_KEY_LEN, MD5_HMAC_MAX_KEY_LEN,
194 	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
195 };
196 
197 static void md5_provider_status(crypto_provider_handle_t, uint_t *);
198 
199 static crypto_control_ops_t md5_control_ops = {
200 	md5_provider_status
201 };
202 
203 static int md5_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
204     crypto_req_handle_t);
205 static int md5_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
206     crypto_req_handle_t);
207 static int md5_digest_update(crypto_ctx_t *, crypto_data_t *,
208     crypto_req_handle_t);
209 static int md5_digest_final(crypto_ctx_t *, crypto_data_t *,
210     crypto_req_handle_t);
211 static int md5_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
212     crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
213     crypto_req_handle_t);
214 
215 static crypto_digest_ops_t md5_digest_ops = {
216 	md5_digest_init,
217 	md5_digest,
218 	md5_digest_update,
219 	NULL,
220 	md5_digest_final,
221 	md5_digest_atomic
222 };
223 
224 static int md5_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
225     crypto_spi_ctx_template_t, crypto_req_handle_t);
226 static int md5_mac_update(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
227 static int md5_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
228 static int md5_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
229     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
230     crypto_spi_ctx_template_t, crypto_req_handle_t);
231 static int md5_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
232     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
233     crypto_spi_ctx_template_t, crypto_req_handle_t);
234 
235 static crypto_mac_ops_t md5_mac_ops = {
236 	md5_mac_init,
237 	NULL,
238 	md5_mac_update,
239 	md5_mac_final,
240 	md5_mac_atomic,
241 	md5_mac_verify_atomic
242 };
243 
244 static int md5_create_ctx_template(crypto_provider_handle_t,
245     crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
246     size_t *, crypto_req_handle_t);
247 static int md5_free_context(crypto_ctx_t *);
248 
249 static crypto_ctx_ops_t md5_ctx_ops = {
250 	md5_create_ctx_template,
251 	md5_free_context
252 };
253 
254 static crypto_ops_t md5_crypto_ops = {
255 	&md5_control_ops,
256 	&md5_digest_ops,
257 	NULL,
258 	&md5_mac_ops,
259 	NULL,
260 	NULL,
261 	NULL,
262 	NULL,
263 	NULL,
264 	NULL,
265 	NULL,
266 	NULL,
267 	NULL,
268 	&md5_ctx_ops
269 };
270 
271 static crypto_provider_info_t md5_prov_info = {
272 	CRYPTO_SPI_VERSION_1,
273 	"MD5 Software Provider",
274 	CRYPTO_SW_PROVIDER,
275 	{&modlinkage},
276 	NULL,
277 	&md5_crypto_ops,
278 	sizeof (md5_mech_info_tab)/sizeof (crypto_mech_info_t),
279 	md5_mech_info_tab
280 };
281 
282 static crypto_kcf_provider_handle_t md5_prov_handle = NULL;
283 
284 int
285 _init(void)
286 {
287 	int ret;
288 
289 	if ((ret = mod_install(&modlinkage)) != 0)
290 		return (ret);
291 
292 	/*
293 	 * Register with KCF. If the registration fails, log an
294 	 * error but do not uninstall the module, since the functionality
295 	 * provided by misc/md5 should still be available.
296 	 */
297 	if ((ret = crypto_register_provider(&md5_prov_info,
298 	    &md5_prov_handle)) != CRYPTO_SUCCESS)
299 		cmn_err(CE_WARN, "md5 _init: "
300 		    "crypto_register_provider() failed (0x%x)", ret);
301 
302 	return (0);
303 }
304 
305 int
306 _fini(void)
307 {
308 	int ret;
309 
310 	/*
311 	 * Unregister from KCF if previous registration succeeded.
312 	 */
313 	if (md5_prov_handle != NULL) {
314 		if ((ret = crypto_unregister_provider(md5_prov_handle)) !=
315 		    CRYPTO_SUCCESS) {
316 			cmn_err(CE_WARN, "md5 _fini: "
317 			    "crypto_unregister_provider() failed (0x%x)", ret);
318 			return (EBUSY);
319 		}
320 		md5_prov_handle = NULL;
321 	}
322 
323 	return (mod_remove(&modlinkage));
324 }
325 
326 int
327 _info(struct modinfo *modinfop)
328 {
329 	return (mod_info(&modlinkage, modinfop));
330 }
331 #endif	/* _KERNEL && !_BOOT */
332 
333 static void Encode(uint8_t *, uint32_t *, size_t);
334 static void MD5Transform(uint32_t, uint32_t, uint32_t, uint32_t, MD5_CTX *,
335     const uint8_t [64]);
336 
337 static uint8_t PADDING[64] = { 0x80, /* all zeros */ };
338 
339 /*
340  * F, G, H and I are the basic MD5 functions.
341  */
342 #define	F(b, c, d)	(((b) & (c)) | ((~b) & (d)))
343 #define	G(b, c, d)	(((b) & (d)) | ((c) & (~d)))
344 #define	H(b, c, d)	((b) ^ (c) ^ (d))
345 #define	I(b, c, d)	((c) ^ ((b) | (~d)))
346 
347 /*
348  * ROTATE_LEFT rotates x left n bits.
349  */
350 #define	ROTATE_LEFT(x, n)	\
351 	(((x) << (n)) | ((x) >> ((sizeof (x) << 3) - (n))))
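
/*
 * For the uint32_t operands used below, (sizeof (x) << 3) is 32, so,
 * for example, ROTATE_LEFT(x, 7) is (((x) << 7) | ((x) >> 25)).
 */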
352 
353 /*
354  * FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
355  * Rotation is separate from addition to prevent recomputation.
356  */
357 
358 #define	FF(a, b, c, d, x, s, ac) { \
359 	(a) += F((b), (c), (d)) + (x) + ((unsigned long long)(ac)); \
360 	(a) = ROTATE_LEFT((a), (s)); \
361 	(a) += (b); \
362 	}
363 
364 #define	GG(a, b, c, d, x, s, ac) { \
365 	(a) += G((b), (c), (d)) + (x) + ((unsigned long long)(ac)); \
366 	(a) = ROTATE_LEFT((a), (s)); \
367 	(a) += (b); \
368 	}
369 
370 #define	HH(a, b, c, d, x, s, ac) { \
371 	(a) += H((b), (c), (d)) + (x) + ((unsigned long long)(ac)); \
372 	(a) = ROTATE_LEFT((a), (s)); \
373 	(a) += (b); \
374 	}
375 
376 #define	II(a, b, c, d, x, s, ac) { \
377 	(a) += I((b), (c), (d)) + (x) + ((unsigned long long)(ac)); \
378 	(a) = ROTATE_LEFT((a), (s)); \
379 	(a) += (b); \
380 	}
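
/*
 * Worked example (illustrative only, not compiled): assuming the usual
 * RFC 1321 values MD5_SHIFT_11 == 7 and MD5_CONST_0 == 0xd76aa478, the
 * first round-1 step FF(a, b, c, d, x_0, MD5_SHIFT_11, MD5_CONST_e(0))
 * is, in effect:
 *
 *	a += ((b & c) | (~b & d)) + x_0 + 0xd76aa478;
 *	a = (a << 7) | (a >> 25);
 *	a += b;
 */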
381 
382 /*
383  * Loading 32-bit constants on a RISC is expensive since it involves both a
384  * `sethi' and an `or'.  thus, we instead have the compiler generate `ld's to
385  * load the constants from an array called `md5_consts'.  however, on intel
386  * (and other CISC processors), it is cheaper to load the constant
387  * directly.  thus, the c code in MD5Transform() uses the macro MD5_CONST()
388  * which either expands to a constant or an array reference, depending on the
389  * architecture the code is being compiled for.
390  *
391  * Right now, i386 and amd64 are the CISC exceptions.
392  * If we get another CISC ISA, we'll have to change the ifdef.
393  */
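
/*
 * For example (given the definitions further below), MD5_CONST_e(0)
 * becomes the literal constant MD5_CONST_0 when built for i386/amd64,
 * but a memory reference (md5_consts[0], or its 64-bit-load variant on
 * sun4v) when built for SPARC.
 */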
394 
395 /*
396  * Using the %asi register to achieve little endian loads - the register
397  * is set using an inline template.
398  *
399  * Saves a few arithmetic ops as can now use an immediate offset with the
400  * lduwa instructions.
401  */
402 
403 extern void set_little(uint32_t);
404 extern uint32_t get_little();
405 
406 #if defined(__i386) || defined(__amd64)
407 
408 #define	MD5_CONST(x)		(MD5_CONST_ ## x)
409 #define	MD5_CONST_e(x)		MD5_CONST(x)
410 #define	MD5_CONST_o(x)		MD5_CONST(x)
411 
412 #else
413 /*
414  * sparc/RISC optimization:
415  *
416  * while it is somewhat counter-intuitive, on sparc (and presumably other RISC
417  * machines), it is more efficient to place all the constants used in this
418  * function in an array and load the values out of the array than to manually
419  * load the constants.  this is because setting a register to a 32-bit value
420  * takes two ops in most cases: a `sethi' and an `or', but loading a 32-bit
421  * value from memory only takes one `ld' (or `lduw' on v9).  while this
422  * increases memory usage, the compiler can find enough other things to do
423  * while waiting so that the pipeline does not stall.  additionally, it is
424  * likely that many of these constants are cached so that later accesses do
425  * not even go out to the bus.
426  *
427  * this array is declared `static' to keep the compiler from having to
428  * bcopy() this array onto the stack frame of MD5Transform() each time it is
429  * called -- which is unacceptably expensive.
430  *
431  * the `const' is to ensure that callers are good citizens and do not try to
432  * munge the array.  since these routines are going to be called from inside
433  * multithreaded kernelland, this is a good safety check; as a bonus, the
434  * constants end up in .rodata.
435  *
436  * unfortunately, loading from an array in this manner hurts performance under
437  * intel (and presumably other CISC machines).  so, there is a macro,
438  * MD5_CONST(), used in MD5Transform(), that either expands to a reference to
439  * this array, or to the actual constant, depending on what platform this code
440  * is compiled for.
441  */
442 
443 #ifdef sun4v
444 
445 /*
446  * Going to load these consts in 8B chunks, so need to enforce 8B alignment
447  */
448 
449 /* CSTYLED */
450 #pragma align 64 (md5_consts)
451 
452 #endif /* sun4v */
453 
454 static const uint32_t md5_consts[] = {
455 	MD5_CONST_0,	MD5_CONST_1,	MD5_CONST_2,	MD5_CONST_3,
456 	MD5_CONST_4,	MD5_CONST_5,	MD5_CONST_6,	MD5_CONST_7,
457 	MD5_CONST_8,	MD5_CONST_9,	MD5_CONST_10,	MD5_CONST_11,
458 	MD5_CONST_12,	MD5_CONST_13,	MD5_CONST_14,	MD5_CONST_15,
459 	MD5_CONST_16,	MD5_CONST_17,	MD5_CONST_18,	MD5_CONST_19,
460 	MD5_CONST_20,	MD5_CONST_21,	MD5_CONST_22,	MD5_CONST_23,
461 	MD5_CONST_24,	MD5_CONST_25,	MD5_CONST_26,	MD5_CONST_27,
462 	MD5_CONST_28,	MD5_CONST_29,	MD5_CONST_30,	MD5_CONST_31,
463 	MD5_CONST_32,	MD5_CONST_33,	MD5_CONST_34,	MD5_CONST_35,
464 	MD5_CONST_36,	MD5_CONST_37,	MD5_CONST_38,	MD5_CONST_39,
465 	MD5_CONST_40,	MD5_CONST_41,	MD5_CONST_42,	MD5_CONST_43,
466 	MD5_CONST_44,	MD5_CONST_45,	MD5_CONST_46,	MD5_CONST_47,
467 	MD5_CONST_48,	MD5_CONST_49,	MD5_CONST_50,	MD5_CONST_51,
468 	MD5_CONST_52,	MD5_CONST_53,	MD5_CONST_54,	MD5_CONST_55,
469 	MD5_CONST_56,	MD5_CONST_57,	MD5_CONST_58,	MD5_CONST_59,
470 	MD5_CONST_60,	MD5_CONST_61,	MD5_CONST_62,	MD5_CONST_63
471 };
472 
473 
474 #ifdef sun4v
475 /*
476  * To reduce the number of loads, load consts in 64-bit
477  * chunks and then split.
478  *
479  * No need to mask upper 32-bits, as just interested in
480  * low 32-bits (saves an & operation and means that this
481  * optimization doesn't increase the icount).
482  */
483 #define	MD5_CONST_e(x)		(md5_consts64[x/2] >> 32)
484 #define	MD5_CONST_o(x)		(md5_consts64[x/2])
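
/*
 * Note (big-endian SPARC assumption): a 64-bit load of the pair
 * md5_consts[2k] and md5_consts[2k+1] places the even-indexed constant
 * in the upper 32 bits and the odd-indexed one in the lower 32 bits,
 * which is why MD5_CONST_e() shifts right by 32 while MD5_CONST_o()
 * lets 32-bit arithmetic truncate the value.
 */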
485 
486 #else
487 
488 #define	MD5_CONST_e(x)		(md5_consts[x])
489 #define	MD5_CONST_o(x)		(md5_consts[x])
490 
491 #endif /* sun4v */
492 
493 #endif
494 
495 /*
496  * MD5Init()
497  *
498  * purpose: initializes the md5 context and begins an md5 digest operation
499  *   input: MD5_CTX *	: the context to initialize.
500  *  output: void
501  */
502 
503 void
504 MD5Init(MD5_CTX *ctx)
505 {
506 	ctx->count[0] = ctx->count[1] = 0;
507 
508 	/* load magic initialization constants */
509 	ctx->state[0] = MD5_INIT_CONST_1;
510 	ctx->state[1] = MD5_INIT_CONST_2;
511 	ctx->state[2] = MD5_INIT_CONST_3;
512 	ctx->state[3] = MD5_INIT_CONST_4;
513 }
514 
515 /*
516  * MD5Update()
517  *
518  * purpose: continues an md5 digest operation, using the message block
519  *          to update the context.
520  *   input: MD5_CTX *	: the context to update
521  *          uint8_t *	: the message block
522  *          uint32_t    : the length of the message block in bytes
523  *  output: void
524  *
525  * MD5 crunches in 64-byte blocks.  All numeric constants here are related to
526  * that property of MD5.
527  */
528 
529 void
530 MD5Update(MD5_CTX *ctx, const void *inpp, unsigned int input_len)
531 {
532 	uint32_t		i, buf_index, buf_len;
533 #ifdef	sun4v
534 	uint32_t		old_asi;
535 #endif	/* sun4v */
536 	const unsigned char 	*input = (const unsigned char *)inpp;
537 
538 	/* compute (number of bytes computed so far) mod 64 */
539 	buf_index = (ctx->count[0] >> 3) & 0x3F;
540 
541 	/* update number of bits hashed into this MD5 computation so far */
542 	if ((ctx->count[0] += (input_len << 3)) < (input_len << 3))
543 	    ctx->count[1]++;
544 	ctx->count[1] += (input_len >> 29);
545 
546 	buf_len = 64 - buf_index;
547 
548 	/* transform as many times as possible */
549 	i = 0;
550 	if (input_len >= buf_len) {
551 
552 		/*
553 		 * general optimization:
554 		 *
555 		 * only do initial bcopy() and MD5Transform() if
556 		 * buf_index != 0.  if buf_index == 0, we're just
557 		 * wasting our time doing the bcopy() since there
558 		 * wasn't any data left over from a previous call to
559 		 * MD5Update().
560 		 */
561 
562 #ifdef sun4v
563 		/*
564 		 * For N1 use %asi register. However, costly to repeatedly set
565 		 * in MD5Transform. Therefore, set once here.
566 		 * The old value is restored after the transforms below.
567 		 */
568 		old_asi = get_little();
569 		set_little(0x88);
570 #endif /* sun4v */
571 
572 		if (buf_index) {
573 			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
574 
575 			MD5Transform(ctx->state[0], ctx->state[1],
576 			    ctx->state[2], ctx->state[3], ctx,
577 			    ctx->buf_un.buf8);
578 
579 			i = buf_len;
580 		}
581 
582 		for (; i + 63 < input_len; i += 64)
583 			MD5Transform(ctx->state[0], ctx->state[1],
584 			    ctx->state[2], ctx->state[3], ctx, &input[i]);
585 
586 
587 #ifdef sun4v
588 		/*
589 		 * Restore old %ASI value
590 		 */
591 		set_little(old_asi);
592 #endif /* sun4v */
593 
594 		/*
595 		 * general optimization:
596 		 *
597 		 * if i and input_len are the same, return now instead
598 		 * of calling bcopy(), since the bcopy() in this
599 		 * case will be an expensive nop.
600 		 */
601 
602 		if (input_len == i)
603 			return;
604 
605 		buf_index = 0;
606 	}
607 
608 	/* buffer remaining input */
609 	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
610 }
611 
612 /*
613  * MD5Final()
614  *
615  * purpose: ends an md5 digest operation, finalizing the message digest and
616  *          zeroing the context.
617  *   input: uint8_t *	: a buffer to store the digest in
618  *          MD5_CTX *   : the context to finalize, save, and zero
619  *  output: void
620  */
621 
622 void
623 MD5Final(unsigned char *digest, MD5_CTX *ctx)
624 {
625 	uint8_t		bitcount_le[sizeof (ctx->count)];
626 	uint32_t	index = (ctx->count[0] >> 3) & 0x3f;
627 
628 	/* store bit count, little endian */
629 	Encode(bitcount_le, ctx->count, sizeof (bitcount_le));
630 
631 	/* pad out to 56 mod 64 */
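	/*
	 * Worked example (illustrative): index == 0 pads 56 bytes,
	 * index == 56 pads 64 bytes and index == 63 pads 57 bytes; in
	 * every case the data length becomes 56 mod 64, so appending
	 * the 8-byte bit count below ends the message exactly on a
	 * 64-byte block boundary.
	 */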
632 	MD5Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
633 
634 	/* append length (before padding) */
635 	MD5Update(ctx, bitcount_le, sizeof (bitcount_le));
636 
637 	/* store state in digest */
638 	Encode(digest, ctx->state, sizeof (ctx->state));
639 }
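
/*
 * Illustrative streaming use of the three calls above (a sketch, not
 * compiled here; buf1, buf2, len1 and len2 stand for arbitrary caller
 * data).  Unlike the one-shot md5_calc() below, the data may be fed in
 * any number of pieces:
 *
 *	MD5_CTX ctx;
 *	unsigned char md[16];
 *
 *	MD5Init(&ctx);
 *	MD5Update(&ctx, buf1, len1);
 *	MD5Update(&ctx, buf2, len2);
 *	MD5Final(md, &ctx);
 */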
640 
641 #ifndef	_KERNEL
642 
643 void
644 md5_calc(unsigned char *output, unsigned char *input, unsigned int inlen)
645 {
646 	MD5_CTX context;
647 
648 	MD5Init(&context);
649 	MD5Update(&context, input, inlen);
650 	MD5Final(output, &context);
651 }
652 
653 #endif	/* !_KERNEL */
654 
655 /*
656  * Little-endian optimization:  I don't need to do any weirdness.   On
657  * some little-endian boxen, I'll have to do alignment checks, but I can do
658  * that below.
659  */
660 
661 #ifdef _LITTLE_ENDIAN
662 
663 #if !defined(__i386) && !defined(__amd64)
664 /*
665  * i386 and amd64 don't require aligned 4-byte loads.  The symbol
666  * _MD5_CHECK_ALIGNMENT indicates below whether the MD5Transform function
667  * requires alignment checking.
668  */
669 #define	_MD5_CHECK_ALIGNMENT
670 #endif /* !__i386 && !__amd64 */
671 
672 #define	LOAD_LITTLE_32(addr)	(*(uint32_t *)(addr))
673 
674 /*
675  * sparc v9/v8plus optimization:
676  *
677  * on the sparc v9/v8plus, we can load data little endian.  however, since
678  * the compiler doesn't have direct support for little endian, we
679  * link to an assembly-language routine `load_little_32' to do
680  * the magic.  note that special care must be taken to ensure the
681  * address is 32-bit aligned -- in the interest of speed, we don't
682  * check to make sure, since careful programming can guarantee this
683  * for us.
684  */
685 
686 #elif	defined(sun4u)
687 
688 /* Define alignment check because we can 4-byte load as little endian. */
689 #define	_MD5_CHECK_ALIGNMENT
690 
691 extern  uint32_t load_little_32(uint32_t *);
692 #define	LOAD_LITTLE_32(addr)    load_little_32((uint32_t *)(addr))
693 
694 #ifdef sun4v
695 
696 /*
697  * For N1 want to minimize number of arithmetic operations. This is best
698  * achieved by using the %asi register to specify ASI for the lduwa operations.
699  * Also, have a separate inline template for each word, so can utilize the
700  * immediate offset in lduwa, without relying on the compiler to do the right
701  * thing.
702  *
703  * Moving to 64-bit loads might also be beneficial.
704  */
705 
706 extern	uint32_t load_little_32_0(uint32_t *);
707 extern	uint32_t load_little_32_1(uint32_t *);
708 extern	uint32_t load_little_32_2(uint32_t *);
709 extern	uint32_t load_little_32_3(uint32_t *);
710 extern	uint32_t load_little_32_4(uint32_t *);
711 extern	uint32_t load_little_32_5(uint32_t *);
712 extern	uint32_t load_little_32_6(uint32_t *);
713 extern	uint32_t load_little_32_7(uint32_t *);
714 extern	uint32_t load_little_32_8(uint32_t *);
715 extern	uint32_t load_little_32_9(uint32_t *);
716 extern	uint32_t load_little_32_a(uint32_t *);
717 extern	uint32_t load_little_32_b(uint32_t *);
718 extern	uint32_t load_little_32_c(uint32_t *);
719 extern	uint32_t load_little_32_d(uint32_t *);
720 extern	uint32_t load_little_32_e(uint32_t *);
721 extern	uint32_t load_little_32_f(uint32_t *);
722 #define	LOAD_LITTLE_32_0(addr)	load_little_32_0((uint32_t *)(addr))
723 #define	LOAD_LITTLE_32_1(addr)	load_little_32_1((uint32_t *)(addr))
724 #define	LOAD_LITTLE_32_2(addr)	load_little_32_2((uint32_t *)(addr))
725 #define	LOAD_LITTLE_32_3(addr)	load_little_32_3((uint32_t *)(addr))
726 #define	LOAD_LITTLE_32_4(addr)	load_little_32_4((uint32_t *)(addr))
727 #define	LOAD_LITTLE_32_5(addr)	load_little_32_5((uint32_t *)(addr))
728 #define	LOAD_LITTLE_32_6(addr)	load_little_32_6((uint32_t *)(addr))
729 #define	LOAD_LITTLE_32_7(addr)	load_little_32_7((uint32_t *)(addr))
730 #define	LOAD_LITTLE_32_8(addr)	load_little_32_8((uint32_t *)(addr))
731 #define	LOAD_LITTLE_32_9(addr)	load_little_32_9((uint32_t *)(addr))
732 #define	LOAD_LITTLE_32_a(addr)	load_little_32_a((uint32_t *)(addr))
733 #define	LOAD_LITTLE_32_b(addr)	load_little_32_b((uint32_t *)(addr))
734 #define	LOAD_LITTLE_32_c(addr)	load_little_32_c((uint32_t *)(addr))
735 #define	LOAD_LITTLE_32_d(addr)	load_little_32_d((uint32_t *)(addr))
736 #define	LOAD_LITTLE_32_e(addr)	load_little_32_e((uint32_t *)(addr))
737 #define	LOAD_LITTLE_32_f(addr)	load_little_32_f((uint32_t *)(addr))
738 #endif /* sun4v */
739 
740 /* Placate lint */
741 #if	defined(__lint)
742 uint32_t
743 load_little_32(uint32_t *addr)
744 {
745 	return (*addr);
746 }
747 #endif
748 
749 #else	/* big endian -- will work on little endian, but slowly */
750 
751 /* Since we do byte operations, we don't have to check for alignment. */
752 #define	LOAD_LITTLE_32(addr)	\
753 	((addr)[0] | ((addr)[1] << 8) | ((addr)[2] << 16) | ((addr)[3] << 24))
754 #endif
755 
756 /*
757  * sparc register window optimization:
758  *
759  * `a', `b', `c', and `d' are passed into MD5Transform explicitly
760  * since it increases the number of registers available to the
761  * compiler.  under this scheme, these variables can be held in
762  * %i0 - %i3, which leaves more local and out registers available.
763  */
764 
765 /*
766  * MD5Transform()
767  *
768  * purpose: md5 transformation -- updates the digest based on `block'
769  *   input: uint32_t	: bytes  1 -  4 of the digest
770  *          uint32_t	: bytes  5 -  8 of the digest
771  *          uint32_t	: bytes  9 - 12 of the digest
772  *          uint32_t	: bytes 13 - 16 of the digest
773  *          MD5_CTX *   : the context to update
774  *          uint8_t [64]: the block to use to update the digest
775  *  output: void
776  */
777 
778 static void
779 MD5Transform(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
780     MD5_CTX *ctx, const uint8_t block[64])
781 {
782 	/*
783 	 * general optimization:
784 	 *
785 	 * use individual integers instead of using an array.  this is a
786 	 * win, although the amount it wins by seems to vary quite a bit.
787 	 */
788 
789 	register uint32_t	x_0, x_1, x_2,  x_3,  x_4,  x_5,  x_6,  x_7;
790 	register uint32_t	x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15;
791 #ifdef sun4v
792 	unsigned long long 	*md5_consts64;
793 
794 	md5_consts64 = (unsigned long long *) md5_consts;
795 #endif	/* sun4v */
796 
797 	/*
798 	 * general optimization:
799 	 *
800 	 * the compiler (at least SC4.2/5.x) generates better code if
801 	 * variable use is localized.  in this case, swapping the integers in
802  * this order allows `x_0' to be swapped nearest to its first use in
803 	 * FF(), and likewise for `x_1' and up.  note that the compiler
804 	 * prefers this to doing each swap right before the FF() that
805 	 * uses it.
806 	 */
807 
808 	/*
809 	 * sparc v9/v8plus optimization:
810 	 *
811 	 * if `block' is already aligned on a 4-byte boundary, use the
812 	 * optimized load_little_32() directly.  otherwise, bcopy()
813 	 * into a buffer that *is* aligned on a 4-byte boundary and
814 	 * then do the load_little_32() on that buffer.  benchmarks
815 	 * have shown that using the bcopy() is better than loading
816 	 * the bytes individually and doing the endian-swap by hand.
817 	 *
818  * even though it's quite tempting to simply do:
819 	 *
820 	 * blk = bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
821 	 *
822 	 * and only have one set of LOAD_LITTLE_32()'s, the compiler (at least
823 	 * SC4.2/5.x) *does not* like that, so please resist the urge.
824 	 */
825 
826 #ifdef _MD5_CHECK_ALIGNMENT
827 	if ((uintptr_t)block & 0x3) {		/* not 4-byte aligned? */
828 		bcopy(block, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
829 
830 #ifdef sun4v
831 		x_15 = LOAD_LITTLE_32_f(ctx->buf_un.buf32);
832 		x_14 = LOAD_LITTLE_32_e(ctx->buf_un.buf32);
833 		x_13 = LOAD_LITTLE_32_d(ctx->buf_un.buf32);
834 		x_12 = LOAD_LITTLE_32_c(ctx->buf_un.buf32);
835 		x_11 = LOAD_LITTLE_32_b(ctx->buf_un.buf32);
836 		x_10 = LOAD_LITTLE_32_a(ctx->buf_un.buf32);
837 		x_9  = LOAD_LITTLE_32_9(ctx->buf_un.buf32);
838 		x_8  = LOAD_LITTLE_32_8(ctx->buf_un.buf32);
839 		x_7  = LOAD_LITTLE_32_7(ctx->buf_un.buf32);
840 		x_6  = LOAD_LITTLE_32_6(ctx->buf_un.buf32);
841 		x_5  = LOAD_LITTLE_32_5(ctx->buf_un.buf32);
842 		x_4  = LOAD_LITTLE_32_4(ctx->buf_un.buf32);
843 		x_3  = LOAD_LITTLE_32_3(ctx->buf_un.buf32);
844 		x_2  = LOAD_LITTLE_32_2(ctx->buf_un.buf32);
845 		x_1  = LOAD_LITTLE_32_1(ctx->buf_un.buf32);
846 		x_0  = LOAD_LITTLE_32_0(ctx->buf_un.buf32);
847 #else
848 		x_15 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 15);
849 		x_14 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 14);
850 		x_13 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 13);
851 		x_12 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 12);
852 		x_11 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 11);
853 		x_10 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 10);
854 		x_9  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  9);
855 		x_8  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  8);
856 		x_7  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  7);
857 		x_6  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  6);
858 		x_5  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  5);
859 		x_4  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  4);
860 		x_3  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  3);
861 		x_2  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  2);
862 		x_1  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  1);
863 		x_0  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  0);
864 #endif /* sun4v */
865 	} else
866 #endif
867 	{
868 
869 #ifdef sun4v
870 		x_15 = LOAD_LITTLE_32_f(block);
871 		x_14 = LOAD_LITTLE_32_e(block);
872 		x_13 = LOAD_LITTLE_32_d(block);
873 		x_12 = LOAD_LITTLE_32_c(block);
874 		x_11 = LOAD_LITTLE_32_b(block);
875 		x_10 = LOAD_LITTLE_32_a(block);
876 		x_9  = LOAD_LITTLE_32_9(block);
877 		x_8  = LOAD_LITTLE_32_8(block);
878 		x_7  = LOAD_LITTLE_32_7(block);
879 		x_6  = LOAD_LITTLE_32_6(block);
880 		x_5  = LOAD_LITTLE_32_5(block);
881 		x_4  = LOAD_LITTLE_32_4(block);
882 		x_3  = LOAD_LITTLE_32_3(block);
883 		x_2  = LOAD_LITTLE_32_2(block);
884 		x_1  = LOAD_LITTLE_32_1(block);
885 		x_0  = LOAD_LITTLE_32_0(block);
886 #else
887 		x_15 = LOAD_LITTLE_32(block + 60);
888 		x_14 = LOAD_LITTLE_32(block + 56);
889 		x_13 = LOAD_LITTLE_32(block + 52);
890 		x_12 = LOAD_LITTLE_32(block + 48);
891 		x_11 = LOAD_LITTLE_32(block + 44);
892 		x_10 = LOAD_LITTLE_32(block + 40);
893 		x_9  = LOAD_LITTLE_32(block + 36);
894 		x_8  = LOAD_LITTLE_32(block + 32);
895 		x_7  = LOAD_LITTLE_32(block + 28);
896 		x_6  = LOAD_LITTLE_32(block + 24);
897 		x_5  = LOAD_LITTLE_32(block + 20);
898 		x_4  = LOAD_LITTLE_32(block + 16);
899 		x_3  = LOAD_LITTLE_32(block + 12);
900 		x_2  = LOAD_LITTLE_32(block +  8);
901 		x_1  = LOAD_LITTLE_32(block +  4);
902 		x_0  = LOAD_LITTLE_32(block +  0);
903 #endif /* sun4v */
904 	}
905 
906 	/* round 1 */
907 	FF(a, b, c, d, 	x_0, MD5_SHIFT_11, MD5_CONST_e(0));  /* 1 */
908 	FF(d, a, b, c, 	x_1, MD5_SHIFT_12, MD5_CONST_o(1));  /* 2 */
909 	FF(c, d, a, b, 	x_2, MD5_SHIFT_13, MD5_CONST_e(2));  /* 3 */
910 	FF(b, c, d, a, 	x_3, MD5_SHIFT_14, MD5_CONST_o(3));  /* 4 */
911 	FF(a, b, c, d, 	x_4, MD5_SHIFT_11, MD5_CONST_e(4));  /* 5 */
912 	FF(d, a, b, c, 	x_5, MD5_SHIFT_12, MD5_CONST_o(5));  /* 6 */
913 	FF(c, d, a, b, 	x_6, MD5_SHIFT_13, MD5_CONST_e(6));  /* 7 */
914 	FF(b, c, d, a, 	x_7, MD5_SHIFT_14, MD5_CONST_o(7));  /* 8 */
915 	FF(a, b, c, d, 	x_8, MD5_SHIFT_11, MD5_CONST_e(8));  /* 9 */
916 	FF(d, a, b, c, 	x_9, MD5_SHIFT_12, MD5_CONST_o(9));  /* 10 */
917 	FF(c, d, a, b, x_10, MD5_SHIFT_13, MD5_CONST_e(10)); /* 11 */
918 	FF(b, c, d, a, x_11, MD5_SHIFT_14, MD5_CONST_o(11)); /* 12 */
919 	FF(a, b, c, d, x_12, MD5_SHIFT_11, MD5_CONST_e(12)); /* 13 */
920 	FF(d, a, b, c, x_13, MD5_SHIFT_12, MD5_CONST_o(13)); /* 14 */
921 	FF(c, d, a, b, x_14, MD5_SHIFT_13, MD5_CONST_e(14)); /* 15 */
922 	FF(b, c, d, a, x_15, MD5_SHIFT_14, MD5_CONST_o(15)); /* 16 */
923 
924 	/* round 2 */
925 	GG(a, b, c, d,  x_1, MD5_SHIFT_21, MD5_CONST_e(16)); /* 17 */
926 	GG(d, a, b, c,  x_6, MD5_SHIFT_22, MD5_CONST_o(17)); /* 18 */
927 	GG(c, d, a, b, x_11, MD5_SHIFT_23, MD5_CONST_e(18)); /* 19 */
928 	GG(b, c, d, a,  x_0, MD5_SHIFT_24, MD5_CONST_o(19)); /* 20 */
929 	GG(a, b, c, d,  x_5, MD5_SHIFT_21, MD5_CONST_e(20)); /* 21 */
930 	GG(d, a, b, c, x_10, MD5_SHIFT_22, MD5_CONST_o(21)); /* 22 */
931 	GG(c, d, a, b, x_15, MD5_SHIFT_23, MD5_CONST_e(22)); /* 23 */
932 	GG(b, c, d, a,  x_4, MD5_SHIFT_24, MD5_CONST_o(23)); /* 24 */
933 	GG(a, b, c, d,  x_9, MD5_SHIFT_21, MD5_CONST_e(24)); /* 25 */
934 	GG(d, a, b, c, x_14, MD5_SHIFT_22, MD5_CONST_o(25)); /* 26 */
935 	GG(c, d, a, b,  x_3, MD5_SHIFT_23, MD5_CONST_e(26)); /* 27 */
936 	GG(b, c, d, a,  x_8, MD5_SHIFT_24, MD5_CONST_o(27)); /* 28 */
937 	GG(a, b, c, d, x_13, MD5_SHIFT_21, MD5_CONST_e(28)); /* 29 */
938 	GG(d, a, b, c,  x_2, MD5_SHIFT_22, MD5_CONST_o(29)); /* 30 */
939 	GG(c, d, a, b,  x_7, MD5_SHIFT_23, MD5_CONST_e(30)); /* 31 */
940 	GG(b, c, d, a, x_12, MD5_SHIFT_24, MD5_CONST_o(31)); /* 32 */
941 
942 	/* round 3 */
943 	HH(a, b, c, d,  x_5, MD5_SHIFT_31, MD5_CONST_e(32)); /* 33 */
944 	HH(d, a, b, c,  x_8, MD5_SHIFT_32, MD5_CONST_o(33)); /* 34 */
945 	HH(c, d, a, b, x_11, MD5_SHIFT_33, MD5_CONST_e(34)); /* 35 */
946 	HH(b, c, d, a, x_14, MD5_SHIFT_34, MD5_CONST_o(35)); /* 36 */
947 	HH(a, b, c, d,  x_1, MD5_SHIFT_31, MD5_CONST_e(36)); /* 37 */
948 	HH(d, a, b, c,  x_4, MD5_SHIFT_32, MD5_CONST_o(37)); /* 38 */
949 	HH(c, d, a, b,  x_7, MD5_SHIFT_33, MD5_CONST_e(38)); /* 39 */
950 	HH(b, c, d, a, x_10, MD5_SHIFT_34, MD5_CONST_o(39)); /* 40 */
951 	HH(a, b, c, d, x_13, MD5_SHIFT_31, MD5_CONST_e(40)); /* 41 */
952 	HH(d, a, b, c,  x_0, MD5_SHIFT_32, MD5_CONST_o(41)); /* 42 */
953 	HH(c, d, a, b,  x_3, MD5_SHIFT_33, MD5_CONST_e(42)); /* 43 */
954 	HH(b, c, d, a,  x_6, MD5_SHIFT_34, MD5_CONST_o(43)); /* 44 */
955 	HH(a, b, c, d,  x_9, MD5_SHIFT_31, MD5_CONST_e(44)); /* 45 */
956 	HH(d, a, b, c, x_12, MD5_SHIFT_32, MD5_CONST_o(45)); /* 46 */
957 	HH(c, d, a, b, x_15, MD5_SHIFT_33, MD5_CONST_e(46)); /* 47 */
958 	HH(b, c, d, a,  x_2, MD5_SHIFT_34, MD5_CONST_o(47)); /* 48 */
959 
960 	/* round 4 */
961 	II(a, b, c, d,  x_0, MD5_SHIFT_41, MD5_CONST_e(48)); /* 49 */
962 	II(d, a, b, c,  x_7, MD5_SHIFT_42, MD5_CONST_o(49)); /* 50 */
963 	II(c, d, a, b, x_14, MD5_SHIFT_43, MD5_CONST_e(50)); /* 51 */
964 	II(b, c, d, a,  x_5, MD5_SHIFT_44, MD5_CONST_o(51)); /* 52 */
965 	II(a, b, c, d, x_12, MD5_SHIFT_41, MD5_CONST_e(52)); /* 53 */
966 	II(d, a, b, c,  x_3, MD5_SHIFT_42, MD5_CONST_o(53)); /* 54 */
967 	II(c, d, a, b, x_10, MD5_SHIFT_43, MD5_CONST_e(54)); /* 55 */
968 	II(b, c, d, a,  x_1, MD5_SHIFT_44, MD5_CONST_o(55)); /* 56 */
969 	II(a, b, c, d,  x_8, MD5_SHIFT_41, MD5_CONST_e(56)); /* 57 */
970 	II(d, a, b, c, x_15, MD5_SHIFT_42, MD5_CONST_o(57)); /* 58 */
971 	II(c, d, a, b,  x_6, MD5_SHIFT_43, MD5_CONST_e(58)); /* 59 */
972 	II(b, c, d, a, x_13, MD5_SHIFT_44, MD5_CONST_o(59)); /* 60 */
973 	II(a, b, c, d,  x_4, MD5_SHIFT_41, MD5_CONST_e(60)); /* 61 */
974 	II(d, a, b, c, x_11, MD5_SHIFT_42, MD5_CONST_o(61)); /* 62 */
975 	II(c, d, a, b,  x_2, MD5_SHIFT_43, MD5_CONST_e(62)); /* 63 */
976 	II(b, c, d, a,  x_9, MD5_SHIFT_44, MD5_CONST_o(63)); /* 64 */
977 
978 	ctx->state[0] += a;
979 	ctx->state[1] += b;
980 	ctx->state[2] += c;
981 	ctx->state[3] += d;
982 
983 	/*
984 	 * zeroize sensitive information -- compiler will optimize
985 	 * this out if everything is kept in registers
986 	 */
987 
988 	x_0 = x_1  = x_2  = x_3  = x_4  = x_5  = x_6  = x_7 = x_8 = 0;
989 	x_9 = x_10 = x_11 = x_12 = x_13 = x_14 = x_15 = 0;
990 }
991 
992 /*
993  * devpro compiler optimization:
994  *
995  * the compiler can generate better code if it knows that `input' and
996  * `output' do not point to the same source.  there is no portable
997  * way to tell the compiler this, but the devpro compiler recognizes the
998  * `_Restrict' keyword to indicate this condition.  use it if possible.
999  */
1000 
1001 #if defined(__RESTRICT) && !defined(__GNUC__)
1002 #define	restrict	_Restrict
1003 #else
1004 #define	restrict	/* nothing */
1005 #endif
1006 
1007 /*
1008  * Encode()
1009  *
1010  * purpose: to convert an array of 32-bit words to little-endian bytes
1011  *   input: uint8_t *	: place to store the converted little endian numbers
1012  *	    uint32_t *	: place to get numbers to convert from
1013  *          size_t	: the length of the input in bytes
1014  *  output: void
1015  */
1016 
1017 static void
1018 Encode(uint8_t *restrict output, uint32_t *restrict input, size_t input_len)
1019 {
1020 	size_t		i, j;
1021 
1022 	for (i = 0, j = 0; j < input_len; i++, j += sizeof (uint32_t)) {
1023 
1024 #ifdef _LITTLE_ENDIAN
1025 
1026 #ifdef _MD5_CHECK_ALIGNMENT
1027 		if ((uintptr_t)output & 0x3)	/* Not 4-byte aligned */
1028 			bcopy(input + i, output + j, 4);
1029 		else *(uint32_t *)(output + j) = input[i];
1030 #else
1031 		*(uint32_t *)(output + j) = input[i];
1032 #endif /* _MD5_CHECK_ALIGNMENT */
1033 
1034 #else	/* big endian -- will work on little endian, but slowly */
1035 
1036 		output[j] = input[i] & 0xff;
1037 		output[j + 1] = (input[i] >> 8)  & 0xff;
1038 		output[j + 2] = (input[i] >> 16) & 0xff;
1039 		output[j + 3] = (input[i] >> 24) & 0xff;
1040 #endif
1041 	}
1042 }
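
/*
 * Example (illustrative): Encode() of the single word 0x67452301 stores
 * the bytes 0x01 0x23 0x45 0x67; this is how ctx->state is serialized
 * into the conventional MD5 digest byte order by MD5Final().
 */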
1043 
1044 #if	defined(_KERNEL) && !defined(_BOOT)
1045 
1046 /*
1047  * KCF software provider control entry points.
1048  */
1049 /* ARGSUSED */
1050 static void
1051 md5_provider_status(crypto_provider_handle_t provider, uint_t *status)
1052 {
1053 	*status = CRYPTO_PROVIDER_READY;
1054 }
1055 
1056 /*
1057  * KCF software provider digest entry points.
1058  */
1059 
1060 static int
1061 md5_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1062     crypto_req_handle_t req)
1063 {
1064 	if (mechanism->cm_type != MD5_MECH_INFO_TYPE)
1065 		return (CRYPTO_MECHANISM_INVALID);
1066 
1067 	/*
1068 	 * Allocate and initialize MD5 context.
1069 	 */
1070 	ctx->cc_provider_private = kmem_alloc(sizeof (md5_ctx_t),
1071 	    crypto_kmflag(req));
1072 	if (ctx->cc_provider_private == NULL)
1073 		return (CRYPTO_HOST_MEMORY);
1074 
1075 	PROV_MD5_CTX(ctx)->mc_mech_type = MD5_MECH_INFO_TYPE;
1076 	MD5Init(&PROV_MD5_CTX(ctx)->mc_md5_ctx);
1077 
1078 	return (CRYPTO_SUCCESS);
1079 }
1080 
1081 /*
1082  * Helper MD5 digest update function for uio data.
1083  */
1084 static int
1085 md5_digest_update_uio(MD5_CTX *md5_ctx, crypto_data_t *data)
1086 {
1087 	off_t offset = data->cd_offset;
1088 	size_t length = data->cd_length;
1089 	uint_t vec_idx;
1090 	size_t cur_len;
1091 
1092 	/* we support only kernel buffer */
1093 	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
1094 		return (CRYPTO_ARGUMENTS_BAD);
1095 
1096 	/*
1097 	 * Jump to the first iovec containing data to be
1098 	 * digested.
1099 	 */
1100 	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
1101 	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
1102 	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len);
1103 	if (vec_idx == data->cd_uio->uio_iovcnt) {
1104 		/*
1105 		 * The caller specified an offset that is larger than the
1106 		 * total size of the buffers it provided.
1107 		 */
1108 		return (CRYPTO_DATA_LEN_RANGE);
1109 	}
1110 
1111 	/*
1112 	 * Now do the digesting on the iovecs.
1113 	 */
1114 	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
1115 		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
1116 		    offset, length);
1117 
1118 		MD5Update(md5_ctx, data->cd_uio->uio_iov[vec_idx].iov_base +
1119 		    offset, cur_len);
1120 
1121 		length -= cur_len;
1122 		vec_idx++;
1123 		offset = 0;
1124 	}
1125 
1126 	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
1127 		/*
1128 		 * The end of the specified iovecs was reached but
1129 		 * the length requested could not be processed, i.e.,
1130 		 * the caller requested to digest more data than it provided.
1131 		 */
1132 		return (CRYPTO_DATA_LEN_RANGE);
1133 	}
1134 
1135 	return (CRYPTO_SUCCESS);
1136 }
1137 
1138 /*
1139  * Helper MD5 digest final function for uio data.
1140  * digest_len is the length of the desired digest. If digest_len
1141  * is smaller than the default MD5 digest length, the caller
1142  * must pass a scratch buffer, digest_scratch, which must
1143  * be at least MD5_DIGEST_LENGTH bytes.
1144  */
1145 static int
1146 md5_digest_final_uio(MD5_CTX *md5_ctx, crypto_data_t *digest,
1147     ulong_t digest_len, uchar_t *digest_scratch)
1148 {
1149 	off_t offset = digest->cd_offset;
1150 	uint_t vec_idx;
1151 
1152 	/* we support only kernel buffer */
1153 	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
1154 		return (CRYPTO_ARGUMENTS_BAD);
1155 
1156 	/*
1157 	 * Jump to the first iovec containing ptr to the digest to
1158 	 * be returned.
1159 	 */
1160 	for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
1161 	    vec_idx < digest->cd_uio->uio_iovcnt;
1162 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len);
1163 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
1164 		/*
1165 		 * The caller specified an offset that is
1166 		 * larger than the total size of the buffers
1167 		 * it provided.
1168 		 */
1169 		return (CRYPTO_DATA_LEN_RANGE);
1170 	}
1171 
1172 	if (offset + digest_len <=
1173 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
1174 		/*
1175 		 * The computed MD5 digest will fit in the current
1176 		 * iovec.
1177 		 */
1178 		if (digest_len != MD5_DIGEST_LENGTH) {
1179 			/*
1180 			 * The caller requested a short digest. Digest
1181 			 * into a scratch buffer and return to
1182 			 * the user only what was requested.
1183 			 */
1184 			MD5Final(digest_scratch, md5_ctx);
1185 			bcopy(digest_scratch, (uchar_t *)digest->
1186 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1187 			    digest_len);
1188 		} else {
1189 			MD5Final((uchar_t *)digest->
1190 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1191 			    md5_ctx);
1192 		}
1193 	} else {
1194 		/*
1195 		 * The computed digest will be crossing one or more iovec's.
1196 		 * This is bad performance-wise but we need to support it.
1197 		 * Allocate a small scratch buffer on the stack and
1198 		 * copy it piecemeal to the specified digest iovecs.
1199 		 */
1200 		uchar_t digest_tmp[MD5_DIGEST_LENGTH];
1201 		off_t scratch_offset = 0;
1202 		size_t length = digest_len;
1203 		size_t cur_len;
1204 
1205 		MD5Final(digest_tmp, md5_ctx);
1206 
1207 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
1208 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
1209 			    offset, length);
1210 			bcopy(digest_tmp + scratch_offset,
1211 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
1212 			    cur_len);
1213 
1214 			length -= cur_len;
1215 			vec_idx++;
1216 			scratch_offset += cur_len;
1217 			offset = 0;
1218 		}
1219 
1220 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
1221 			/*
1222 			 * The end of the specified iovecs was reached but
1223 			 * the length requested could not be processed, i.e.,
1224 			 * the caller requested to digest more data than it
1225 			 * provided.
1226 			 */
1227 			return (CRYPTO_DATA_LEN_RANGE);
1228 		}
1229 	}
1230 
1231 	return (CRYPTO_SUCCESS);
1232 }
1233 
1234 /*
1235  * Helper MD5 digest update for mblk's.
1236  */
1237 static int
1238 md5_digest_update_mblk(MD5_CTX *md5_ctx, crypto_data_t *data)
1239 {
1240 	off_t offset = data->cd_offset;
1241 	size_t length = data->cd_length;
1242 	mblk_t *mp;
1243 	size_t cur_len;
1244 
1245 	/*
1246 	 * Jump to the first mblk_t containing data to be digested.
1247 	 */
1248 	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
1249 	    offset -= MBLKL(mp), mp = mp->b_cont);
1250 	if (mp == NULL) {
1251 		/*
1252 		 * The caller specified an offset that is larger than the
1253 		 * total size of the buffers it provided.
1254 		 */
1255 		return (CRYPTO_DATA_LEN_RANGE);
1256 	}
1257 
1258 	/*
1259 	 * Now do the digesting on the mblk chain.
1260 	 */
1261 	while (mp != NULL && length > 0) {
1262 		cur_len = MIN(MBLKL(mp) - offset, length);
1263 		MD5Update(md5_ctx, mp->b_rptr + offset, cur_len);
1264 		length -= cur_len;
1265 		offset = 0;
1266 		mp = mp->b_cont;
1267 	}
1268 
1269 	if (mp == NULL && length > 0) {
1270 		/*
1271 		 * The end of the mblk chain was reached but the length
1272 		 * requested could not be processed, i.e., the caller
1273 		 * requested to digest more data than it provided.
1274 		 */
1275 		return (CRYPTO_DATA_LEN_RANGE);
1276 	}
1277 
1278 	return (CRYPTO_SUCCESS);
1279 }
1280 
1281 /*
1282  * Helper MD5 digest final for mblk's.
1283  * digest_len is the length of the desired digest. If digest_len
1284  * is smaller than the default MD5 digest length, the caller
1285  * must pass a scratch buffer, digest_scratch, which must
1286  * be at least MD5_DIGEST_LENGTH bytes.
1287  */
1288 static int
1289 md5_digest_final_mblk(MD5_CTX *md5_ctx, crypto_data_t *digest,
1290     ulong_t digest_len, uchar_t *digest_scratch)
1291 {
1292 	off_t offset = digest->cd_offset;
1293 	mblk_t *mp;
1294 
1295 	/*
1296 	 * Jump to the first mblk_t that will be used to store the digest.
1297 	 */
1298 	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
1299 	    offset -= MBLKL(mp), mp = mp->b_cont);
1300 	if (mp == NULL) {
1301 		/*
1302 		 * The caller specified an offset that is larger than the
1303 		 * total size of the buffers it provided.
1304 		 */
1305 		return (CRYPTO_DATA_LEN_RANGE);
1306 	}
1307 
1308 	if (offset + digest_len <= MBLKL(mp)) {
1309 		/*
1310 		 * The computed MD5 digest will fit in the current mblk.
1311 		 * Do the MD5Final() in-place.
1312 		 */
1313 		if (digest_len != MD5_DIGEST_LENGTH) {
1314 			/*
1315 			 * The caller requested a short digest. Digest
1316 			 * into a scratch buffer and return to
1317 			 * the user only what was requested.
1318 			 */
1319 			MD5Final(digest_scratch, md5_ctx);
1320 			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
1321 		} else {
1322 			MD5Final(mp->b_rptr + offset, md5_ctx);
1323 		}
1324 	} else {
1325 		/*
1326 		 * The computed digest will be crossing one or more mblk's.
1327 		 * This is bad performance-wise but we need to support it.
1328 		 * Allocate a small scratch buffer on the stack and
1329 		 * copy it piecemeal to the specified digest mblks.
1330 		 */
1331 		uchar_t digest_tmp[MD5_DIGEST_LENGTH];
1332 		off_t scratch_offset = 0;
1333 		size_t length = digest_len;
1334 		size_t cur_len;
1335 
1336 		MD5Final(digest_tmp, md5_ctx);
1337 
1338 		while (mp != NULL && length > 0) {
1339 			cur_len = MIN(MBLKL(mp) - offset, length);
1340 			bcopy(digest_tmp + scratch_offset,
1341 			    mp->b_rptr + offset, cur_len);
1342 
1343 			length -= cur_len;
1344 			mp = mp->b_cont;
1345 			scratch_offset += cur_len;
1346 			offset = 0;
1347 		}
1348 
1349 		if (mp == NULL && length > 0) {
1350 			/*
1351 			 * The end of the specified mblk chain was reached but
1352 			 * the length requested could not be processed, i.e.,
1353 			 * the caller requested to digest more data than it
1354 			 * provided.
1355 			 */
1356 			return (CRYPTO_DATA_LEN_RANGE);
1357 		}
1358 	}
1359 
1360 	return (CRYPTO_SUCCESS);
1361 }
1362 
1363 /* ARGSUSED */
1364 static int
1365 md5_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
1366     crypto_req_handle_t req)
1367 {
1368 	int ret = CRYPTO_SUCCESS;
1369 
1370 	ASSERT(ctx->cc_provider_private != NULL);
1371 
1372 	/*
1373 	 * We need to just return the length needed to store the output.
1374 	 * We should not destroy the context for the following cases.
1375 	 */
1376 	if ((digest->cd_length == 0) ||
1377 	    (digest->cd_length < MD5_DIGEST_LENGTH)) {
1378 		digest->cd_length = MD5_DIGEST_LENGTH;
1379 		return (CRYPTO_BUFFER_TOO_SMALL);
1380 	}
1381 
1382 	/*
1383 	 * Do the MD5 update on the specified input data.
1384 	 */
1385 	switch (data->cd_format) {
1386 	case CRYPTO_DATA_RAW:
1387 		MD5Update(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1388 		    data->cd_raw.iov_base + data->cd_offset,
1389 		    data->cd_length);
1390 		break;
1391 	case CRYPTO_DATA_UIO:
1392 		ret = md5_digest_update_uio(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1393 		    data);
1394 		break;
1395 	case CRYPTO_DATA_MBLK:
1396 		ret = md5_digest_update_mblk(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1397 		    data);
1398 		break;
1399 	default:
1400 		ret = CRYPTO_ARGUMENTS_BAD;
1401 	}
1402 
1403 	if (ret != CRYPTO_SUCCESS) {
1404 		/* the update failed, free context and bail */
1405 		kmem_free(ctx->cc_provider_private, sizeof (md5_ctx_t));
1406 		ctx->cc_provider_private = NULL;
1407 		digest->cd_length = 0;
1408 		return (ret);
1409 	}
1410 
1411 	/*
1412 	 * Do an MD5 final, must be done separately since the digest
1413 	 * type can be different than the input data type.
1414 	 */
1415 	switch (digest->cd_format) {
1416 	case CRYPTO_DATA_RAW:
1417 		MD5Final((unsigned char *)digest->cd_raw.iov_base +
1418 		    digest->cd_offset, &PROV_MD5_CTX(ctx)->mc_md5_ctx);
1419 		break;
1420 	case CRYPTO_DATA_UIO:
1421 		ret = md5_digest_final_uio(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1422 		    digest, MD5_DIGEST_LENGTH, NULL);
1423 		break;
1424 	case CRYPTO_DATA_MBLK:
1425 		ret = md5_digest_final_mblk(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1426 		    digest, MD5_DIGEST_LENGTH, NULL);
1427 		break;
1428 	default:
1429 		ret = CRYPTO_ARGUMENTS_BAD;
1430 	}
1431 
1432 	/* all done, free context and return */
1433 
1434 	if (ret == CRYPTO_SUCCESS) {
1435 		digest->cd_length = MD5_DIGEST_LENGTH;
1436 	} else {
1437 		digest->cd_length = 0;
1438 	}
1439 
1440 	kmem_free(ctx->cc_provider_private, sizeof (md5_ctx_t));
1441 	ctx->cc_provider_private = NULL;
1442 	return (ret);
1443 }
1444 
1445 /* ARGSUSED */
1446 static int
1447 md5_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
1448     crypto_req_handle_t req)
1449 {
1450 	int ret = CRYPTO_SUCCESS;
1451 
1452 	ASSERT(ctx->cc_provider_private != NULL);
1453 
1454 	/*
1455 	 * Do the MD5 update on the specified input data.
1456 	 */
1457 	switch (data->cd_format) {
1458 	case CRYPTO_DATA_RAW:
1459 		MD5Update(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1460 		    data->cd_raw.iov_base + data->cd_offset,
1461 		    data->cd_length);
1462 		break;
1463 	case CRYPTO_DATA_UIO:
1464 		ret = md5_digest_update_uio(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1465 		    data);
1466 		break;
1467 	case CRYPTO_DATA_MBLK:
1468 		ret = md5_digest_update_mblk(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1469 		    data);
1470 		break;
1471 	default:
1472 		ret = CRYPTO_ARGUMENTS_BAD;
1473 	}
1474 
1475 	return (ret);
1476 }
1477 
1478 /* ARGSUSED */
1479 static int
1480 md5_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
1481     crypto_req_handle_t req)
1482 {
1483 	int ret = CRYPTO_SUCCESS;
1484 
1485 	ASSERT(ctx->cc_provider_private != NULL);
1486 
1487 	/*
1488 	 * We need to just return the length needed to store the output.
1489 	 * We should not destroy the context for the following cases.
1490 	 */
1491 	if ((digest->cd_length == 0) ||
1492 	    (digest->cd_length < MD5_DIGEST_LENGTH)) {
1493 		digest->cd_length = MD5_DIGEST_LENGTH;
1494 		return (CRYPTO_BUFFER_TOO_SMALL);
1495 	}
1496 
1497 	/*
1498 	 * Do an MD5 final.
1499 	 */
1500 	switch (digest->cd_format) {
1501 	case CRYPTO_DATA_RAW:
1502 		MD5Final((unsigned char *)digest->cd_raw.iov_base +
1503 		    digest->cd_offset, &PROV_MD5_CTX(ctx)->mc_md5_ctx);
1504 		break;
1505 	case CRYPTO_DATA_UIO:
1506 		ret = md5_digest_final_uio(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1507 		    digest, MD5_DIGEST_LENGTH, NULL);
1508 		break;
1509 	case CRYPTO_DATA_MBLK:
1510 		ret = md5_digest_final_mblk(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1511 		    digest, MD5_DIGEST_LENGTH, NULL);
1512 		break;
1513 	default:
1514 		ret = CRYPTO_ARGUMENTS_BAD;
1515 	}
1516 
1517 	/* all done, free context and return */
1518 
1519 	if (ret == CRYPTO_SUCCESS) {
1520 		digest->cd_length = MD5_DIGEST_LENGTH;
1521 	} else {
1522 		digest->cd_length = 0;
1523 	}
1524 
1525 	kmem_free(ctx->cc_provider_private, sizeof (md5_ctx_t));
1526 	ctx->cc_provider_private = NULL;
1527 
1528 	return (ret);
1529 }
1530 
1531 /* ARGSUSED */
1532 static int
1533 md5_digest_atomic(crypto_provider_handle_t provider,
1534     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1535     crypto_data_t *data, crypto_data_t *digest,
1536     crypto_req_handle_t req)
1537 {
1538 	int ret = CRYPTO_SUCCESS;
1539 	MD5_CTX md5_ctx;
1540 
1541 	if (mechanism->cm_type != MD5_MECH_INFO_TYPE)
1542 		return (CRYPTO_MECHANISM_INVALID);
1543 
1544 	/*
1545 	 * Do the MD5 init.
1546 	 */
1547 	MD5Init(&md5_ctx);
1548 
1549 	/*
1550 	 * Do the MD5 update on the specified input data.
1551 	 */
1552 	switch (data->cd_format) {
1553 	case CRYPTO_DATA_RAW:
1554 		MD5Update(&md5_ctx, data->cd_raw.iov_base + data->cd_offset,
1555 		    data->cd_length);
1556 		break;
1557 	case CRYPTO_DATA_UIO:
1558 		ret = md5_digest_update_uio(&md5_ctx, data);
1559 		break;
1560 	case CRYPTO_DATA_MBLK:
1561 		ret = md5_digest_update_mblk(&md5_ctx, data);
1562 		break;
1563 	default:
1564 		ret = CRYPTO_ARGUMENTS_BAD;
1565 	}
1566 
1567 	if (ret != CRYPTO_SUCCESS) {
1568 		/* the update failed, bail */
1569 		digest->cd_length = 0;
1570 		return (ret);
1571 	}
1572 
1573 	/*
1574 	 * Do an MD5 final, must be done separately since the digest
1575 	 * type can be different than the input data type.
1576 	 */
1577 	switch (digest->cd_format) {
1578 	case CRYPTO_DATA_RAW:
1579 		MD5Final((unsigned char *)digest->cd_raw.iov_base +
1580 		    digest->cd_offset, &md5_ctx);
1581 		break;
1582 	case CRYPTO_DATA_UIO:
1583 		ret = md5_digest_final_uio(&md5_ctx, digest,
1584 		    MD5_DIGEST_LENGTH, NULL);
1585 		break;
1586 	case CRYPTO_DATA_MBLK:
1587 		ret = md5_digest_final_mblk(&md5_ctx, digest,
1588 		    MD5_DIGEST_LENGTH, NULL);
1589 		break;
1590 	default:
1591 		ret = CRYPTO_ARGUMENTS_BAD;
1592 	}
1593 
1594 	if (ret == CRYPTO_SUCCESS) {
1595 		digest->cd_length = MD5_DIGEST_LENGTH;
1596 	} else {
1597 		digest->cd_length = 0;
1598 	}
1599 
1600 	return (ret);
1601 }
1602 
1603 /*
1604  * KCF software provider mac entry points.
1605  *
1606  * MD5 HMAC is: MD5(key XOR opad, MD5(key XOR ipad, text))
1607  *
1608  * Init:
1609  * The initialization routine initializes what we denote
1610  * as the inner and outer contexts by doing
1611  * - for inner context: MD5(key XOR ipad)
1612  * - for outer context: MD5(key XOR opad)
1613  *
1614  * Update:
1615  * Each subsequent MD5 HMAC update will result in an
1616  * update of the inner context with the specified data.
1617  *
1618  * Final:
1619  * The MD5 HMAC final will do an MD5 final operation on the
1620  * inner context, and the resulting digest will be used
1621  * as the data for an update on the outer context. Last
1622  * but not least, an MD5 final on the outer context will
1623  * be performed to obtain the MD5 HMAC digest to return
1624  * to the user.
1625  */
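
/*
 * Sketch of how the entry points below cooperate (illustrative only;
 * `d' and `mac' are hypothetical buffers):
 *
 *	md5_mac_init_ctx():	hc_icontext <- MD5 state of (key XOR ipad)
 *				hc_ocontext <- MD5 state of (key XOR opad)
 *	md5_mac_update():	MD5Update(&hc_icontext, text, len)
 *	md5_mac_final():	MD5Final(d, &hc_icontext)
 *				MD5Update(&hc_ocontext, d, MD5_DIGEST_LENGTH)
 *				MD5Final(mac, &hc_ocontext)
 */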
1626 
1627 /*
1628  * Initialize a MD5-HMAC context.
1629  */
1630 static void
1631 md5_mac_init_ctx(md5_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
1632 {
1633 	uint32_t ipad[MD5_HMAC_INTS_PER_BLOCK];
1634 	uint32_t opad[MD5_HMAC_INTS_PER_BLOCK];
1635 	uint_t i;
1636 
1637 	bzero(ipad, MD5_HMAC_BLOCK_SIZE);
1638 	bzero(opad, MD5_HMAC_BLOCK_SIZE);
1639 
1640 	bcopy(keyval, ipad, length_in_bytes);
1641 	bcopy(keyval, opad, length_in_bytes);
1642 
1643 	/* XOR key with ipad (0x36) and opad (0x5c) */
1644 	for (i = 0; i < MD5_HMAC_INTS_PER_BLOCK; i++) {
1645 		ipad[i] ^= 0x36363636;
1646 		opad[i] ^= 0x5c5c5c5c;
1647 	}
1648 
1649 	/* perform MD5 on ipad */
1650 	MD5Init(&ctx->hc_icontext);
1651 	MD5Update(&ctx->hc_icontext, ipad, MD5_HMAC_BLOCK_SIZE);
1652 
1653 	/* perform MD5 on opad */
1654 	MD5Init(&ctx->hc_ocontext);
1655 	MD5Update(&ctx->hc_ocontext, opad, MD5_HMAC_BLOCK_SIZE);
1656 }
1657 
1658 /*
1659  * Initializes a multi-part MAC operation.
1660  */
1661 static int
1662 md5_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1663     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
1664     crypto_req_handle_t req)
1665 {
1666 	int ret = CRYPTO_SUCCESS;
1667 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1668 
1669 	if (mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE &&
1670 	    mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE)
1671 		return (CRYPTO_MECHANISM_INVALID);
1672 
1673 	/* Add support for key by attributes (RFE 4706552) */
1674 	if (key->ck_format != CRYPTO_KEY_RAW)
1675 		return (CRYPTO_ARGUMENTS_BAD);
1676 
1677 	ctx->cc_provider_private = kmem_alloc(sizeof (md5_hmac_ctx_t),
1678 	    crypto_kmflag(req));
1679 	if (ctx->cc_provider_private == NULL)
1680 		return (CRYPTO_HOST_MEMORY);
1681 
1682 	if (ctx_template != NULL) {
1683 		/* reuse context template */
1684 		bcopy(ctx_template, PROV_MD5_HMAC_CTX(ctx),
1685 		    sizeof (md5_hmac_ctx_t));
1686 	} else {
1687 		/* no context template, compute context */
1688 		if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
1689 			uchar_t digested_key[MD5_DIGEST_LENGTH];
1690 			md5_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
1691 
1692 			/*
1693 			 * Hash the passed-in key to get a smaller key.
1694 			 * The inner context is used since it hasn't been
1695 			 * initialized yet.
1696 			 */
1697 			PROV_MD5_DIGEST_KEY(&hmac_ctx->hc_icontext,
1698 			    key->ck_data, keylen_in_bytes, digested_key);
1699 			md5_mac_init_ctx(PROV_MD5_HMAC_CTX(ctx),
1700 			    digested_key, MD5_DIGEST_LENGTH);
1701 		} else {
1702 			md5_mac_init_ctx(PROV_MD5_HMAC_CTX(ctx),
1703 			    key->ck_data, keylen_in_bytes);
1704 		}
1705 	}
1706 
1707 	/*
1708 	 * Get the mechanism parameters, if applicable.
1709 	 */
1710 	PROV_MD5_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
1711 	if (mechanism->cm_type == MD5_HMAC_GEN_MECH_INFO_TYPE) {
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t)) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
		} else {
			/* only dereference cm_param when it is present */
			PROV_MD5_GET_DIGEST_LEN(mechanism,
			    PROV_MD5_HMAC_CTX(ctx)->hc_digest_len);
			if (PROV_MD5_HMAC_CTX(ctx)->hc_digest_len >
			    MD5_DIGEST_LENGTH)
				ret = CRYPTO_MECHANISM_PARAM_INVALID;
		}
1720 	}
1721 
1722 	if (ret != CRYPTO_SUCCESS) {
1723 		bzero(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
1724 		kmem_free(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
1725 		ctx->cc_provider_private = NULL;
1726 	}
1727 
1728 	return (ret);
1729 }
1730 
1731 
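/*
 * Continues a multi-part MAC operation by updating the inner MD5
 * context with the supplied data.
 */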
1732 /* ARGSUSED */
1733 static int
1734 md5_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
1735 {
1736 	int ret = CRYPTO_SUCCESS;
1737 
1738 	ASSERT(ctx->cc_provider_private != NULL);
1739 
1740 	/*
1741 	 * Do an MD5 update of the inner context using the specified
1742 	 * data.
1743 	 */
1744 	switch (data->cd_format) {
1745 	case CRYPTO_DATA_RAW:
1746 		MD5Update(&PROV_MD5_HMAC_CTX(ctx)->hc_icontext,
1747 		    data->cd_raw.iov_base + data->cd_offset,
1748 		    data->cd_length);
1749 		break;
1750 	case CRYPTO_DATA_UIO:
1751 		ret = md5_digest_update_uio(
1752 		    &PROV_MD5_HMAC_CTX(ctx)->hc_icontext, data);
1753 		break;
1754 	case CRYPTO_DATA_MBLK:
1755 		ret = md5_digest_update_mblk(
1756 		    &PROV_MD5_HMAC_CTX(ctx)->hc_icontext, data);
1757 		break;
1758 	default:
1759 		ret = CRYPTO_ARGUMENTS_BAD;
1760 	}
1761 
1762 	return (ret);
1763 }
1764 
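/*
 * Completes a multi-part MAC operation, storing the resulting MAC in
 * the caller's buffer.
 */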
1765 /* ARGSUSED */
1766 static int
1767 md5_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
1768 {
1769 	int ret = CRYPTO_SUCCESS;
1770 	uchar_t digest[MD5_DIGEST_LENGTH];
1771 	uint32_t digest_len = MD5_DIGEST_LENGTH;
1772 
1773 	ASSERT(ctx->cc_provider_private != NULL);
1774 
	if (PROV_MD5_HMAC_CTX(ctx)->hc_mech_type ==
	    MD5_HMAC_GEN_MECH_INFO_TYPE)
		digest_len = PROV_MD5_HMAC_CTX(ctx)->hc_digest_len;
1777 
1778 	/*
1779 	 * We need to just return the length needed to store the output.
1780 	 * We should not destroy the context for the following cases.
1781 	 */
1782 	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
1783 		mac->cd_length = digest_len;
1784 		return (CRYPTO_BUFFER_TOO_SMALL);
1785 	}
1786 
1787 	/*
1788 	 * Do an MD5 final on the inner context.
1789 	 */
1790 	MD5Final(digest, &PROV_MD5_HMAC_CTX(ctx)->hc_icontext);
1791 
1792 	/*
1793 	 * Do an MD5 update on the outer context, feeding the inner
1794 	 * digest as data.
1795 	 */
1796 	MD5Update(&PROV_MD5_HMAC_CTX(ctx)->hc_ocontext, digest,
1797 	    MD5_DIGEST_LENGTH);
1798 
1799 	/*
1800 	 * Do an MD5 final on the outer context, storing the computing
1801 	 * digest in the users buffer.
1802 	 */
1803 	switch (mac->cd_format) {
1804 	case CRYPTO_DATA_RAW:
1805 		if (digest_len != MD5_DIGEST_LENGTH) {
1806 			/*
1807 			 * The caller requested a short digest. Digest
1808 			 * into a scratch buffer and return to
1809 			 * the user only what was requested.
1810 			 */
1811 			MD5Final(digest,
1812 			    &PROV_MD5_HMAC_CTX(ctx)->hc_ocontext);
1813 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1814 			    mac->cd_offset, digest_len);
1815 		} else {
1816 			MD5Final((unsigned char *)mac->cd_raw.iov_base +
1817 			    mac->cd_offset,
1818 			    &PROV_MD5_HMAC_CTX(ctx)->hc_ocontext);
1819 		}
1820 		break;
1821 	case CRYPTO_DATA_UIO:
1822 		ret = md5_digest_final_uio(
1823 		    &PROV_MD5_HMAC_CTX(ctx)->hc_ocontext, mac,
1824 		    digest_len, digest);
1825 		break;
1826 	case CRYPTO_DATA_MBLK:
1827 		ret = md5_digest_final_mblk(
1828 		    &PROV_MD5_HMAC_CTX(ctx)->hc_ocontext, mac,
1829 		    digest_len, digest);
1830 		break;
1831 	default:
1832 		ret = CRYPTO_ARGUMENTS_BAD;
1833 	}
1834 
1835 	if (ret == CRYPTO_SUCCESS) {
1836 		mac->cd_length = digest_len;
1837 	} else {
1838 		mac->cd_length = 0;
1839 	}
1840 
1841 	bzero(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
1842 	kmem_free(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
1843 	ctx->cc_provider_private = NULL;
1844 
1845 	return (ret);
1846 }
1847 
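/*
 * Helper used by the atomic mac entry points below to update the inner
 * MD5 context from a crypto_data_t of any supported format.
 */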
1848 #define	MD5_MAC_UPDATE(data, ctx, ret) {				\
1849 	switch (data->cd_format) {					\
1850 	case CRYPTO_DATA_RAW:						\
1851 		MD5Update(&(ctx).hc_icontext,				\
1852 		    data->cd_raw.iov_base + data->cd_offset,		\
1853 		    data->cd_length);					\
1854 		break;							\
1855 	case CRYPTO_DATA_UIO:						\
		ret = md5_digest_update_uio(&(ctx).hc_icontext, data);	\
1857 		break;							\
1858 	case CRYPTO_DATA_MBLK:						\
1859 		ret = md5_digest_update_mblk(&(ctx).hc_icontext,	\
1860 		    data);						\
1861 		break;							\
1862 	default:							\
1863 		ret = CRYPTO_ARGUMENTS_BAD;				\
1864 	}								\
1865 }
1866 
1867 
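/*
 * Computes an MD5 HMAC over the supplied data in a single call.
 */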
1868 /* ARGSUSED */
1869 static int
1870 md5_mac_atomic(crypto_provider_handle_t provider,
1871     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1872     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1873     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1874 {
1875 	int ret = CRYPTO_SUCCESS;
1876 	uchar_t digest[MD5_DIGEST_LENGTH];
1877 	md5_hmac_ctx_t md5_hmac_ctx;
1878 	uint32_t digest_len = MD5_DIGEST_LENGTH;
1879 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1880 
1881 	if (mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE &&
1882 	    mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE)
1883 		return (CRYPTO_MECHANISM_INVALID);
1884 
1885 	/* Add support for key by attributes (RFE 4706552) */
1886 	if (key->ck_format != CRYPTO_KEY_RAW)
1887 		return (CRYPTO_ARGUMENTS_BAD);
1888 
1889 	if (ctx_template != NULL) {
1890 		/* reuse context template */
1891 		bcopy(ctx_template, &md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
1892 	} else {
1893 		/* no context template, compute context */
1894 		if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
1895 			/*
1896 			 * Hash the passed-in key to get a smaller key.
1897 			 * The inner context is used since it hasn't been
1898 			 * initialized yet.
1899 			 */
1900 			PROV_MD5_DIGEST_KEY(&md5_hmac_ctx.hc_icontext,
1901 			    key->ck_data, keylen_in_bytes, digest);
1902 			md5_mac_init_ctx(&md5_hmac_ctx, digest,
1903 			    MD5_DIGEST_LENGTH);
1904 		} else {
1905 			md5_mac_init_ctx(&md5_hmac_ctx, key->ck_data,
1906 			    keylen_in_bytes);
1907 		}
1908 	}
1909 
1910 	/*
1911 	 * Get the mechanism parameters, if applicable.
1912 	 */
1913 	if (mechanism->cm_type == MD5_HMAC_GEN_MECH_INFO_TYPE) {
1914 		if (mechanism->cm_param == NULL ||
1915 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1916 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1917 			goto bail;
1918 		}
1919 		PROV_MD5_GET_DIGEST_LEN(mechanism, digest_len);
1920 		if (digest_len > MD5_DIGEST_LENGTH) {
1921 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1922 			goto bail;
1923 		}
1924 	}
1925 
1926 	/* do an MD5 update of the inner context using the specified data */
1927 	MD5_MAC_UPDATE(data, md5_hmac_ctx, ret);
1928 	if (ret != CRYPTO_SUCCESS)
		/* the update failed, zeroize the context and bail */
1930 		goto bail;
1931 
1932 	/* do an MD5 final on the inner context */
1933 	MD5Final(digest, &md5_hmac_ctx.hc_icontext);
1934 
1935 	/*
1936 	 * Do an MD5 update on the outer context, feeding the inner
1937 	 * digest as data.
1938 	 */
1939 	MD5Update(&md5_hmac_ctx.hc_ocontext, digest, MD5_DIGEST_LENGTH);
1940 
1941 	/*
1942 	 * Do an MD5 final on the outer context, storing the computed
	 * digest in the user's buffer.
1944 	 */
1945 	switch (mac->cd_format) {
1946 	case CRYPTO_DATA_RAW:
1947 		if (digest_len != MD5_DIGEST_LENGTH) {
1948 			/*
1949 			 * The caller requested a short digest. Digest
1950 			 * into a scratch buffer and return to
1951 			 * the user only what was requested.
1952 			 */
1953 			MD5Final(digest, &md5_hmac_ctx.hc_ocontext);
1954 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1955 			    mac->cd_offset, digest_len);
1956 		} else {
1957 			MD5Final((unsigned char *)mac->cd_raw.iov_base +
1958 			    mac->cd_offset, &md5_hmac_ctx.hc_ocontext);
1959 		}
1960 		break;
1961 	case CRYPTO_DATA_UIO:
1962 		ret = md5_digest_final_uio(&md5_hmac_ctx.hc_ocontext, mac,
1963 		    digest_len, digest);
1964 		break;
1965 	case CRYPTO_DATA_MBLK:
1966 		ret = md5_digest_final_mblk(&md5_hmac_ctx.hc_ocontext, mac,
1967 		    digest_len, digest);
1968 		break;
1969 	default:
1970 		ret = CRYPTO_ARGUMENTS_BAD;
1971 	}
1972 
1973 	if (ret == CRYPTO_SUCCESS) {
1974 		mac->cd_length = digest_len;
1975 	} else {
1976 		mac->cd_length = 0;
1977 	}
1978 	/* Extra paranoia: zeroizing the local context on the stack */
1979 	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
1980 
1981 	return (ret);
1982 bail:
1983 	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
1984 	mac->cd_length = 0;
1985 	return (ret);
1986 }
1987 
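/*
 * Computes an MD5 HMAC over the supplied data and compares it against
 * the expected MAC in a single call.
 */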
1988 /* ARGSUSED */
1989 static int
1990 md5_mac_verify_atomic(crypto_provider_handle_t provider,
1991     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1992     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1993     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1994 {
1995 	int ret = CRYPTO_SUCCESS;
1996 	uchar_t digest[MD5_DIGEST_LENGTH];
1997 	md5_hmac_ctx_t md5_hmac_ctx;
1998 	uint32_t digest_len = MD5_DIGEST_LENGTH;
1999 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
2000 
2001 	if (mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE &&
2002 	    mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE)
2003 		return (CRYPTO_MECHANISM_INVALID);
2004 
2005 	/* Add support for key by attributes (RFE 4706552) */
2006 	if (key->ck_format != CRYPTO_KEY_RAW)
2007 		return (CRYPTO_ARGUMENTS_BAD);
2008 
2009 	if (ctx_template != NULL) {
2010 		/* reuse context template */
2011 		bcopy(ctx_template, &md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
2012 	} else {
2013 		/* no context template, compute context */
2014 		if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
2015 			/*
2016 			 * Hash the passed-in key to get a smaller key.
2017 			 * The inner context is used since it hasn't been
2018 			 * initialized yet.
2019 			 */
2020 			PROV_MD5_DIGEST_KEY(&md5_hmac_ctx.hc_icontext,
2021 			    key->ck_data, keylen_in_bytes, digest);
2022 			md5_mac_init_ctx(&md5_hmac_ctx, digest,
2023 			    MD5_DIGEST_LENGTH);
2024 		} else {
2025 			md5_mac_init_ctx(&md5_hmac_ctx, key->ck_data,
2026 			    keylen_in_bytes);
2027 		}
2028 	}
2029 
2030 	/*
2031 	 * Get the mechanism parameters, if applicable.
2032 	 */
2033 	if (mechanism->cm_type == MD5_HMAC_GEN_MECH_INFO_TYPE) {
2034 		if (mechanism->cm_param == NULL ||
2035 		    mechanism->cm_param_len != sizeof (ulong_t)) {
2036 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
2037 			goto bail;
2038 		}
2039 		PROV_MD5_GET_DIGEST_LEN(mechanism, digest_len);
2040 		if (digest_len > MD5_DIGEST_LENGTH) {
2041 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
2042 			goto bail;
2043 		}
2044 	}
2045 
2046 	if (mac->cd_length != digest_len) {
2047 		ret = CRYPTO_INVALID_MAC;
2048 		goto bail;
2049 	}
2050 
2051 	/* do an MD5 update of the inner context using the specified data */
2052 	MD5_MAC_UPDATE(data, md5_hmac_ctx, ret);
2053 	if (ret != CRYPTO_SUCCESS)
		/* the update failed, zeroize the context and bail */
2055 		goto bail;
2056 
2057 	/* do an MD5 final on the inner context */
2058 	MD5Final(digest, &md5_hmac_ctx.hc_icontext);
2059 
2060 	/*
2061 	 * Do an MD5 update on the outer context, feeding the inner
2062 	 * digest as data.
2063 	 */
2064 	MD5Update(&md5_hmac_ctx.hc_ocontext, digest, MD5_DIGEST_LENGTH);
2065 
2066 	/*
2067 	 * Do an MD5 final on the outer context, storing the computed
2068 	 * digest in the local digest buffer.
2069 	 */
2070 	MD5Final(digest, &md5_hmac_ctx.hc_ocontext);
2071 
2072 	/*
2073 	 * Compare the computed digest against the expected digest passed
2074 	 * as argument.
2075 	 */
2076 	switch (mac->cd_format) {
2077 
2078 	case CRYPTO_DATA_RAW:
2079 		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
2080 		    mac->cd_offset, digest_len) != 0)
2081 			ret = CRYPTO_INVALID_MAC;
2082 		break;
2083 
2084 	case CRYPTO_DATA_UIO: {
2085 		off_t offset = mac->cd_offset;
2086 		uint_t vec_idx;
2087 		off_t scratch_offset = 0;
2088 		size_t length = digest_len;
2089 		size_t cur_len;
2090 
		/* we support only kernel buffers */
2092 		if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
2093 			return (CRYPTO_ARGUMENTS_BAD);
2094 
2095 		/* jump to the first iovec containing the expected digest */
		for (vec_idx = 0; vec_idx < mac->cd_uio->uio_iovcnt &&
		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len;
		    offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len);
2100 		if (vec_idx == mac->cd_uio->uio_iovcnt) {
2101 			/*
2102 			 * The caller specified an offset that is
2103 			 * larger than the total size of the buffers
2104 			 * it provided.
2105 			 */
2106 			ret = CRYPTO_DATA_LEN_RANGE;
2107 			break;
2108 		}
2109 
2110 		/* do the comparison of computed digest vs specified one */
2111 		while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
2112 			cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
2113 			    offset, length);
2114 
2115 			if (bcmp(digest + scratch_offset,
2116 			    mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
2117 			    cur_len) != 0) {
2118 				ret = CRYPTO_INVALID_MAC;
2119 				break;
2120 			}
2121 
2122 			length -= cur_len;
2123 			vec_idx++;
2124 			scratch_offset += cur_len;
2125 			offset = 0;
2126 		}
2127 		break;
2128 	}
2129 
2130 	case CRYPTO_DATA_MBLK: {
2131 		off_t offset = mac->cd_offset;
2132 		mblk_t *mp;
2133 		off_t scratch_offset = 0;
2134 		size_t length = digest_len;
2135 		size_t cur_len;
2136 
2137 		/* jump to the first mblk_t containing the expected digest */
2138 		for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
2139 		    offset -= MBLKL(mp), mp = mp->b_cont);
2140 		if (mp == NULL) {
2141 			/*
2142 			 * The caller specified an offset that is larger than
2143 			 * the total size of the buffers it provided.
2144 			 */
2145 			ret = CRYPTO_DATA_LEN_RANGE;
2146 			break;
2147 		}
2148 
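		/* do the comparison of computed digest vs specified one */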
2149 		while (mp != NULL && length > 0) {
2150 			cur_len = MIN(MBLKL(mp) - offset, length);
2151 			if (bcmp(digest + scratch_offset,
2152 			    mp->b_rptr + offset, cur_len) != 0) {
2153 				ret = CRYPTO_INVALID_MAC;
2154 				break;
2155 			}
2156 
2157 			length -= cur_len;
2158 			mp = mp->b_cont;
2159 			scratch_offset += cur_len;
2160 			offset = 0;
2161 		}
2162 		break;
2163 	}
2164 
2165 	default:
2166 		ret = CRYPTO_ARGUMENTS_BAD;
2167 	}
2168 
2169 	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
2170 	return (ret);
2171 bail:
2172 	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
2173 	mac->cd_length = 0;
2174 	return (ret);
2175 }
2176 
2177 /*
2178  * KCF software provider context management entry points.
2179  */
2180 
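/*
 * Creates a reusable MD5-HMAC context template from the supplied key,
 * allowing repeated MAC operations with the same key to skip the
 * ipad/opad setup.
 */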
2181 /* ARGSUSED */
2182 static int
2183 md5_create_ctx_template(crypto_provider_handle_t provider,
2184     crypto_mechanism_t *mechanism, crypto_key_t *key,
2185     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
2186     crypto_req_handle_t req)
2187 {
2188 	md5_hmac_ctx_t *md5_hmac_ctx_tmpl;
2189 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
2190 
2191 	if ((mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE) &&
2192 	    (mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE))
2193 		return (CRYPTO_MECHANISM_INVALID);
2194 
2195 	/* Add support for key by attributes (RFE 4706552) */
2196 	if (key->ck_format != CRYPTO_KEY_RAW)
2197 		return (CRYPTO_ARGUMENTS_BAD);
2198 
2199 	/*
	 * Allocate and initialize the MD5-HMAC context template.
2201 	 */
2202 	md5_hmac_ctx_tmpl = kmem_alloc(sizeof (md5_hmac_ctx_t),
2203 	    crypto_kmflag(req));
2204 	if (md5_hmac_ctx_tmpl == NULL)
2205 		return (CRYPTO_HOST_MEMORY);
2206 
2207 	if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
2208 		uchar_t digested_key[MD5_DIGEST_LENGTH];
2209 
2210 		/*
2211 		 * Hash the passed-in key to get a smaller key.
2212 		 * The inner context is used since it hasn't been
2213 		 * initialized yet.
2214 		 */
2215 		PROV_MD5_DIGEST_KEY(&md5_hmac_ctx_tmpl->hc_icontext,
2216 		    key->ck_data, keylen_in_bytes, digested_key);
2217 		md5_mac_init_ctx(md5_hmac_ctx_tmpl, digested_key,
2218 		    MD5_DIGEST_LENGTH);
2219 	} else {
2220 		md5_mac_init_ctx(md5_hmac_ctx_tmpl, key->ck_data,
2221 		    keylen_in_bytes);
2222 	}
2223 
2224 	md5_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
2225 	*ctx_template = (crypto_spi_ctx_template_t)md5_hmac_ctx_tmpl;
2226 	*ctx_template_size = sizeof (md5_hmac_ctx_t);
2227 
2228 	return (CRYPTO_SUCCESS);
2229 }
2230 
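/*
 * Zeroizes and frees the private context of a digest or MAC operation.
 */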
2231 static int
2232 md5_free_context(crypto_ctx_t *ctx)
2233 {
2234 	uint_t ctx_len;
2235 	md5_mech_type_t mech_type;
2236 
2237 	if (ctx->cc_provider_private == NULL)
2238 		return (CRYPTO_SUCCESS);
2239 
2240 	/*
2241 	 * We have to free either MD5 or MD5-HMAC contexts, which
2242 	 * have different lengths.
2243 	 */
2244 
2245 	mech_type = PROV_MD5_CTX(ctx)->mc_mech_type;
2246 	if (mech_type == MD5_MECH_INFO_TYPE)
2247 		ctx_len = sizeof (md5_ctx_t);
2248 	else {
2249 		ASSERT(mech_type == MD5_HMAC_MECH_INFO_TYPE ||
2250 		    mech_type == MD5_HMAC_GEN_MECH_INFO_TYPE);
2251 		ctx_len = sizeof (md5_hmac_ctx_t);
2252 	}
2253 
2254 	bzero(ctx->cc_provider_private, ctx_len);
2255 	kmem_free(ctx->cc_provider_private, ctx_len);
2256 	ctx->cc_provider_private = NULL;
2257 
2258 	return (CRYPTO_SUCCESS);
2259 }
2260 
2261 #endif	/* _KERNEL && !_BOOT */
2262