xref: /titanic_50/usr/src/common/crypto/md5/md5.c (revision 8eea8e29cc4374d1ee24c25a07f45af132db3499)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Cleaned-up and optimized version of MD5, based on the reference
29  * implementation provided in RFC 1321.  See RSA Copyright information
30  * below.
31  *
32  * NOTE:  All compiler data was gathered with SC4.2, and verified with SC5.x,
33  *	  as used to build Solaris 2.7.  Hopefully the compiler behavior won't
34  *	  change for the worse in subsequent Solaris builds.
35  */
36 
37 #pragma ident	"%Z%%M%	%I%	%E% SMI"
38 
39 /*
40  * MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm
41  */
42 
43 /*
44  * Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
45  * rights reserved.
46  *
47  * License to copy and use this software is granted provided that it
48  * is identified as the "RSA Data Security, Inc. MD5 Message-Digest
49  * Algorithm" in all material mentioning or referencing this software
50  * or this function.
51  *
52  * License is also granted to make and use derivative works provided
53  * that such works are identified as "derived from the RSA Data
54  * Security, Inc. MD5 Message-Digest Algorithm" in all material
55  * mentioning or referencing the derived work.
56  *
57  * RSA Data Security, Inc. makes no representations concerning either
58  * the merchantability of this software or the suitability of this
59  * software for any particular purpose. It is provided "as is"
60  * without express or implied warranty of any kind.
61  *
62  * These notices must be retained in any copies of any part of this
63  * documentation and/or software.
64  */
65 
66 #include <sys/types.h>
67 #include <sys/md5.h>
68 #include <sys/md5_consts.h>	/* MD5_CONST() optimization */
69 #if	!defined(_KERNEL) || defined(_BOOT)
70 #include <strings.h>
71 #endif /* !_KERNEL || _BOOT */
72 
73 #if	defined(_KERNEL) && !defined(_BOOT)
74 
75 /*
76  * In kernel module, the md5 module is created with two modlinkages:
77  * - a modlmisc that allows consumers to directly call the entry points
78  *   MD5Init, MD5Update, and MD5Final.
79  * - a modlcrypto that allows the module to register with the Kernel
80  *   Cryptographic Framework (KCF) as a software provider for the MD5
81  *   mechanisms.
82  */
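/*
 * Consumer sketch (added for illustration, not part of the original
 * source): a kernel consumer of the modlmisc linkage calls the digest
 * routines directly, e.g.
 *
 *	MD5_CTX ctx;
 *	uint8_t md[16];			(MD5 digests are 16 bytes)
 *
 *	MD5Init(&ctx);
 *	MD5Update(&ctx, buf, len);	(buf/len are hypothetical)
 *	MD5Final(md, &ctx);
 *
 * while a KCF consumer reaches the same code through the crypto_digest(9F)
 * family of interfaces, using the SUN_CKM_MD5 mechanism registered below.
 */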
83 
84 #include <sys/systm.h>
85 #include <sys/modctl.h>
86 #include <sys/cmn_err.h>
87 #include <sys/ddi.h>
88 #include <sys/crypto/common.h>
89 #include <sys/crypto/spi.h>
90 #include <sys/sysmacros.h>
91 #include <sys/strsun.h>
92 #include <sys/note.h>
93 
94 extern struct mod_ops mod_miscops;
95 extern struct mod_ops mod_cryptoops;
96 
97 /*
98  * Module linkage information for the kernel.
99  */
100 
101 static struct modlmisc modlmisc = {
102 	&mod_miscops,
103 	"MD5 Message-Digest Algorithm"
104 };
105 
106 static struct modlcrypto modlcrypto = {
107 	&mod_cryptoops,
108 	"MD5 Kernel SW Provider %I%"
109 };
110 
111 static struct modlinkage modlinkage = {
112 	MODREV_1,
113 	(void *)&modlmisc,
114 	(void *)&modlcrypto,
115 	NULL
116 };
117 
118 /*
119  * CSPI information (entry points, provider info, etc.)
120  */
121 
122 typedef enum md5_mech_type {
123 	MD5_MECH_INFO_TYPE,		/* SUN_CKM_MD5 */
124 	MD5_HMAC_MECH_INFO_TYPE,	/* SUN_CKM_MD5_HMAC */
125 	MD5_HMAC_GEN_MECH_INFO_TYPE	/* SUN_CKM_MD5_HMAC_GENERAL */
126 } md5_mech_type_t;
127 
128 #define	MD5_DIGEST_LENGTH	16	/* MD5 digest length in bytes */
129 #define	MD5_HMAC_BLOCK_SIZE	64	/* MD5 block size */
130 #define	MD5_HMAC_MIN_KEY_LEN	8	/* MD5-HMAC min key length in bits */
131 #define	MD5_HMAC_MAX_KEY_LEN	INT_MAX	/* MD5-HMAC max key length in bits */
132 #define	MD5_HMAC_INTS_PER_BLOCK	(MD5_HMAC_BLOCK_SIZE/sizeof (uint32_t))
133 
134 /*
135  * Context for MD5 mechanism.
136  */
137 typedef struct md5_ctx {
138 	md5_mech_type_t		mc_mech_type;	/* type of context */
139 	MD5_CTX			mc_md5_ctx;	/* MD5 context */
140 } md5_ctx_t;
141 
142 /*
143  * Context for MD5-HMAC and MD5-HMAC-GENERAL mechanisms.
144  */
145 typedef struct md5_hmac_ctx {
146 	md5_mech_type_t		hc_mech_type;	/* type of context */
147 	uint32_t		hc_digest_len;	/* digest len in bytes */
148 	MD5_CTX			hc_icontext;	/* inner MD5 context */
149 	MD5_CTX			hc_ocontext;	/* outer MD5 context */
150 } md5_hmac_ctx_t;
151 
152 /*
153  * Macros to access the MD5 or MD5-HMAC contexts from a context passed
154  * by KCF to one of the entry points.
155  */
156 
157 #define	PROV_MD5_CTX(ctx)	((md5_ctx_t *)(ctx)->cc_provider_private)
158 #define	PROV_MD5_HMAC_CTX(ctx)	((md5_hmac_ctx_t *)(ctx)->cc_provider_private)
159 /* to extract the digest length passed as mechanism parameter */
160 
161 #define	PROV_MD5_GET_DIGEST_LEN(m, len) {				\
162 	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
		(len) = (uint32_t)*((ulong_t *)(m)->cm_param);		\
164 	else {								\
165 		ulong_t tmp_ulong;					\
166 		bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t));	\
167 		(len) = (uint32_t)tmp_ulong;				\
168 	}								\
169 }
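/*
 * Usage sketch (added for illustration): for the MD5_HMAC_GENERAL
 * mechanism, cm_param points at a single ulong_t holding the desired
 * (possibly truncated) MAC length in bytes, roughly
 *
 *	ulong_t mac_len = 12;			(hypothetical value)
 *	mechanism.cm_param = (caddr_t)&mac_len;
 *	mechanism.cm_param_len = sizeof (mac_len);
 *
 * PROV_MD5_GET_DIGEST_LEN() above recovers the value whether or not
 * cm_param happens to be ulong_t-aligned.
 */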
170 
171 #define	PROV_MD5_DIGEST_KEY(ctx, key, len, digest) {	\
172 	MD5Init(ctx);					\
173 	MD5Update(ctx, key, len);			\
174 	MD5Final(digest, ctx);				\
175 }
176 
177 /*
178  * Mechanism info structure passed to KCF during registration.
179  */
180 static crypto_mech_info_t md5_mech_info_tab[] = {
181 	/* MD5 */
182 	{SUN_CKM_MD5, MD5_MECH_INFO_TYPE,
183 	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
184 	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
185 	/* MD5-HMAC */
186 	{SUN_CKM_MD5_HMAC, MD5_HMAC_MECH_INFO_TYPE,
187 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
188 	    MD5_HMAC_MIN_KEY_LEN, MD5_HMAC_MAX_KEY_LEN,
189 	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
190 	/* MD5-HMAC GENERAL */
191 	{SUN_CKM_MD5_HMAC_GENERAL, MD5_HMAC_GEN_MECH_INFO_TYPE,
192 	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
193 	    MD5_HMAC_MIN_KEY_LEN, MD5_HMAC_MAX_KEY_LEN,
194 	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
195 };
196 
197 static void md5_provider_status(crypto_provider_handle_t, uint_t *);
198 
199 static crypto_control_ops_t md5_control_ops = {
200 	md5_provider_status
201 };
202 
203 static int md5_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
204     crypto_req_handle_t);
205 static int md5_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
206     crypto_req_handle_t);
207 static int md5_digest_update(crypto_ctx_t *, crypto_data_t *,
208     crypto_req_handle_t);
209 static int md5_digest_final(crypto_ctx_t *, crypto_data_t *,
210     crypto_req_handle_t);
211 static int md5_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
212     crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
213     crypto_req_handle_t);
214 
215 static crypto_digest_ops_t md5_digest_ops = {
216 	md5_digest_init,
217 	md5_digest,
218 	md5_digest_update,
219 	NULL,
220 	md5_digest_final,
221 	md5_digest_atomic
222 };
223 
224 static int md5_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
225     crypto_spi_ctx_template_t, crypto_req_handle_t);
226 static int md5_mac_update(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
227 static int md5_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
228 static int md5_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
229     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
230     crypto_spi_ctx_template_t, crypto_req_handle_t);
231 static int md5_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
232     crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
233     crypto_spi_ctx_template_t, crypto_req_handle_t);
234 
235 static crypto_mac_ops_t md5_mac_ops = {
236 	md5_mac_init,
237 	NULL,
238 	md5_mac_update,
239 	md5_mac_final,
240 	md5_mac_atomic,
241 	md5_mac_verify_atomic
242 };
243 
244 static int md5_create_ctx_template(crypto_provider_handle_t,
245     crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
246     size_t *, crypto_req_handle_t);
247 static int md5_free_context(crypto_ctx_t *);
248 
249 static crypto_ctx_ops_t md5_ctx_ops = {
250 	md5_create_ctx_template,
251 	md5_free_context
252 };
253 
254 static crypto_ops_t md5_crypto_ops = {
255 	&md5_control_ops,
256 	&md5_digest_ops,
257 	NULL,
258 	&md5_mac_ops,
259 	NULL,
260 	NULL,
261 	NULL,
262 	NULL,
263 	NULL,
264 	NULL,
265 	NULL,
266 	NULL,
267 	NULL,
268 	&md5_ctx_ops
269 };
270 
271 static crypto_provider_info_t md5_prov_info = {
272 	CRYPTO_SPI_VERSION_1,
273 	"MD5 Software Provider",
274 	CRYPTO_SW_PROVIDER,
275 	{&modlinkage},
276 	NULL,
277 	&md5_crypto_ops,
278 	sizeof (md5_mech_info_tab)/sizeof (crypto_mech_info_t),
279 	md5_mech_info_tab
280 };
281 
282 static crypto_kcf_provider_handle_t md5_prov_handle = NULL;
283 
284 int
285 _init(void)
286 {
287 	int ret;
288 
289 	if ((ret = mod_install(&modlinkage)) != 0)
290 		return (ret);
291 
292 	/*
293 	 * Register with KCF. If the registration fails, log an
294 	 * error but do not uninstall the module, since the functionality
295 	 * provided by misc/md5 should still be available.
296 	 */
297 	if ((ret = crypto_register_provider(&md5_prov_info,
298 	    &md5_prov_handle)) != CRYPTO_SUCCESS)
299 		cmn_err(CE_WARN, "md5 _init: "
300 		    "crypto_register_provider() failed (0x%x)", ret);
301 
302 	return (0);
303 }
304 
305 int
306 _fini(void)
307 {
308 	int ret;
309 
310 	/*
311 	 * Unregister from KCF if previous registration succeeded.
312 	 */
313 	if (md5_prov_handle != NULL) {
314 		if ((ret = crypto_unregister_provider(md5_prov_handle)) !=
315 		    CRYPTO_SUCCESS) {
316 			cmn_err(CE_WARN, "md5 _fini: "
317 			    "crypto_unregister_provider() failed (0x%x)", ret);
318 			return (EBUSY);
319 		}
320 		md5_prov_handle = NULL;
321 	}
322 
323 	return (mod_remove(&modlinkage));
324 }
325 
326 int
327 _info(struct modinfo *modinfop)
328 {
329 	return (mod_info(&modlinkage, modinfop));
330 }
331 #endif	/* _KERNEL && !_BOOT */
332 
333 static void Encode(uint8_t *, uint32_t *, size_t);
334 static void MD5Transform(uint32_t, uint32_t, uint32_t, uint32_t, MD5_CTX *,
335     const uint8_t [64]);
336 
337 static uint8_t PADDING[64] = { 0x80, /* all zeros */ };
338 
339 /*
340  * F, G, H and I are the basic MD5 functions.
341  */
342 #define	F(b, c, d)	(((b) & (c)) | ((~b) & (d)))
343 #define	G(b, c, d)	(((b) & (d)) | ((c) & (~d)))
344 #define	H(b, c, d)	((b) ^ (c) ^ (d))
345 #define	I(b, c, d)	((c) ^ ((b) | (~d)))
346 
347 /*
348  * ROTATE_LEFT rotates x left n bits.
349  */
350 #define	ROTATE_LEFT(x, n)	\
351 	(((x) << (n)) | ((x) >> ((sizeof (x) << 3) - (n))))
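/*
 * Worked example (added for illustration): for a uint32_t x,
 * ROTATE_LEFT(0x80000001, 4) is
 * (0x80000001 << 4) | (0x80000001 >> 28) == 0x00000010 | 0x00000008
 * == 0x00000018, i.e. the bits shifted out the top re-enter at the
 * bottom.
 */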
352 
353 /*
354  * FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
355  * Rotation is separate from addition to prevent recomputation.
356  */
357 
358 #define	FF(a, b, c, d, x, s, ac) { \
359 	(a) += F((b), (c), (d)) + (x) + (uint32_t)(ac); \
360 	(a) = ROTATE_LEFT((a), (s)); \
361 	(a) += (b); \
362 	}
363 
364 #define	GG(a, b, c, d, x, s, ac) { \
365 	(a) += G((b), (c), (d)) + (x) + (uint32_t)(ac); \
366 	(a) = ROTATE_LEFT((a), (s)); \
367 	(a) += (b); \
368 	}
369 
370 #define	HH(a, b, c, d, x, s, ac) { \
371 	(a) += H((b), (c), (d)) + (x) + (uint32_t)(ac); \
372 	(a) = ROTATE_LEFT((a), (s)); \
373 	(a) += (b); \
374 	}
375 
376 #define	II(a, b, c, d, x, s, ac) { \
377 	(a) += I((b), (c), (d)) + (x) + (uint32_t)(ac); \
378 	(a) = ROTATE_LEFT((a), (s)); \
379 	(a) += (b); \
380 	}
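/*
 * For illustration, the first round-1 step in MD5Transform() below,
 * FF(a, b, c, d, x_0, MD5_SHIFT_11, MD5_CONST(0)), expands to roughly
 *
 *	a += ((b & c) | (~b & d)) + x_0 + MD5_CONST_0;
 *	a = (a << 7) | (a >> 25);
 *	a += b;
 *
 * assuming MD5_SHIFT_11 is 7 as in RFC 1321; MD5_CONST() is described
 * below.
 */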
381 
382 /*
383  * Loading 32-bit constants on a RISC is expensive since it involves both a
384  * `sethi' and an `or'.  thus, we instead have the compiler generate `ld's to
385  * load the constants from an array called `md5_consts'.  however, on intel
386  * (and other CISC processors), it is cheaper to load the constant
387  * directly.  thus, the c code in MD5Transform() uses the macro MD5_CONST()
388  * which either expands to a constant or an array reference, depending on the
389  * architecture the code is being compiled for.
390  *
391  * Right now, i386 and amd64 are the CISC exceptions.
392  * If we get another CISC ISA, we'll have to change the ifdef.
393  */
394 
395 #if defined(__i386) || defined(__amd64)
396 
397 #define	MD5_CONST(x)		(MD5_CONST_ ## x)
398 
399 #else
400 /*
401  * sparc/RISC optimization:
402  *
403  * while it is somewhat counter-intuitive, on sparc (and presumably other RISC
404  * machines), it is more efficient to place all the constants used in this
405  * function in an array and load the values out of the array than to manually
406  * load the constants.  this is because setting a register to a 32-bit value
407  * takes two ops in most cases: a `sethi' and an `or', but loading a 32-bit
408  * value from memory only takes one `ld' (or `lduw' on v9).  while this
409  * increases memory usage, the compiler can find enough other things to do
410  * while waiting so that the pipeline does not stall.  additionally, it is
411  * likely that many of these constants are cached so that later accesses do
412  * not even go out to the bus.
413  *
414  * this array is declared `static' to keep the compiler from having to
415  * bcopy() this array onto the stack frame of MD5Transform() each time it is
416  * called -- which is unacceptably expensive.
417  *
418  * the `const' is to ensure that callers are good citizens and do not try to
419  * munge the array.  since these routines are going to be called from inside
420  * multithreaded kernelland, this is a good safety check.  as a bonus, the
421  * `const' ensures the array ends up in .rodata.
422  *
423  * unfortunately, loading from an array in this manner hurts performance under
424  * intel (and presumably other CISC machines).  so, there is a macro,
425  * MD5_CONST(), used in MD5Transform(), that either expands to a reference to
426  * this array, or to the actual constant, depending on what platform this code
427  * is compiled for.
428  */
429 
430 static const uint32_t md5_consts[] = {
431 	MD5_CONST_0,	MD5_CONST_1,	MD5_CONST_2,	MD5_CONST_3,
432 	MD5_CONST_4,	MD5_CONST_5,	MD5_CONST_6,	MD5_CONST_7,
433 	MD5_CONST_8,	MD5_CONST_9,	MD5_CONST_10,	MD5_CONST_11,
434 	MD5_CONST_12,	MD5_CONST_13,	MD5_CONST_14,	MD5_CONST_15,
435 	MD5_CONST_16,	MD5_CONST_17,	MD5_CONST_18,	MD5_CONST_19,
436 	MD5_CONST_20,	MD5_CONST_21,	MD5_CONST_22,	MD5_CONST_23,
437 	MD5_CONST_24,	MD5_CONST_25,	MD5_CONST_26,	MD5_CONST_27,
438 	MD5_CONST_28,	MD5_CONST_29,	MD5_CONST_30,	MD5_CONST_31,
439 	MD5_CONST_32,	MD5_CONST_33,	MD5_CONST_34,	MD5_CONST_35,
440 	MD5_CONST_36,	MD5_CONST_37,	MD5_CONST_38,	MD5_CONST_39,
441 	MD5_CONST_40,	MD5_CONST_41,	MD5_CONST_42,	MD5_CONST_43,
442 	MD5_CONST_44,	MD5_CONST_45,	MD5_CONST_46,	MD5_CONST_47,
443 	MD5_CONST_48,	MD5_CONST_49,	MD5_CONST_50,	MD5_CONST_51,
444 	MD5_CONST_52,	MD5_CONST_53,	MD5_CONST_54,	MD5_CONST_55,
445 	MD5_CONST_56,	MD5_CONST_57,	MD5_CONST_58,	MD5_CONST_59,
446 	MD5_CONST_60,	MD5_CONST_61,	MD5_CONST_62,	MD5_CONST_63
447 };
448 
449 #define	MD5_CONST(x)		(md5_consts[x])
450 
451 #endif
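/*
 * Note (added for illustration): the MD5_CONST_<n> values are the RFC
 * 1321 T[i] table, T[i] = floor(2^32 * abs(sin(i))) for i = 1..64;
 * for example MD5_CONST_0 should be 0xd76aa478.  On i386/amd64,
 * MD5_CONST(0) expands to that literal; elsewhere it expands to
 * md5_consts[0], a single load from the array above.
 */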
452 
453 /*
454  * MD5Init()
455  *
456  * purpose: initializes the md5 context and begins an md5 digest operation
457  *   input: MD5_CTX *	: the context to initialize.
458  *  output: void
459  */
460 
461 void
462 MD5Init(MD5_CTX *ctx)
463 {
464 	ctx->count[0] = ctx->count[1] = 0;
465 
466 	/* load magic initialization constants */
467 	ctx->state[0] = MD5_INIT_CONST_1;
468 	ctx->state[1] = MD5_INIT_CONST_2;
469 	ctx->state[2] = MD5_INIT_CONST_3;
470 	ctx->state[3] = MD5_INIT_CONST_4;
471 }
472 
473 /*
474  * MD5Update()
475  *
476  * purpose: continues an md5 digest operation, using the message block
477  *          to update the context.
478  *   input: MD5_CTX *	: the context to update
479  *          uint8_t *	: the message block
480  *          uint32_t    : the length of the message block in bytes
481  *  output: void
482  *
483  * MD5 crunches in 64-byte blocks.  All numeric constants here are related to
484  * that property of MD5.
485  */
486 
487 void
488 MD5Update(MD5_CTX *ctx, const void *inpp, unsigned int input_len)
489 {
490 	uint32_t		i, buf_index, buf_len;
491 	const unsigned char 	*input = (const unsigned char *)inpp;
492 
493 	/* compute (number of bytes computed so far) mod 64 */
494 	buf_index = (ctx->count[0] >> 3) & 0x3F;
495 
496 	/* update number of bits hashed into this MD5 computation so far */
497 	if ((ctx->count[0] += (input_len << 3)) < (input_len << 3))
498 	    ctx->count[1]++;
499 	ctx->count[1] += (input_len >> 29);
500 
501 	buf_len = 64 - buf_index;
502 
503 	/* transform as many times as possible */
504 	i = 0;
505 	if (input_len >= buf_len) {
506 
507 		/*
508 		 * general optimization:
509 		 *
510 		 * only do initial bcopy() and MD5Transform() if
511 		 * buf_index != 0.  if buf_index == 0, we're just
512 		 * wasting our time doing the bcopy() since there
513 		 * wasn't any data left over from a previous call to
514 		 * MD5Update().
515 		 */
516 
517 		if (buf_index) {
518 			bcopy(input, &ctx->buf_un.buf8[buf_index], buf_len);
519 
520 			MD5Transform(ctx->state[0], ctx->state[1],
521 			    ctx->state[2], ctx->state[3], ctx,
522 			    ctx->buf_un.buf8);
523 
524 			i = buf_len;
525 		}
526 
527 		for (; i + 63 < input_len; i += 64)
528 			MD5Transform(ctx->state[0], ctx->state[1],
529 			    ctx->state[2], ctx->state[3], ctx, &input[i]);
530 
531 		/*
532 		 * general optimization:
533 		 *
534 		 * if i and input_len are the same, return now instead
535 		 * of calling bcopy(), since the bcopy() in this
536 		 * case will be an expensive nop.
537 		 */
538 
539 		if (input_len == i)
540 			return;
541 
542 		buf_index = 0;
543 	}
544 
545 	/* buffer remaining input */
546 	bcopy(&input[i], &ctx->buf_un.buf8[buf_index], input_len - i);
547 }
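/*
 * Worked example of the bookkeeping above (added for illustration):
 * assume 10 bytes are already buffered (buf_index == 10) and
 * MD5Update() is called with input_len == 100.  buf_len is 54, so the
 * first 54 input bytes complete a 64-byte block and one MD5Transform()
 * is performed (i == 54).  No further full block fits (54 + 63 >= 100),
 * so the remaining 46 bytes are simply buffered for the next call.
 */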
548 
549 /*
550  * MD5Final()
551  *
552  * purpose: ends an md5 digest operation, finalizing the message digest and
553  *          zeroing the context.
554  *   input: uint8_t *	: a buffer to store the digest in
555  *          MD5_CTX *   : the context to finalize, save, and zero
556  *  output: void
557  */
558 
559 void
560 MD5Final(unsigned char *digest, MD5_CTX *ctx)
561 {
562 	uint8_t		bitcount_le[sizeof (ctx->count)];
563 	uint32_t	index = (ctx->count[0] >> 3) & 0x3f;
564 
565 	/* store bit count, little endian */
566 	Encode(bitcount_le, ctx->count, sizeof (bitcount_le));
567 
568 	/* pad out to 56 mod 64 */
569 	MD5Update(ctx, PADDING, ((index < 56) ? 56 : 120) - index);
570 
571 	/* append length (before padding) */
572 	MD5Update(ctx, bitcount_le, sizeof (bitcount_le));
573 
574 	/* store state in digest */
575 	Encode(digest, ctx->state, sizeof (ctx->state));
576 }
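/*
 * Padding example (added for illustration): MD5 pads the message so its
 * length becomes 56 mod 64, then appends the 8-byte bit count.  If
 * index == 10 above, 46 bytes of PADDING are added (10 + 46 == 56); if
 * index == 60, 60 bytes are added (120 - 60), spilling into a second
 * block.  Either way the appended 64-bit length completes a 64-byte
 * block.
 */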
577 
578 #ifndef	_KERNEL
579 
580 void
581 md5_calc(unsigned char *output, unsigned char *input, unsigned int inlen)
582 {
583 	MD5_CTX context;
584 
585 	MD5Init(&context);
586 	MD5Update(&context, input, inlen);
587 	MD5Final(output, &context);
588 }
589 
590 #endif	/* !_KERNEL */
591 
592 /*
593  * Little-endian optimization:  I don't need to do any weirdness.   On
594  * some little-endian boxen, I'll have to do alignment checks, but I can do
595  * that below.
596  */
597 
598 #ifdef _LITTLE_ENDIAN
599 
600 #if !defined(__i386) && !defined(__amd64)
601 /*
602  * i386 and amd64 don't require aligned 4-byte loads.  The symbol
603  * _MD5_CHECK_ALIGNMENT indicates below whether the MD5Transform function
604  * requires alignment checking.
605  */
606 #define	_MD5_CHECK_ALIGNMENT
607 #endif /* !__i386 && !__amd64 */
608 
609 #define	LOAD_LITTLE_32(addr)	(*(uint32_t *)(addr))
610 
611 /*
612  * sparc v9/v8plus optimization:
613  *
614  * on the sparc v9/v8plus, we can load data little endian.  however, since
615  * the compiler doesn't have direct support for little endian, we
616  * link to an assembly-language routine `load_little_32' to do
617  * the magic.  note that special care must be taken to ensure the
618  * address is 32-bit aligned -- in the interest of speed, we don't
619  * check to make sure, since careful programming can guarantee this
620  * for us.
621  */
622 
623 #elif	defined(sun4u)
624 
625 /* Define the alignment check since we use 4-byte little-endian loads. */
626 #define	_MD5_CHECK_ALIGNMENT
627 extern	uint32_t load_little_32(uint32_t *);
628 #define	LOAD_LITTLE_32(addr)	load_little_32((uint32_t *)(addr))
629 
630 /* Placate lint */
631 #if	defined(__lint)
632 uint32_t
633 load_little_32(uint32_t *addr)
634 {
635 	return (*addr);
636 }
637 #endif
638 
639 #else	/* big endian -- will work on little endian, but slowly */
640 
641 /* Since we do byte operations, we don't have to check for alignment. */
642 #define	LOAD_LITTLE_32(addr)	\
643 	((addr)[0] | ((addr)[1] << 8) | ((addr)[2] << 16) | ((addr)[3] << 24))
644 #endif
645 
646 /*
647  * sparc register window optimization:
648  *
649  * `a', `b', `c', and `d' are passed into MD5Transform explicitly
650  * since it increases the number of registers available to the
651  * compiler.  under this scheme, these variables can be held in
652  * %i0 - %i3, which leaves more local and out registers available.
653  */
654 
655 /*
656  * MD5Transform()
657  *
658  * purpose: md5 transformation -- updates the digest based on `block'
659  *   input: uint32_t	: bytes  1 -  4 of the digest
660  *          uint32_t	: bytes  5 -  8 of the digest
661  *          uint32_t	: bytes  9 - 12 of the digest
662  *          uint32_t	: bytes 13 - 16 of the digest
663  *          MD5_CTX *   : the context to update
664  *          uint8_t [64]: the block to use to update the digest
665  *  output: void
666  */
667 
668 static void
669 MD5Transform(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
670     MD5_CTX *ctx, const uint8_t block[64])
671 {
672 	/*
673 	 * general optimization:
674 	 *
675 	 * use individual integers instead of using an array.  this is a
676 	 * win, although the amount it wins by seems to vary quite a bit.
677 	 */
678 
679 	register uint32_t	x_0, x_1, x_2,  x_3,  x_4,  x_5,  x_6,  x_7;
680 	register uint32_t	x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15;
681 
682 	/*
683 	 * general optimization:
684 	 *
685 	 * the compiler (at least SC4.2/5.x) generates better code if
686 	 * variable use is localized.  in this case, swapping the integers in
687  * this order allows `x_0' to be swapped nearest to its first use in
688 	 * FF(), and likewise for `x_1' and up.  note that the compiler
689 	 * prefers this to doing each swap right before the FF() that
690 	 * uses it.
691 	 */
692 
693 	/*
694 	 * sparc v9/v8plus optimization:
695 	 *
696 	 * if `block' is already aligned on a 4-byte boundary, use the
697 	 * optimized load_little_32() directly.  otherwise, bcopy()
698 	 * into a buffer that *is* aligned on a 4-byte boundary and
699 	 * then do the load_little_32() on that buffer.  benchmarks
700 	 * have shown that using the bcopy() is better than loading
701 	 * the bytes individually and doing the endian-swap by hand.
702 	 *
703 	 * even though it's quite tempting to simply do:
704 	 *
705 	 * blk = bcopy(blk, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
706 	 *
707 	 * and only have one set of LOAD_LITTLE_32()'s, the compiler (at least
708 	 * SC4.2/5.x) *does not* like that, so please resist the urge.
709 	 */
710 
711 #ifdef _MD5_CHECK_ALIGNMENT
712 	if ((uintptr_t)block & 0x3) {		/* not 4-byte aligned? */
713 		bcopy(block, ctx->buf_un.buf32, sizeof (ctx->buf_un.buf32));
714 		x_15 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 15);
715 		x_14 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 14);
716 		x_13 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 13);
717 		x_12 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 12);
718 		x_11 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 11);
719 		x_10 = LOAD_LITTLE_32(ctx->buf_un.buf32 + 10);
720 		x_9  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  9);
721 		x_8  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  8);
722 		x_7  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  7);
723 		x_6  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  6);
724 		x_5  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  5);
725 		x_4  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  4);
726 		x_3  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  3);
727 		x_2  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  2);
728 		x_1  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  1);
729 		x_0  = LOAD_LITTLE_32(ctx->buf_un.buf32 +  0);
730 	} else
731 #endif
732 	{
733 		x_15 = LOAD_LITTLE_32(block + 60);
734 		x_14 = LOAD_LITTLE_32(block + 56);
735 		x_13 = LOAD_LITTLE_32(block + 52);
736 		x_12 = LOAD_LITTLE_32(block + 48);
737 		x_11 = LOAD_LITTLE_32(block + 44);
738 		x_10 = LOAD_LITTLE_32(block + 40);
739 		x_9  = LOAD_LITTLE_32(block + 36);
740 		x_8  = LOAD_LITTLE_32(block + 32);
741 		x_7  = LOAD_LITTLE_32(block + 28);
742 		x_6  = LOAD_LITTLE_32(block + 24);
743 		x_5  = LOAD_LITTLE_32(block + 20);
744 		x_4  = LOAD_LITTLE_32(block + 16);
745 		x_3  = LOAD_LITTLE_32(block + 12);
746 		x_2  = LOAD_LITTLE_32(block +  8);
747 		x_1  = LOAD_LITTLE_32(block +  4);
748 		x_0  = LOAD_LITTLE_32(block +  0);
749 	}
750 
751 	/* round 1 */
752 	FF(a, b, c, d, 	x_0, MD5_SHIFT_11, MD5_CONST(0));  /* 1 */
753 	FF(d, a, b, c, 	x_1, MD5_SHIFT_12, MD5_CONST(1));  /* 2 */
754 	FF(c, d, a, b, 	x_2, MD5_SHIFT_13, MD5_CONST(2));  /* 3 */
755 	FF(b, c, d, a, 	x_3, MD5_SHIFT_14, MD5_CONST(3));  /* 4 */
756 	FF(a, b, c, d, 	x_4, MD5_SHIFT_11, MD5_CONST(4));  /* 5 */
757 	FF(d, a, b, c, 	x_5, MD5_SHIFT_12, MD5_CONST(5));  /* 6 */
758 	FF(c, d, a, b, 	x_6, MD5_SHIFT_13, MD5_CONST(6));  /* 7 */
759 	FF(b, c, d, a, 	x_7, MD5_SHIFT_14, MD5_CONST(7));  /* 8 */
760 	FF(a, b, c, d, 	x_8, MD5_SHIFT_11, MD5_CONST(8));  /* 9 */
761 	FF(d, a, b, c, 	x_9, MD5_SHIFT_12, MD5_CONST(9));  /* 10 */
762 	FF(c, d, a, b, x_10, MD5_SHIFT_13, MD5_CONST(10)); /* 11 */
763 	FF(b, c, d, a, x_11, MD5_SHIFT_14, MD5_CONST(11)); /* 12 */
764 	FF(a, b, c, d, x_12, MD5_SHIFT_11, MD5_CONST(12)); /* 13 */
765 	FF(d, a, b, c, x_13, MD5_SHIFT_12, MD5_CONST(13)); /* 14 */
766 	FF(c, d, a, b, x_14, MD5_SHIFT_13, MD5_CONST(14)); /* 15 */
767 	FF(b, c, d, a, x_15, MD5_SHIFT_14, MD5_CONST(15)); /* 16 */
768 
769 	/* round 2 */
770 	GG(a, b, c, d,  x_1, MD5_SHIFT_21, MD5_CONST(16)); /* 17 */
771 	GG(d, a, b, c,  x_6, MD5_SHIFT_22, MD5_CONST(17)); /* 18 */
772 	GG(c, d, a, b, x_11, MD5_SHIFT_23, MD5_CONST(18)); /* 19 */
773 	GG(b, c, d, a,  x_0, MD5_SHIFT_24, MD5_CONST(19)); /* 20 */
774 	GG(a, b, c, d,  x_5, MD5_SHIFT_21, MD5_CONST(20)); /* 21 */
775 	GG(d, a, b, c, x_10, MD5_SHIFT_22, MD5_CONST(21)); /* 22 */
776 	GG(c, d, a, b, x_15, MD5_SHIFT_23, MD5_CONST(22)); /* 23 */
777 	GG(b, c, d, a,  x_4, MD5_SHIFT_24, MD5_CONST(23)); /* 24 */
778 	GG(a, b, c, d,  x_9, MD5_SHIFT_21, MD5_CONST(24)); /* 25 */
779 	GG(d, a, b, c, x_14, MD5_SHIFT_22, MD5_CONST(25)); /* 26 */
780 	GG(c, d, a, b,  x_3, MD5_SHIFT_23, MD5_CONST(26)); /* 27 */
781 	GG(b, c, d, a,  x_8, MD5_SHIFT_24, MD5_CONST(27)); /* 28 */
782 	GG(a, b, c, d, x_13, MD5_SHIFT_21, MD5_CONST(28)); /* 29 */
783 	GG(d, a, b, c,  x_2, MD5_SHIFT_22, MD5_CONST(29)); /* 30 */
784 	GG(c, d, a, b,  x_7, MD5_SHIFT_23, MD5_CONST(30)); /* 31 */
785 	GG(b, c, d, a, x_12, MD5_SHIFT_24, MD5_CONST(31)); /* 32 */
786 
787 	/* round 3 */
788 	HH(a, b, c, d,  x_5, MD5_SHIFT_31, MD5_CONST(32)); /* 33 */
789 	HH(d, a, b, c,  x_8, MD5_SHIFT_32, MD5_CONST(33)); /* 34 */
790 	HH(c, d, a, b, x_11, MD5_SHIFT_33, MD5_CONST(34)); /* 35 */
791 	HH(b, c, d, a, x_14, MD5_SHIFT_34, MD5_CONST(35)); /* 36 */
792 	HH(a, b, c, d,  x_1, MD5_SHIFT_31, MD5_CONST(36)); /* 37 */
793 	HH(d, a, b, c,  x_4, MD5_SHIFT_32, MD5_CONST(37)); /* 38 */
794 	HH(c, d, a, b,  x_7, MD5_SHIFT_33, MD5_CONST(38)); /* 39 */
795 	HH(b, c, d, a, x_10, MD5_SHIFT_34, MD5_CONST(39)); /* 40 */
796 	HH(a, b, c, d, x_13, MD5_SHIFT_31, MD5_CONST(40)); /* 41 */
797 	HH(d, a, b, c,  x_0, MD5_SHIFT_32, MD5_CONST(41)); /* 42 */
798 	HH(c, d, a, b,  x_3, MD5_SHIFT_33, MD5_CONST(42)); /* 43 */
799 	HH(b, c, d, a,  x_6, MD5_SHIFT_34, MD5_CONST(43)); /* 44 */
800 	HH(a, b, c, d,  x_9, MD5_SHIFT_31, MD5_CONST(44)); /* 45 */
801 	HH(d, a, b, c, x_12, MD5_SHIFT_32, MD5_CONST(45)); /* 46 */
802 	HH(c, d, a, b, x_15, MD5_SHIFT_33, MD5_CONST(46)); /* 47 */
803 	HH(b, c, d, a,  x_2, MD5_SHIFT_34, MD5_CONST(47)); /* 48 */
804 
805 	/* round 4 */
806 	II(a, b, c, d,  x_0, MD5_SHIFT_41, MD5_CONST(48)); /* 49 */
807 	II(d, a, b, c,  x_7, MD5_SHIFT_42, MD5_CONST(49)); /* 50 */
808 	II(c, d, a, b, x_14, MD5_SHIFT_43, MD5_CONST(50)); /* 51 */
809 	II(b, c, d, a,  x_5, MD5_SHIFT_44, MD5_CONST(51)); /* 52 */
810 	II(a, b, c, d, x_12, MD5_SHIFT_41, MD5_CONST(52)); /* 53 */
811 	II(d, a, b, c,  x_3, MD5_SHIFT_42, MD5_CONST(53)); /* 54 */
812 	II(c, d, a, b, x_10, MD5_SHIFT_43, MD5_CONST(54)); /* 55 */
813 	II(b, c, d, a,  x_1, MD5_SHIFT_44, MD5_CONST(55)); /* 56 */
814 	II(a, b, c, d,  x_8, MD5_SHIFT_41, MD5_CONST(56)); /* 57 */
815 	II(d, a, b, c, x_15, MD5_SHIFT_42, MD5_CONST(57)); /* 58 */
816 	II(c, d, a, b,  x_6, MD5_SHIFT_43, MD5_CONST(58)); /* 59 */
817 	II(b, c, d, a, x_13, MD5_SHIFT_44, MD5_CONST(59)); /* 60 */
818 	II(a, b, c, d,  x_4, MD5_SHIFT_41, MD5_CONST(60)); /* 61 */
819 	II(d, a, b, c, x_11, MD5_SHIFT_42, MD5_CONST(61)); /* 62 */
820 	II(c, d, a, b,  x_2, MD5_SHIFT_43, MD5_CONST(62)); /* 63 */
821 	II(b, c, d, a,  x_9, MD5_SHIFT_44, MD5_CONST(63)); /* 64 */
822 
823 	ctx->state[0] += a;
824 	ctx->state[1] += b;
825 	ctx->state[2] += c;
826 	ctx->state[3] += d;
827 
828 	/*
829 	 * zeroize sensitive information -- compiler will optimize
830 	 * this out if everything is kept in registers
831 	 */
832 
833 	x_0 = x_1  = x_2  = x_3  = x_4  = x_5  = x_6  = x_7 = x_8 = 0;
834 	x_9 = x_10 = x_11 = x_12 = x_13 = x_14 = x_15 = 0;
835 }
836 
837 /*
838  * devpro compiler optimization:
839  *
840  * the compiler can generate better code if it knows that `input' and
841  * `output' do not point to the same source.  there is no portable
842  * way to tell the compiler this, but the devpro compiler recognizes the
843  * `_Restrict' keyword to indicate this condition.  use it if possible.
844  */
845 
846 #if defined(__RESTRICT) && !defined(__GNUC__)
847 #define	restrict	_Restrict
848 #else
849 #define	restrict	/* nothing */
850 #endif
851 
852 /*
853  * Encode()
854  *
855  * purpose: to store a list of 32-bit words in little-endian byte order
856  *   input: uint8_t *	: place to store the converted little endian numbers
857  *	    uint32_t *	: place to get numbers to convert from
858  *          size_t	: the length of the input in bytes
859  *  output: void
860  */
861 
862 static void
863 Encode(uint8_t *restrict output, uint32_t *restrict input, size_t input_len)
864 {
865 	size_t		i, j;
866 
867 	for (i = 0, j = 0; j < input_len; i++, j += sizeof (uint32_t)) {
868 
869 #ifdef _LITTLE_ENDIAN
870 
871 #ifdef _MD5_CHECK_ALIGNMENT
872 		if ((uintptr_t)output & 0x3)	/* Not 4-byte aligned */
873 			bcopy(input + i, output + j, 4);
874 		else *(uint32_t *)(output + j) = input[i];
875 #else
876 		*(uint32_t *)(output + j) = input[i];
877 #endif /* _MD5_CHECK_ALIGNMENT */
878 
879 #else	/* big endian -- will work on little endian, but slowly */
880 
881 		output[j] = input[i] & 0xff;
882 		output[j + 1] = (input[i] >> 8)  & 0xff;
883 		output[j + 2] = (input[i] >> 16) & 0xff;
884 		output[j + 3] = (input[i] >> 24) & 0xff;
885 #endif
886 	}
887 }
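/*
 * Worked example (added for illustration): Encode() stores each 32-bit
 * word least-significant byte first.  Given input[0] == 0x04030201,
 * the four output bytes are 0x01, 0x02, 0x03, 0x04 regardless of the
 * host's native byte order.
 */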
888 
889 #if	defined(_KERNEL) && !defined(_BOOT)
890 
891 /*
892  * KCF software provider control entry points.
893  */
894 /* ARGSUSED */
895 static void
896 md5_provider_status(crypto_provider_handle_t provider, uint_t *status)
897 {
898 	*status = CRYPTO_PROVIDER_READY;
899 }
900 
901 /*
902  * KCF software provider digest entry points.
903  */
904 
905 static int
906 md5_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
907     crypto_req_handle_t req)
908 {
909 	if (mechanism->cm_type != MD5_MECH_INFO_TYPE)
910 		return (CRYPTO_MECHANISM_INVALID);
911 
912 	/*
913 	 * Allocate and initialize MD5 context.
914 	 */
915 	ctx->cc_provider_private = kmem_alloc(sizeof (md5_ctx_t),
916 	    crypto_kmflag(req));
917 	if (ctx->cc_provider_private == NULL)
918 		return (CRYPTO_HOST_MEMORY);
919 
920 	PROV_MD5_CTX(ctx)->mc_mech_type = MD5_MECH_INFO_TYPE;
921 	MD5Init(&PROV_MD5_CTX(ctx)->mc_md5_ctx);
922 
923 	return (CRYPTO_SUCCESS);
924 }
925 
926 /*
927  * Helper MD5 digest update function for uio data.
928  */
929 static int
930 md5_digest_update_uio(MD5_CTX *md5_ctx, crypto_data_t *data)
931 {
932 	off_t offset = data->cd_offset;
933 	size_t length = data->cd_length;
934 	uint_t vec_idx;
935 	size_t cur_len;
936 
937 	/* we support only kernel buffers */
938 	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
939 		return (CRYPTO_ARGUMENTS_BAD);
940 
941 	/*
942 	 * Jump to the first iovec containing data to be
943 	 * digested.
944 	 */
945 	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
946 	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
947 	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len);
948 	if (vec_idx == data->cd_uio->uio_iovcnt) {
949 		/*
950 		 * The caller specified an offset that is larger than the
951 		 * total size of the buffers it provided.
952 		 */
953 		return (CRYPTO_DATA_LEN_RANGE);
954 	}
955 
956 	/*
957 	 * Now do the digesting on the iovecs.
958 	 */
959 	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
960 		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
961 		    offset, length);
962 
963 		MD5Update(md5_ctx, data->cd_uio->uio_iov[vec_idx].iov_base +
964 		    offset, cur_len);
965 
966 		length -= cur_len;
967 		vec_idx++;
968 		offset = 0;
969 	}
970 
971 	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
972 		/*
973 		 * The end of the specified iovecs was reached but
974 		 * the length requested could not be processed, i.e.
975 		 * the caller requested to digest more data than it provided.
976 		 */
977 		return (CRYPTO_DATA_LEN_RANGE);
978 	}
979 
980 	return (CRYPTO_SUCCESS);
981 }
982 
983 /*
984  * Helper MD5 digest final function for uio data.
985  * digest_len is the length of the desired digest. If digest_len
986  * is smaller than the default MD5 digest length, the caller
987  * must pass a scratch buffer, digest_scratch, which must
988  * be at least MD5_DIGEST_LENGTH bytes.
989  */
990 static int
991 md5_digest_final_uio(MD5_CTX *md5_ctx, crypto_data_t *digest,
992     ulong_t digest_len, uchar_t *digest_scratch)
993 {
994 	off_t offset = digest->cd_offset;
995 	uint_t vec_idx;
996 
997 	/* we support only kernel buffers */
998 	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
999 		return (CRYPTO_ARGUMENTS_BAD);
1000 
1001 	/*
1002 	 * Jump to the first iovec in which the digest is to
1003 	 * be stored.
1004 	 */
1005 	for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
1006 	    vec_idx < digest->cd_uio->uio_iovcnt;
1007 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len);
1008 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
1009 		/*
1010 		 * The caller specified an offset that is
1011 		 * larger than the total size of the buffers
1012 		 * it provided.
1013 		 */
1014 		return (CRYPTO_DATA_LEN_RANGE);
1015 	}
1016 
1017 	if (offset + digest_len <=
1018 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
1019 		/*
1020 		 * The computed MD5 digest will fit in the current
1021 		 * iovec.
1022 		 */
1023 		if (digest_len != MD5_DIGEST_LENGTH) {
1024 			/*
1025 			 * The caller requested a short digest. Digest
1026 			 * into a scratch buffer and return to
1027 			 * the user only what was requested.
1028 			 */
1029 			MD5Final(digest_scratch, md5_ctx);
1030 			bcopy(digest_scratch, (uchar_t *)digest->
1031 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1032 			    digest_len);
1033 		} else {
1034 			MD5Final((uchar_t *)digest->
1035 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
1036 			    md5_ctx);
1037 		}
1038 	} else {
1039 		/*
1040 		 * The computed digest will cross one or more iovecs.
1041 		 * This is bad performance-wise but we need to support it.
1042 		 * Allocate a small scratch buffer on the stack and
1043 		 * copy it piecemeal to the specified digest iovecs.
1044 		 */
1045 		uchar_t digest_tmp[MD5_DIGEST_LENGTH];
1046 		off_t scratch_offset = 0;
1047 		size_t length = digest_len;
1048 		size_t cur_len;
1049 
1050 		MD5Final(digest_tmp, md5_ctx);
1051 
1052 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
1053 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
1054 			    offset, length);
1055 			bcopy(digest_tmp + scratch_offset,
1056 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
1057 			    cur_len);
1058 
1059 			length -= cur_len;
1060 			vec_idx++;
1061 			scratch_offset += cur_len;
1062 			offset = 0;
1063 		}
1064 
1065 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
1066 			/*
1067 			 * The end of the specified iovecs was reached but
1068 			 * the length requested could not be processed, i.e.
1069 			 * the caller requested to digest more data than it
1070 			 * provided.
1071 			 */
1072 			return (CRYPTO_DATA_LEN_RANGE);
1073 		}
1074 	}
1075 
1076 	return (CRYPTO_SUCCESS);
1077 }
1078 
1079 /*
1080  * Helper MD5 digest update for mblk's.
1081  */
1082 static int
1083 md5_digest_update_mblk(MD5_CTX *md5_ctx, crypto_data_t *data)
1084 {
1085 	off_t offset = data->cd_offset;
1086 	size_t length = data->cd_length;
1087 	mblk_t *mp;
1088 	size_t cur_len;
1089 
1090 	/*
1091 	 * Jump to the first mblk_t containing data to be digested.
1092 	 */
1093 	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
1094 	    offset -= MBLKL(mp), mp = mp->b_cont);
1095 	if (mp == NULL) {
1096 		/*
1097 		 * The caller specified an offset that is larger than the
1098 		 * total size of the buffers it provided.
1099 		 */
1100 		return (CRYPTO_DATA_LEN_RANGE);
1101 	}
1102 
1103 	/*
1104 	 * Now do the digesting on the mblk chain.
1105 	 */
1106 	while (mp != NULL && length > 0) {
1107 		cur_len = MIN(MBLKL(mp) - offset, length);
1108 		MD5Update(md5_ctx, mp->b_rptr + offset, cur_len);
1109 		length -= cur_len;
1110 		offset = 0;
1111 		mp = mp->b_cont;
1112 	}
1113 
1114 	if (mp == NULL && length > 0) {
1115 		/*
1116 		 * The end of the mblk chain was reached but the length
1117 		 * requested could not be processed, i.e. the caller requested
1118 		 * to digest more data than it provided.
1119 		 */
1120 		return (CRYPTO_DATA_LEN_RANGE);
1121 	}
1122 
1123 	return (CRYPTO_SUCCESS);
1124 }
1125 
1126 /*
1127  * Helper MD5 digest final for mblk's.
1128  * digest_len is the length of the desired digest. If digest_len
1129  * is smaller than the default MD5 digest length, the caller
1130  * must pass a scratch buffer, digest_scratch, which must
1131  * be at least MD5_DIGEST_LENGTH bytes.
1132  */
1133 static int
1134 md5_digest_final_mblk(MD5_CTX *md5_ctx, crypto_data_t *digest,
1135     ulong_t digest_len, uchar_t *digest_scratch)
1136 {
1137 	off_t offset = digest->cd_offset;
1138 	mblk_t *mp;
1139 
1140 	/*
1141 	 * Jump to the first mblk_t that will be used to store the digest.
1142 	 */
1143 	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
1144 	    offset -= MBLKL(mp), mp = mp->b_cont);
1145 	if (mp == NULL) {
1146 		/*
1147 		 * The caller specified an offset that is larger than the
1148 		 * total size of the buffers it provided.
1149 		 */
1150 		return (CRYPTO_DATA_LEN_RANGE);
1151 	}
1152 
1153 	if (offset + digest_len <= MBLKL(mp)) {
1154 		/*
1155 		 * The computed MD5 digest will fit in the current mblk.
1156 		 * Do the MD5Final() in-place.
1157 		 */
1158 		if (digest_len != MD5_DIGEST_LENGTH) {
1159 			/*
1160 			 * The caller requested a short digest. Digest
1161 			 * into a scratch buffer and return to
1162 			 * the user only what was requested.
1163 			 */
1164 			MD5Final(digest_scratch, md5_ctx);
1165 			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
1166 		} else {
1167 			MD5Final(mp->b_rptr + offset, md5_ctx);
1168 		}
1169 	} else {
1170 		/*
1171 		 * The computed digest will cross one or more mblks.
1172 		 * This is bad performance-wise but we need to support it.
1173 		 * Allocate a small scratch buffer on the stack and
1174 		 * copy it piecemeal to the specified digest mblks.
1175 		 */
1176 		uchar_t digest_tmp[MD5_DIGEST_LENGTH];
1177 		off_t scratch_offset = 0;
1178 		size_t length = digest_len;
1179 		size_t cur_len;
1180 
1181 		MD5Final(digest_tmp, md5_ctx);
1182 
1183 		while (mp != NULL && length > 0) {
1184 			cur_len = MIN(MBLKL(mp) - offset, length);
1185 			bcopy(digest_tmp + scratch_offset,
1186 			    mp->b_rptr + offset, cur_len);
1187 
1188 			length -= cur_len;
1189 			mp = mp->b_cont;
1190 			scratch_offset += cur_len;
1191 			offset = 0;
1192 		}
1193 
1194 		if (mp == NULL && length > 0) {
1195 			/*
1196 			 * The end of the specified mblk chain was reached but
1197 			 * the length requested could not be processed, i.e.
1198 			 * the caller requested to digest more data than it
1199 			 * provided.
1200 			 */
1201 			return (CRYPTO_DATA_LEN_RANGE);
1202 		}
1203 	}
1204 
1205 	return (CRYPTO_SUCCESS);
1206 }
1207 
1208 /* ARGSUSED */
1209 static int
1210 md5_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
1211     crypto_req_handle_t req)
1212 {
1213 	int ret = CRYPTO_SUCCESS;
1214 
1215 	ASSERT(ctx->cc_provider_private != NULL);
1216 
1217 	/*
1218 	 * We just need to return the length needed to store the output.
1219 	 * We should not destroy the context in these cases.
1220 	 */
1221 	if ((digest->cd_length == 0) ||
1222 	    (digest->cd_length < MD5_DIGEST_LENGTH)) {
1223 		digest->cd_length = MD5_DIGEST_LENGTH;
1224 		return (CRYPTO_BUFFER_TOO_SMALL);
1225 	}
1226 
1227 	/*
1228 	 * Do the MD5 update on the specified input data.
1229 	 */
1230 	switch (data->cd_format) {
1231 	case CRYPTO_DATA_RAW:
1232 		MD5Update(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1233 		    data->cd_raw.iov_base + data->cd_offset,
1234 		    data->cd_length);
1235 		break;
1236 	case CRYPTO_DATA_UIO:
1237 		ret = md5_digest_update_uio(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1238 		    data);
1239 		break;
1240 	case CRYPTO_DATA_MBLK:
1241 		ret = md5_digest_update_mblk(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1242 		    data);
1243 		break;
1244 	default:
1245 		ret = CRYPTO_ARGUMENTS_BAD;
1246 	}
1247 
1248 	if (ret != CRYPTO_SUCCESS) {
1249 		/* the update failed, free context and bail */
1250 		kmem_free(ctx->cc_provider_private, sizeof (md5_ctx_t));
1251 		ctx->cc_provider_private = NULL;
1252 		digest->cd_length = 0;
1253 		return (ret);
1254 	}
1255 
1256 	/*
1257 	 * Do an MD5 final; this must be done separately since the digest
1258 	 * type can be different from the input data type.
1259 	 */
1260 	switch (digest->cd_format) {
1261 	case CRYPTO_DATA_RAW:
1262 		MD5Final((unsigned char *)digest->cd_raw.iov_base +
1263 		    digest->cd_offset, &PROV_MD5_CTX(ctx)->mc_md5_ctx);
1264 		break;
1265 	case CRYPTO_DATA_UIO:
1266 		ret = md5_digest_final_uio(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1267 		    digest, MD5_DIGEST_LENGTH, NULL);
1268 		break;
1269 	case CRYPTO_DATA_MBLK:
1270 		ret = md5_digest_final_mblk(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1271 		    digest, MD5_DIGEST_LENGTH, NULL);
1272 		break;
1273 	default:
1274 		ret = CRYPTO_ARGUMENTS_BAD;
1275 	}
1276 
1277 	/* all done, free context and return */
1278 
1279 	if (ret == CRYPTO_SUCCESS) {
1280 		digest->cd_length = MD5_DIGEST_LENGTH;
1281 	} else {
1282 		digest->cd_length = 0;
1283 	}
1284 
1285 	kmem_free(ctx->cc_provider_private, sizeof (md5_ctx_t));
1286 	ctx->cc_provider_private = NULL;
1287 	return (ret);
1288 }
1289 
1290 /* ARGSUSED */
1291 static int
1292 md5_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
1293     crypto_req_handle_t req)
1294 {
1295 	int ret = CRYPTO_SUCCESS;
1296 
1297 	ASSERT(ctx->cc_provider_private != NULL);
1298 
1299 	/*
1300 	 * Do the MD5 update on the specified input data.
1301 	 */
1302 	switch (data->cd_format) {
1303 	case CRYPTO_DATA_RAW:
1304 		MD5Update(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1305 		    data->cd_raw.iov_base + data->cd_offset,
1306 		    data->cd_length);
1307 		break;
1308 	case CRYPTO_DATA_UIO:
1309 		ret = md5_digest_update_uio(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1310 		    data);
1311 		break;
1312 	case CRYPTO_DATA_MBLK:
1313 		ret = md5_digest_update_mblk(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1314 		    data);
1315 		break;
1316 	default:
1317 		ret = CRYPTO_ARGUMENTS_BAD;
1318 	}
1319 
1320 	return (ret);
1321 }
1322 
1323 /* ARGSUSED */
1324 static int
1325 md5_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
1326     crypto_req_handle_t req)
1327 {
1328 	int ret = CRYPTO_SUCCESS;
1329 
1330 	ASSERT(ctx->cc_provider_private != NULL);
1331 
1332 	/*
1333 	 * We just need to return the length needed to store the output.
1334 	 * We should not destroy the context in these cases.
1335 	 */
1336 	if ((digest->cd_length == 0) ||
1337 	    (digest->cd_length < MD5_DIGEST_LENGTH)) {
1338 		digest->cd_length = MD5_DIGEST_LENGTH;
1339 		return (CRYPTO_BUFFER_TOO_SMALL);
1340 	}
1341 
1342 	/*
1343 	 * Do an MD5 final.
1344 	 */
1345 	switch (digest->cd_format) {
1346 	case CRYPTO_DATA_RAW:
1347 		MD5Final((unsigned char *)digest->cd_raw.iov_base +
1348 		    digest->cd_offset, &PROV_MD5_CTX(ctx)->mc_md5_ctx);
1349 		break;
1350 	case CRYPTO_DATA_UIO:
1351 		ret = md5_digest_final_uio(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1352 		    digest, MD5_DIGEST_LENGTH, NULL);
1353 		break;
1354 	case CRYPTO_DATA_MBLK:
1355 		ret = md5_digest_final_mblk(&PROV_MD5_CTX(ctx)->mc_md5_ctx,
1356 		    digest, MD5_DIGEST_LENGTH, NULL);
1357 		break;
1358 	default:
1359 		ret = CRYPTO_ARGUMENTS_BAD;
1360 	}
1361 
1362 	/* all done, free context and return */
1363 
1364 	if (ret == CRYPTO_SUCCESS) {
1365 		digest->cd_length = MD5_DIGEST_LENGTH;
1366 	} else {
1367 		digest->cd_length = 0;
1368 	}
1369 
1370 	kmem_free(ctx->cc_provider_private, sizeof (md5_ctx_t));
1371 	ctx->cc_provider_private = NULL;
1372 
1373 	return (ret);
1374 }
1375 
1376 /* ARGSUSED */
1377 static int
1378 md5_digest_atomic(crypto_provider_handle_t provider,
1379     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1380     crypto_data_t *data, crypto_data_t *digest,
1381     crypto_req_handle_t req)
1382 {
1383 	int ret = CRYPTO_SUCCESS;
1384 	MD5_CTX md5_ctx;
1385 
1386 	if (mechanism->cm_type != MD5_MECH_INFO_TYPE)
1387 		return (CRYPTO_MECHANISM_INVALID);
1388 
1389 	/*
1390 	 * Do the MD5 init.
1391 	 */
1392 	MD5Init(&md5_ctx);
1393 
1394 	/*
1395 	 * Do the MD5 update on the specified input data.
1396 	 */
1397 	switch (data->cd_format) {
1398 	case CRYPTO_DATA_RAW:
1399 		MD5Update(&md5_ctx, data->cd_raw.iov_base + data->cd_offset,
1400 		    data->cd_length);
1401 		break;
1402 	case CRYPTO_DATA_UIO:
1403 		ret = md5_digest_update_uio(&md5_ctx, data);
1404 		break;
1405 	case CRYPTO_DATA_MBLK:
1406 		ret = md5_digest_update_mblk(&md5_ctx, data);
1407 		break;
1408 	default:
1409 		ret = CRYPTO_ARGUMENTS_BAD;
1410 	}
1411 
1412 	if (ret != CRYPTO_SUCCESS) {
1413 		/* the update failed, bail */
1414 		digest->cd_length = 0;
1415 		return (ret);
1416 	}
1417 
1418 	/*
1419 	 * Do an MD5 final; this must be done separately since the digest
1420 	 * type can be different from the input data type.
1421 	 */
1422 	switch (digest->cd_format) {
1423 	case CRYPTO_DATA_RAW:
1424 		MD5Final((unsigned char *)digest->cd_raw.iov_base +
1425 		    digest->cd_offset, &md5_ctx);
1426 		break;
1427 	case CRYPTO_DATA_UIO:
1428 		ret = md5_digest_final_uio(&md5_ctx, digest,
1429 		    MD5_DIGEST_LENGTH, NULL);
1430 		break;
1431 	case CRYPTO_DATA_MBLK:
1432 		ret = md5_digest_final_mblk(&md5_ctx, digest,
1433 		    MD5_DIGEST_LENGTH, NULL);
1434 		break;
1435 	default:
1436 		ret = CRYPTO_ARGUMENTS_BAD;
1437 	}
1438 
1439 	if (ret == CRYPTO_SUCCESS) {
1440 		digest->cd_length = MD5_DIGEST_LENGTH;
1441 	} else {
1442 		digest->cd_length = 0;
1443 	}
1444 
1445 	return (ret);
1446 }
1447 
1448 /*
1449  * KCF software provider mac entry points.
1450  *
1451  * MD5 HMAC is: MD5(key XOR opad, MD5(key XOR ipad, text))
1452  *
1453  * Init:
1454  * The initialization routine initializes what we denote
1455  * as the inner and outer contexts by doing
1456  * - for inner context: MD5(key XOR ipad)
1457  * - for outer context: MD5(key XOR opad)
1458  *
1459  * Update:
1460  * Each subsequent MD5 HMAC update will result in an
1461  * update of the inner context with the specified data.
1462  *
1463  * Final:
1464  * The MD5 HMAC final will do an MD5 final operation on the
1465  * inner context, and the resulting digest will be used
1466  * as the data for an update on the outer context. Last
1467  * but not least, an MD5 final on the outer context will
1468  * be performed to obtain the MD5 HMAC digest to return
1469  * to the user.
1470  */
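/*
 * In pseudocode (added for illustration, with the key already padded or
 * hashed down to at most one block):
 *
 *	ipad  = key XOR 0x36..36;  opad = key XOR 0x5c..5c
 *	inner = MD5(ipad || text)		(hc_icontext)
 *	HMAC  = MD5(opad || inner)		(hc_ocontext)
 *
 * which is what md5_mac_init_ctx(), md5_mac_update(), and
 * md5_mac_final() below implement incrementally.
 */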
1471 
1472 /*
1473  * Initialize an MD5-HMAC context.
1474  */
1475 static void
1476 md5_mac_init_ctx(md5_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
1477 {
1478 	uint32_t ipad[MD5_HMAC_INTS_PER_BLOCK];
1479 	uint32_t opad[MD5_HMAC_INTS_PER_BLOCK];
1480 	uint_t i;
1481 
1482 	bzero(ipad, MD5_HMAC_BLOCK_SIZE);
1483 	bzero(opad, MD5_HMAC_BLOCK_SIZE);
1484 
1485 	bcopy(keyval, ipad, length_in_bytes);
1486 	bcopy(keyval, opad, length_in_bytes);
1487 
1488 	/* XOR key with ipad (0x36) and opad (0x5c) */
1489 	for (i = 0; i < MD5_HMAC_INTS_PER_BLOCK; i++) {
1490 		ipad[i] ^= 0x36363636;
1491 		opad[i] ^= 0x5c5c5c5c;
1492 	}
1493 
1494 	/* perform MD5 on ipad */
1495 	MD5Init(&ctx->hc_icontext);
1496 	MD5Update(&ctx->hc_icontext, ipad, MD5_HMAC_BLOCK_SIZE);
1497 
1498 	/* perform MD5 on opad */
1499 	MD5Init(&ctx->hc_ocontext);
1500 	MD5Update(&ctx->hc_ocontext, opad, MD5_HMAC_BLOCK_SIZE);
1501 }
1502 
1503 /*
1504  * Initializes a multi-part MAC operation.
1505  */
1506 static int
1507 md5_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
1508     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
1509     crypto_req_handle_t req)
1510 {
1511 	int ret = CRYPTO_SUCCESS;
1512 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1513 
1514 	if (mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE &&
1515 	    mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE)
1516 		return (CRYPTO_MECHANISM_INVALID);
1517 
1518 	/* Add support for key by attributes (RFE 4706552) */
1519 	if (key->ck_format != CRYPTO_KEY_RAW)
1520 		return (CRYPTO_ARGUMENTS_BAD);
1521 
1522 	ctx->cc_provider_private = kmem_alloc(sizeof (md5_hmac_ctx_t),
1523 	    crypto_kmflag(req));
1524 	if (ctx->cc_provider_private == NULL)
1525 		return (CRYPTO_HOST_MEMORY);
1526 
1527 	if (ctx_template != NULL) {
1528 		/* reuse context template */
1529 		bcopy(ctx_template, PROV_MD5_HMAC_CTX(ctx),
1530 		    sizeof (md5_hmac_ctx_t));
1531 	} else {
1532 		/* no context template, compute context */
1533 		if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
1534 			uchar_t digested_key[MD5_DIGEST_LENGTH];
1535 			md5_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
1536 
1537 			/*
1538 			 * Hash the passed-in key to get a smaller key.
1539 			 * The inner context is used since it hasn't been
1540 			 * initialized yet.
1541 			 */
1542 			PROV_MD5_DIGEST_KEY(&hmac_ctx->hc_icontext,
1543 			    key->ck_data, keylen_in_bytes, digested_key);
1544 			md5_mac_init_ctx(PROV_MD5_HMAC_CTX(ctx),
1545 			    digested_key, MD5_DIGEST_LENGTH);
1546 		} else {
1547 			md5_mac_init_ctx(PROV_MD5_HMAC_CTX(ctx),
1548 			    key->ck_data, keylen_in_bytes);
1549 		}
1550 	}
1551 
1552 	/*
1553 	 * Get the mechanism parameters, if applicable.
1554 	 */
1555 	PROV_MD5_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
1556 	if (mechanism->cm_type == MD5_HMAC_GEN_MECH_INFO_TYPE) {
1557 		if (mechanism->cm_param == NULL ||
1558 		    mechanism->cm_param_len != sizeof (ulong_t))
1559 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1560 		else PROV_MD5_GET_DIGEST_LEN(mechanism,
1561 		    PROV_MD5_HMAC_CTX(ctx)->hc_digest_len);
1562 		if (ret == CRYPTO_SUCCESS &&
1563 		    PROV_MD5_HMAC_CTX(ctx)->hc_digest_len > MD5_DIGEST_LENGTH)
1564 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1565 	}
1566 
1567 	if (ret != CRYPTO_SUCCESS) {
1568 		bzero(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
1569 		kmem_free(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
1570 		ctx->cc_provider_private = NULL;
1571 	}
1572 
1573 	return (ret);
1574 }
1575 
1576 
1577 /* ARGSUSED */
1578 static int
1579 md5_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
1580 {
1581 	int ret = CRYPTO_SUCCESS;
1582 
1583 	ASSERT(ctx->cc_provider_private != NULL);
1584 
1585 	/*
1586 	 * Do an MD5 update of the inner context using the specified
1587 	 * data.
1588 	 */
1589 	switch (data->cd_format) {
1590 	case CRYPTO_DATA_RAW:
1591 		MD5Update(&PROV_MD5_HMAC_CTX(ctx)->hc_icontext,
1592 		    data->cd_raw.iov_base + data->cd_offset,
1593 		    data->cd_length);
1594 		break;
1595 	case CRYPTO_DATA_UIO:
1596 		ret = md5_digest_update_uio(
1597 		    &PROV_MD5_HMAC_CTX(ctx)->hc_icontext, data);
1598 		break;
1599 	case CRYPTO_DATA_MBLK:
1600 		ret = md5_digest_update_mblk(
1601 		    &PROV_MD5_HMAC_CTX(ctx)->hc_icontext, data);
1602 		break;
1603 	default:
1604 		ret = CRYPTO_ARGUMENTS_BAD;
1605 	}
1606 
1607 	return (ret);
1608 }
1609 
1610 /* ARGSUSED */
1611 static int
1612 md5_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
1613 {
1614 	int ret = CRYPTO_SUCCESS;
1615 	uchar_t digest[MD5_DIGEST_LENGTH];
1616 	uint32_t digest_len = MD5_DIGEST_LENGTH;
1617 
1618 	ASSERT(ctx->cc_provider_private != NULL);
1619 
1620 	if (PROV_MD5_HMAC_CTX(ctx)->hc_mech_type == MD5_HMAC_GEN_MECH_INFO_TYPE)
1621 	    digest_len = PROV_MD5_HMAC_CTX(ctx)->hc_digest_len;
1622 
1623 	/*
1624 	 * We just need to return the length needed to store the output.
1625 	 * We should not destroy the context in these cases.
1626 	 */
1627 	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
1628 		mac->cd_length = digest_len;
1629 		return (CRYPTO_BUFFER_TOO_SMALL);
1630 	}
1631 
1632 	/*
1633 	 * Do an MD5 final on the inner context.
1634 	 */
1635 	MD5Final(digest, &PROV_MD5_HMAC_CTX(ctx)->hc_icontext);
1636 
1637 	/*
1638 	 * Do an MD5 update on the outer context, feeding the inner
1639 	 * digest as data.
1640 	 */
1641 	MD5Update(&PROV_MD5_HMAC_CTX(ctx)->hc_ocontext, digest,
1642 	    MD5_DIGEST_LENGTH);
1643 
1644 	/*
1645 	 * Do an MD5 final on the outer context, storing the computed
1646 	 * digest in the user's buffer.
1647 	 */
1648 	switch (mac->cd_format) {
1649 	case CRYPTO_DATA_RAW:
1650 		if (digest_len != MD5_DIGEST_LENGTH) {
1651 			/*
1652 			 * The caller requested a short digest. Digest
1653 			 * into a scratch buffer and return to
1654 			 * the user only what was requested.
1655 			 */
1656 			MD5Final(digest,
1657 			    &PROV_MD5_HMAC_CTX(ctx)->hc_ocontext);
1658 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1659 			    mac->cd_offset, digest_len);
1660 		} else {
1661 			MD5Final((unsigned char *)mac->cd_raw.iov_base +
1662 			    mac->cd_offset,
1663 			    &PROV_MD5_HMAC_CTX(ctx)->hc_ocontext);
1664 		}
1665 		break;
1666 	case CRYPTO_DATA_UIO:
1667 		ret = md5_digest_final_uio(
1668 		    &PROV_MD5_HMAC_CTX(ctx)->hc_ocontext, mac,
1669 		    digest_len, digest);
1670 		break;
1671 	case CRYPTO_DATA_MBLK:
1672 		ret = md5_digest_final_mblk(
1673 		    &PROV_MD5_HMAC_CTX(ctx)->hc_ocontext, mac,
1674 		    digest_len, digest);
1675 		break;
1676 	default:
1677 		ret = CRYPTO_ARGUMENTS_BAD;
1678 	}
1679 
1680 	if (ret == CRYPTO_SUCCESS) {
1681 		mac->cd_length = digest_len;
1682 	} else {
1683 		mac->cd_length = 0;
1684 	}
1685 
1686 	bzero(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
1687 	kmem_free(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
1688 	ctx->cc_provider_private = NULL;
1689 
1690 	return (ret);
1691 }
1692 
1693 #define	MD5_MAC_UPDATE(data, ctx, ret) {				\
1694 	switch (data->cd_format) {					\
1695 	case CRYPTO_DATA_RAW:						\
1696 		MD5Update(&(ctx).hc_icontext,				\
1697 		    data->cd_raw.iov_base + data->cd_offset,		\
1698 		    data->cd_length);					\
1699 		break;							\
1700 	case CRYPTO_DATA_UIO:						\
1701 		ret = md5_digest_update_uio(&(ctx).hc_icontext,	data);	\
1702 		break;							\
1703 	case CRYPTO_DATA_MBLK:						\
1704 		ret = md5_digest_update_mblk(&(ctx).hc_icontext,	\
1705 		    data);						\
1706 		break;							\
1707 	default:							\
1708 		ret = CRYPTO_ARGUMENTS_BAD;				\
1709 	}								\
1710 }
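
/*
 * Data format note (illustrative sketch): MD5_MAC_UPDATE, like the other
 * per-format switches in this file, dispatches on crypto_data_t.cd_format.
 * A hedged example of how a KCF consumer might describe a flat kernel
 * buffer as CRYPTO_DATA_RAW (buf and buflen are hypothetical variables;
 * the cd_* fields are the ones consumed above):
 *
 *	crypto_data_t data;
 *
 *	bzero(&data, sizeof (data));
 *	data.cd_format = CRYPTO_DATA_RAW;
 *	data.cd_offset = 0;
 *	data.cd_length = buflen;
 *	data.cd_raw.iov_base = (char *)buf;
 *	data.cd_raw.iov_len = buflen;
 */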
1711 
1712 
1713 /* ARGSUSED */
1714 static int
1715 md5_mac_atomic(crypto_provider_handle_t provider,
1716     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1717     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1718     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1719 {
1720 	int ret = CRYPTO_SUCCESS;
1721 	uchar_t digest[MD5_DIGEST_LENGTH];
1722 	md5_hmac_ctx_t md5_hmac_ctx;
1723 	uint32_t digest_len = MD5_DIGEST_LENGTH;
1724 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1725 
1726 	if (mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE &&
1727 	    mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE)
1728 		return (CRYPTO_MECHANISM_INVALID);
1729 
1730 	/* Add support for key by attributes (RFE 4706552) */
1731 	if (key->ck_format != CRYPTO_KEY_RAW)
1732 		return (CRYPTO_ARGUMENTS_BAD);
1733 
1734 	if (ctx_template != NULL) {
1735 		/* reuse context template */
1736 		bcopy(ctx_template, &md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
1737 	} else {
1738 		/* no context template, compute context */
1739 		if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
1740 			/*
1741 			 * Hash the passed-in key to get a smaller key.
1742 			 * The inner context is used since it hasn't been
1743 			 * initialized yet.
1744 			 */
1745 			PROV_MD5_DIGEST_KEY(&md5_hmac_ctx.hc_icontext,
1746 			    key->ck_data, keylen_in_bytes, digest);
1747 			md5_mac_init_ctx(&md5_hmac_ctx, digest,
1748 			    MD5_DIGEST_LENGTH);
1749 		} else {
1750 			md5_mac_init_ctx(&md5_hmac_ctx, key->ck_data,
1751 			    keylen_in_bytes);
1752 		}
1753 	}
1754 
1755 	/*
1756 	 * Get the mechanism parameters, if applicable.
1757 	 */
1758 	if (mechanism->cm_type == MD5_HMAC_GEN_MECH_INFO_TYPE) {
1759 		if (mechanism->cm_param == NULL ||
1760 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1761 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1762 			goto bail;
1763 		}
1764 		PROV_MD5_GET_DIGEST_LEN(mechanism, digest_len);
1765 		if (digest_len > MD5_DIGEST_LENGTH) {
1766 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1767 			goto bail;
1768 		}
1769 	}
1770 
1771 	/* do an MD5 update of the inner context using the specified data */
1772 	MD5_MAC_UPDATE(data, md5_hmac_ctx, ret);
1773 	if (ret != CRYPTO_SUCCESS)
1774 		/* the update failed, zeroize the context and bail */
1775 		goto bail;
1776 
1777 	/* do an MD5 final on the inner context */
1778 	MD5Final(digest, &md5_hmac_ctx.hc_icontext);
1779 
1780 	/*
1781 	 * Do an MD5 update on the outer context, feeding the inner
1782 	 * digest as data.
1783 	 */
1784 	MD5Update(&md5_hmac_ctx.hc_ocontext, digest, MD5_DIGEST_LENGTH);
1785 
1786 	/*
1787 	 * Do an MD5 final on the outer context, storing the computed
1788 	 * digest in the user's buffer.
1789 	 */
1790 	switch (mac->cd_format) {
1791 	case CRYPTO_DATA_RAW:
1792 		if (digest_len != MD5_DIGEST_LENGTH) {
1793 			/*
1794 			 * The caller requested a short digest. Digest
1795 			 * into a scratch buffer and return to
1796 			 * the user only what was requested.
1797 			 */
1798 			MD5Final(digest, &md5_hmac_ctx.hc_ocontext);
1799 			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1800 			    mac->cd_offset, digest_len);
1801 		} else {
1802 			MD5Final((unsigned char *)mac->cd_raw.iov_base +
1803 			    mac->cd_offset, &md5_hmac_ctx.hc_ocontext);
1804 		}
1805 		break;
1806 	case CRYPTO_DATA_UIO:
1807 		ret = md5_digest_final_uio(&md5_hmac_ctx.hc_ocontext, mac,
1808 		    digest_len, digest);
1809 		break;
1810 	case CRYPTO_DATA_MBLK:
1811 		ret = md5_digest_final_mblk(&md5_hmac_ctx.hc_ocontext, mac,
1812 		    digest_len, digest);
1813 		break;
1814 	default:
1815 		ret = CRYPTO_ARGUMENTS_BAD;
1816 	}
1817 
1818 	if (ret == CRYPTO_SUCCESS) {
1819 		mac->cd_length = digest_len;
1820 	} else {
1821 		mac->cd_length = 0;
1822 	}
1823 	/* Extra paranoia: zeroizing the local context on the stack */
1824 	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
1825 
1826 	return (ret);
1827 bail:
1828 	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
1829 	mac->cd_length = 0;
1830 	return (ret);
1831 }
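
/*
 * Mechanism parameter note (illustrative sketch): for the general
 * MD5_HMAC_GEN_MECH_INFO_TYPE mechanism, md5_mac_atomic() expects
 * cm_param to point to a ulong_t giving the number of leading digest
 * bytes to return (at most MD5_DIGEST_LENGTH).  A hedged caller-side
 * example requesting a 96-bit truncated HMAC-MD5, assuming the usual
 * crypto_mech2id() lookup of the SUN_CKM_MD5_HMAC_GENERAL mechanism
 * name (mac_len and mech are hypothetical variables):
 *
 *	ulong_t mac_len = 12;
 *	crypto_mechanism_t mech;
 *
 *	mech.cm_type = crypto_mech2id(SUN_CKM_MD5_HMAC_GENERAL);
 *	mech.cm_param = (caddr_t)&mac_len;
 *	mech.cm_param_len = sizeof (mac_len);
 */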
1832 
1833 /* ARGSUSED */
1834 static int
1835 md5_mac_verify_atomic(crypto_provider_handle_t provider,
1836     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1837     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1838     crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1839 {
1840 	int ret = CRYPTO_SUCCESS;
1841 	uchar_t digest[MD5_DIGEST_LENGTH];
1842 	md5_hmac_ctx_t md5_hmac_ctx;
1843 	uint32_t digest_len = MD5_DIGEST_LENGTH;
1844 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1845 
1846 	if (mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE &&
1847 	    mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE)
1848 		return (CRYPTO_MECHANISM_INVALID);
1849 
1850 	/* Add support for key by attributes (RFE 4706552) */
1851 	if (key->ck_format != CRYPTO_KEY_RAW)
1852 		return (CRYPTO_ARGUMENTS_BAD);
1853 
1854 	if (ctx_template != NULL) {
1855 		/* reuse context template */
1856 		bcopy(ctx_template, &md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
1857 	} else {
1858 		/* no context template, compute context */
1859 		if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
1860 			/*
1861 			 * Hash the passed-in key to get a smaller key.
1862 			 * The inner context is used since it hasn't been
1863 			 * initialized yet.
1864 			 */
1865 			PROV_MD5_DIGEST_KEY(&md5_hmac_ctx.hc_icontext,
1866 			    key->ck_data, keylen_in_bytes, digest);
1867 			md5_mac_init_ctx(&md5_hmac_ctx, digest,
1868 			    MD5_DIGEST_LENGTH);
1869 		} else {
1870 			md5_mac_init_ctx(&md5_hmac_ctx, key->ck_data,
1871 			    keylen_in_bytes);
1872 		}
1873 	}
1874 
1875 	/*
1876 	 * Get the mechanism parameters, if applicable.
1877 	 */
1878 	if (mechanism->cm_type == MD5_HMAC_GEN_MECH_INFO_TYPE) {
1879 		if (mechanism->cm_param == NULL ||
1880 		    mechanism->cm_param_len != sizeof (ulong_t)) {
1881 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1882 			goto bail;
1883 		}
1884 		PROV_MD5_GET_DIGEST_LEN(mechanism, digest_len);
1885 		if (digest_len > MD5_DIGEST_LENGTH) {
1886 			ret = CRYPTO_MECHANISM_PARAM_INVALID;
1887 			goto bail;
1888 		}
1889 	}
1890 
1891 	if (mac->cd_length != digest_len) {
1892 		ret = CRYPTO_INVALID_MAC;
1893 		goto bail;
1894 	}
1895 
1896 	/* do an MD5 update of the inner context using the specified data */
1897 	MD5_MAC_UPDATE(data, md5_hmac_ctx, ret);
1898 	if (ret != CRYPTO_SUCCESS)
1899 		/* the update failed, zeroize the context and bail */
1900 		goto bail;
1901 
1902 	/* do an MD5 final on the inner context */
1903 	MD5Final(digest, &md5_hmac_ctx.hc_icontext);
1904 
1905 	/*
1906 	 * Do an MD5 update on the outer context, feeding the inner
1907 	 * digest as data.
1908 	 */
1909 	MD5Update(&md5_hmac_ctx.hc_ocontext, digest, MD5_DIGEST_LENGTH);
1910 
1911 	/*
1912 	 * Do an MD5 final on the outer context, storing the computed
1913 	 * digest in the local digest buffer.
1914 	 */
1915 	MD5Final(digest, &md5_hmac_ctx.hc_ocontext);
1916 
1917 	/*
1918 	 * Compare the computed digest against the expected digest passed
1919 	 * as argument.
1920 	 */
1921 	switch (mac->cd_format) {
1922 
1923 	case CRYPTO_DATA_RAW:
1924 		if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
1925 		    mac->cd_offset, digest_len) != 0)
1926 			ret = CRYPTO_INVALID_MAC;
1927 		break;
1928 
1929 	case CRYPTO_DATA_UIO: {
1930 		off_t offset = mac->cd_offset;
1931 		uint_t vec_idx;
1932 		off_t scratch_offset = 0;
1933 		size_t length = digest_len;
1934 		size_t cur_len;
1935 
1936 		/* we support only kernel-resident buffers */
1937 		if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
1938 			return (CRYPTO_ARGUMENTS_BAD);
1939 
1940 		/* jump to the first iovec containing the expected digest */
1941 		for (vec_idx = 0;
1942 		    vec_idx < mac->cd_uio->uio_iovcnt &&
1943 		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len;
1944 		    offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len);
1945 		if (vec_idx == mac->cd_uio->uio_iovcnt) {
1946 			/*
1947 			 * The caller specified an offset that is
1948 			 * larger than the total size of the buffers
1949 			 * it provided.
1950 			 */
1951 			ret = CRYPTO_DATA_LEN_RANGE;
1952 			break;
1953 		}
1954 
1955 		/* compare the computed digest against the expected one */
1956 		while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
1957 			cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
1958 			    offset, length);
1959 
1960 			if (bcmp(digest + scratch_offset,
1961 			    mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
1962 			    cur_len) != 0) {
1963 				ret = CRYPTO_INVALID_MAC;
1964 				break;
1965 			}
1966 
1967 			length -= cur_len;
1968 			vec_idx++;
1969 			scratch_offset += cur_len;
1970 			offset = 0;
1971 		}
1972 		break;
1973 	}
1974 
1975 	case CRYPTO_DATA_MBLK: {
1976 		off_t offset = mac->cd_offset;
1977 		mblk_t *mp;
1978 		off_t scratch_offset = 0;
1979 		size_t length = digest_len;
1980 		size_t cur_len;
1981 
1982 		/* jump to the first mblk_t containing the expected digest */
1983 		for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
1984 		    offset -= MBLKL(mp), mp = mp->b_cont);
1985 		if (mp == NULL) {
1986 			/*
1987 			 * The caller specified an offset that is larger than
1988 			 * the total size of the buffers it provided.
1989 			 */
1990 			ret = CRYPTO_DATA_LEN_RANGE;
1991 			break;
1992 		}
1993 
1994 		while (mp != NULL && length > 0) {
1995 			cur_len = MIN(MBLKL(mp) - offset, length);
1996 			if (bcmp(digest + scratch_offset,
1997 			    mp->b_rptr + offset, cur_len) != 0) {
1998 				ret = CRYPTO_INVALID_MAC;
1999 				break;
2000 			}
2001 
2002 			length -= cur_len;
2003 			mp = mp->b_cont;
2004 			scratch_offset += cur_len;
2005 			offset = 0;
2006 		}
2007 		break;
2008 	}
2009 
2010 	default:
2011 		ret = CRYPTO_ARGUMENTS_BAD;
2012 	}
2013 
2014 	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
2015 	return (ret);
2016 bail:
2017 	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
2018 	mac->cd_length = 0;
2019 	return (ret);
2020 }
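
/*
 * Verification walk (illustrative sketch): the CRYPTO_DATA_UIO and
 * CRYPTO_DATA_MBLK branches of md5_mac_verify_atomic() both compare the
 * computed digest against an expected MAC that may be scattered across
 * several buffers, one chunk at a time.  The same walk over a generic
 * segment array, assuming the MIN() macro from <sys/sysmacros.h>
 * (seg_t and mac_compare_segs are hypothetical):
 *
 *	typedef struct seg { const uchar_t *base; size_t len; } seg_t;
 *
 *	static int
 *	mac_compare_segs(const uchar_t *digest, size_t digest_len,
 *	    const seg_t *segs, uint_t nsegs, size_t offset)
 *	{
 *		size_t done = 0, cur;
 *		uint_t i = 0;
 *
 *		while (i < nsegs && offset >= segs[i].len)
 *			offset -= segs[i++].len;
 *		if (i == nsegs)
 *			return (-1);
 *		while (i < nsegs && done < digest_len) {
 *			cur = MIN(segs[i].len - offset, digest_len - done);
 *			if (bcmp(digest + done, segs[i].base + offset,
 *			    cur) != 0)
 *				return (-1);
 *			done += cur;
 *			offset = 0;
 *			i++;
 *		}
 *		return (done == digest_len ? 0 : -1);
 *	}
 */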
2021 
2022 /*
2023  * KCF software provider context management entry points.
2024  */
2025 
2026 /* ARGSUSED */
2027 static int
2028 md5_create_ctx_template(crypto_provider_handle_t provider,
2029     crypto_mechanism_t *mechanism, crypto_key_t *key,
2030     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
2031     crypto_req_handle_t req)
2032 {
2033 	md5_hmac_ctx_t *md5_hmac_ctx_tmpl;
2034 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
2035 
2036 	if ((mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE) &&
2037 	    (mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE))
2038 		return (CRYPTO_MECHANISM_INVALID);
2039 
2040 	/* Add support for key by attributes (RFE 4706552) */
2041 	if (key->ck_format != CRYPTO_KEY_RAW)
2042 		return (CRYPTO_ARGUMENTS_BAD);
2043 
2044 	/*
2045 	 * Allocate and initialize MD5 context.
2046 	 */
2047 	md5_hmac_ctx_tmpl = kmem_alloc(sizeof (md5_hmac_ctx_t),
2048 	    crypto_kmflag(req));
2049 	if (md5_hmac_ctx_tmpl == NULL)
2050 		return (CRYPTO_HOST_MEMORY);
2051 
2052 	if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
2053 		uchar_t digested_key[MD5_DIGEST_LENGTH];
2054 
2055 		/*
2056 		 * Hash the passed-in key to get a smaller key.
2057 		 * The inner context is used since it hasn't been
2058 		 * initialized yet.
2059 		 */
2060 		PROV_MD5_DIGEST_KEY(&md5_hmac_ctx_tmpl->hc_icontext,
2061 		    key->ck_data, keylen_in_bytes, digested_key);
2062 		md5_mac_init_ctx(md5_hmac_ctx_tmpl, digested_key,
2063 		    MD5_DIGEST_LENGTH);
2064 	} else {
2065 		md5_mac_init_ctx(md5_hmac_ctx_tmpl, key->ck_data,
2066 		    keylen_in_bytes);
2067 	}
2068 
2069 	md5_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
2070 	*ctx_template = (crypto_spi_ctx_template_t)md5_hmac_ctx_tmpl;
2071 	*ctx_template_size = sizeof (md5_hmac_ctx_t);
2072 
2073 	return (CRYPTO_SUCCESS);
2074 }
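
/*
 * Template usage note (illustrative sketch): md5_create_ctx_template()
 * precomputes the keyed inner and outer MD5 contexts once, so that
 * repeated MAC operations with the same key only bcopy() the template
 * instead of re-deriving the ipad/opad state (see the ctx_template path
 * in md5_mac_atomic()).  A hedged consumer-side example, assuming the
 * standard KCF entry points crypto_create_ctx_template(), crypto_mac()
 * and crypto_destroy_ctx_template() from <sys/crypto/api.h>:
 *
 *	crypto_ctx_template_t tmpl = NULL;
 *
 *	if (crypto_create_ctx_template(&mech, &key, &tmpl,
 *	    KM_SLEEP) == CRYPTO_SUCCESS) {
 *		(void) crypto_mac(&mech, &data, &key, tmpl, &mac, NULL);
 *		... more crypto_mac() calls reusing the same tmpl ...
 *		crypto_destroy_ctx_template(tmpl);
 *	}
 */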
2075 
2076 static int
2077 md5_free_context(crypto_ctx_t *ctx)
2078 {
2079 	uint_t ctx_len;
2080 	md5_mech_type_t mech_type;
2081 
2082 	if (ctx->cc_provider_private == NULL)
2083 		return (CRYPTO_SUCCESS);
2084 
2085 	/*
2086 	 * We have to free either MD5 or MD5-HMAC contexts, which
2087 	 * have different lengths.
2088 	 */
2089 
2090 	mech_type = PROV_MD5_CTX(ctx)->mc_mech_type;
2091 	if (mech_type == MD5_MECH_INFO_TYPE)
2092 		ctx_len = sizeof (md5_ctx_t);
2093 	else {
2094 		ASSERT(mech_type == MD5_HMAC_MECH_INFO_TYPE ||
2095 		    mech_type == MD5_HMAC_GEN_MECH_INFO_TYPE);
2096 		ctx_len = sizeof (md5_hmac_ctx_t);
2097 	}
2098 
2099 	bzero(ctx->cc_provider_private, ctx_len);
2100 	kmem_free(ctx->cc_provider_private, ctx_len);
2101 	ctx->cc_provider_private = NULL;
2102 
2103 	return (CRYPTO_SUCCESS);
2104 }
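
/*
 * Cleanup note (illustrative sketch): md5_free_context() and
 * md5_mac_final() both bzero() the provider-private context before
 * kmem_free(), so that key-derived state never lingers in freed kernel
 * memory.  The same pattern applies to any buffer that has held key
 * material (secure_free is a hypothetical helper):
 *
 *	static void
 *	secure_free(void *buf, size_t len)
 *	{
 *		bzero(buf, len);
 *		kmem_free(buf, len);
 *	}
 */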
2105 
2106 #endif	/* _KERNEL && !_BOOT */
2107