xref: /titanic_41/usr/src/uts/common/crypto/io/md4_mod.c (revision 31ceb98b622e1a310256f4c4a1472beb92046db3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * In kernel module, the md4 module is created with one modlinkage,
30  * this is different to md5 and sha1 modules which have a legacy misc
31  * variant for direct calls to the Init/Update/Final routines.
32  *
33  * - a modlcrypto that allows the module to register with the Kernel
34  *   Cryptographic Framework (KCF) as a software provider for the MD4
35  *   mechanisms.
36  */
37 
38 #include <sys/types.h>
39 #include <sys/systm.h>
40 #include <sys/modctl.h>
41 #include <sys/cmn_err.h>
42 #include <sys/ddi.h>
43 #include <sys/crypto/common.h>
44 #include <sys/crypto/spi.h>
45 #include <sys/sysmacros.h>
46 #include <sys/strsun.h>
47 #include <sys/note.h>
48 #include <sys/md4.h>
49 
50 extern struct mod_ops mod_miscops;
51 extern struct mod_ops mod_cryptoops;
52 
53 /*
54  * Module linkage information for the kernel.
55  */
56 
/*
 * Crypto linkage vector: registers this module with KCF via
 * mod_cryptoops.  The "%I%" in the name is an SCCS keyword expanded
 * at build time.
 */
static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	"MD4 Kernel SW Provider %I%"
};

/* Single linkage element: the crypto provider linkage above. */
static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcrypto,
	NULL
};
67 
/*
 * CSPI information (entry points, provider info, etc.)
 */

/* Mechanism types supported by this provider; plain MD4 only. */
typedef enum md4_mech_type {
	MD4_MECH_INFO_TYPE,		/* SUN_CKM_MD4 */
} md4_mech_type_t;

#define	MD4_DIGEST_LENGTH	16	/* MD4 digest length in bytes */

/*
 * Context for MD4 mechanism.
 * One is allocated per digest session by md4_digest_init() and freed
 * by the digest/final entry points.
 */
typedef struct md4_ctx {
	md4_mech_type_t		mc_mech_type;	/* type of context */
	MD4_CTX			mc_md4_ctx;	/* MD4 context */
} md4_ctx_t;

/*
 * Macros to access the MD4 contexts from a context passed
 * by KCF to one of the entry points.
 */

#define	PROV_MD4_CTX(ctx)	((md4_ctx_t *)(ctx)->cc_provider_private)

/*
 * Mechanism info structure passed to KCF during registration.
 * The 0, 0 entries are the key-size bounds; MD4 takes no key.
 */
static crypto_mech_info_t md4_mech_info_tab[] = {
	/* MD4 */
	{SUN_CKM_MD4, MD4_MECH_INFO_TYPE,
	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
};
102 
/* Control entry point: provider status reporting. */
static void md4_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t md4_control_ops = {
	md4_provider_status
};

/* Digest entry points, implemented below. */
static int md4_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_req_handle_t);
static int md4_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);

static crypto_digest_ops_t md4_digest_ops = {
	md4_digest_init,
	md4_digest,
	md4_digest_update,
	NULL,				/* digest_key not supported */
	md4_digest_final,
	md4_digest_atomic
};

/*
 * Provider operations vector: only the control and digest op
 * families are implemented; all other op vectors are NULL.
 */
static crypto_ops_t md4_crypto_ops = {
	&md4_control_ops,
	&md4_digest_ops,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
};

/* Provider descriptor handed to KCF by _init(). */
static crypto_provider_info_t md4_prov_info = {
	CRYPTO_SPI_VERSION_1,
	"MD4 Software Provider",
	CRYPTO_SW_PROVIDER,
	{&modlinkage},
	NULL,
	&md4_crypto_ops,
	sizeof (md4_mech_info_tab)/sizeof (crypto_mech_info_t),
	md4_mech_info_tab
};

/* Handle returned by KCF at registration; NULL while unregistered. */
static crypto_kcf_provider_handle_t md4_prov_handle = NULL;
159 
/*
 * Module installation entry point.
 *
 * Installs the module, then registers it with KCF as a software
 * provider.  If KCF registration fails, the module is removed again
 * so it is not left installed without a provider, and the KCF error
 * code is returned to the loader.
 */
int
_init(void)
{
	int ret;

	if ((ret = mod_install(&modlinkage)) != 0)
		return (ret);

	/*
	 * Register with KCF. If the registration fails, log an
	 * error and uninstall the module.
	 */
	if ((ret = crypto_register_provider(&md4_prov_info,
	    &md4_prov_handle)) != CRYPTO_SUCCESS) {
		cmn_err(CE_WARN, "md4 _init: "
		    "crypto_register_provider() failed (0x%x)", ret);
		(void) mod_remove(&modlinkage);
		return (ret);
	}

	return (0);
}
182 
/*
 * Module removal entry point.
 *
 * Unregisters the provider from KCF first; if that fails the module
 * is reported busy (EBUSY) and stays loaded.  Only after a successful
 * unregistration is mod_remove() attempted.
 */
int
_fini(void)
{
	int ret;

	/*
	 * Unregister from KCF if previous registration succeeded.
	 */
	if (md4_prov_handle != NULL) {
		if ((ret = crypto_unregister_provider(md4_prov_handle)) !=
		    CRYPTO_SUCCESS) {
			cmn_err(CE_WARN, "md4 _fini: "
			    "crypto_unregister_provider() failed (0x%x)", ret);
			return (EBUSY);
		}
		md4_prov_handle = NULL;
	}

	return (mod_remove(&modlinkage));
}
203 
/*
 * Module information entry point; reports the linkage defined above.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
209 
/*
 * KCF software provider control entry points.
 */
/* ARGSUSED */
static void
md4_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	/* This software provider is always operational. */
	*status = CRYPTO_PROVIDER_READY;
}
219 
220 /*
221  * KCF software provider digest entry points.
222  */
223 
/*
 * Initialize a new MD4 digest context for a multi-part operation.
 *
 * Allocates the per-request md4_ctx_t (sleep behavior determined by
 * crypto_kmflag(req)) and runs MD4Init() on the embedded MD4_CTX.
 *
 * Returns CRYPTO_SUCCESS on success, CRYPTO_MECHANISM_INVALID if the
 * mechanism is not MD4, or CRYPTO_HOST_MEMORY if allocation fails.
 */
static int
md4_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_req_handle_t req)
{
	if (mechanism->cm_type != MD4_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/*
	 * Allocate and initialize MD4 context.
	 */
	ctx->cc_provider_private = kmem_alloc(sizeof (md4_ctx_t),
	    crypto_kmflag(req));
	if (ctx->cc_provider_private == NULL)
		return (CRYPTO_HOST_MEMORY);

	PROV_MD4_CTX(ctx)->mc_mech_type = MD4_MECH_INFO_TYPE;
	MD4Init(&PROV_MD4_CTX(ctx)->mc_md4_ctx);

	return (CRYPTO_SUCCESS);
}
244 
/*
 * Helper MD4 digest update function for uio data.
 *
 * Skips cd_offset bytes into the iovec array and feeds cd_length
 * bytes to MD4Update().  Only kernel-space (UIO_SYSSPACE) buffers
 * are supported.
 *
 * Returns CRYPTO_SUCCESS, CRYPTO_ARGUMENTS_BAD for a non-kernel uio,
 * or CRYPTO_DATA_LEN_RANGE if the offset or length overruns the
 * supplied iovecs.
 */
static int
md4_digest_update_uio(MD4_CTX *md4_ctx, crypto_data_t *data)
{
	off_t offset = data->cd_offset;
	size_t length = data->cd_length;
	uint_t vec_idx;
	size_t cur_len;

	/* we support only kernel buffer */
	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Jump to the first iovec containing data to be
	 * digested.  The vec_idx bound is tested before the iovec is
	 * dereferenced, so the loop cannot read past the array.
	 */
	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len);
	if (vec_idx == data->cd_uio->uio_iovcnt) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now do the digesting on the iovecs.
	 */
	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
		/* only the first iovec carries a non-zero start offset */
		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
		    offset, length);

		MD4Update(md4_ctx, data->cd_uio->uio_iov[vec_idx].iov_base +
		    offset, cur_len);

		length -= cur_len;
		vec_idx++;
		offset = 0;
	}

	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
		/*
		 * The end of the specified iovec's was reached but
		 * the length requested could not be processed, i.e.
		 * The caller requested to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	return (CRYPTO_SUCCESS);
}
301 
302 /*
303  * Helper MD4 digest final function for uio data.
304  * digest_len is the length of the desired digest. If digest_len
305  * is smaller than the default MD4 digest length, the caller
306  * must pass a scratch buffer, digest_scratch, which must
307  * be at least MD4_DIGEST_LENGTH bytes.
308  */
309 static int
310 md4_digest_final_uio(MD4_CTX *md4_ctx, crypto_data_t *digest,
311     ulong_t digest_len, uchar_t *digest_scratch)
312 {
313 	off_t offset = digest->cd_offset;
314 	uint_t vec_idx;
315 
316 	/* we support only kernel buffer */
317 	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
318 		return (CRYPTO_ARGUMENTS_BAD);
319 
320 	/*
321 	 * Jump to the first iovec containing ptr to the digest to
322 	 * be returned.
323 	 */
324 	for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
325 	    vec_idx < digest->cd_uio->uio_iovcnt;
326 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len);
327 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
328 		/*
329 		 * The caller specified an offset that is
330 		 * larger than the total size of the buffers
331 		 * it provided.
332 		 */
333 		return (CRYPTO_DATA_LEN_RANGE);
334 	}
335 
336 	if (offset + digest_len <=
337 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
338 		/*
339 		 * The computed MD4 digest will fit in the current
340 		 * iovec.
341 		 */
342 		if (digest_len != MD4_DIGEST_LENGTH) {
343 			/*
344 			 * The caller requested a short digest. Digest
345 			 * into a scratch buffer and return to
346 			 * the user only what was requested.
347 			 */
348 			MD4Final(digest_scratch, md4_ctx);
349 			bcopy(digest_scratch, (uchar_t *)digest->
350 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
351 			    digest_len);
352 		} else {
353 			MD4Final((uchar_t *)digest->
354 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
355 			    md4_ctx);
356 		}
357 	} else {
358 		/*
359 		 * The computed digest will be crossing one or more iovec's.
360 		 * This is bad performance-wise but we need to support it.
361 		 * Allocate a small scratch buffer on the stack and
362 		 * copy it piece meal to the specified digest iovec's.
363 		 */
364 		uchar_t digest_tmp[MD4_DIGEST_LENGTH];
365 		off_t scratch_offset = 0;
366 		size_t length = digest_len;
367 		size_t cur_len;
368 
369 		MD4Final(digest_tmp, md4_ctx);
370 
371 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
372 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
373 			    offset, length);
374 			bcopy(digest_tmp + scratch_offset,
375 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
376 			    cur_len);
377 
378 			length -= cur_len;
379 			vec_idx++;
380 			scratch_offset += cur_len;
381 			offset = 0;
382 		}
383 
384 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
385 			/*
386 			 * The end of the specified iovec's was reached but
387 			 * the length requested could not be processed, i.e.
388 			 * The caller requested to digest more data than it
389 			 * provided.
390 			 */
391 			return (CRYPTO_DATA_LEN_RANGE);
392 		}
393 	}
394 
395 	return (CRYPTO_SUCCESS);
396 }
397 
/*
 * Helper MD4 digest update for mblk's.
 *
 * Skips cd_offset bytes into the mblk chain and digests cd_length
 * bytes with MD4Update().
 *
 * Returns CRYPTO_SUCCESS, or CRYPTO_DATA_LEN_RANGE if the offset or
 * length overruns the chain.
 */
static int
md4_digest_update_mblk(MD4_CTX *md4_ctx, crypto_data_t *data)
{
	off_t offset = data->cd_offset;
	size_t length = data->cd_length;
	mblk_t *mp;
	size_t cur_len;

	/*
	 * Jump to the first mblk_t containing data to be digested.
	 */
	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont);
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now do the digesting on the mblk chain.
	 */
	while (mp != NULL && length > 0) {
		/* only the first mblk carries a non-zero start offset */
		cur_len = MIN(MBLKL(mp) - offset, length);
		MD4Update(md4_ctx, mp->b_rptr + offset, cur_len);
		length -= cur_len;
		offset = 0;
		mp = mp->b_cont;
	}

	if (mp == NULL && length > 0) {
		/*
		 * The end of the mblk was reached but the length requested
		 * could not be processed, i.e. The caller requested
		 * to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	return (CRYPTO_SUCCESS);
}
444 
/*
 * Helper MD4 digest final for mblk's.
 * digest_len is the length of the desired digest. If digest_len
 * is smaller than the default MD4 digest length, the caller
 * must pass a scratch buffer, digest_scratch, which must
 * be at least MD4_DIGEST_LENGTH bytes.
 *
 * Returns CRYPTO_SUCCESS, or CRYPTO_DATA_LEN_RANGE if the digest
 * does not fit in the chain at the requested offset.
 */
static int
md4_digest_final_mblk(MD4_CTX *md4_ctx, crypto_data_t *digest,
    ulong_t digest_len, uchar_t *digest_scratch)
{
	off_t offset = digest->cd_offset;
	mblk_t *mp;

	/*
	 * Jump to the first mblk_t that will be used to store the digest.
	 */
	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont);
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	if (offset + digest_len <= MBLKL(mp)) {
		/*
		 * The computed MD4 digest will fit in the current mblk.
		 * Do the MD4Final() in-place.
		 */
		if (digest_len != MD4_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			MD4Final(digest_scratch, md4_ctx);
			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
		} else {
			MD4Final(mp->b_rptr + offset, md4_ctx);
		}
	} else {
		/*
		 * The computed digest will be crossing one or more mblk's.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piece meal to the specified digest iovec's.
		 */
		uchar_t digest_tmp[MD4_DIGEST_LENGTH];
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		MD4Final(digest_tmp, md4_ctx);

		while (mp != NULL && length > 0) {
			/* only the first mblk carries a non-zero offset */
			cur_len = MIN(MBLKL(mp) - offset, length);
			bcopy(digest_tmp + scratch_offset,
			    mp->b_rptr + offset, cur_len);

			length -= cur_len;
			mp = mp->b_cont;
			scratch_offset += cur_len;
			offset = 0;
		}

		if (mp == NULL && length > 0) {
			/*
			 * The end of the specified mblk was reached but
			 * the length requested could not be processed, i.e.
			 * The caller requested to digest more data than it
			 * provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}
526 
/*
 * Single-part digest entry point: digests all of `data' and writes
 * the MD4 result to `digest'.
 *
 * On CRYPTO_BUFFER_TOO_SMALL the context is preserved so the caller
 * can retry with a larger buffer; on every other outcome (success or
 * failure) the context is freed before returning.
 */
/* ARGSUSED */
static int
md4_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < MD4_DIGEST_LENGTH)) {
		digest->cd_length = MD4_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do the MD4 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Update(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_update_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_update_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, free context and bail */
		kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
		ctx->cc_provider_private = NULL;
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do an MD4 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_MD4_CTX(ctx)->mc_md4_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_final_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_final_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = MD4_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
	ctx->cc_provider_private = NULL;
	return (ret);
}
608 
609 /* ARGSUSED */
610 static int
611 md4_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
612     crypto_req_handle_t req)
613 {
614 	int ret = CRYPTO_SUCCESS;
615 
616 	ASSERT(ctx->cc_provider_private != NULL);
617 
618 	/*
619 	 * Do the MD4 update on the specified input data.
620 	 */
621 	switch (data->cd_format) {
622 	case CRYPTO_DATA_RAW:
623 		MD4Update(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
624 		    data->cd_raw.iov_base + data->cd_offset,
625 		    data->cd_length);
626 		break;
627 	case CRYPTO_DATA_UIO:
628 		ret = md4_digest_update_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
629 		    data);
630 		break;
631 	case CRYPTO_DATA_MBLK:
632 		ret = md4_digest_update_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
633 		    data);
634 		break;
635 	default:
636 		ret = CRYPTO_ARGUMENTS_BAD;
637 	}
638 
639 	return (ret);
640 }
641 
/*
 * Multi-part digest final entry point: write the accumulated MD4
 * digest to `digest'.
 *
 * On CRYPTO_BUFFER_TOO_SMALL the context is preserved so the caller
 * can retry; on every other outcome the context is freed.
 */
/* ARGSUSED */
static int
md4_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < MD4_DIGEST_LENGTH)) {
		digest->cd_length = MD4_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do an MD4 final.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_MD4_CTX(ctx)->mc_md4_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_final_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_final_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = MD4_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}
694 
/*
 * Atomic (single-call) digest entry point: performs init, update and
 * final in one shot using an MD4_CTX on the stack, so no KCF-managed
 * context is allocated.
 *
 * On success digest->cd_length is set to MD4_DIGEST_LENGTH; on any
 * failure it is set to 0 and the error code is returned.
 */
/* ARGSUSED */
static int
md4_digest_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	MD4_CTX md4_ctx;

	if (mechanism->cm_type != MD4_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/*
	 * Do the MD4 init.
	 */
	MD4Init(&md4_ctx);

	/*
	 * Do the MD4 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Update(&md4_ctx, data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_update_uio(&md4_ctx, data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_update_mblk(&md4_ctx, data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, bail */
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do an MD4 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &md4_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_final_uio(&md4_ctx, digest,
		    MD4_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_final_mblk(&md4_ctx, digest,
		    MD4_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = MD4_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	return (ret);
}
766