xref: /titanic_52/usr/src/uts/common/crypto/io/md4_mod.c (revision b6c3f7863936abeae522e48a13887dddeb691a45)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
 * In kernel, the md4 module is created with a single modlinkage;
 * this is different from the md5 and sha1 modules, which also have a
 * legacy misc variant for direct calls to the Init/Update/Final routines.
32  *
33  * - a modlcrypto that allows the module to register with the Kernel
34  *   Cryptographic Framework (KCF) as a software provider for the MD4
35  *   mechanisms.
36  */
37 
38 #include <sys/types.h>
39 #include <sys/systm.h>
40 #include <sys/modctl.h>
41 #include <sys/cmn_err.h>
42 #include <sys/ddi.h>
43 #include <sys/crypto/common.h>
44 #include <sys/crypto/spi.h>
45 #include <sys/sysmacros.h>
46 #include <sys/strsun.h>
47 #include <sys/note.h>
48 #include <sys/md4.h>
49 
extern struct mod_ops mod_miscops;
extern struct mod_ops mod_cryptoops;

/*
 * Module linkage information for the kernel.
 */

/* Crypto-module linkage; makes this module known to KCF at load time. */
static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	"MD4 Kernel SW Provider"
};

/* Single-linkage module: only the modlcrypto entry above. */
static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcrypto,
	NULL
};
67 
68 /*
69  * CSPI information (entry points, provider info, etc.)
70  */
71 
/* Mechanisms implemented by this provider; only plain MD4. */
typedef enum md4_mech_type {
	MD4_MECH_INFO_TYPE,		/* SUN_CKM_MD4 */
} md4_mech_type_t;

#define	MD4_DIGEST_LENGTH	16	/* MD4 digest length in bytes */

/*
 * Context for MD4 mechanism.  Allocated in md4_digest_init() and
 * freed when the operation completes or fails.
 */
typedef struct md4_ctx {
	md4_mech_type_t		mc_mech_type;	/* type of context */
	MD4_CTX			mc_md4_ctx;	/* MD4 context */
} md4_ctx_t;

/*
 * Macros to access the MD4 contexts from a context passed
 * by KCF to one of the entry points.
 */

#define	PROV_MD4_CTX(ctx)	((md4_ctx_t *)(ctx)->cc_provider_private)
92 
/*
 * Mechanism info structure passed to KCF during registration.
 * MD4 takes no key, hence the zero min/max key sizes.
 */
static crypto_mech_info_t md4_mech_info_tab[] = {
	/* MD4 */
	{SUN_CKM_MD4, MD4_MECH_INFO_TYPE,
	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
};

static void md4_provider_status(crypto_provider_handle_t, uint_t *);

/* Control entry points exported to KCF. */
static crypto_control_ops_t md4_control_ops = {
	md4_provider_status
};
108 
static int md4_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_req_handle_t);
static int md4_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);

/* Digest entry points exported to KCF; digest_key is not supported. */
static crypto_digest_ops_t md4_digest_ops = {
	md4_digest_init,
	md4_digest,
	md4_digest_update,
	NULL,			/* digest_key */
	md4_digest_final,
	md4_digest_atomic
};
129 
/* Operation vectors exported to KCF; only control and digest are provided. */
static crypto_ops_t md4_crypto_ops = {
	&md4_control_ops,
	&md4_digest_ops,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
};

/* Provider description passed to crypto_register_provider() in _init(). */
static crypto_provider_info_t md4_prov_info = {
	CRYPTO_SPI_VERSION_1,
	"MD4 Software Provider",
	CRYPTO_SW_PROVIDER,
	{&modlinkage},
	NULL,
	&md4_crypto_ops,
	sizeof (md4_mech_info_tab)/sizeof (crypto_mech_info_t),
	md4_mech_info_tab
};

/* Handle returned by KCF registration; NULL while not registered. */
static crypto_kcf_provider_handle_t md4_prov_handle = NULL;
159 
160 int
161 _init(void)
162 {
163 	int ret;
164 
165 	if ((ret = mod_install(&modlinkage)) != 0)
166 		return (ret);
167 
168 	/*
169 	 * Register with KCF. If the registration fails, log an
170 	 * error and uninstall the module.
171 	 */
172 	if ((ret = crypto_register_provider(&md4_prov_info,
173 	    &md4_prov_handle)) != CRYPTO_SUCCESS) {
174 		cmn_err(CE_WARN, "md4 _init: "
175 		    "crypto_register_provider() failed (0x%x)", ret);
176 		(void) mod_remove(&modlinkage);
177 		return (ret);
178 	}
179 
180 	return (0);
181 }
182 
183 int
184 _fini(void)
185 {
186 	int ret;
187 
188 	/*
189 	 * Unregister from KCF if previous registration succeeded.
190 	 */
191 	if (md4_prov_handle != NULL) {
192 		if ((ret = crypto_unregister_provider(md4_prov_handle)) !=
193 		    CRYPTO_SUCCESS) {
194 			cmn_err(CE_WARN, "md4 _fini: "
195 			    "crypto_unregister_provider() failed (0x%x)", ret);
196 			return (EBUSY);
197 		}
198 		md4_prov_handle = NULL;
199 	}
200 
201 	return (mod_remove(&modlinkage));
202 }
203 
/*
 * Report module information; delegated to the module framework.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
209 
/*
 * KCF software provider control entry points.
 */

/*
 * Report provider status to KCF.  A software provider is always ready.
 */
/* ARGSUSED */
static void
md4_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}
219 
220 /*
221  * KCF software provider digest entry points.
222  */
223 
224 static int
225 md4_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
226     crypto_req_handle_t req)
227 {
228 	if (mechanism->cm_type != MD4_MECH_INFO_TYPE)
229 		return (CRYPTO_MECHANISM_INVALID);
230 
231 	/*
232 	 * Allocate and initialize MD4 context.
233 	 */
234 	ctx->cc_provider_private = kmem_alloc(sizeof (md4_ctx_t),
235 	    crypto_kmflag(req));
236 	if (ctx->cc_provider_private == NULL)
237 		return (CRYPTO_HOST_MEMORY);
238 
239 	PROV_MD4_CTX(ctx)->mc_mech_type = MD4_MECH_INFO_TYPE;
240 	MD4Init(&PROV_MD4_CTX(ctx)->mc_md4_ctx);
241 
242 	return (CRYPTO_SUCCESS);
243 }
244 
245 /*
246  * Helper MD4 digest update function for uio data.
247  */
248 static int
249 md4_digest_update_uio(MD4_CTX *md4_ctx, crypto_data_t *data)
250 {
251 	off_t offset = data->cd_offset;
252 	size_t length = data->cd_length;
253 	uint_t vec_idx;
254 	size_t cur_len;
255 
256 	/* we support only kernel buffer */
257 	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
258 		return (CRYPTO_ARGUMENTS_BAD);
259 
260 	/*
261 	 * Jump to the first iovec containing data to be
262 	 * digested.
263 	 */
264 	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
265 	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
266 	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
267 		;
268 	if (vec_idx == data->cd_uio->uio_iovcnt) {
269 		/*
270 		 * The caller specified an offset that is larger than the
271 		 * total size of the buffers it provided.
272 		 */
273 		return (CRYPTO_DATA_LEN_RANGE);
274 	}
275 
276 	/*
277 	 * Now do the digesting on the iovecs.
278 	 */
279 	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
280 		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
281 		    offset, length);
282 
283 		MD4Update(md4_ctx, data->cd_uio->uio_iov[vec_idx].iov_base +
284 		    offset, cur_len);
285 
286 		length -= cur_len;
287 		vec_idx++;
288 		offset = 0;
289 	}
290 
291 	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
292 		/*
293 		 * The end of the specified iovec's was reached but
294 		 * the length requested could not be processed, i.e.
295 		 * The caller requested to digest more data than it provided.
296 		 */
297 		return (CRYPTO_DATA_LEN_RANGE);
298 	}
299 
300 	return (CRYPTO_SUCCESS);
301 }
302 
303 /*
304  * Helper MD4 digest final function for uio data.
305  * digest_len is the length of the desired digest. If digest_len
306  * is smaller than the default MD4 digest length, the caller
307  * must pass a scratch buffer, digest_scratch, which must
308  * be at least MD4_DIGEST_LENGTH bytes.
309  */
310 static int
311 md4_digest_final_uio(MD4_CTX *md4_ctx, crypto_data_t *digest,
312     ulong_t digest_len, uchar_t *digest_scratch)
313 {
314 	off_t offset = digest->cd_offset;
315 	uint_t vec_idx;
316 
317 	/* we support only kernel buffer */
318 	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
319 		return (CRYPTO_ARGUMENTS_BAD);
320 
321 	/*
322 	 * Jump to the first iovec containing ptr to the digest to
323 	 * be returned.
324 	 */
325 	for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
326 	    vec_idx < digest->cd_uio->uio_iovcnt;
327 	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
328 		;
329 	if (vec_idx == digest->cd_uio->uio_iovcnt) {
330 		/*
331 		 * The caller specified an offset that is
332 		 * larger than the total size of the buffers
333 		 * it provided.
334 		 */
335 		return (CRYPTO_DATA_LEN_RANGE);
336 	}
337 
338 	if (offset + digest_len <=
339 	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
340 		/*
341 		 * The computed MD4 digest will fit in the current
342 		 * iovec.
343 		 */
344 		if (digest_len != MD4_DIGEST_LENGTH) {
345 			/*
346 			 * The caller requested a short digest. Digest
347 			 * into a scratch buffer and return to
348 			 * the user only what was requested.
349 			 */
350 			MD4Final(digest_scratch, md4_ctx);
351 			bcopy(digest_scratch, (uchar_t *)digest->
352 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
353 			    digest_len);
354 		} else {
355 			MD4Final((uchar_t *)digest->
356 			    cd_uio->uio_iov[vec_idx].iov_base + offset,
357 			    md4_ctx);
358 		}
359 	} else {
360 		/*
361 		 * The computed digest will be crossing one or more iovec's.
362 		 * This is bad performance-wise but we need to support it.
363 		 * Allocate a small scratch buffer on the stack and
364 		 * copy it piece meal to the specified digest iovec's.
365 		 */
366 		uchar_t digest_tmp[MD4_DIGEST_LENGTH];
367 		off_t scratch_offset = 0;
368 		size_t length = digest_len;
369 		size_t cur_len;
370 
371 		MD4Final(digest_tmp, md4_ctx);
372 
373 		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
374 			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
375 			    offset, length);
376 			bcopy(digest_tmp + scratch_offset,
377 			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
378 			    cur_len);
379 
380 			length -= cur_len;
381 			vec_idx++;
382 			scratch_offset += cur_len;
383 			offset = 0;
384 		}
385 
386 		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
387 			/*
388 			 * The end of the specified iovec's was reached but
389 			 * the length requested could not be processed, i.e.
390 			 * The caller requested to digest more data than it
391 			 * provided.
392 			 */
393 			return (CRYPTO_DATA_LEN_RANGE);
394 		}
395 	}
396 
397 	return (CRYPTO_SUCCESS);
398 }
399 
400 /*
401  * Helper MD4 digest update for mblk's.
402  */
403 static int
404 md4_digest_update_mblk(MD4_CTX *md4_ctx, crypto_data_t *data)
405 {
406 	off_t offset = data->cd_offset;
407 	size_t length = data->cd_length;
408 	mblk_t *mp;
409 	size_t cur_len;
410 
411 	/*
412 	 * Jump to the first mblk_t containing data to be digested.
413 	 */
414 	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
415 	    offset -= MBLKL(mp), mp = mp->b_cont)
416 		;
417 	if (mp == NULL) {
418 		/*
419 		 * The caller specified an offset that is larger than the
420 		 * total size of the buffers it provided.
421 		 */
422 		return (CRYPTO_DATA_LEN_RANGE);
423 	}
424 
425 	/*
426 	 * Now do the digesting on the mblk chain.
427 	 */
428 	while (mp != NULL && length > 0) {
429 		cur_len = MIN(MBLKL(mp) - offset, length);
430 		MD4Update(md4_ctx, mp->b_rptr + offset, cur_len);
431 		length -= cur_len;
432 		offset = 0;
433 		mp = mp->b_cont;
434 	}
435 
436 	if (mp == NULL && length > 0) {
437 		/*
438 		 * The end of the mblk was reached but the length requested
439 		 * could not be processed, i.e. The caller requested
440 		 * to digest more data than it provided.
441 		 */
442 		return (CRYPTO_DATA_LEN_RANGE);
443 	}
444 
445 	return (CRYPTO_SUCCESS);
446 }
447 
/*
 * Helper MD4 digest final for mblk's.
 * digest_len is the length of the desired digest. If digest_len
 * is smaller than the default MD4 digest length, the caller
 * must pass a scratch buffer, digest_scratch, which must
 * be at least MD4_DIGEST_LENGTH bytes.
 */
static int
md4_digest_final_mblk(MD4_CTX *md4_ctx, crypto_data_t *digest,
    ulong_t digest_len, uchar_t *digest_scratch)
{
	off_t offset = digest->cd_offset;
	mblk_t *mp;

	/*
	 * Jump to the first mblk_t that will be used to store the digest.
	 */
	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont)
		;
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	if (offset + digest_len <= MBLKL(mp)) {
		/*
		 * The computed MD4 digest will fit in the current mblk.
		 * Do the MD4Final() in-place.
		 */
		if (digest_len != MD4_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			MD4Final(digest_scratch, md4_ctx);
			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
		} else {
			MD4Final(mp->b_rptr + offset, md4_ctx);
		}
	} else {
		/*
		 * The computed digest will be crossing one or more mblk's.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piece meal to the specified digest iovec's.
		 */
		uchar_t digest_tmp[MD4_DIGEST_LENGTH];
		off_t scratch_offset = 0;	/* read position in digest_tmp */
		size_t length = digest_len;	/* bytes still to copy out */
		size_t cur_len;

		MD4Final(digest_tmp, md4_ctx);

		/* Copy the digest piecewise into consecutive mblks. */
		while (mp != NULL && length > 0) {
			cur_len = MIN(MBLKL(mp) - offset, length);
			bcopy(digest_tmp + scratch_offset,
			    mp->b_rptr + offset, cur_len);

			length -= cur_len;
			mp = mp->b_cont;
			scratch_offset += cur_len;
			offset = 0;
		}

		if (mp == NULL && length > 0) {
			/*
			 * The end of the specified mblk was reached but
			 * the length requested could not be processed, i.e.
			 * The caller requested to digest more data than it
			 * provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}
530 
/*
 * Single-part digest entry point: digest all of `data' and write the
 * result to `digest'.  Except for the too-small-buffer case below, the
 * context allocated by md4_digest_init() is always freed before
 * returning, on both success and failure.
 */
/* ARGSUSED */
static int
md4_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < MD4_DIGEST_LENGTH)) {
		digest->cd_length = MD4_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do the MD4 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Update(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_update_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_update_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, free context and bail */
		kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
		ctx->cc_provider_private = NULL;
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do an MD4 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_MD4_CTX(ctx)->mc_md4_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_final_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_final_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	/* cd_length reports bytes written on success, 0 on failure */
	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = MD4_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
	ctx->cc_provider_private = NULL;
	return (ret);
}
612 
613 /* ARGSUSED */
614 static int
615 md4_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
616     crypto_req_handle_t req)
617 {
618 	int ret = CRYPTO_SUCCESS;
619 
620 	ASSERT(ctx->cc_provider_private != NULL);
621 
622 	/*
623 	 * Do the MD4 update on the specified input data.
624 	 */
625 	switch (data->cd_format) {
626 	case CRYPTO_DATA_RAW:
627 		MD4Update(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
628 		    data->cd_raw.iov_base + data->cd_offset,
629 		    data->cd_length);
630 		break;
631 	case CRYPTO_DATA_UIO:
632 		ret = md4_digest_update_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
633 		    data);
634 		break;
635 	case CRYPTO_DATA_MBLK:
636 		ret = md4_digest_update_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
637 		    data);
638 		break;
639 	default:
640 		ret = CRYPTO_ARGUMENTS_BAD;
641 	}
642 
643 	return (ret);
644 }
645 
/*
 * Multi-part digest final: write the accumulated digest to `digest'.
 * Except for the too-small-buffer case below, the context is always
 * freed before returning, on both success and failure.
 */
/* ARGSUSED */
static int
md4_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < MD4_DIGEST_LENGTH)) {
		digest->cd_length = MD4_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do an MD4 final.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_MD4_CTX(ctx)->mc_md4_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_final_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_final_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */

	/* cd_length reports bytes written on success, 0 on failure */
	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = MD4_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}
698 
/*
 * Atomic (single-call) digest: init, update, and final in one entry
 * point, using an MD4 context on the stack.  No KCF context is
 * allocated, so there is nothing to free on the error paths.
 */
/* ARGSUSED */
static int
md4_digest_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	MD4_CTX md4_ctx;

	if (mechanism->cm_type != MD4_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/*
	 * Do the MD4 init.
	 */
	MD4Init(&md4_ctx);

	/*
	 * Do the MD4 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Update(&md4_ctx, data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_update_uio(&md4_ctx, data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_update_mblk(&md4_ctx, data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, bail */
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do an MD4 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &md4_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_final_uio(&md4_ctx, digest,
		    MD4_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_final_mblk(&md4_ctx, digest,
		    MD4_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* cd_length reports bytes written on success, 0 on failure */
	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = MD4_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	return (ret);
}
770