1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/modctl.h>
28 #include <sys/cmn_err.h>
29 #include <sys/note.h>
30 #include <sys/crypto/common.h>
31 #include <sys/crypto/spi.h>
32 #include <sys/strsun.h>
33 #include <sys/systm.h>
34 #include <sys/sysmacros.h>
35
36 #include <sys/sha1.h>
37 #include <sha1/sha1_impl.h>
38
39 /*
40 * The sha1 module is created with two modlinkages:
41 * - a modlmisc that allows consumers to directly call the entry points
42 * SHA1Init, SHA1Update, and SHA1Final.
43 * - a modlcrypto that allows the module to register with the Kernel
44 * Cryptographic Framework (KCF) as a software provider for the SHA1
45 * mechanisms.
46 */
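
/*
 * Illustrative sketch (not part of this module): a kernel consumer that
 * depends on misc/sha1 can call the exported entry points directly to
 * digest a flat buffer.  The buffer name and length below are hypothetical.
 *
 *	SHA1_CTX ctx;
 *	uchar_t digest[SHA1_DIGEST_LENGTH];
 *
 *	SHA1Init(&ctx);
 *	SHA1Update(&ctx, buf, buflen);
 *	SHA1Final(digest, &ctx);
 */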
47
48 static struct modlmisc modlmisc = {
49 &mod_miscops,
50 "SHA1 Message-Digest Algorithm"
51 };
52
53 static struct modlcrypto modlcrypto = {
54 &mod_cryptoops,
55 "SHA1 Kernel SW Provider 1.1"
56 };
57
58 static struct modlinkage modlinkage = {
59 MODREV_1, &modlmisc, &modlcrypto, NULL
60 };
61
62
63 /*
64 * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
65 * by KCF to one of the entry points.
66 */
67
68 #define PROV_SHA1_CTX(ctx) ((sha1_ctx_t *)(ctx)->cc_provider_private)
69 #define PROV_SHA1_HMAC_CTX(ctx) ((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)
70
71 /* to extract the digest length passed as mechanism parameter */
72 #define PROV_SHA1_GET_DIGEST_LEN(m, len) { \
73 if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t))) \
	(len) = (uint32_t)*((ulong_t *)(void *)(m)->cm_param);	\
75 else { \
76 ulong_t tmp_ulong; \
77 bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t)); \
78 (len) = (uint32_t)tmp_ulong; \
79 } \
80 }
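
/*
 * For the SHA1-HMAC GENERAL mechanism, the mechanism handed to this
 * provider carries the desired MAC length as a ulong_t in cm_param, which
 * the macro above extracts.  A minimal sketch of such a mechanism follows;
 * the variable names are hypothetical.
 *
 *	ulong_t mac_len = 12;		(request a truncated 12-byte MAC)
 *	crypto_mechanism_t mech;
 *
 *	mech.cm_type = SHA1_HMAC_GEN_MECH_INFO_TYPE;
 *	mech.cm_param = (caddr_t)&mac_len;
 *	mech.cm_param_len = sizeof (mac_len);
 */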
81
82 #define PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) { \
83 SHA1Init(ctx); \
84 SHA1Update(ctx, key, len); \
85 SHA1Final(digest, ctx); \
86 }
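
/*
 * A minimal sketch of how the macro above is used: when an HMAC key is
 * longer than the SHA1 block size, it is first condensed to
 * SHA1_DIGEST_LENGTH bytes before the inner and outer contexts are set up.
 * The local names are hypothetical; see sha1_mac_init() below for the
 * real use.
 *
 *	uchar_t digested_key[SHA1_DIGEST_LENGTH];
 *
 *	if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE)
 *		PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext, key->ck_data,
 *		    keylen_in_bytes, digested_key);
 */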
87
88 /*
89 * Mechanism info structure passed to KCF during registration.
90 */
91 static crypto_mech_info_t sha1_mech_info_tab[] = {
92 /* SHA1 */
93 {SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
94 CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
95 0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
96 /* SHA1-HMAC */
97 {SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
98 CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
99 SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
100 CRYPTO_KEYSIZE_UNIT_IN_BYTES},
101 /* SHA1-HMAC GENERAL */
102 {SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
103 CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
104 SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
105 CRYPTO_KEYSIZE_UNIT_IN_BYTES}
106 };
107
108 static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
109
110 static crypto_control_ops_t sha1_control_ops = {
111 sha1_provider_status
112 };
113
114 static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
115 crypto_req_handle_t);
116 static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
117 crypto_req_handle_t);
118 static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
119 crypto_req_handle_t);
120 static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
121 crypto_req_handle_t);
122 static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
123 crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
124 crypto_req_handle_t);
125
126 static crypto_digest_ops_t sha1_digest_ops = {
127 sha1_digest_init,
128 sha1_digest,
129 sha1_digest_update,
130 NULL,
131 sha1_digest_final,
132 sha1_digest_atomic
133 };
134
135 static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
136 crypto_spi_ctx_template_t, crypto_req_handle_t);
137 static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
138 crypto_req_handle_t);
139 static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
140 static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
141 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
142 crypto_spi_ctx_template_t, crypto_req_handle_t);
143 static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
144 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
145 crypto_spi_ctx_template_t, crypto_req_handle_t);
146
147 static crypto_mac_ops_t sha1_mac_ops = {
148 sha1_mac_init,
149 NULL,
150 sha1_mac_update,
151 sha1_mac_final,
152 sha1_mac_atomic,
153 sha1_mac_verify_atomic
154 };
155
156 static int sha1_create_ctx_template(crypto_provider_handle_t,
157 crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
158 size_t *, crypto_req_handle_t);
159 static int sha1_free_context(crypto_ctx_t *);
160
161 static crypto_ctx_ops_t sha1_ctx_ops = {
162 sha1_create_ctx_template,
163 sha1_free_context
164 };
165
166 static crypto_ops_t sha1_crypto_ops = {
167 &sha1_control_ops,
168 &sha1_digest_ops,
169 NULL,
170 &sha1_mac_ops,
171 NULL,
172 NULL,
173 NULL,
174 NULL,
175 NULL,
176 NULL,
177 NULL,
178 NULL,
179 NULL,
180 &sha1_ctx_ops,
181 NULL,
182 NULL,
183 NULL,
184 };
185
186 static crypto_provider_info_t sha1_prov_info = {
187 CRYPTO_SPI_VERSION_4,
188 "SHA1 Software Provider",
189 CRYPTO_SW_PROVIDER,
190 {&modlinkage},
191 NULL,
192 &sha1_crypto_ops,
193 sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
194 sha1_mech_info_tab
195 };
196
197 static crypto_kcf_provider_handle_t sha1_prov_handle = NULL;
198
199 int
_init()
201 {
202 int ret;
203
204 if ((ret = mod_install(&modlinkage)) != 0)
205 return (ret);
206
207 /*
	 * Register with KCF. If the registration fails, do not uninstall
209 * the module, since the functionality provided by misc/sha1 should
210 * still be available.
211 */
212 (void) crypto_register_provider(&sha1_prov_info, &sha1_prov_handle);
213
214 return (0);
215 }
216
217 int
_info(struct modinfo *modinfop)
219 {
220 return (mod_info(&modlinkage, modinfop));
221 }
222
223 /*
224 * KCF software provider control entry points.
225 */
226 /* ARGSUSED */
227 static void
sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
229 {
230 *status = CRYPTO_PROVIDER_READY;
231 }
232
233 /*
234 * KCF software provider digest entry points.
235 */
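
/*
 * These provider entry points are not called directly by consumers; KCF
 * dispatches to them when a consumer uses the kernel crypto API.  A rough,
 * hypothetical consumer-side sketch (assuming the crypto_mech2id(9F) and
 * crypto_digest(9F) interfaces, with data and digest being crypto_data_t
 * structures prepared by the caller) would look like:
 *
 *	crypto_mechanism_t mech;
 *
 *	mech.cm_type = crypto_mech2id(SUN_CKM_SHA1);
 *	mech.cm_param = NULL;
 *	mech.cm_param_len = 0;
 *	(void) crypto_digest(&mech, &data, &digest, NULL);
 */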
236
237 static int
sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
239 crypto_req_handle_t req)
240 {
241 if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
242 return (CRYPTO_MECHANISM_INVALID);
243
244 /*
245 * Allocate and initialize SHA1 context.
246 */
247 ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
248 crypto_kmflag(req));
249 if (ctx->cc_provider_private == NULL)
250 return (CRYPTO_HOST_MEMORY);
251
252 PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
253 SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
254
255 return (CRYPTO_SUCCESS);
256 }
257
258 /*
259 * Helper SHA1 digest update function for uio data.
260 */
261 static int
sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
263 {
264 off_t offset = data->cd_offset;
265 size_t length = data->cd_length;
266 uint_t vec_idx;
267 size_t cur_len;
268
269 /* we support only kernel buffer */
270 if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
271 return (CRYPTO_ARGUMENTS_BAD);
272
273 /*
274 * Jump to the first iovec containing data to be
275 * digested.
276 */
277 for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
278 offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
279 offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
280 ;
281 if (vec_idx == data->cd_uio->uio_iovcnt) {
282 /*
283 * The caller specified an offset that is larger than the
284 * total size of the buffers it provided.
285 */
286 return (CRYPTO_DATA_LEN_RANGE);
287 }
288
289 /*
290 * Now do the digesting on the iovecs.
291 */
292 while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
293 cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
294 offset, length);
295
296 SHA1Update(sha1_ctx,
297 (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
298 cur_len);
299
300 length -= cur_len;
301 vec_idx++;
302 offset = 0;
303 }
304
305 if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
306 /*
		 * The end of the specified iovecs was reached but
		 * the length requested could not be processed, i.e.,
		 * the caller requested to digest more data than it provided.
310 */
311 return (CRYPTO_DATA_LEN_RANGE);
312 }
313
314 return (CRYPTO_SUCCESS);
315 }
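
/*
 * Illustrative sketch: the helper above walks a scatter/gather uio_t.  A
 * caller describing a message split across two kernel buffers might build
 * the following (hdr, hdrlen, payload, and payloadlen are hypothetical):
 *
 *	iovec_t iov[2];
 *	uio_t uio;
 *	crypto_data_t data;
 *
 *	iov[0].iov_base = (caddr_t)hdr;
 *	iov[0].iov_len = hdrlen;
 *	iov[1].iov_base = (caddr_t)payload;
 *	iov[1].iov_len = payloadlen;
 *	uio.uio_iov = iov;
 *	uio.uio_iovcnt = 2;
 *	uio.uio_segflg = UIO_SYSSPACE;
 *	data.cd_format = CRYPTO_DATA_UIO;
 *	data.cd_offset = 0;
 *	data.cd_length = hdrlen + payloadlen;
 *	data.cd_uio = &uio;
 */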
316
317 /*
318 * Helper SHA1 digest final function for uio data.
319 * digest_len is the length of the desired digest. If digest_len
320 * is smaller than the default SHA1 digest length, the caller
321 * must pass a scratch buffer, digest_scratch, which must
322 * be at least SHA1_DIGEST_LENGTH bytes.
323 */
324 static int
sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
326 ulong_t digest_len, uchar_t *digest_scratch)
327 {
328 off_t offset = digest->cd_offset;
329 uint_t vec_idx;
330
331 /* we support only kernel buffer */
332 if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
333 return (CRYPTO_ARGUMENTS_BAD);
334
335 /*
336 * Jump to the first iovec containing ptr to the digest to
337 * be returned.
338 */
	for (vec_idx = 0; vec_idx < digest->cd_uio->uio_iovcnt &&
	    offset >= digest->cd_uio->uio_iov[vec_idx].iov_len;
341 offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
342 ;
343 if (vec_idx == digest->cd_uio->uio_iovcnt) {
344 /*
345 * The caller specified an offset that is
346 * larger than the total size of the buffers
347 * it provided.
348 */
349 return (CRYPTO_DATA_LEN_RANGE);
350 }
351
352 if (offset + digest_len <=
353 digest->cd_uio->uio_iov[vec_idx].iov_len) {
354 /*
355 * The computed SHA1 digest will fit in the current
356 * iovec.
357 */
358 if (digest_len != SHA1_DIGEST_LENGTH) {
359 /*
360 * The caller requested a short digest. Digest
361 * into a scratch buffer and return to
362 * the user only what was requested.
363 */
364 SHA1Final(digest_scratch, sha1_ctx);
365 bcopy(digest_scratch, (uchar_t *)digest->
366 cd_uio->uio_iov[vec_idx].iov_base + offset,
367 digest_len);
368 } else {
369 SHA1Final((uchar_t *)digest->
370 cd_uio->uio_iov[vec_idx].iov_base + offset,
371 sha1_ctx);
372 }
373 } else {
374 /*
		 * The computed digest will cross one or more iovecs.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piecemeal to the specified digest iovecs.
379 */
380 uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
381 off_t scratch_offset = 0;
382 size_t length = digest_len;
383 size_t cur_len;
384
385 SHA1Final(digest_tmp, sha1_ctx);
386
387 while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
388 cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
389 offset, length);
390 bcopy(digest_tmp + scratch_offset,
391 digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
392 cur_len);
393
394 length -= cur_len;
395 vec_idx++;
396 scratch_offset += cur_len;
397 offset = 0;
398 }
399
400 if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
401 /*
			 * The end of the specified iovecs was reached but
			 * the length requested could not be processed, i.e.,
			 * the caller requested to digest more data than it
			 * provided.
406 */
407 return (CRYPTO_DATA_LEN_RANGE);
408 }
409 }
410
411 return (CRYPTO_SUCCESS);
412 }
413
414 /*
 * Helper SHA1 digest update for mblks.
416 */
417 static int
sha1_digest_update_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *data)
419 {
420 off_t offset = data->cd_offset;
421 size_t length = data->cd_length;
422 mblk_t *mp;
423 size_t cur_len;
424
425 /*
426 * Jump to the first mblk_t containing data to be digested.
427 */
428 for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
429 offset -= MBLKL(mp), mp = mp->b_cont)
430 ;
431 if (mp == NULL) {
432 /*
433 * The caller specified an offset that is larger than the
434 * total size of the buffers it provided.
435 */
436 return (CRYPTO_DATA_LEN_RANGE);
437 }
438
439 /*
440 * Now do the digesting on the mblk chain.
441 */
442 while (mp != NULL && length > 0) {
443 cur_len = MIN(MBLKL(mp) - offset, length);
444 SHA1Update(sha1_ctx, mp->b_rptr + offset, cur_len);
445 length -= cur_len;
446 offset = 0;
447 mp = mp->b_cont;
448 }
449
450 if (mp == NULL && length > 0) {
451 /*
		 * The end of the mblk chain was reached but the length
		 * requested could not be processed, i.e., the caller
		 * requested to digest more data than it provided.
455 */
456 return (CRYPTO_DATA_LEN_RANGE);
457 }
458
459 return (CRYPTO_SUCCESS);
460 }
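
/*
 * Illustrative sketch: for STREAMS data the caller points cd_mp at the head
 * of an mblk chain and the helper above walks it through b_cont.  The mp
 * variable below is hypothetical; msgdsize() comes from <sys/strsun.h>.
 *
 *	crypto_data_t data;
 *
 *	data.cd_format = CRYPTO_DATA_MBLK;
 *	data.cd_offset = 0;
 *	data.cd_length = msgdsize(mp);
 *	data.cd_mp = mp;
 */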
461
462 /*
 * Helper SHA1 digest final for mblks.
464 * digest_len is the length of the desired digest. If digest_len
465 * is smaller than the default SHA1 digest length, the caller
466 * must pass a scratch buffer, digest_scratch, which must
467 * be at least SHA1_DIGEST_LENGTH bytes.
468 */
469 static int
sha1_digest_final_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
471 ulong_t digest_len, uchar_t *digest_scratch)
472 {
473 off_t offset = digest->cd_offset;
474 mblk_t *mp;
475
476 /*
477 * Jump to the first mblk_t that will be used to store the digest.
478 */
479 for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
480 offset -= MBLKL(mp), mp = mp->b_cont)
481 ;
482 if (mp == NULL) {
483 /*
484 * The caller specified an offset that is larger than the
485 * total size of the buffers it provided.
486 */
487 return (CRYPTO_DATA_LEN_RANGE);
488 }
489
490 if (offset + digest_len <= MBLKL(mp)) {
491 /*
492 * The computed SHA1 digest will fit in the current mblk.
493 * Do the SHA1Final() in-place.
494 */
495 if (digest_len != SHA1_DIGEST_LENGTH) {
496 /*
497 * The caller requested a short digest. Digest
498 * into a scratch buffer and return to
499 * the user only what was requested.
500 */
501 SHA1Final(digest_scratch, sha1_ctx);
502 bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
503 } else {
504 SHA1Final(mp->b_rptr + offset, sha1_ctx);
505 }
506 } else {
507 /*
		 * The computed digest will cross one or more mblks.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piecemeal to the specified digest mblks.
512 */
513 uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
514 off_t scratch_offset = 0;
515 size_t length = digest_len;
516 size_t cur_len;
517
518 SHA1Final(digest_tmp, sha1_ctx);
519
520 while (mp != NULL && length > 0) {
521 cur_len = MIN(MBLKL(mp) - offset, length);
522 bcopy(digest_tmp + scratch_offset,
523 mp->b_rptr + offset, cur_len);
524
525 length -= cur_len;
526 mp = mp->b_cont;
527 scratch_offset += cur_len;
528 offset = 0;
529 }
530
531 if (mp == NULL && length > 0) {
532 /*
			 * The end of the specified mblk chain was reached but
			 * the length requested could not be processed, i.e.,
			 * the caller requested to digest more data than it
			 * provided.
537 */
538 return (CRYPTO_DATA_LEN_RANGE);
539 }
540 }
541
542 return (CRYPTO_SUCCESS);
543 }
544
545 /* ARGSUSED */
546 static int
sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
548 crypto_req_handle_t req)
549 {
550 int ret = CRYPTO_SUCCESS;
551
552 ASSERT(ctx->cc_provider_private != NULL);
553
554 /*
555 * We need to just return the length needed to store the output.
556 * We should not destroy the context for the following cases.
557 */
558 if ((digest->cd_length == 0) ||
559 (digest->cd_length < SHA1_DIGEST_LENGTH)) {
560 digest->cd_length = SHA1_DIGEST_LENGTH;
561 return (CRYPTO_BUFFER_TOO_SMALL);
562 }
563
564 /*
565 * Do the SHA1 update on the specified input data.
566 */
567 switch (data->cd_format) {
568 case CRYPTO_DATA_RAW:
569 SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
570 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
571 data->cd_length);
572 break;
573 case CRYPTO_DATA_UIO:
574 ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
575 data);
576 break;
577 case CRYPTO_DATA_MBLK:
578 ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
579 data);
580 break;
581 default:
582 ret = CRYPTO_ARGUMENTS_BAD;
583 }
584
585 if (ret != CRYPTO_SUCCESS) {
586 /* the update failed, free context and bail */
587 kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
588 ctx->cc_provider_private = NULL;
589 digest->cd_length = 0;
590 return (ret);
591 }
592
593 /*
	 * Do a SHA1 final. This must be done separately since the digest
	 * type can be different from the input data type.
596 */
597 switch (digest->cd_format) {
598 case CRYPTO_DATA_RAW:
599 SHA1Final((unsigned char *)digest->cd_raw.iov_base +
600 digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
601 break;
602 case CRYPTO_DATA_UIO:
603 ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
604 digest, SHA1_DIGEST_LENGTH, NULL);
605 break;
606 case CRYPTO_DATA_MBLK:
607 ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
608 digest, SHA1_DIGEST_LENGTH, NULL);
609 break;
610 default:
611 ret = CRYPTO_ARGUMENTS_BAD;
612 }
613
614 /* all done, free context and return */
615
616 if (ret == CRYPTO_SUCCESS) {
617 digest->cd_length = SHA1_DIGEST_LENGTH;
618 } else {
619 digest->cd_length = 0;
620 }
621
622 kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
623 ctx->cc_provider_private = NULL;
624 return (ret);
625 }
626
627 /* ARGSUSED */
628 static int
sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
630 crypto_req_handle_t req)
631 {
632 int ret = CRYPTO_SUCCESS;
633
634 ASSERT(ctx->cc_provider_private != NULL);
635
636 /*
637 * Do the SHA1 update on the specified input data.
638 */
639 switch (data->cd_format) {
640 case CRYPTO_DATA_RAW:
641 SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
642 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
643 data->cd_length);
644 break;
645 case CRYPTO_DATA_UIO:
646 ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
647 data);
648 break;
649 case CRYPTO_DATA_MBLK:
650 ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
651 data);
652 break;
653 default:
654 ret = CRYPTO_ARGUMENTS_BAD;
655 }
656
657 return (ret);
658 }
659
660 /* ARGSUSED */
661 static int
sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
663 crypto_req_handle_t req)
664 {
665 int ret = CRYPTO_SUCCESS;
666
667 ASSERT(ctx->cc_provider_private != NULL);
668
669 /*
670 * We need to just return the length needed to store the output.
671 * We should not destroy the context for the following cases.
672 */
673 if ((digest->cd_length == 0) ||
674 (digest->cd_length < SHA1_DIGEST_LENGTH)) {
675 digest->cd_length = SHA1_DIGEST_LENGTH;
676 return (CRYPTO_BUFFER_TOO_SMALL);
677 }
678
679 /*
680 * Do a SHA1 final.
681 */
682 switch (digest->cd_format) {
683 case CRYPTO_DATA_RAW:
684 SHA1Final((unsigned char *)digest->cd_raw.iov_base +
685 digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
686 break;
687 case CRYPTO_DATA_UIO:
688 ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
689 digest, SHA1_DIGEST_LENGTH, NULL);
690 break;
691 case CRYPTO_DATA_MBLK:
692 ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
693 digest, SHA1_DIGEST_LENGTH, NULL);
694 break;
695 default:
696 ret = CRYPTO_ARGUMENTS_BAD;
697 }
698
699 /* all done, free context and return */
700
701 if (ret == CRYPTO_SUCCESS) {
702 digest->cd_length = SHA1_DIGEST_LENGTH;
703 } else {
704 digest->cd_length = 0;
705 }
706
707 kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
708 ctx->cc_provider_private = NULL;
709
710 return (ret);
711 }
712
713 /* ARGSUSED */
714 static int
sha1_digest_atomic(crypto_provider_handle_t provider,
716 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
717 crypto_data_t *data, crypto_data_t *digest,
718 crypto_req_handle_t req)
719 {
720 int ret = CRYPTO_SUCCESS;
721 SHA1_CTX sha1_ctx;
722
723 if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
724 return (CRYPTO_MECHANISM_INVALID);
725
726 /*
727 * Do the SHA1 init.
728 */
729 SHA1Init(&sha1_ctx);
730
731 /*
732 * Do the SHA1 update on the specified input data.
733 */
734 switch (data->cd_format) {
735 case CRYPTO_DATA_RAW:
736 SHA1Update(&sha1_ctx,
737 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
738 data->cd_length);
739 break;
740 case CRYPTO_DATA_UIO:
741 ret = sha1_digest_update_uio(&sha1_ctx, data);
742 break;
743 case CRYPTO_DATA_MBLK:
744 ret = sha1_digest_update_mblk(&sha1_ctx, data);
745 break;
746 default:
747 ret = CRYPTO_ARGUMENTS_BAD;
748 }
749
750 if (ret != CRYPTO_SUCCESS) {
751 /* the update failed, bail */
752 digest->cd_length = 0;
753 return (ret);
754 }
755
756 /*
	 * Do a SHA1 final. This must be done separately since the digest
	 * type can be different from the input data type.
759 */
760 switch (digest->cd_format) {
761 case CRYPTO_DATA_RAW:
762 SHA1Final((unsigned char *)digest->cd_raw.iov_base +
763 digest->cd_offset, &sha1_ctx);
764 break;
765 case CRYPTO_DATA_UIO:
766 ret = sha1_digest_final_uio(&sha1_ctx, digest,
767 SHA1_DIGEST_LENGTH, NULL);
768 break;
769 case CRYPTO_DATA_MBLK:
770 ret = sha1_digest_final_mblk(&sha1_ctx, digest,
771 SHA1_DIGEST_LENGTH, NULL);
772 break;
773 default:
774 ret = CRYPTO_ARGUMENTS_BAD;
775 }
776
777 if (ret == CRYPTO_SUCCESS) {
778 digest->cd_length = SHA1_DIGEST_LENGTH;
779 } else {
780 digest->cd_length = 0;
781 }
782
783 return (ret);
784 }
785
786 /*
787 * KCF software provider mac entry points.
788 *
789 * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
790 *
791 * Init:
792 * The initialization routine initializes what we denote
793 * as the inner and outer contexts by doing
794 * - for inner context: SHA1(key XOR ipad)
795 * - for outer context: SHA1(key XOR opad)
796 *
797 * Update:
798 * Each subsequent SHA1 HMAC update will result in an
799 * update of the inner context with the specified data.
800 *
801 * Final:
802 * The SHA1 HMAC final will do a SHA1 final operation on the
803 * inner context, and the resulting digest will be used
804 * as the data for an update on the outer context. Last
805 * but not least, a SHA1 final on the outer context will
806 * be performed to obtain the SHA1 HMAC digest to return
807 * to the user.
808 */
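
/*
 * Illustrative sketch of the overall flow for a block-sized key and a
 * message msg of msglen bytes (hypothetical names; this is what the
 * init/update/final entry points below implement incrementally).  Here
 * k_ipad and k_opad denote the block-sized key XORed with 0x36 and 0x5c
 * bytes respectively, as computed by sha1_mac_init_ctx() below.
 *
 *	SHA1Init(&inner);
 *	SHA1Update(&inner, k_ipad, SHA1_HMAC_BLOCK_SIZE);
 *	SHA1Update(&inner, msg, msglen);
 *	SHA1Final(inner_digest, &inner);
 *
 *	SHA1Init(&outer);
 *	SHA1Update(&outer, k_opad, SHA1_HMAC_BLOCK_SIZE);
 *	SHA1Update(&outer, inner_digest, SHA1_DIGEST_LENGTH);
 *	SHA1Final(hmac, &outer);
 */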
809
810 /*
811 * Initialize a SHA1-HMAC context.
812 */
813 static void
sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
815 {
816 uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
817 uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
818 uint_t i;
819
820 bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
821 bzero(opad, SHA1_HMAC_BLOCK_SIZE);
822
823 bcopy(keyval, ipad, length_in_bytes);
824 bcopy(keyval, opad, length_in_bytes);
825
826 /* XOR key with ipad (0x36) and opad (0x5c) */
827 for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
828 ipad[i] ^= 0x36363636;
829 opad[i] ^= 0x5c5c5c5c;
830 }
831
832 /* perform SHA1 on ipad */
833 SHA1Init(&ctx->hc_icontext);
834 SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
835
836 /* perform SHA1 on opad */
837 SHA1Init(&ctx->hc_ocontext);
838 SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
839 }
840
/*
 * Initialize a SHA1-HMAC operation; this is the KCF software provider
 * mac init entry point.
 */
843 static int
sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
845 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
846 crypto_req_handle_t req)
847 {
848 int ret = CRYPTO_SUCCESS;
849 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
850
851 if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
852 mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
853 return (CRYPTO_MECHANISM_INVALID);
854
855 /* Add support for key by attributes (RFE 4706552) */
856 if (key->ck_format != CRYPTO_KEY_RAW)
857 return (CRYPTO_ARGUMENTS_BAD);
858
859 ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
860 crypto_kmflag(req));
861 if (ctx->cc_provider_private == NULL)
862 return (CRYPTO_HOST_MEMORY);
863
864 if (ctx_template != NULL) {
865 /* reuse context template */
866 bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
867 sizeof (sha1_hmac_ctx_t));
868 } else {
869 /* no context template, compute context */
870 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
871 uchar_t digested_key[SHA1_DIGEST_LENGTH];
872 sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
873
874 /*
875 * Hash the passed-in key to get a smaller key.
876 * The inner context is used since it hasn't been
877 * initialized yet.
878 */
879 PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
880 key->ck_data, keylen_in_bytes, digested_key);
881 sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
882 digested_key, SHA1_DIGEST_LENGTH);
883 } else {
884 sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
885 key->ck_data, keylen_in_bytes);
886 }
887 }
888
889 /*
890 * Get the mechanism parameters, if applicable.
891 */
892 PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
893 if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t)) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
		} else {
			PROV_SHA1_GET_DIGEST_LEN(mechanism,
			    PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
			if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
			    SHA1_DIGEST_LENGTH)
				ret = CRYPTO_MECHANISM_PARAM_INVALID;
		}
902 }
903
904 if (ret != CRYPTO_SUCCESS) {
905 bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
906 kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
907 ctx->cc_provider_private = NULL;
908 }
909
910 return (ret);
911 }
912
913 /* ARGSUSED */
914 static int
sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
916 {
917 int ret = CRYPTO_SUCCESS;
918
919 ASSERT(ctx->cc_provider_private != NULL);
920
921 /*
922 * Do a SHA1 update of the inner context using the specified
923 * data.
924 */
925 switch (data->cd_format) {
926 case CRYPTO_DATA_RAW:
927 SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
928 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
929 data->cd_length);
930 break;
931 case CRYPTO_DATA_UIO:
932 ret = sha1_digest_update_uio(
933 &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
934 break;
935 case CRYPTO_DATA_MBLK:
936 ret = sha1_digest_update_mblk(
937 &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
938 break;
939 default:
940 ret = CRYPTO_ARGUMENTS_BAD;
941 }
942
943 return (ret);
944 }
945
946 /* ARGSUSED */
947 static int
sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
949 {
950 int ret = CRYPTO_SUCCESS;
951 uchar_t digest[SHA1_DIGEST_LENGTH];
952 uint32_t digest_len = SHA1_DIGEST_LENGTH;
953
954 ASSERT(ctx->cc_provider_private != NULL);
955
956 if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
957 SHA1_HMAC_GEN_MECH_INFO_TYPE)
958 digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
959
960 /*
961 * We need to just return the length needed to store the output.
962 * We should not destroy the context for the following cases.
963 */
964 if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
965 mac->cd_length = digest_len;
966 return (CRYPTO_BUFFER_TOO_SMALL);
967 }
968
969 /*
970 * Do a SHA1 final on the inner context.
971 */
972 SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);
973
974 /*
975 * Do a SHA1 update on the outer context, feeding the inner
976 * digest as data.
977 */
978 SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
979 SHA1_DIGEST_LENGTH);
980
981 /*
	 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the user's buffer.
984 */
985 switch (mac->cd_format) {
986 case CRYPTO_DATA_RAW:
987 if (digest_len != SHA1_DIGEST_LENGTH) {
988 /*
989 * The caller requested a short digest. Digest
990 * into a scratch buffer and return to
991 * the user only what was requested.
992 */
993 SHA1Final(digest,
994 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
995 bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
996 mac->cd_offset, digest_len);
997 } else {
998 SHA1Final((unsigned char *)mac->cd_raw.iov_base +
999 mac->cd_offset,
1000 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
1001 }
1002 break;
1003 case CRYPTO_DATA_UIO:
1004 ret = sha1_digest_final_uio(
1005 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1006 digest_len, digest);
1007 break;
1008 case CRYPTO_DATA_MBLK:
1009 ret = sha1_digest_final_mblk(
1010 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1011 digest_len, digest);
1012 break;
1013 default:
1014 ret = CRYPTO_ARGUMENTS_BAD;
1015 }
1016
1017 if (ret == CRYPTO_SUCCESS) {
1018 mac->cd_length = digest_len;
1019 } else {
1020 mac->cd_length = 0;
1021 }
1022
1023 bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1024 kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1025 ctx->cc_provider_private = NULL;
1026
1027 return (ret);
1028 }
1029
1030 #define SHA1_MAC_UPDATE(data, ctx, ret) { \
1031 switch (data->cd_format) { \
1032 case CRYPTO_DATA_RAW: \
1033 SHA1Update(&(ctx).hc_icontext, \
1034 (uint8_t *)data->cd_raw.iov_base + \
1035 data->cd_offset, data->cd_length); \
1036 break; \
1037 case CRYPTO_DATA_UIO: \
1038 ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
1039 break; \
1040 case CRYPTO_DATA_MBLK: \
1041 ret = sha1_digest_update_mblk(&(ctx).hc_icontext, \
1042 data); \
1043 break; \
1044 default: \
1045 ret = CRYPTO_ARGUMENTS_BAD; \
1046 } \
1047 }
1048
1049 /* ARGSUSED */
1050 static int
sha1_mac_atomic(crypto_provider_handle_t provider,
1052 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1053 crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1054 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1055 {
1056 int ret = CRYPTO_SUCCESS;
1057 uchar_t digest[SHA1_DIGEST_LENGTH];
1058 sha1_hmac_ctx_t sha1_hmac_ctx;
1059 uint32_t digest_len = SHA1_DIGEST_LENGTH;
1060 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1061
1062 if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1063 mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1064 return (CRYPTO_MECHANISM_INVALID);
1065
1066 /* Add support for key by attributes (RFE 4706552) */
1067 if (key->ck_format != CRYPTO_KEY_RAW)
1068 return (CRYPTO_ARGUMENTS_BAD);
1069
1070 if (ctx_template != NULL) {
1071 /* reuse context template */
1072 bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1073 } else {
1074 /* no context template, initialize context */
1075 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1076 /*
1077 * Hash the passed-in key to get a smaller key.
1078 * The inner context is used since it hasn't been
1079 * initialized yet.
1080 */
1081 PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1082 key->ck_data, keylen_in_bytes, digest);
1083 sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1084 SHA1_DIGEST_LENGTH);
1085 } else {
1086 sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1087 keylen_in_bytes);
1088 }
1089 }
1090
1091 /* get the mechanism parameters, if applicable */
1092 if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1093 if (mechanism->cm_param == NULL ||
1094 mechanism->cm_param_len != sizeof (ulong_t)) {
1095 ret = CRYPTO_MECHANISM_PARAM_INVALID;
1096 goto bail;
1097 }
1098 PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1099 if (digest_len > SHA1_DIGEST_LENGTH) {
1100 ret = CRYPTO_MECHANISM_PARAM_INVALID;
1101 goto bail;
1102 }
1103 }
1104
1105 /* do a SHA1 update of the inner context using the specified data */
1106 SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1107 if (ret != CRYPTO_SUCCESS)
		/* the update failed, zeroize the context and bail */
1109 goto bail;
1110
1111 /*
1112 * Do a SHA1 final on the inner context.
1113 */
1114 SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1115
1116 /*
1117 * Do an SHA1 update on the outer context, feeding the inner
1118 * digest as data.
1119 */
1120 SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1121
1122 /*
1123 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the user's buffer.
1125 */
1126 switch (mac->cd_format) {
1127 case CRYPTO_DATA_RAW:
1128 if (digest_len != SHA1_DIGEST_LENGTH) {
1129 /*
1130 * The caller requested a short digest. Digest
1131 * into a scratch buffer and return to
1132 * the user only what was requested.
1133 */
1134 SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1135 bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1136 mac->cd_offset, digest_len);
1137 } else {
1138 SHA1Final((unsigned char *)mac->cd_raw.iov_base +
1139 mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
1140 }
1141 break;
1142 case CRYPTO_DATA_UIO:
1143 ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
1144 digest_len, digest);
1145 break;
1146 case CRYPTO_DATA_MBLK:
1147 ret = sha1_digest_final_mblk(&sha1_hmac_ctx.hc_ocontext, mac,
1148 digest_len, digest);
1149 break;
1150 default:
1151 ret = CRYPTO_ARGUMENTS_BAD;
1152 }
1153
1154 if (ret == CRYPTO_SUCCESS) {
1155 mac->cd_length = digest_len;
1156 } else {
1157 mac->cd_length = 0;
1158 }
1159 /* Extra paranoia: zeroize the context on the stack */
1160 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1161
1162 return (ret);
1163 bail:
1164 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1165 mac->cd_length = 0;
1166 return (ret);
1167 }
1168
1169 /* ARGSUSED */
1170 static int
sha1_mac_verify_atomic(crypto_provider_handle_t provider,
1172 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1173 crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1174 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1175 {
1176 int ret = CRYPTO_SUCCESS;
1177 uchar_t digest[SHA1_DIGEST_LENGTH];
1178 sha1_hmac_ctx_t sha1_hmac_ctx;
1179 uint32_t digest_len = SHA1_DIGEST_LENGTH;
1180 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1181
1182 if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1183 mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1184 return (CRYPTO_MECHANISM_INVALID);
1185
1186 /* Add support for key by attributes (RFE 4706552) */
1187 if (key->ck_format != CRYPTO_KEY_RAW)
1188 return (CRYPTO_ARGUMENTS_BAD);
1189
1190 if (ctx_template != NULL) {
1191 /* reuse context template */
1192 bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1193 } else {
1194 /* no context template, initialize context */
1195 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1196 /*
1197 * Hash the passed-in key to get a smaller key.
1198 * The inner context is used since it hasn't been
1199 * initialized yet.
1200 */
1201 PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1202 key->ck_data, keylen_in_bytes, digest);
1203 sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1204 SHA1_DIGEST_LENGTH);
1205 } else {
1206 sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1207 keylen_in_bytes);
1208 }
1209 }
1210
1211 /* get the mechanism parameters, if applicable */
1212 if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1213 if (mechanism->cm_param == NULL ||
1214 mechanism->cm_param_len != sizeof (ulong_t)) {
1215 ret = CRYPTO_MECHANISM_PARAM_INVALID;
1216 goto bail;
1217 }
1218 PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1219 if (digest_len > SHA1_DIGEST_LENGTH) {
1220 ret = CRYPTO_MECHANISM_PARAM_INVALID;
1221 goto bail;
1222 }
1223 }
1224
1225 if (mac->cd_length != digest_len) {
1226 ret = CRYPTO_INVALID_MAC;
1227 goto bail;
1228 }
1229
1230 /* do a SHA1 update of the inner context using the specified data */
1231 SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1232 if (ret != CRYPTO_SUCCESS)
		/* the update failed, zeroize the context and bail */
1234 goto bail;
1235
1236 /* do a SHA1 final on the inner context */
1237 SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1238
1239 /*
1240 * Do an SHA1 update on the outer context, feeding the inner
1241 * digest as data.
1242 */
1243 SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1244
1245 /*
1246 * Do a SHA1 final on the outer context, storing the computed
	 * digest in the user's buffer.
1248 */
1249 SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1250
1251 /*
1252 * Compare the computed digest against the expected digest passed
1253 * as argument.
1254 */
1255
1256 switch (mac->cd_format) {
1257
1258 case CRYPTO_DATA_RAW:
1259 if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
1260 mac->cd_offset, digest_len) != 0)
1261 ret = CRYPTO_INVALID_MAC;
1262 break;
1263
1264 case CRYPTO_DATA_UIO: {
1265 off_t offset = mac->cd_offset;
1266 uint_t vec_idx;
1267 off_t scratch_offset = 0;
1268 size_t length = digest_len;
1269 size_t cur_len;
1270
1271 /* we support only kernel buffer */
1272 if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
1273 return (CRYPTO_ARGUMENTS_BAD);
1274
1275 /* jump to the first iovec containing the expected digest */
1276 for (vec_idx = 0;
		    vec_idx < mac->cd_uio->uio_iovcnt &&
		    offset >= mac->cd_uio->uio_iov[vec_idx].iov_len;
1279 offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len)
1280 ;
1281 if (vec_idx == mac->cd_uio->uio_iovcnt) {
1282 /*
1283 * The caller specified an offset that is
1284 * larger than the total size of the buffers
1285 * it provided.
1286 */
1287 ret = CRYPTO_DATA_LEN_RANGE;
1288 break;
1289 }
1290
1291 /* do the comparison of computed digest vs specified one */
1292 while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
1293 cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
1294 offset, length);
1295
1296 if (bcmp(digest + scratch_offset,
1297 mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
1298 cur_len) != 0) {
1299 ret = CRYPTO_INVALID_MAC;
1300 break;
1301 }
1302
1303 length -= cur_len;
1304 vec_idx++;
1305 scratch_offset += cur_len;
1306 offset = 0;
1307 }
1308 break;
1309 }
1310
1311 case CRYPTO_DATA_MBLK: {
1312 off_t offset = mac->cd_offset;
1313 mblk_t *mp;
1314 off_t scratch_offset = 0;
1315 size_t length = digest_len;
1316 size_t cur_len;
1317
1318 /* jump to the first mblk_t containing the expected digest */
1319 for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
1320 offset -= MBLKL(mp), mp = mp->b_cont)
1321 ;
1322 if (mp == NULL) {
1323 /*
1324 * The caller specified an offset that is larger than
1325 * the total size of the buffers it provided.
1326 */
1327 ret = CRYPTO_DATA_LEN_RANGE;
1328 break;
1329 }
1330
1331 while (mp != NULL && length > 0) {
1332 cur_len = MIN(MBLKL(mp) - offset, length);
1333 if (bcmp(digest + scratch_offset,
1334 mp->b_rptr + offset, cur_len) != 0) {
1335 ret = CRYPTO_INVALID_MAC;
1336 break;
1337 }
1338
1339 length -= cur_len;
1340 mp = mp->b_cont;
1341 scratch_offset += cur_len;
1342 offset = 0;
1343 }
1344 break;
1345 }
1346
1347 default:
1348 ret = CRYPTO_ARGUMENTS_BAD;
1349 }
1350
1351 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1352 return (ret);
1353 bail:
1354 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1355 mac->cd_length = 0;
1356 return (ret);
1357 }
1358
1359 /*
1360 * KCF software provider context management entry points.
1361 */
1362
1363 /* ARGSUSED */
1364 static int
sha1_create_ctx_template(crypto_provider_handle_t provider,
1366 crypto_mechanism_t *mechanism, crypto_key_t *key,
1367 crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
1368 crypto_req_handle_t req)
1369 {
1370 sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
1371 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1372
1373 if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
1374 (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
1375 return (CRYPTO_MECHANISM_INVALID);
1376 }
1377
1378 /* Add support for key by attributes (RFE 4706552) */
1379 if (key->ck_format != CRYPTO_KEY_RAW)
1380 return (CRYPTO_ARGUMENTS_BAD);
1381
1382 /*
1383 * Allocate and initialize SHA1 context.
1384 */
1385 sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
1386 crypto_kmflag(req));
1387 if (sha1_hmac_ctx_tmpl == NULL)
1388 return (CRYPTO_HOST_MEMORY);
1389
1390 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1391 uchar_t digested_key[SHA1_DIGEST_LENGTH];
1392
1393 /*
1394 * Hash the passed-in key to get a smaller key.
1395 * The inner context is used since it hasn't been
1396 * initialized yet.
1397 */
1398 PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
1399 key->ck_data, keylen_in_bytes, digested_key);
1400 sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
1401 SHA1_DIGEST_LENGTH);
1402 } else {
1403 sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
1404 keylen_in_bytes);
1405 }
1406
1407 sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
1408 *ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
1409 *ctx_template_size = sizeof (sha1_hmac_ctx_t);
1410
1411
1412 return (CRYPTO_SUCCESS);
1413 }
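
/*
 * Note on the template design: KCF may call sha1_create_ctx_template() once
 * per key and then hand the resulting template back to sha1_mac_init(),
 * sha1_mac_atomic(), or sha1_mac_verify_atomic(), which simply bcopy() the
 * precomputed inner and outer contexts instead of re-deriving them from the
 * key on every operation.
 */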
1414
1415 static int
sha1_free_context(crypto_ctx_t *ctx)
1417 {
1418 uint_t ctx_len;
1419 sha1_mech_type_t mech_type;
1420
1421 if (ctx->cc_provider_private == NULL)
1422 return (CRYPTO_SUCCESS);
1423
1424 /*
1425 * We have to free either SHA1 or SHA1-HMAC contexts, which
1426 * have different lengths.
1427 */
1428
1429 mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
1430 if (mech_type == SHA1_MECH_INFO_TYPE)
1431 ctx_len = sizeof (sha1_ctx_t);
1432 else {
1433 ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
1434 mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
1435 ctx_len = sizeof (sha1_hmac_ctx_t);
1436 }
1437
1438 bzero(ctx->cc_provider_private, ctx_len);
1439 kmem_free(ctx->cc_provider_private, ctx_len);
1440 ctx->cc_provider_private = NULL;
1441
1442 return (CRYPTO_SUCCESS);
1443 }
1444