1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2019 Joyent, Inc.
26 */
27
28 #include <sys/modctl.h>
29 #include <sys/cmn_err.h>
30 #include <sys/note.h>
31 #include <sys/crypto/common.h>
32 #include <sys/crypto/spi.h>
33 #include <sys/strsun.h>
34 #include <sys/systm.h>
35 #include <sys/sysmacros.h>
36
37 #include <sys/sha1.h>
38 #include <sha1/sha1_impl.h>
39
40 /*
41 * The sha1 module is created with two modlinkages:
42 * - a modlmisc that allows consumers to directly call the entry points
43 * SHA1Init, SHA1Update, and SHA1Final.
44 * - a modlcrypto that allows the module to register with the Kernel
45 * Cryptographic Framework (KCF) as a software provider for the SHA1
46 * mechanisms.
47 */
48
49 static struct modlmisc modlmisc = {
50 &mod_miscops,
51 "SHA1 Message-Digest Algorithm"
52 };
53
54 static struct modlcrypto modlcrypto = {
55 &mod_cryptoops,
56 "SHA1 Kernel SW Provider 1.1"
57 };
58
59 static struct modlinkage modlinkage = {
60 MODREV_1, &modlmisc, &modlcrypto, NULL
61 };
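
/*
 * Illustrative sketch, guarded behind a hypothetical SHA1_MOD_EXAMPLES
 * define so it is not built into the module: how a consumer of the
 * modlmisc interface calls the SHA1 entry points directly. The message
 * and digest parameters are placeholders.
 */
#ifdef SHA1_MOD_EXAMPLES
static void
sha1_direct_example(uint8_t *msg, size_t msglen,
    uint8_t digest[SHA1_DIGEST_LENGTH])
{
	SHA1_CTX ctx;

	SHA1Init(&ctx);
	SHA1Update(&ctx, msg, msglen);	/* may be called repeatedly */
	SHA1Final(digest, &ctx);
}
#endif	/* SHA1_MOD_EXAMPLES */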
62
63
64 /*
65 * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
66 * by KCF to one of the entry points.
67 */
68
69 #define PROV_SHA1_CTX(ctx) ((sha1_ctx_t *)(ctx)->cc_provider_private)
70 #define PROV_SHA1_HMAC_CTX(ctx) ((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)
71
72 /* to extract the digest length passed as mechanism parameter */
73 #define PROV_SHA1_GET_DIGEST_LEN(m, len) { \
74 if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t))) \
(len) = (uint32_t)*((ulong_t *)(void *)(m)->cm_param); \
76 else { \
77 ulong_t tmp_ulong; \
78 bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t)); \
79 (len) = (uint32_t)tmp_ulong; \
80 } \
81 }
82
83 #define PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) { \
84 SHA1Init(ctx); \
85 SHA1Update(ctx, key, len); \
86 SHA1Final(digest, ctx); \
87 }
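
/*
 * Illustrative, build-guarded sketch of the mechanism this provider sees
 * for SUN_CKM_SHA1_HMAC_GENERAL: cm_param points at a ulong_t holding the
 * truncated MAC length and cm_param_len is sizeof (ulong_t), which is
 * exactly what PROV_SHA1_GET_DIGEST_LEN() extracts (handling a possibly
 * unaligned cm_param). Names below are placeholders.
 */
#ifdef SHA1_MOD_EXAMPLES
static void
sha1_hmac_gen_param_example(crypto_mechanism_t *mech, ulong_t *trunc_len)
{
	*trunc_len = 12;		/* e.g. a 96-bit truncated MAC */
	mech->cm_type = SHA1_HMAC_GEN_MECH_INFO_TYPE;
	mech->cm_param = (caddr_t)trunc_len;
	mech->cm_param_len = sizeof (ulong_t);
}
#endif	/* SHA1_MOD_EXAMPLES */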
88
89 /*
90 * Mechanism info structure passed to KCF during registration.
91 */
92 static crypto_mech_info_t sha1_mech_info_tab[] = {
93 /* SHA1 */
94 {SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
95 CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
96 0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
97 /* SHA1-HMAC */
98 {SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
99 CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
100 SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
101 CRYPTO_KEYSIZE_UNIT_IN_BYTES},
102 /* SHA1-HMAC GENERAL */
103 {SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
104 CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
105 SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
106 CRYPTO_KEYSIZE_UNIT_IN_BYTES}
107 };
108
109 static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
110
111 static crypto_control_ops_t sha1_control_ops = {
112 sha1_provider_status
113 };
114
115 static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
116 crypto_req_handle_t);
117 static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
118 crypto_req_handle_t);
119 static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
120 crypto_req_handle_t);
121 static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
122 crypto_req_handle_t);
123 static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
124 crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
125 crypto_req_handle_t);
126
127 static crypto_digest_ops_t sha1_digest_ops = {
128 sha1_digest_init,
129 sha1_digest,
130 sha1_digest_update,
131 NULL,
132 sha1_digest_final,
133 sha1_digest_atomic
134 };
135
136 static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
137 crypto_spi_ctx_template_t, crypto_req_handle_t);
138 static int sha1_mac(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
139 crypto_req_handle_t);
140 static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
141 crypto_req_handle_t);
142 static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
143 static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
144 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
145 crypto_spi_ctx_template_t, crypto_req_handle_t);
146 static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
147 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
148 crypto_spi_ctx_template_t, crypto_req_handle_t);
149
150 static crypto_mac_ops_t sha1_mac_ops = {
151 sha1_mac_init,
152 sha1_mac,
153 sha1_mac_update,
154 sha1_mac_final,
155 sha1_mac_atomic,
156 sha1_mac_verify_atomic
157 };
158
159 static int sha1_create_ctx_template(crypto_provider_handle_t,
160 crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
161 size_t *, crypto_req_handle_t);
162 static int sha1_free_context(crypto_ctx_t *);
163
164 static crypto_ctx_ops_t sha1_ctx_ops = {
165 sha1_create_ctx_template,
166 sha1_free_context
167 };
168
169 static crypto_ops_t sha1_crypto_ops = {
170 &sha1_control_ops,
171 &sha1_digest_ops,
172 NULL,
173 &sha1_mac_ops,
174 NULL,
175 NULL,
176 NULL,
177 NULL,
178 NULL,
179 NULL,
180 NULL,
181 NULL,
182 NULL,
183 &sha1_ctx_ops,
184 NULL,
185 NULL,
186 NULL,
187 };
188
189 static crypto_provider_info_t sha1_prov_info = {
190 CRYPTO_SPI_VERSION_4,
191 "SHA1 Software Provider",
192 CRYPTO_SW_PROVIDER,
193 {&modlinkage},
194 NULL,
195 &sha1_crypto_ops,
196 sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
197 sha1_mech_info_tab
198 };
199
200 static crypto_kcf_provider_handle_t sha1_prov_handle = 0;
201
202 int
_init()
204 {
205 int ret;
206
207 if ((ret = mod_install(&modlinkage)) != 0)
208 return (ret);
209
210 /*
* Register with KCF. If the registration fails, do not uninstall the
* module, since the functionality provided by misc/sha1 should still
* be available.
214 */
215 (void) crypto_register_provider(&sha1_prov_info, &sha1_prov_handle);
216
217 return (0);
218 }
219
220 int
_info(struct modinfo *modinfop)
222 {
223 return (mod_info(&modlinkage, modinfop));
224 }
225
226 /*
227 * KCF software provider control entry points.
228 */
229 /* ARGSUSED */
230 static void
sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
232 {
233 *status = CRYPTO_PROVIDER_READY;
234 }
235
236 /*
237 * KCF software provider digest entry points.
238 */
239
240 static int
sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
242 crypto_req_handle_t req)
243 {
244 if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
245 return (CRYPTO_MECHANISM_INVALID);
246
247 /*
248 * Allocate and initialize SHA1 context.
249 */
250 ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
251 crypto_kmflag(req));
252 if (ctx->cc_provider_private == NULL)
253 return (CRYPTO_HOST_MEMORY);
254
255 PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
256 SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
257
258 return (CRYPTO_SUCCESS);
259 }
260
261 /*
262 * Helper SHA1 digest update function for uio data.
263 */
264 static int
sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
266 {
267 off_t offset = data->cd_offset;
268 size_t length = data->cd_length;
269 uint_t vec_idx;
270 size_t cur_len;
271
272 /* we support only kernel buffer */
273 if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
274 return (CRYPTO_ARGUMENTS_BAD);
275
276 /*
277 * Jump to the first iovec containing data to be
278 * digested.
279 */
280 for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
281 offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
282 offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
283 ;
284 if (vec_idx == data->cd_uio->uio_iovcnt) {
285 /*
286 * The caller specified an offset that is larger than the
287 * total size of the buffers it provided.
288 */
289 return (CRYPTO_DATA_LEN_RANGE);
290 }
291
292 /*
293 * Now do the digesting on the iovecs.
294 */
295 while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
296 cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
297 offset, length);
298
299 SHA1Update(sha1_ctx,
300 (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
301 cur_len);
302
303 length -= cur_len;
304 vec_idx++;
305 offset = 0;
306 }
307
308 if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
309 /*
* The end of the specified iovecs was reached but the
* requested length could not be processed, i.e. the
* caller requested to digest more data than it provided.
313 */
314 return (CRYPTO_DATA_LEN_RANGE);
315 }
316
317 return (CRYPTO_SUCCESS);
318 }
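
/*
 * Illustrative, build-guarded sketch of a CRYPTO_DATA_UIO descriptor that
 * sha1_digest_update_uio() accepts: kernel-space iovecs (UIO_SYSSPACE),
 * with cd_offset/cd_length selecting the bytes to digest across them.
 * The buffer and length parameters are placeholders.
 */
#ifdef SHA1_MOD_EXAMPLES
static void
sha1_uio_data_example(crypto_data_t *cd, uio_t *uio, iovec_t iov[2],
    char *buf1, size_t len1, char *buf2, size_t len2)
{
	iov[0].iov_base = buf1;
	iov[0].iov_len = len1;
	iov[1].iov_base = buf2;
	iov[1].iov_len = len2;

	uio->uio_iov = iov;
	uio->uio_iovcnt = 2;
	uio->uio_segflg = UIO_SYSSPACE;

	cd->cd_format = CRYPTO_DATA_UIO;
	cd->cd_offset = 0;
	cd->cd_length = len1 + len2;
	cd->cd_uio = uio;
}
#endif	/* SHA1_MOD_EXAMPLES */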
319
320 /*
321 * Helper SHA1 digest final function for uio data.
322 * digest_len is the length of the desired digest. If digest_len
323 * is smaller than the default SHA1 digest length, the caller
324 * must pass a scratch buffer, digest_scratch, which must
325 * be at least SHA1_DIGEST_LENGTH bytes.
326 */
327 static int
sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
329 ulong_t digest_len, uchar_t *digest_scratch)
330 {
331 off_t offset = digest->cd_offset;
332 uint_t vec_idx;
333
334 /* we support only kernel buffer */
335 if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
336 return (CRYPTO_ARGUMENTS_BAD);
337
338 /*
339 * Jump to the first iovec containing ptr to the digest to
340 * be returned.
341 */
342 for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
343 vec_idx < digest->cd_uio->uio_iovcnt;
344 offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
345 ;
346 if (vec_idx == digest->cd_uio->uio_iovcnt) {
347 /*
348 * The caller specified an offset that is
349 * larger than the total size of the buffers
350 * it provided.
351 */
352 return (CRYPTO_DATA_LEN_RANGE);
353 }
354
355 if (offset + digest_len <=
356 digest->cd_uio->uio_iov[vec_idx].iov_len) {
357 /*
358 * The computed SHA1 digest will fit in the current
359 * iovec.
360 */
361 if (digest_len != SHA1_DIGEST_LENGTH) {
362 /*
363 * The caller requested a short digest. Digest
364 * into a scratch buffer and return to
365 * the user only what was requested.
366 */
367 SHA1Final(digest_scratch, sha1_ctx);
368 bcopy(digest_scratch, (uchar_t *)digest->
369 cd_uio->uio_iov[vec_idx].iov_base + offset,
370 digest_len);
371 } else {
372 SHA1Final((uchar_t *)digest->
373 cd_uio->uio_iov[vec_idx].iov_base + offset,
374 sha1_ctx);
375 }
376 } else {
377 /*
* The computed digest will cross one or more iovecs.
* This is bad performance-wise but we need to support it.
* Allocate a small scratch buffer on the stack and
* copy it piecemeal to the specified digest iovecs.
382 */
383 uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
384 off_t scratch_offset = 0;
385 size_t length = digest_len;
386 size_t cur_len;
387
388 SHA1Final(digest_tmp, sha1_ctx);
389
390 while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
391 cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
392 offset, length);
393 bcopy(digest_tmp + scratch_offset,
394 digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
395 cur_len);
396
397 length -= cur_len;
398 vec_idx++;
399 scratch_offset += cur_len;
400 offset = 0;
401 }
402
403 if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
404 /*
* The end of the specified iovecs was reached but the
* requested length could not be processed, i.e. the
* caller requested to digest more data than it
* provided.
409 */
410 return (CRYPTO_DATA_LEN_RANGE);
411 }
412 }
413
414 return (CRYPTO_SUCCESS);
415 }
416
417 /*
418 * Helper SHA1 digest update for mblk's.
419 */
420 static int
sha1_digest_update_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *data)
422 {
423 off_t offset = data->cd_offset;
424 size_t length = data->cd_length;
425 mblk_t *mp;
426 size_t cur_len;
427
428 /*
429 * Jump to the first mblk_t containing data to be digested.
430 */
431 for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
432 offset -= MBLKL(mp), mp = mp->b_cont)
433 ;
434 if (mp == NULL) {
435 /*
436 * The caller specified an offset that is larger than the
437 * total size of the buffers it provided.
438 */
439 return (CRYPTO_DATA_LEN_RANGE);
440 }
441
442 /*
443 * Now do the digesting on the mblk chain.
444 */
445 while (mp != NULL && length > 0) {
446 cur_len = MIN(MBLKL(mp) - offset, length);
447 SHA1Update(sha1_ctx, mp->b_rptr + offset, cur_len);
448 length -= cur_len;
449 offset = 0;
450 mp = mp->b_cont;
451 }
452
453 if (mp == NULL && length > 0) {
454 /*
* The end of the mblk chain was reached but the requested
* length could not be processed, i.e. the caller requested
* to digest more data than it provided.
458 */
459 return (CRYPTO_DATA_LEN_RANGE);
460 }
461
462 return (CRYPTO_SUCCESS);
463 }
464
465 /*
466 * Helper SHA1 digest final for mblk's.
467 * digest_len is the length of the desired digest. If digest_len
468 * is smaller than the default SHA1 digest length, the caller
469 * must pass a scratch buffer, digest_scratch, which must
470 * be at least SHA1_DIGEST_LENGTH bytes.
471 */
472 static int
sha1_digest_final_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
474 ulong_t digest_len, uchar_t *digest_scratch)
475 {
476 off_t offset = digest->cd_offset;
477 mblk_t *mp;
478
479 /*
480 * Jump to the first mblk_t that will be used to store the digest.
481 */
482 for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
483 offset -= MBLKL(mp), mp = mp->b_cont)
484 ;
485 if (mp == NULL) {
486 /*
487 * The caller specified an offset that is larger than the
488 * total size of the buffers it provided.
489 */
490 return (CRYPTO_DATA_LEN_RANGE);
491 }
492
493 if (offset + digest_len <= MBLKL(mp)) {
494 /*
495 * The computed SHA1 digest will fit in the current mblk.
496 * Do the SHA1Final() in-place.
497 */
498 if (digest_len != SHA1_DIGEST_LENGTH) {
499 /*
500 * The caller requested a short digest. Digest
501 * into a scratch buffer and return to
502 * the user only what was requested.
503 */
504 SHA1Final(digest_scratch, sha1_ctx);
505 bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
506 } else {
507 SHA1Final(mp->b_rptr + offset, sha1_ctx);
508 }
509 } else {
510 /*
* The computed digest will cross one or more mblks.
* This is bad performance-wise but we need to support it.
* Allocate a small scratch buffer on the stack and
* copy it piecemeal to the specified digest mblks.
515 */
516 uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
517 off_t scratch_offset = 0;
518 size_t length = digest_len;
519 size_t cur_len;
520
521 SHA1Final(digest_tmp, sha1_ctx);
522
523 while (mp != NULL && length > 0) {
524 cur_len = MIN(MBLKL(mp) - offset, length);
525 bcopy(digest_tmp + scratch_offset,
526 mp->b_rptr + offset, cur_len);
527
528 length -= cur_len;
529 mp = mp->b_cont;
530 scratch_offset += cur_len;
531 offset = 0;
532 }
533
534 if (mp == NULL && length > 0) {
535 /*
* The end of the specified mblk chain was reached but
* the requested length could not be processed, i.e. the
* caller requested to digest more data than it
* provided.
540 */
541 return (CRYPTO_DATA_LEN_RANGE);
542 }
543 }
544
545 return (CRYPTO_SUCCESS);
546 }
547
548 /* ARGSUSED */
549 static int
sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
551 crypto_req_handle_t req)
552 {
553 int ret = CRYPTO_SUCCESS;
554
555 ASSERT(ctx->cc_provider_private != NULL);
556
557 /*
558 * We need to just return the length needed to store the output.
559 * We should not destroy the context for the following cases.
560 */
561 if ((digest->cd_length == 0) ||
562 (digest->cd_length < SHA1_DIGEST_LENGTH)) {
563 digest->cd_length = SHA1_DIGEST_LENGTH;
564 return (CRYPTO_BUFFER_TOO_SMALL);
565 }
566
567 /*
568 * Do the SHA1 update on the specified input data.
569 */
570 switch (data->cd_format) {
571 case CRYPTO_DATA_RAW:
572 SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
573 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
574 data->cd_length);
575 break;
576 case CRYPTO_DATA_UIO:
577 ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
578 data);
579 break;
580 case CRYPTO_DATA_MBLK:
581 ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
582 data);
583 break;
584 default:
585 ret = CRYPTO_ARGUMENTS_BAD;
586 }
587
588 if (ret != CRYPTO_SUCCESS) {
589 /* the update failed, free context and bail */
590 kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
591 ctx->cc_provider_private = NULL;
592 digest->cd_length = 0;
593 return (ret);
594 }
595
596 /*
597 * Do a SHA1 final, must be done separately since the digest
598 * type can be different than the input data type.
599 */
600 switch (digest->cd_format) {
601 case CRYPTO_DATA_RAW:
602 SHA1Final((unsigned char *)digest->cd_raw.iov_base +
603 digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
604 break;
605 case CRYPTO_DATA_UIO:
606 ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
607 digest, SHA1_DIGEST_LENGTH, NULL);
608 break;
609 case CRYPTO_DATA_MBLK:
610 ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
611 digest, SHA1_DIGEST_LENGTH, NULL);
612 break;
613 default:
614 ret = CRYPTO_ARGUMENTS_BAD;
615 }
616
617 /* all done, free context and return */
618
619 if (ret == CRYPTO_SUCCESS) {
620 digest->cd_length = SHA1_DIGEST_LENGTH;
621 } else {
622 digest->cd_length = 0;
623 }
624
625 kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
626 ctx->cc_provider_private = NULL;
627 return (ret);
628 }
629
630 /* ARGSUSED */
631 static int
sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
633 crypto_req_handle_t req)
634 {
635 int ret = CRYPTO_SUCCESS;
636
637 ASSERT(ctx->cc_provider_private != NULL);
638
639 /*
640 * Do the SHA1 update on the specified input data.
641 */
642 switch (data->cd_format) {
643 case CRYPTO_DATA_RAW:
644 SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
645 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
646 data->cd_length);
647 break;
648 case CRYPTO_DATA_UIO:
649 ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
650 data);
651 break;
652 case CRYPTO_DATA_MBLK:
653 ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
654 data);
655 break;
656 default:
657 ret = CRYPTO_ARGUMENTS_BAD;
658 }
659
660 return (ret);
661 }
662
663 /* ARGSUSED */
664 static int
sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
666 crypto_req_handle_t req)
667 {
668 int ret = CRYPTO_SUCCESS;
669
670 ASSERT(ctx->cc_provider_private != NULL);
671
672 /*
673 * We need to just return the length needed to store the output.
674 * We should not destroy the context for the following cases.
675 */
676 if ((digest->cd_length == 0) ||
677 (digest->cd_length < SHA1_DIGEST_LENGTH)) {
678 digest->cd_length = SHA1_DIGEST_LENGTH;
679 return (CRYPTO_BUFFER_TOO_SMALL);
680 }
681
682 /*
683 * Do a SHA1 final.
684 */
685 switch (digest->cd_format) {
686 case CRYPTO_DATA_RAW:
687 SHA1Final((unsigned char *)digest->cd_raw.iov_base +
688 digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
689 break;
690 case CRYPTO_DATA_UIO:
691 ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
692 digest, SHA1_DIGEST_LENGTH, NULL);
693 break;
694 case CRYPTO_DATA_MBLK:
695 ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
696 digest, SHA1_DIGEST_LENGTH, NULL);
697 break;
698 default:
699 ret = CRYPTO_ARGUMENTS_BAD;
700 }
701
702 /* all done, free context and return */
703
704 if (ret == CRYPTO_SUCCESS) {
705 digest->cd_length = SHA1_DIGEST_LENGTH;
706 } else {
707 digest->cd_length = 0;
708 }
709
710 kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
711 ctx->cc_provider_private = NULL;
712
713 return (ret);
714 }
715
716 /* ARGSUSED */
717 static int
sha1_digest_atomic(crypto_provider_handle_t provider,
719 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
720 crypto_data_t *data, crypto_data_t *digest,
721 crypto_req_handle_t req)
722 {
723 int ret = CRYPTO_SUCCESS;
724 SHA1_CTX sha1_ctx;
725
726 if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
727 return (CRYPTO_MECHANISM_INVALID);
728
729 /*
730 * Do the SHA1 init.
731 */
732 SHA1Init(&sha1_ctx);
733
734 /*
735 * Do the SHA1 update on the specified input data.
736 */
737 switch (data->cd_format) {
738 case CRYPTO_DATA_RAW:
739 SHA1Update(&sha1_ctx,
740 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
741 data->cd_length);
742 break;
743 case CRYPTO_DATA_UIO:
744 ret = sha1_digest_update_uio(&sha1_ctx, data);
745 break;
746 case CRYPTO_DATA_MBLK:
747 ret = sha1_digest_update_mblk(&sha1_ctx, data);
748 break;
749 default:
750 ret = CRYPTO_ARGUMENTS_BAD;
751 }
752
753 if (ret != CRYPTO_SUCCESS) {
754 /* the update failed, bail */
755 digest->cd_length = 0;
756 return (ret);
757 }
758
759 /*
760 * Do a SHA1 final, must be done separately since the digest
761 * type can be different than the input data type.
762 */
763 switch (digest->cd_format) {
764 case CRYPTO_DATA_RAW:
765 SHA1Final((unsigned char *)digest->cd_raw.iov_base +
766 digest->cd_offset, &sha1_ctx);
767 break;
768 case CRYPTO_DATA_UIO:
769 ret = sha1_digest_final_uio(&sha1_ctx, digest,
770 SHA1_DIGEST_LENGTH, NULL);
771 break;
772 case CRYPTO_DATA_MBLK:
773 ret = sha1_digest_final_mblk(&sha1_ctx, digest,
774 SHA1_DIGEST_LENGTH, NULL);
775 break;
776 default:
777 ret = CRYPTO_ARGUMENTS_BAD;
778 }
779
780 if (ret == CRYPTO_SUCCESS) {
781 digest->cd_length = SHA1_DIGEST_LENGTH;
782 } else {
783 digest->cd_length = 0;
784 }
785
786 return (ret);
787 }
788
789 /*
790 * KCF software provider mac entry points.
791 *
792 * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
793 *
794 * Init:
795 * The initialization routine initializes what we denote
796 * as the inner and outer contexts by doing
797 * - for inner context: SHA1(key XOR ipad)
798 * - for outer context: SHA1(key XOR opad)
799 *
800 * Update:
801 * Each subsequent SHA1 HMAC update will result in an
802 * update of the inner context with the specified data.
803 *
804 * Final:
805 * The SHA1 HMAC final will do a SHA1 final operation on the
806 * inner context, and the resulting digest will be used
807 * as the data for an update on the outer context. Last
808 * but not least, a SHA1 final on the outer context will
809 * be performed to obtain the SHA1 HMAC digest to return
810 * to the user.
811 */
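
/*
 * Illustrative, build-guarded sketch of the composition described above,
 * expressed with the raw SHA1 entry points. For brevity it assumes the key
 * has already been zero-padded to SHA1_HMAC_BLOCK_SIZE bytes; keys longer
 * than a block are first digested down, as the init code below does.
 * Parameter names are placeholders.
 */
#ifdef SHA1_MOD_EXAMPLES
static void
sha1_hmac_example(uint8_t key[SHA1_HMAC_BLOCK_SIZE], uint8_t *text,
    size_t textlen, uint8_t mac[SHA1_DIGEST_LENGTH])
{
	SHA1_CTX icontext, ocontext;
	uint8_t ipad[SHA1_HMAC_BLOCK_SIZE], opad[SHA1_HMAC_BLOCK_SIZE];
	uint8_t inner[SHA1_DIGEST_LENGTH];
	uint_t i;

	for (i = 0; i < SHA1_HMAC_BLOCK_SIZE; i++) {
		ipad[i] = key[i] ^ 0x36;
		opad[i] = key[i] ^ 0x5c;
	}

	/* inner context: SHA1(key XOR ipad, text) */
	SHA1Init(&icontext);
	SHA1Update(&icontext, ipad, SHA1_HMAC_BLOCK_SIZE);
	SHA1Update(&icontext, text, textlen);
	SHA1Final(inner, &icontext);

	/* outer context: SHA1(key XOR opad, inner digest) */
	SHA1Init(&ocontext);
	SHA1Update(&ocontext, opad, SHA1_HMAC_BLOCK_SIZE);
	SHA1Update(&ocontext, inner, SHA1_DIGEST_LENGTH);
	SHA1Final(mac, &ocontext);
}
#endif	/* SHA1_MOD_EXAMPLES */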
812
813 /*
814 * Initialize a SHA1-HMAC context.
815 */
816 static void
sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
818 {
819 uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
820 uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
821 uint_t i;
822
823 bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
824 bzero(opad, SHA1_HMAC_BLOCK_SIZE);
825
826 bcopy(keyval, ipad, length_in_bytes);
827 bcopy(keyval, opad, length_in_bytes);
828
829 /* XOR key with ipad (0x36) and opad (0x5c) */
830 for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
831 ipad[i] ^= 0x36363636;
832 opad[i] ^= 0x5c5c5c5c;
833 }
834
835 /* perform SHA1 on ipad */
836 SHA1Init(&ctx->hc_icontext);
837 SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
838
839 /* perform SHA1 on opad */
840 SHA1Init(&ctx->hc_ocontext);
841 SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
842 }
843
/*
 * Initialize a SHA1-HMAC operation: set up the inner and outer contexts
 * from the key, or reuse a precomputed context template if one was
 * supplied by the framework.
 */
846 static int
sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
848 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
849 crypto_req_handle_t req)
850 {
851 int ret = CRYPTO_SUCCESS;
852 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
853
854 if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
855 mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
856 return (CRYPTO_MECHANISM_INVALID);
857
858 /* Add support for key by attributes (RFE 4706552) */
859 if (key->ck_format != CRYPTO_KEY_RAW)
860 return (CRYPTO_ARGUMENTS_BAD);
861
862 ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
863 crypto_kmflag(req));
864 if (ctx->cc_provider_private == NULL)
865 return (CRYPTO_HOST_MEMORY);
866
867 if (ctx_template != NULL) {
868 /* reuse context template */
869 bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
870 sizeof (sha1_hmac_ctx_t));
871 } else {
872 /* no context template, compute context */
873 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
874 uchar_t digested_key[SHA1_DIGEST_LENGTH];
875 sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
876
877 /*
878 * Hash the passed-in key to get a smaller key.
879 * The inner context is used since it hasn't been
880 * initialized yet.
881 */
882 PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
883 key->ck_data, keylen_in_bytes, digested_key);
884 sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
885 digested_key, SHA1_DIGEST_LENGTH);
886 } else {
887 sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
888 key->ck_data, keylen_in_bytes);
889 }
890 }
891
892 /*
893 * Get the mechanism parameters, if applicable.
894 */
895 PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
if (mechanism->cm_param == NULL ||
mechanism->cm_param_len != sizeof (ulong_t)) {
ret = CRYPTO_MECHANISM_PARAM_INVALID;
} else {
PROV_SHA1_GET_DIGEST_LEN(mechanism,
PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
SHA1_DIGEST_LENGTH)
ret = CRYPTO_MECHANISM_PARAM_INVALID;
}
}
906
907 if (ret != CRYPTO_SUCCESS) {
908 bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
909 kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
910 ctx->cc_provider_private = NULL;
911 }
912
913 return (ret);
914 }
915
916 static int
sha1_mac(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *mac,
918 crypto_req_handle_t req)
919 {
920 SHA1_CTX *ictx = NULL;
921 SHA1_CTX *octx = NULL;
922 uchar_t digest[SHA1_DIGEST_LENGTH];
923 uint32_t digest_len = SHA1_DIGEST_LENGTH;
924 int ret = CRYPTO_SUCCESS;
925
926 ASSERT(ctx->cc_provider_private != NULL);
927
928 if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
929 SHA1_HMAC_GEN_MECH_INFO_TYPE) {
930 digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
931 }
932
933 if ((mac->cd_length == 0) ||
934 (mac->cd_length < digest_len)) {
935 mac->cd_length = digest_len;
936 return (CRYPTO_BUFFER_TOO_SMALL);
937 }
938
939 ictx = &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext;
940 octx = &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext;
941
942 switch (data->cd_format) {
943 case CRYPTO_DATA_RAW:
944 SHA1Update(ictx,
945 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
946 data->cd_length);
947 break;
948 case CRYPTO_DATA_UIO:
949 ret = sha1_digest_update_uio(ictx, data);
950 break;
951 case CRYPTO_DATA_MBLK:
952 ret = sha1_digest_update_mblk(ictx, data);
953 break;
954 default:
955 ret = CRYPTO_ARGUMENTS_BAD;
956 }
957
958 if (ret != CRYPTO_SUCCESS) {
959 kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
960 ctx->cc_provider_private = NULL;
961 mac->cd_length = 0;
962 return (ret);
963 }
964
965 /*
966 * Do a SHA1 final on the inner context.
967 */
968 SHA1Final(digest, ictx);
969
970 /*
* Do a SHA1 update on the outer context, feeding the inner
972 * digest as data.
973 */
974 SHA1Update(octx, digest, SHA1_DIGEST_LENGTH);
975
976 switch (mac->cd_format) {
977 case CRYPTO_DATA_RAW:
978 if (digest_len != SHA1_DIGEST_LENGTH) {
979 /*
980 * The caller requested a short digest. Digest
981 * into a scratch buffer and return to
982 * the user only what was requested.
983 */
984 SHA1Final(digest, octx);
985 bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
986 mac->cd_offset, digest_len);
987 } else {
988 SHA1Final((unsigned char *)mac->cd_raw.iov_base +
989 mac->cd_offset, octx);
990 }
991 break;
992 case CRYPTO_DATA_UIO:
993 ret = sha1_digest_final_uio(octx, mac, digest_len, digest);
994 break;
995 case CRYPTO_DATA_MBLK:
996 ret = sha1_digest_final_mblk(octx, mac, digest_len, digest);
997 break;
998 default:
999 ret = CRYPTO_ARGUMENTS_BAD;
1000 }
1001
1002 if (ret == CRYPTO_SUCCESS) {
mac->cd_length = digest_len;
1004 } else {
1005 mac->cd_length = 0;
1006 }
1007
1008 kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1009 ctx->cc_provider_private = NULL;
1010 return (ret);
1011 }
1012
1013 /* ARGSUSED */
1014 static int
sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
1016 {
1017 int ret = CRYPTO_SUCCESS;
1018
1019 ASSERT(ctx->cc_provider_private != NULL);
1020
1021 /*
1022 * Do a SHA1 update of the inner context using the specified
1023 * data.
1024 */
1025 switch (data->cd_format) {
1026 case CRYPTO_DATA_RAW:
1027 SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
1028 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
1029 data->cd_length);
1030 break;
1031 case CRYPTO_DATA_UIO:
1032 ret = sha1_digest_update_uio(
1033 &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
1034 break;
1035 case CRYPTO_DATA_MBLK:
1036 ret = sha1_digest_update_mblk(
1037 &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
1038 break;
1039 default:
1040 ret = CRYPTO_ARGUMENTS_BAD;
1041 }
1042
1043 return (ret);
1044 }
1045
1046 /* ARGSUSED */
1047 static int
sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
1049 {
1050 int ret = CRYPTO_SUCCESS;
1051 uchar_t digest[SHA1_DIGEST_LENGTH];
1052 uint32_t digest_len = SHA1_DIGEST_LENGTH;
1053
1054 ASSERT(ctx->cc_provider_private != NULL);
1055
1056 if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
1057 SHA1_HMAC_GEN_MECH_INFO_TYPE)
1058 digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
1059
1060 /*
1061 * We need to just return the length needed to store the output.
1062 * We should not destroy the context for the following cases.
1063 */
1064 if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
1065 mac->cd_length = digest_len;
1066 return (CRYPTO_BUFFER_TOO_SMALL);
1067 }
1068
1069 /*
1070 * Do a SHA1 final on the inner context.
1071 */
1072 SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);
1073
1074 /*
1075 * Do a SHA1 update on the outer context, feeding the inner
1076 * digest as data.
1077 */
1078 SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
1079 SHA1_DIGEST_LENGTH);
1080
1081 /*
* Do a SHA1 final on the outer context, storing the computed
* digest in the user's buffer.
1084 */
1085 switch (mac->cd_format) {
1086 case CRYPTO_DATA_RAW:
1087 if (digest_len != SHA1_DIGEST_LENGTH) {
1088 /*
1089 * The caller requested a short digest. Digest
1090 * into a scratch buffer and return to
1091 * the user only what was requested.
1092 */
1093 SHA1Final(digest,
1094 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
1095 bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1096 mac->cd_offset, digest_len);
1097 } else {
1098 SHA1Final((unsigned char *)mac->cd_raw.iov_base +
1099 mac->cd_offset,
1100 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
1101 }
1102 break;
1103 case CRYPTO_DATA_UIO:
1104 ret = sha1_digest_final_uio(
1105 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1106 digest_len, digest);
1107 break;
1108 case CRYPTO_DATA_MBLK:
1109 ret = sha1_digest_final_mblk(
1110 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1111 digest_len, digest);
1112 break;
1113 default:
1114 ret = CRYPTO_ARGUMENTS_BAD;
1115 }
1116
1117 if (ret == CRYPTO_SUCCESS) {
1118 mac->cd_length = digest_len;
1119 } else {
1120 mac->cd_length = 0;
1121 }
1122
1123 bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1124 kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1125 ctx->cc_provider_private = NULL;
1126
1127 return (ret);
1128 }
1129
1130 #define SHA1_MAC_UPDATE(data, ctx, ret) { \
1131 switch (data->cd_format) { \
1132 case CRYPTO_DATA_RAW: \
1133 SHA1Update(&(ctx).hc_icontext, \
1134 (uint8_t *)data->cd_raw.iov_base + \
1135 data->cd_offset, data->cd_length); \
1136 break; \
1137 case CRYPTO_DATA_UIO: \
1138 ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
1139 break; \
1140 case CRYPTO_DATA_MBLK: \
1141 ret = sha1_digest_update_mblk(&(ctx).hc_icontext, \
1142 data); \
1143 break; \
1144 default: \
1145 ret = CRYPTO_ARGUMENTS_BAD; \
1146 } \
1147 }
1148
1149 /* ARGSUSED */
1150 static int
sha1_mac_atomic(crypto_provider_handle_t provider,
1152 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1153 crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1154 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1155 {
1156 int ret = CRYPTO_SUCCESS;
1157 uchar_t digest[SHA1_DIGEST_LENGTH];
1158 sha1_hmac_ctx_t sha1_hmac_ctx;
1159 uint32_t digest_len = SHA1_DIGEST_LENGTH;
1160 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1161
1162 if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1163 mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1164 return (CRYPTO_MECHANISM_INVALID);
1165
1166 /* Add support for key by attributes (RFE 4706552) */
1167 if (key->ck_format != CRYPTO_KEY_RAW)
1168 return (CRYPTO_ARGUMENTS_BAD);
1169
1170 if (ctx_template != NULL) {
1171 /* reuse context template */
1172 bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1173 } else {
1174 /* no context template, initialize context */
1175 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1176 /*
1177 * Hash the passed-in key to get a smaller key.
1178 * The inner context is used since it hasn't been
1179 * initialized yet.
1180 */
1181 PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1182 key->ck_data, keylen_in_bytes, digest);
1183 sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1184 SHA1_DIGEST_LENGTH);
1185 } else {
1186 sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1187 keylen_in_bytes);
1188 }
1189 }
1190
1191 /* get the mechanism parameters, if applicable */
1192 if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1193 if (mechanism->cm_param == NULL ||
1194 mechanism->cm_param_len != sizeof (ulong_t)) {
1195 ret = CRYPTO_MECHANISM_PARAM_INVALID;
1196 goto bail;
1197 }
1198 PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1199 if (digest_len > SHA1_DIGEST_LENGTH) {
1200 ret = CRYPTO_MECHANISM_PARAM_INVALID;
1201 goto bail;
1202 }
1203 }
1204
1205 /* do a SHA1 update of the inner context using the specified data */
1206 SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1207 if (ret != CRYPTO_SUCCESS)
1208 /* the update failed, free context and bail */
1209 goto bail;
1210
1211 /*
1212 * Do a SHA1 final on the inner context.
1213 */
1214 SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1215
1216 /*
1217 * Do an SHA1 update on the outer context, feeding the inner
1218 * digest as data.
1219 */
1220 SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1221
1222 /*
* Do a SHA1 final on the outer context, storing the computed
* digest in the user's buffer.
1225 */
1226 switch (mac->cd_format) {
1227 case CRYPTO_DATA_RAW:
1228 if (digest_len != SHA1_DIGEST_LENGTH) {
1229 /*
1230 * The caller requested a short digest. Digest
1231 * into a scratch buffer and return to
1232 * the user only what was requested.
1233 */
1234 SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1235 bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1236 mac->cd_offset, digest_len);
1237 } else {
1238 SHA1Final((unsigned char *)mac->cd_raw.iov_base +
1239 mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
1240 }
1241 break;
1242 case CRYPTO_DATA_UIO:
1243 ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
1244 digest_len, digest);
1245 break;
1246 case CRYPTO_DATA_MBLK:
1247 ret = sha1_digest_final_mblk(&sha1_hmac_ctx.hc_ocontext, mac,
1248 digest_len, digest);
1249 break;
1250 default:
1251 ret = CRYPTO_ARGUMENTS_BAD;
1252 }
1253
1254 if (ret == CRYPTO_SUCCESS) {
1255 mac->cd_length = digest_len;
1256 } else {
1257 mac->cd_length = 0;
1258 }
1259 /* Extra paranoia: zeroize the context on the stack */
1260 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1261
1262 return (ret);
1263 bail:
1264 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1265 mac->cd_length = 0;
1266 return (ret);
1267 }
1268
1269 /* ARGSUSED */
1270 static int
sha1_mac_verify_atomic(crypto_provider_handle_t provider,
1272 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1273 crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1274 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1275 {
1276 int ret = CRYPTO_SUCCESS;
1277 uchar_t digest[SHA1_DIGEST_LENGTH];
1278 sha1_hmac_ctx_t sha1_hmac_ctx;
1279 uint32_t digest_len = SHA1_DIGEST_LENGTH;
1280 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1281
1282 if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1283 mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1284 return (CRYPTO_MECHANISM_INVALID);
1285
1286 /* Add support for key by attributes (RFE 4706552) */
1287 if (key->ck_format != CRYPTO_KEY_RAW)
1288 return (CRYPTO_ARGUMENTS_BAD);
1289
1290 if (ctx_template != NULL) {
1291 /* reuse context template */
1292 bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1293 } else {
1294 /* no context template, initialize context */
1295 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1296 /*
1297 * Hash the passed-in key to get a smaller key.
1298 * The inner context is used since it hasn't been
1299 * initialized yet.
1300 */
1301 PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1302 key->ck_data, keylen_in_bytes, digest);
1303 sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1304 SHA1_DIGEST_LENGTH);
1305 } else {
1306 sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1307 keylen_in_bytes);
1308 }
1309 }
1310
1311 /* get the mechanism parameters, if applicable */
1312 if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1313 if (mechanism->cm_param == NULL ||
1314 mechanism->cm_param_len != sizeof (ulong_t)) {
1315 ret = CRYPTO_MECHANISM_PARAM_INVALID;
1316 goto bail;
1317 }
1318 PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1319 if (digest_len > SHA1_DIGEST_LENGTH) {
1320 ret = CRYPTO_MECHANISM_PARAM_INVALID;
1321 goto bail;
1322 }
1323 }
1324
1325 if (mac->cd_length != digest_len) {
1326 ret = CRYPTO_INVALID_MAC;
1327 goto bail;
1328 }
1329
1330 /* do a SHA1 update of the inner context using the specified data */
1331 SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1332 if (ret != CRYPTO_SUCCESS)
1333 /* the update failed, free context and bail */
1334 goto bail;
1335
1336 /* do a SHA1 final on the inner context */
1337 SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1338
1339 /*
1340 * Do an SHA1 update on the outer context, feeding the inner
1341 * digest as data.
1342 */
1343 SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1344
1345 /*
* Do a SHA1 final on the outer context, storing the computed
* digest in the user's buffer.
1348 */
1349 SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1350
1351 /*
1352 * Compare the computed digest against the expected digest passed
1353 * as argument.
1354 */
1355
1356 switch (mac->cd_format) {
1357
1358 case CRYPTO_DATA_RAW:
1359 if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
1360 mac->cd_offset, digest_len) != 0)
1361 ret = CRYPTO_INVALID_MAC;
1362 break;
1363
1364 case CRYPTO_DATA_UIO: {
1365 off_t offset = mac->cd_offset;
1366 uint_t vec_idx;
1367 off_t scratch_offset = 0;
1368 size_t length = digest_len;
1369 size_t cur_len;
1370
/* we support only kernel buffer */
if (mac->cd_uio->uio_segflg != UIO_SYSSPACE) {
/* break so the stack hmac context is zeroized below */
ret = CRYPTO_ARGUMENTS_BAD;
break;
}
1374
1375 /* jump to the first iovec containing the expected digest */
1376 for (vec_idx = 0;
1377 offset >= mac->cd_uio->uio_iov[vec_idx].iov_len &&
1378 vec_idx < mac->cd_uio->uio_iovcnt;
1379 offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len)
1380 ;
1381 if (vec_idx == mac->cd_uio->uio_iovcnt) {
1382 /*
1383 * The caller specified an offset that is
1384 * larger than the total size of the buffers
1385 * it provided.
1386 */
1387 ret = CRYPTO_DATA_LEN_RANGE;
1388 break;
1389 }
1390
1391 /* do the comparison of computed digest vs specified one */
1392 while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
1393 cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
1394 offset, length);
1395
1396 if (bcmp(digest + scratch_offset,
1397 mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
1398 cur_len) != 0) {
1399 ret = CRYPTO_INVALID_MAC;
1400 break;
1401 }
1402
1403 length -= cur_len;
1404 vec_idx++;
1405 scratch_offset += cur_len;
1406 offset = 0;
1407 }
1408 break;
1409 }
1410
1411 case CRYPTO_DATA_MBLK: {
1412 off_t offset = mac->cd_offset;
1413 mblk_t *mp;
1414 off_t scratch_offset = 0;
1415 size_t length = digest_len;
1416 size_t cur_len;
1417
1418 /* jump to the first mblk_t containing the expected digest */
1419 for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
1420 offset -= MBLKL(mp), mp = mp->b_cont)
1421 ;
1422 if (mp == NULL) {
1423 /*
1424 * The caller specified an offset that is larger than
1425 * the total size of the buffers it provided.
1426 */
1427 ret = CRYPTO_DATA_LEN_RANGE;
1428 break;
1429 }
1430
1431 while (mp != NULL && length > 0) {
1432 cur_len = MIN(MBLKL(mp) - offset, length);
1433 if (bcmp(digest + scratch_offset,
1434 mp->b_rptr + offset, cur_len) != 0) {
1435 ret = CRYPTO_INVALID_MAC;
1436 break;
1437 }
1438
1439 length -= cur_len;
1440 mp = mp->b_cont;
1441 scratch_offset += cur_len;
1442 offset = 0;
1443 }
1444 break;
1445 }
1446
1447 default:
1448 ret = CRYPTO_ARGUMENTS_BAD;
1449 }
1450
1451 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1452 return (ret);
1453 bail:
1454 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1455 mac->cd_length = 0;
1456 return (ret);
1457 }
1458
1459 /*
1460 * KCF software provider context management entry points.
1461 */
1462
1463 /* ARGSUSED */
1464 static int
sha1_create_ctx_template(crypto_provider_handle_t provider,
1466 crypto_mechanism_t *mechanism, crypto_key_t *key,
1467 crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
1468 crypto_req_handle_t req)
1469 {
1470 sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
1471 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1472
1473 if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
1474 (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
1475 return (CRYPTO_MECHANISM_INVALID);
1476 }
1477
1478 /* Add support for key by attributes (RFE 4706552) */
1479 if (key->ck_format != CRYPTO_KEY_RAW)
1480 return (CRYPTO_ARGUMENTS_BAD);
1481
1482 /*
1483 * Allocate and initialize SHA1 context.
1484 */
1485 sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
1486 crypto_kmflag(req));
1487 if (sha1_hmac_ctx_tmpl == NULL)
1488 return (CRYPTO_HOST_MEMORY);
1489
1490 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1491 uchar_t digested_key[SHA1_DIGEST_LENGTH];
1492
1493 /*
1494 * Hash the passed-in key to get a smaller key.
1495 * The inner context is used since it hasn't been
1496 * initialized yet.
1497 */
1498 PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
1499 key->ck_data, keylen_in_bytes, digested_key);
1500 sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
1501 SHA1_DIGEST_LENGTH);
1502 } else {
1503 sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
1504 keylen_in_bytes);
1505 }
1506
1507 sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
1508 *ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
1509 *ctx_template_size = sizeof (sha1_hmac_ctx_t);
1510
1511
1512 return (CRYPTO_SUCCESS);
1513 }
1514
1515 static int
sha1_free_context(crypto_ctx_t *ctx)
1517 {
1518 uint_t ctx_len;
1519 sha1_mech_type_t mech_type;
1520
1521 if (ctx->cc_provider_private == NULL)
1522 return (CRYPTO_SUCCESS);
1523
1524 /*
1525 * We have to free either SHA1 or SHA1-HMAC contexts, which
1526 * have different lengths.
1527 */
1528
1529 mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
1530 if (mech_type == SHA1_MECH_INFO_TYPE)
1531 ctx_len = sizeof (sha1_ctx_t);
1532 else {
1533 ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
1534 mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
1535 ctx_len = sizeof (sha1_hmac_ctx_t);
1536 }
1537
1538 bzero(ctx->cc_provider_private, ctx_len);
1539 kmem_free(ctx->cc_provider_private, ctx_len);
1540 ctx->cc_provider_private = NULL;
1541
1542 return (CRYPTO_SUCCESS);
1543 }
1544