xref: /freebsd/sys/contrib/openzfs/module/icp/io/sha2_mod.c (revision 75e1fea68aaa613a20dfdcd0c59dd403aca02c49)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/zfs_context.h>
28 #include <sys/crypto/common.h>
29 #include <sys/crypto/spi.h>
30 #include <sys/crypto/icp.h>
31 #include <sys/sha2.h>
32 #include <sha2/sha2_impl.h>
33 
/*
 * Macros to access the SHA2 or SHA2-HMAC contexts from a context passed
 * by KCF to one of the entry points.
 */

#define	PROV_SHA2_CTX(ctx)	((sha2_ctx_t *)(ctx)->cc_provider_private)
#define	PROV_SHA2_HMAC_CTX(ctx)	((sha2_hmac_ctx_t *)(ctx)->cc_provider_private)

/*
 * Extract the digest length passed as a mechanism parameter.  The
 * parameter holds a ulong_t; when it is not suitably aligned it is
 * first memcpy'd into a local temporary before being narrowed to
 * 32 bits, so misaligned cm_param pointers are handled safely.
 */
#define	PROV_SHA2_GET_DIGEST_LEN(m, len) {				\
	if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t)))		\
		(len) = (uint32_t)*((ulong_t *)(m)->cm_param);	\
	else {								\
		ulong_t tmp_ulong;					\
		memcpy(&tmp_ulong, (m)->cm_param, sizeof (ulong_t));	\
		(len) = (uint32_t)tmp_ulong;				\
	}								\
}

/*
 * One-shot digest of a key buffer: init/update/final on `ctx'.
 * Used to shrink HMAC keys longer than the algorithm's block size
 * (the FIPS 198 key-preprocessing step).
 */
#define	PROV_SHA2_DIGEST_KEY(mech, ctx, key, len, digest) {	\
	SHA2Init(mech, ctx);				\
	SHA2Update(ctx, key, len);			\
	SHA2Final(digest, ctx);				\
}
58 
/*
 * Mechanism info structure passed to KCF during registration.
 * This provider exposes only SHA512-HMAC, for both multi-part
 * (CRYPTO_FG_MAC) and single-call (CRYPTO_FG_MAC_ATOMIC) use.
 */
static const crypto_mech_info_t sha2_mech_info_tab[] = {
	/* SHA512-HMAC */
	{SUN_CKM_SHA512_HMAC, SHA512_HMAC_MECH_INFO_TYPE,
	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC},
};
67 
68 static int sha2_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
69     crypto_spi_ctx_template_t);
70 static int sha2_mac_update(crypto_ctx_t *, crypto_data_t *);
71 static int sha2_mac_final(crypto_ctx_t *, crypto_data_t *);
72 static int sha2_mac_atomic(crypto_mechanism_t *, crypto_key_t *,
73     crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t);
74 static int sha2_mac_verify_atomic(crypto_mechanism_t *, crypto_key_t *,
75     crypto_data_t *, crypto_data_t *, crypto_spi_ctx_template_t);
76 
/*
 * MAC entry points exported to KCF.  The single-part .mac entry is
 * intentionally NULL; callers use init/update/final or the atomics.
 */
static const crypto_mac_ops_t sha2_mac_ops = {
	.mac_init = sha2_mac_init,
	.mac = NULL,
	.mac_update = sha2_mac_update,
	.mac_final = sha2_mac_final,
	.mac_atomic = sha2_mac_atomic,
	.mac_verify_atomic = sha2_mac_verify_atomic
};
85 
86 static int sha2_create_ctx_template(crypto_mechanism_t *, crypto_key_t *,
87     crypto_spi_ctx_template_t *, size_t *);
88 static int sha2_free_context(crypto_ctx_t *);
89 
/* Context-management entry points exported to KCF. */
static const crypto_ctx_ops_t sha2_ctx_ops = {
	.create_ctx_template = sha2_create_ctx_template,
	.free_context = sha2_free_context
};
94 
/*
 * Top-level ops vector handed to KCF; the first slot is intentionally
 * NULL (this provider supplies only MAC and context-management ops).
 */
static const crypto_ops_t sha2_crypto_ops = {
	NULL,
	&sha2_mac_ops,
	&sha2_ctx_ops,
};
100 
/* Provider description registered with KCF at module load. */
static const crypto_provider_info_t sha2_prov_info = {
	"SHA2 Software Provider",
	&sha2_crypto_ops,
	sizeof (sha2_mech_info_tab) / sizeof (crypto_mech_info_t),
	sha2_mech_info_tab
};
107 
108 static crypto_kcf_provider_handle_t sha2_prov_handle = 0;
109 
110 int
sha2_mod_init(void)111 sha2_mod_init(void)
112 {
113 	int ret;
114 
115 	/*
116 	 * Register with KCF. If the registration fails, log an
117 	 * error but do not uninstall the module, since the functionality
118 	 * provided by misc/sha2 should still be available.
119 	 */
120 	if ((ret = crypto_register_provider(&sha2_prov_info,
121 	    &sha2_prov_handle)) != CRYPTO_SUCCESS)
122 		cmn_err(CE_WARN, "sha2 _init: "
123 		    "crypto_register_provider() failed (0x%x)", ret);
124 
125 	return (0);
126 }
127 
128 int
sha2_mod_fini(void)129 sha2_mod_fini(void)
130 {
131 	int ret = 0;
132 
133 	if (sha2_prov_handle != 0) {
134 		if ((ret = crypto_unregister_provider(sha2_prov_handle)) !=
135 		    CRYPTO_SUCCESS) {
136 			cmn_err(CE_WARN,
137 			    "sha2 _fini: crypto_unregister_provider() "
138 			    "failed (0x%x)", ret);
139 			return (EBUSY);
140 		}
141 		sha2_prov_handle = 0;
142 	}
143 
144 	return (ret);
145 }
146 
147 /*
148  * Helper SHA2 digest update function for uio data.
149  */
150 static int
sha2_digest_update_uio(SHA2_CTX * sha2_ctx,crypto_data_t * data)151 sha2_digest_update_uio(SHA2_CTX *sha2_ctx, crypto_data_t *data)
152 {
153 	off_t offset = data->cd_offset;
154 	size_t length = data->cd_length;
155 	uint_t vec_idx = 0;
156 	size_t cur_len;
157 
158 	/* we support only kernel buffer */
159 	if (zfs_uio_segflg(data->cd_uio) != UIO_SYSSPACE)
160 		return (CRYPTO_ARGUMENTS_BAD);
161 
162 	/*
163 	 * Jump to the first iovec containing data to be
164 	 * digested.
165 	 */
166 	offset = zfs_uio_index_at_offset(data->cd_uio, offset, &vec_idx);
167 	if (vec_idx == zfs_uio_iovcnt(data->cd_uio)) {
168 		/*
169 		 * The caller specified an offset that is larger than the
170 		 * total size of the buffers it provided.
171 		 */
172 		return (CRYPTO_DATA_LEN_RANGE);
173 	}
174 
175 	/*
176 	 * Now do the digesting on the iovecs.
177 	 */
178 	while (vec_idx < zfs_uio_iovcnt(data->cd_uio) && length > 0) {
179 		cur_len = MIN(zfs_uio_iovlen(data->cd_uio, vec_idx) -
180 		    offset, length);
181 
182 		SHA2Update(sha2_ctx, (uint8_t *)zfs_uio_iovbase(data->cd_uio,
183 		    vec_idx) + offset, cur_len);
184 		length -= cur_len;
185 		vec_idx++;
186 		offset = 0;
187 	}
188 
189 	if (vec_idx == zfs_uio_iovcnt(data->cd_uio) && length > 0) {
190 		/*
191 		 * The end of the specified iovec's was reached but
192 		 * the length requested could not be processed, i.e.
193 		 * The caller requested to digest more data than it provided.
194 		 */
195 		return (CRYPTO_DATA_LEN_RANGE);
196 	}
197 
198 	return (CRYPTO_SUCCESS);
199 }
200 
/*
 * Helper SHA2 digest final function for uio data.
 * digest_len is the length of the desired digest. If digest_len
 * is smaller than the default SHA2 digest length, the caller
 * must pass a scratch buffer, digest_scratch, which must
 * be at least the algorithm's digest length bytes.
 *
 * Two paths: when the whole digest fits inside a single iovec it is
 * finalized (directly or via the scratch buffer) into place; otherwise
 * it is finalized into a stack temporary and copied out piecemeal.
 */
static int
sha2_digest_final_uio(SHA2_CTX *sha2_ctx, crypto_data_t *digest,
    ulong_t digest_len, uchar_t *digest_scratch)
{
	off_t offset = digest->cd_offset;
	uint_t vec_idx = 0;

	/* we support only kernel buffer */
	if (zfs_uio_segflg(digest->cd_uio) != UIO_SYSSPACE)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Jump to the first iovec containing ptr to the digest to
	 * be returned.
	 */
	offset = zfs_uio_index_at_offset(digest->cd_uio, offset, &vec_idx);
	if (vec_idx == zfs_uio_iovcnt(digest->cd_uio)) {
		/*
		 * The caller specified an offset that is
		 * larger than the total size of the buffers
		 * it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	if (offset + digest_len <=
	    zfs_uio_iovlen(digest->cd_uio, vec_idx)) {
		/*
		 * The computed SHA2 digest will fit in the current
		 * iovec.
		 */
		ASSERT3U(sha2_ctx->algotype, ==, SHA512_HMAC_MECH_INFO_TYPE);
		if (digest_len != SHA512_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA2Final(digest_scratch, sha2_ctx);

			memcpy((uchar_t *)
			    zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
			    digest_scratch, digest_len);
		} else {
			/* Full-length digest: finalize straight into place. */
			SHA2Final((uchar_t *)zfs_uio_iovbase(digest->
			    cd_uio, vec_idx) + offset,
			    sha2_ctx);

		}
	} else {
		/*
		 * The computed digest will be crossing one or more iovec's.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piece meal to the specified digest iovec's.
		 */
		uchar_t digest_tmp[SHA512_DIGEST_LENGTH];
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		SHA2Final(digest_tmp, sha2_ctx);

		/* Copy digest_tmp out, one iovec-sized chunk at a time. */
		while (vec_idx < zfs_uio_iovcnt(digest->cd_uio) && length > 0) {
			cur_len =
			    MIN(zfs_uio_iovlen(digest->cd_uio, vec_idx) -
			    offset, length);
			memcpy(
			    zfs_uio_iovbase(digest->cd_uio, vec_idx) + offset,
			    digest_tmp + scratch_offset,
			    cur_len);

			length -= cur_len;
			vec_idx++;
			scratch_offset += cur_len;
			offset = 0;
		}

		if (vec_idx == zfs_uio_iovcnt(digest->cd_uio) && length > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed, i.e.
			 * The caller requested to digest more data than it
			 * provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}
299 
300 /*
301  * KCF software provider mac entry points.
302  *
303  * SHA2 HMAC is: SHA2(key XOR opad, SHA2(key XOR ipad, text))
304  *
305  * Init:
306  * The initialization routine initializes what we denote
307  * as the inner and outer contexts by doing
308  * - for inner context: SHA2(key XOR ipad)
309  * - for outer context: SHA2(key XOR opad)
310  *
311  * Update:
312  * Each subsequent SHA2 HMAC update will result in an
313  * update of the inner context with the specified data.
314  *
315  * Final:
316  * The SHA2 HMAC final will do a SHA2 final operation on the
317  * inner context, and the resulting digest will be used
318  * as the data for an update on the outer context. Last
319  * but not least, a SHA2 final on the outer context will
320  * be performed to obtain the SHA2 HMAC digest to return
321  * to the user.
322  */
323 
324 /*
325  * Initialize a SHA2-HMAC context.
326  */
327 static void
sha2_mac_init_ctx(sha2_hmac_ctx_t * ctx,void * keyval,uint_t length_in_bytes)328 sha2_mac_init_ctx(sha2_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
329 {
330 	uint64_t ipad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)] = {0};
331 	uint64_t opad[SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t)] = {0};
332 	int i, block_size, blocks_per_int64;
333 
334 	/* Determine the block size */
335 	ASSERT3U(ctx->hc_mech_type, ==, SHA512_HMAC_MECH_INFO_TYPE);
336 	block_size = SHA512_HMAC_BLOCK_SIZE;
337 	blocks_per_int64 = SHA512_HMAC_BLOCK_SIZE / sizeof (uint64_t);
338 
339 	(void) memset(ipad, 0, block_size);
340 	(void) memset(opad, 0, block_size);
341 
342 	if (keyval != NULL) {
343 		(void) memcpy(ipad, keyval, length_in_bytes);
344 		(void) memcpy(opad, keyval, length_in_bytes);
345 	} else {
346 		ASSERT0(length_in_bytes);
347 	}
348 
349 	/* XOR key with ipad (0x36) and opad (0x5c) */
350 	for (i = 0; i < blocks_per_int64; i ++) {
351 		ipad[i] ^= 0x3636363636363636;
352 		opad[i] ^= 0x5c5c5c5c5c5c5c5c;
353 	}
354 
355 	/* perform SHA2 on ipad */
356 	SHA2Init(ctx->hc_mech_type, &ctx->hc_icontext);
357 	SHA2Update(&ctx->hc_icontext, (uint8_t *)ipad, block_size);
358 
359 	/* perform SHA2 on opad */
360 	SHA2Init(ctx->hc_mech_type, &ctx->hc_ocontext);
361 	SHA2Update(&ctx->hc_ocontext, (uint8_t *)opad, block_size);
362 }
363 
364 /*
365  */
366 static int
sha2_mac_init(crypto_ctx_t * ctx,crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_spi_ctx_template_t ctx_template)367 sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
368     crypto_key_t *key, crypto_spi_ctx_template_t ctx_template)
369 {
370 	int ret = CRYPTO_SUCCESS;
371 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
372 	uint_t sha_digest_len, sha_hmac_block_size;
373 
374 	/*
375 	 * Set the digest length and block size to values appropriate to the
376 	 * mechanism
377 	 */
378 	switch (mechanism->cm_type) {
379 	case SHA512_HMAC_MECH_INFO_TYPE:
380 		sha_digest_len = SHA512_DIGEST_LENGTH;
381 		sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
382 		break;
383 	default:
384 		return (CRYPTO_MECHANISM_INVALID);
385 	}
386 
387 	ctx->cc_provider_private =
388 	    kmem_alloc(sizeof (sha2_hmac_ctx_t), KM_SLEEP);
389 	if (ctx->cc_provider_private == NULL)
390 		return (CRYPTO_HOST_MEMORY);
391 
392 	PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
393 	if (ctx_template != NULL) {
394 		/* reuse context template */
395 		memcpy(PROV_SHA2_HMAC_CTX(ctx), ctx_template,
396 		    sizeof (sha2_hmac_ctx_t));
397 	} else {
398 		/* no context template, compute context */
399 		if (keylen_in_bytes > sha_hmac_block_size) {
400 			uchar_t digested_key[SHA512_DIGEST_LENGTH];
401 			sha2_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
402 
403 			/*
404 			 * Hash the passed-in key to get a smaller key.
405 			 * The inner context is used since it hasn't been
406 			 * initialized yet.
407 			 */
408 			PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
409 			    &hmac_ctx->hc_icontext,
410 			    key->ck_data, keylen_in_bytes, digested_key);
411 			sha2_mac_init_ctx(PROV_SHA2_HMAC_CTX(ctx),
412 			    digested_key, sha_digest_len);
413 		} else {
414 			sha2_mac_init_ctx(PROV_SHA2_HMAC_CTX(ctx),
415 			    key->ck_data, keylen_in_bytes);
416 		}
417 	}
418 
419 	if (ret != CRYPTO_SUCCESS) {
420 		memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t));
421 		kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
422 		ctx->cc_provider_private = NULL;
423 	}
424 
425 	return (ret);
426 }
427 
428 static int
sha2_mac_update(crypto_ctx_t * ctx,crypto_data_t * data)429 sha2_mac_update(crypto_ctx_t *ctx, crypto_data_t *data)
430 {
431 	int ret = CRYPTO_SUCCESS;
432 
433 	ASSERT(ctx->cc_provider_private != NULL);
434 
435 	/*
436 	 * Do a SHA2 update of the inner context using the specified
437 	 * data.
438 	 */
439 	switch (data->cd_format) {
440 	case CRYPTO_DATA_RAW:
441 		SHA2Update(&PROV_SHA2_HMAC_CTX(ctx)->hc_icontext,
442 		    (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
443 		    data->cd_length);
444 		break;
445 	case CRYPTO_DATA_UIO:
446 		ret = sha2_digest_update_uio(
447 		    &PROV_SHA2_HMAC_CTX(ctx)->hc_icontext, data);
448 		break;
449 	default:
450 		ret = CRYPTO_ARGUMENTS_BAD;
451 	}
452 
453 	return (ret);
454 }
455 
/*
 * Multi-part MAC final: finish the inner hash, feed its digest to the
 * outer context, and emit the outer digest as the HMAC.  The
 * provider-private context is zeroized and freed on every exit path
 * except the CRYPTO_BUFFER_TOO_SMALL probe, which only reports the
 * required length.
 */
static int
sha2_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA512_DIGEST_LENGTH];
	uint32_t digest_len, sha_digest_len;

	ASSERT(ctx->cc_provider_private != NULL);

	/* Set the digest lengths to values appropriate to the mechanism */
	switch (PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type) {
	case SHA512_HMAC_MECH_INFO_TYPE:
		sha_digest_len = digest_len = SHA512_DIGEST_LENGTH;
		break;
	default:
		return (CRYPTO_ARGUMENTS_BAD);
	}

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
		mac->cd_length = digest_len;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do a SHA2 final on the inner context.
	 */
	SHA2Final(digest, &PROV_SHA2_HMAC_CTX(ctx)->hc_icontext);

	/*
	 * Do a SHA2 update on the outer context, feeding the inner
	 * digest as data.
	 */
	SHA2Update(&PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext, digest,
	    sha_digest_len);

	/*
	 * Do a SHA2 final on the outer context, storing the computing
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		/*
		 * NOTE(review): digest_len always equals sha_digest_len
		 * here (both are set to SHA512_DIGEST_LENGTH above), so
		 * this short-digest branch looks unreachable — confirm
		 * before removing.
		 */
		if (digest_len != sha_digest_len) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA2Final(digest,
			    &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext);
			memcpy((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest, digest_len);
		} else {
			SHA2Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset,
			    &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = sha2_digest_final_uio(
		    &PROV_SHA2_HMAC_CTX(ctx)->hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* Report the produced length, or 0 on failure. */
	if (ret == CRYPTO_SUCCESS)
		mac->cd_length = digest_len;
	else
		mac->cd_length = 0;

	/* Wipe the key-derived state before freeing the context. */
	memset(ctx->cc_provider_private, 0, sizeof (sha2_hmac_ctx_t));
	kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}
537 
/*
 * Digest `data' (CRYPTO_DATA_RAW or CRYPTO_DATA_UIO) into the inner
 * context of the on-stack HMAC context `ctx', setting `ret' to
 * CRYPTO_ARGUMENTS_BAD for any other data format.  Shared by the
 * atomic MAC entry points below.
 */
#define	SHA2_MAC_UPDATE(data, ctx, ret) {				\
	switch (data->cd_format) {					\
	case CRYPTO_DATA_RAW:						\
		SHA2Update(&(ctx).hc_icontext,				\
		    (uint8_t *)data->cd_raw.iov_base +			\
		    data->cd_offset, data->cd_length);			\
		break;							\
	case CRYPTO_DATA_UIO:						\
		ret = sha2_digest_update_uio(&(ctx).hc_icontext, data);	\
		break;							\
	default:							\
		ret = CRYPTO_ARGUMENTS_BAD;				\
	}								\
}
552 
/*
 * Single-call (atomic) SHA2-HMAC: set up a stack context (from a
 * template or from the key), digest the data, and write the MAC in one
 * entry point.  The stack context is zeroized on the failure path
 * (`bail'); the success path returns directly.
 */
static int
sha2_mac_atomic(crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
    crypto_spi_ctx_template_t ctx_template)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA512_DIGEST_LENGTH];
	sha2_hmac_ctx_t sha2_hmac_ctx;
	uint32_t sha_digest_len, digest_len, sha_hmac_block_size;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	/*
	 * Set the digest length and block size to values appropriate to the
	 * mechanism
	 */
	switch (mechanism->cm_type) {
	case SHA512_HMAC_MECH_INFO_TYPE:
		sha_digest_len = digest_len = SHA512_DIGEST_LENGTH;
		sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
		break;
	default:
		return (CRYPTO_MECHANISM_INVALID);
	}

	if (ctx_template != NULL) {
		/* reuse context template */
		memcpy(&sha2_hmac_ctx, ctx_template, sizeof (sha2_hmac_ctx_t));
	} else {
		sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
		/* no context template, initialize context */
		if (keylen_in_bytes > sha_hmac_block_size) {
			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
			    &sha2_hmac_ctx.hc_icontext,
			    key->ck_data, keylen_in_bytes, digest);
			sha2_mac_init_ctx(&sha2_hmac_ctx, digest,
			    sha_digest_len);
		} else {
			sha2_mac_init_ctx(&sha2_hmac_ctx, key->ck_data,
			    keylen_in_bytes);
		}
	}

	/* do a SHA2 update of the inner context using the specified data */
	SHA2_MAC_UPDATE(data, sha2_hmac_ctx, ret);
	if (ret != CRYPTO_SUCCESS)
		/* the update failed, free context and bail */
		goto bail;

	/*
	 * Do a SHA2 final on the inner context.
	 */
	SHA2Final(digest, &sha2_hmac_ctx.hc_icontext);

	/*
	 * Do an SHA2 update on the outer context, feeding the inner
	 * digest as data.
	 */
	ASSERT3U(mechanism->cm_type, ==, SHA512_HMAC_MECH_INFO_TYPE);
	SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len);

	/*
	 * Do a SHA2 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		/*
		 * NOTE(review): digest_len always equals sha_digest_len
		 * here (both set to SHA512_DIGEST_LENGTH above), so this
		 * short-digest branch looks unreachable — confirm.
		 */
		if (digest_len != sha_digest_len) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext);
			memcpy((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest, digest_len);
		} else {
			SHA2Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, &sha2_hmac_ctx.hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = sha2_digest_final_uio(&sha2_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/*
	 * NOTE(review): the success path returns without zeroizing the
	 * stack HMAC context (only `bail' wipes it) — confirm whether the
	 * key-derived state should be wiped here too.
	 */
	if (ret == CRYPTO_SUCCESS) {
		mac->cd_length = digest_len;
		return (CRYPTO_SUCCESS);
	}
bail:
	memset(&sha2_hmac_ctx, 0, sizeof (sha2_hmac_ctx_t));
	mac->cd_length = 0;
	return (ret);
}
655 
/*
 * Single-call (atomic) SHA2-HMAC verification: compute the HMAC of
 * `data' with `key' and compare it against the expected MAC in `mac'.
 * Returns CRYPTO_INVALID_MAC on mismatch or on an expected-MAC length
 * that cannot match.
 */
static int
sha2_mac_verify_atomic(crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
    crypto_spi_ctx_template_t ctx_template)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[SHA512_DIGEST_LENGTH];
	sha2_hmac_ctx_t sha2_hmac_ctx;
	uint32_t sha_digest_len, digest_len, sha_hmac_block_size;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	/*
	 * Set the digest length and block size to values appropriate to the
	 * mechanism
	 */
	switch (mechanism->cm_type) {
	case SHA512_HMAC_MECH_INFO_TYPE:
		sha_digest_len = digest_len = SHA512_DIGEST_LENGTH;
		sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
		break;
	default:
		return (CRYPTO_MECHANISM_INVALID);
	}

	if (ctx_template != NULL) {
		/* reuse context template */
		memcpy(&sha2_hmac_ctx, ctx_template, sizeof (sha2_hmac_ctx_t));
	} else {
		sha2_hmac_ctx.hc_mech_type = mechanism->cm_type;
		/* no context template, initialize context */
		if (keylen_in_bytes > sha_hmac_block_size) {
			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
			    &sha2_hmac_ctx.hc_icontext,
			    key->ck_data, keylen_in_bytes, digest);
			sha2_mac_init_ctx(&sha2_hmac_ctx, digest,
			    sha_digest_len);
		} else {
			sha2_mac_init_ctx(&sha2_hmac_ctx, key->ck_data,
			    keylen_in_bytes);
		}
	}

	/* An expected MAC of the wrong length can never match. */
	if (mac->cd_length != digest_len) {
		ret = CRYPTO_INVALID_MAC;
		goto bail;
	}

	/* do a SHA2 update of the inner context using the specified data */
	SHA2_MAC_UPDATE(data, sha2_hmac_ctx, ret);
	if (ret != CRYPTO_SUCCESS)
		/* the update failed, free context and bail */
		goto bail;

	/* do a SHA2 final on the inner context */
	SHA2Final(digest, &sha2_hmac_ctx.hc_icontext);

	/*
	 * Do an SHA2 update on the outer context, feeding the inner
	 * digest as data.
	 */
	ASSERT3U(mechanism->cm_type, ==, SHA512_HMAC_MECH_INFO_TYPE);
	SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len);

	/*
	 * Do a SHA2 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext);

	/*
	 * Compare the computed digest against the expected digest passed
	 * as argument.
	 */

	switch (mac->cd_format) {

	case CRYPTO_DATA_RAW:
		if (memcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
		    mac->cd_offset, digest_len) != 0)
			ret = CRYPTO_INVALID_MAC;
		break;

	case CRYPTO_DATA_UIO: {
		off_t offset = mac->cd_offset;
		uint_t vec_idx = 0;
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		/* we support only kernel buffer */
		/*
		 * NOTE(review): this path returns directly, skipping the
		 * context zeroization and mac->cd_length reset performed
		 * at `bail' — confirm this asymmetry is intended.
		 */
		if (zfs_uio_segflg(mac->cd_uio) != UIO_SYSSPACE)
			return (CRYPTO_ARGUMENTS_BAD);

		/* jump to the first iovec containing the expected digest */
		offset = zfs_uio_index_at_offset(mac->cd_uio, offset, &vec_idx);
		if (vec_idx == zfs_uio_iovcnt(mac->cd_uio)) {
			/*
			 * The caller specified an offset that is
			 * larger than the total size of the buffers
			 * it provided.
			 */
			ret = CRYPTO_DATA_LEN_RANGE;
			break;
		}

		/* do the comparison of computed digest vs specified one */
		while (vec_idx < zfs_uio_iovcnt(mac->cd_uio) && length > 0) {
			cur_len = MIN(zfs_uio_iovlen(mac->cd_uio, vec_idx) -
			    offset, length);

			if (memcmp(digest + scratch_offset,
			    zfs_uio_iovbase(mac->cd_uio, vec_idx) + offset,
			    cur_len) != 0) {
				ret = CRYPTO_INVALID_MAC;
				break;
			}

			length -= cur_len;
			vec_idx++;
			scratch_offset += cur_len;
			offset = 0;
		}
		break;
	}

	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/*
	 * NOTE(review): on this (non-bail) path the stack HMAC context is
	 * not zeroized before returning — confirm whether the key-derived
	 * state should be wiped here as well.
	 */
	return (ret);
bail:
	memset(&sha2_hmac_ctx, 0, sizeof (sha2_hmac_ctx_t));
	mac->cd_length = 0;
	return (ret);
}
796 
797 /*
798  * KCF software provider context management entry points.
799  */
800 
801 static int
sha2_create_ctx_template(crypto_mechanism_t * mechanism,crypto_key_t * key,crypto_spi_ctx_template_t * ctx_template,size_t * ctx_template_size)802 sha2_create_ctx_template(crypto_mechanism_t *mechanism, crypto_key_t *key,
803     crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size)
804 {
805 	sha2_hmac_ctx_t *sha2_hmac_ctx_tmpl;
806 	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
807 	uint32_t sha_digest_len, sha_hmac_block_size;
808 
809 	/*
810 	 * Set the digest length and block size to values appropriate to the
811 	 * mechanism
812 	 */
813 	switch (mechanism->cm_type) {
814 	case SHA512_HMAC_MECH_INFO_TYPE:
815 		sha_digest_len = SHA512_DIGEST_LENGTH;
816 		sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
817 		break;
818 	default:
819 		return (CRYPTO_MECHANISM_INVALID);
820 	}
821 
822 	/*
823 	 * Allocate and initialize SHA2 context.
824 	 */
825 	sha2_hmac_ctx_tmpl = kmem_alloc(sizeof (sha2_hmac_ctx_t), KM_SLEEP);
826 	if (sha2_hmac_ctx_tmpl == NULL)
827 		return (CRYPTO_HOST_MEMORY);
828 
829 	sha2_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
830 
831 	if (keylen_in_bytes > sha_hmac_block_size) {
832 		uchar_t digested_key[SHA512_DIGEST_LENGTH];
833 
834 		/*
835 		 * Hash the passed-in key to get a smaller key.
836 		 * The inner context is used since it hasn't been
837 		 * initialized yet.
838 		 */
839 		PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
840 		    &sha2_hmac_ctx_tmpl->hc_icontext,
841 		    key->ck_data, keylen_in_bytes, digested_key);
842 		sha2_mac_init_ctx(sha2_hmac_ctx_tmpl, digested_key,
843 		    sha_digest_len);
844 	} else {
845 		sha2_mac_init_ctx(sha2_hmac_ctx_tmpl, key->ck_data,
846 		    keylen_in_bytes);
847 	}
848 
849 	*ctx_template = (crypto_spi_ctx_template_t)sha2_hmac_ctx_tmpl;
850 	*ctx_template_size = sizeof (sha2_hmac_ctx_t);
851 
852 	return (CRYPTO_SUCCESS);
853 }
854 
855 static int
sha2_free_context(crypto_ctx_t * ctx)856 sha2_free_context(crypto_ctx_t *ctx)
857 {
858 	uint_t ctx_len;
859 
860 	if (ctx->cc_provider_private == NULL)
861 		return (CRYPTO_SUCCESS);
862 
863 	ASSERT3U(PROV_SHA2_CTX(ctx)->sc_mech_type, ==,
864 	    SHA512_HMAC_MECH_INFO_TYPE);
865 	ctx_len = sizeof (sha2_hmac_ctx_t);
866 
867 	memset(ctx->cc_provider_private, 0, ctx_len);
868 	kmem_free(ctx->cc_provider_private, ctx_len);
869 	ctx->cc_provider_private = NULL;
870 
871 	return (CRYPTO_SUCCESS);
872 }
873