/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * AES provider for the Kernel Cryptographic Framework (KCF)
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/spi.h>
#include <sys/sysmacros.h>
#include <sys/strsun.h>
#include <aes_impl.h>
#include <aes_cbc_crypt.h>

extern struct mod_ops mod_cryptoops;

/*
 * Module linkage information for the kernel.
 */
static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	"AES Kernel SW Provider %I%"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcrypto,
	NULL
};

/*
 * CSPI information (entry points, provider info, etc.)
 */
typedef enum aes_mech_type {
	AES_ECB_MECH_INFO_TYPE,		/* SUN_CKM_AES_ECB */
	AES_CBC_MECH_INFO_TYPE,		/* SUN_CKM_AES_CBC */
	AES_CBC_PAD_MECH_INFO_TYPE,	/* SUN_CKM_AES_CBC_PAD */
	AES_CTR_MECH_INFO_TYPE		/* SUN_CKM_AES_CTR */
} aes_mech_type_t;

/*
 * The following definitions are to keep EXPORT_SRC happy.
 */
#ifndef AES_MIN_KEY_LEN
#define	AES_MIN_KEY_LEN		0
#endif

#ifndef AES_MAX_KEY_LEN
#define	AES_MAX_KEY_LEN		0
#endif

/*
 * Mechanism info structure passed to KCF during registration.
 */
static crypto_mech_info_t aes_mech_info_tab[] = {
	/* AES_ECB */
	{SUN_CKM_AES_ECB, AES_ECB_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* AES_CBC */
	{SUN_CKM_AES_CBC, AES_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* AES_CTR */
	{SUN_CKM_AES_CTR, AES_CTR_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};

#define	AES_VALID_MECH(mech)					\
	(((mech)->cm_type == AES_ECB_MECH_INFO_TYPE ||		\
	(mech)->cm_type == AES_CBC_MECH_INFO_TYPE ||		\
	(mech)->cm_type == AES_CTR_MECH_INFO_TYPE) ? 1 : 0)
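
/*
 * Note: AES_CBC_PAD_MECH_INFO_TYPE is declared in aes_mech_type_t above
 * only so the mechanism numbering stays stable; it is not registered in
 * aes_mech_info_tab and is not accepted by AES_VALID_MECH(), since
 * CBC-with-padding is not implemented by this provider.
 */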

/* operations are in-place if the output buffer is NULL */
#define	AES_ARG_INPLACE(input, output)				\
	if ((output) == NULL)					\
		(output) = (input);

static void aes_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t aes_control_ops = {
	aes_provider_status
};

static int aes_common_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_common_init_ctx(aes_ctx_t *, crypto_spi_ctx_template_t *,
    crypto_mechanism_t *, crypto_key_t *, int);
static int aes_encrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_decrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);

static int aes_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_encrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int aes_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static int aes_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_decrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int aes_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_cipher_ops_t aes_cipher_ops = {
	aes_common_init,
	aes_encrypt,
	aes_encrypt_update,
	aes_encrypt_final,
	aes_encrypt_atomic,
	aes_common_init,
	aes_decrypt,
	aes_decrypt_update,
	aes_decrypt_final,
	aes_decrypt_atomic
};

static int aes_create_ctx_template(crypto_provider_handle_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
    size_t *, crypto_req_handle_t);
static int aes_free_context(crypto_ctx_t *);

static crypto_ctx_ops_t aes_ctx_ops = {
	aes_create_ctx_template,
	aes_free_context
};

static crypto_ops_t aes_crypto_ops = {
	&aes_control_ops,
	NULL,
	&aes_cipher_ops,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	&aes_ctx_ops
};

static crypto_provider_info_t aes_prov_info = {
	CRYPTO_SPI_VERSION_1,
	"AES Software Provider",
	CRYPTO_SW_PROVIDER,
	{&modlinkage},
	NULL,
	&aes_crypto_ops,
	sizeof (aes_mech_info_tab)/sizeof (crypto_mech_info_t),
	aes_mech_info_tab
};

static crypto_kcf_provider_handle_t aes_prov_handle = NULL;

int
_init(void)
{
	int ret;

	/*
	 * Register with KCF. If the registration fails, return error.
	 */
	if ((ret = crypto_register_provider(&aes_prov_info,
	    &aes_prov_handle)) != CRYPTO_SUCCESS) {
		cmn_err(CE_WARN, "%s _init: crypto_register_provider() "
		    "failed (0x%x)", CRYPTO_PROVIDER_NAME, ret);
		return (EACCES);
	}

	if ((ret = mod_install(&modlinkage)) != 0) {
		int rv;

		ASSERT(aes_prov_handle != NULL);
		/* We should not return if the unregister returns busy. */
		while ((rv = crypto_unregister_provider(aes_prov_handle))
		    == CRYPTO_BUSY) {
			cmn_err(CE_WARN,
			    "%s _init: crypto_unregister_provider() "
			    "failed (0x%x). Retrying.",
			    CRYPTO_PROVIDER_NAME, rv);
			/* wait 10 seconds and try again. */
			delay(10 * drv_usectohz(1000000));
		}
	}

	return (ret);
}

int
_fini(void)
{
	int ret;

	/*
	 * Unregister from KCF if previous registration succeeded.
	 */
	if (aes_prov_handle != NULL) {
		if ((ret = crypto_unregister_provider(aes_prov_handle)) !=
		    CRYPTO_SUCCESS) {
			cmn_err(CE_WARN,
			    "%s _fini: crypto_unregister_provider() "
			    "failed (0x%x)", CRYPTO_PROVIDER_NAME, ret);
			return (EBUSY);
		}
		aes_prov_handle = NULL;
	}

	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/* EXPORT DELETE START */

/*
 * Initialize key schedules for AES
 */
static int
init_keysched(crypto_key_t *key, void *newbie)
{
	/*
	 * Only keys by value are supported by this module.
	 */
	switch (key->ck_format) {
	case CRYPTO_KEY_RAW:
		if (key->ck_length < AES_MINBITS ||
		    key->ck_length > AES_MAXBITS) {
			return (CRYPTO_KEY_SIZE_RANGE);
		}

		/* key length must be either 128, 192, or 256 */
		if ((key->ck_length & 63) != 0)
			return (CRYPTO_KEY_SIZE_RANGE);
		break;
	default:
		return (CRYPTO_KEY_TYPE_INCONSISTENT);
	}

	aes_init_keysched(key->ck_data, key->ck_length, newbie);
	return (CRYPTO_SUCCESS);
}

/* EXPORT DELETE END */

/*
 * KCF software provider control entry points.
 */
/* ARGSUSED */
static void
aes_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}

/*
 * KCF software provider encrypt entry points.
 */
static int
aes_common_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t template,
    crypto_req_handle_t req)
{

/* EXPORT DELETE START */

	aes_ctx_t *aes_ctx;
	int rv;
	int kmflag;

	/*
	 * Only keys by value are supported by this module.
	 */
	if (key->ck_format != CRYPTO_KEY_RAW) {
		return (CRYPTO_KEY_TYPE_INCONSISTENT);
	}

	if (!AES_VALID_MECH(mechanism))
		return (CRYPTO_MECHANISM_INVALID);

	if (mechanism->cm_param != NULL) {
		if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
			if (mechanism->cm_param_len !=
			    sizeof (CK_AES_CTR_PARAMS))
				return (CRYPTO_MECHANISM_PARAM_INVALID);
		} else {
			if (mechanism->cm_param_len != AES_BLOCK_LEN)
				return (CRYPTO_MECHANISM_PARAM_INVALID);
		}
	}

	/*
	 * Allocate an AES context.
	 */
	kmflag = crypto_kmflag(req);
	if ((aes_ctx = kmem_zalloc(sizeof (aes_ctx_t), kmflag)) == NULL)
		return (CRYPTO_HOST_MEMORY);

	rv = aes_common_init_ctx(aes_ctx, template, mechanism, key, kmflag);
	if (rv != CRYPTO_SUCCESS) {
		kmem_free(aes_ctx, sizeof (aes_ctx_t));
		return (rv);
	}

	ctx->cc_provider_private = aes_ctx;

/* EXPORT DELETE END */

	return (CRYPTO_SUCCESS);
}
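
/*
 * The three *_update helpers that follow differ only in how they walk the
 * input data: a single contiguous iovec, a uio segment list, or an mblk
 * chain.  Each one seeds the IV from cd_miscdata when the caller supplies
 * one, then hands contiguous byte runs to the cipher callback
 * (aes_encrypt_contiguous_blocks() or aes_decrypt_contiguous_blocks()).
 * A NULL output is passed to the callback when input == output, which
 * requests an in-place operation.
 */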

/*
 * Helper AES encrypt update function for iov input data.
 */
static int
aes_cipher_update_iov(aes_ctx_t *aes_ctx, crypto_data_t *input,
    crypto_data_t *output, int (*cipher)(aes_ctx_t *, caddr_t, size_t,
    crypto_data_t *))
{
	int rv;
/* EXPORT DELETE START */

	if (input->cd_miscdata != NULL) {
		if (IS_P2ALIGNED(input->cd_miscdata, sizeof (uint64_t))) {
			/* LINTED: pointer alignment */
			aes_ctx->ac_iv[0] = *(uint64_t *)input->cd_miscdata;
			/* LINTED: pointer alignment */
			aes_ctx->ac_iv[1] = *(uint64_t *)&input->cd_miscdata[8];
		} else {
			uint8_t *miscdata8 = (uint8_t *)&input->cd_miscdata[0];
			uint8_t *iv8 = (uint8_t *)&aes_ctx->ac_iv[0];

			AES_COPY_BLOCK(miscdata8, iv8);
		}
	}

	if (input->cd_raw.iov_len < input->cd_length)
		return (CRYPTO_ARGUMENTS_BAD);

	rv = (cipher)(aes_ctx, input->cd_raw.iov_base + input->cd_offset,
	    input->cd_length, (input == output) ? NULL : output);

/* EXPORT DELETE END */

	return (rv);
}

/*
 * Helper AES encrypt update function for uio input data.
 */
static int
aes_cipher_update_uio(aes_ctx_t *aes_ctx, crypto_data_t *input,
    crypto_data_t *output, int (*cipher)(aes_ctx_t *, caddr_t, size_t,
    crypto_data_t *))
{
/* EXPORT DELETE START */
	uio_t *uiop = input->cd_uio;
	off_t offset = input->cd_offset;
	size_t length = input->cd_length;
	uint_t vec_idx;
	size_t cur_len;

	if (input->cd_miscdata != NULL) {
		if (IS_P2ALIGNED(input->cd_miscdata, sizeof (uint64_t))) {
			/* LINTED: pointer alignment */
			aes_ctx->ac_iv[0] = *(uint64_t *)input->cd_miscdata;
			/* LINTED: pointer alignment */
			aes_ctx->ac_iv[1] = *(uint64_t *)&input->cd_miscdata[8];
		} else {
			uint8_t *miscdata8 = (uint8_t *)&input->cd_miscdata[0];
			uint8_t *iv8 = (uint8_t *)&aes_ctx->ac_iv[0];

			AES_COPY_BLOCK(miscdata8, iv8);
		}
	}

	if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
		return (CRYPTO_ARGUMENTS_BAD);
	}

	/*
	 * Jump to the first iovec containing data to be
	 * processed.
	 */
	for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
	    offset >= uiop->uio_iov[vec_idx].iov_len;
	    offset -= uiop->uio_iov[vec_idx++].iov_len);
	if (vec_idx == uiop->uio_iovcnt) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now process the iovecs.
	 */
	while (vec_idx < uiop->uio_iovcnt && length > 0) {
		cur_len = MIN(uiop->uio_iov[vec_idx].iov_len -
		    offset, length);

		(cipher)(aes_ctx, uiop->uio_iov[vec_idx].iov_base + offset,
		    cur_len, (input == output) ? NULL : output);

		length -= cur_len;
		vec_idx++;
		offset = 0;
	}

	if (vec_idx == uiop->uio_iovcnt && length > 0) {
		/*
		 * The end of the specified iovecs was reached but
		 * the length requested could not be processed, i.e.
		 * the caller requested to digest more data than it provided.
		 */

		return (CRYPTO_DATA_LEN_RANGE);
	}

/* EXPORT DELETE END */

	return (CRYPTO_SUCCESS);
}
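
/*
 * Note that, unlike aes_cipher_update_iov() above, the per-segment loops
 * in the uio and mblk helpers do not propagate the cipher callback's
 * return value; they report CRYPTO_SUCCESS once the requested length has
 * been walked.
 */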

/*
 * Helper AES encrypt update function for mblk input data.
 */
static int
aes_cipher_update_mp(aes_ctx_t *aes_ctx, crypto_data_t *input,
    crypto_data_t *output, int (*cipher)(aes_ctx_t *, caddr_t, size_t,
    crypto_data_t *))
{
/* EXPORT DELETE START */
	off_t offset = input->cd_offset;
	size_t length = input->cd_length;
	mblk_t *mp;
	size_t cur_len;

	if (input->cd_miscdata != NULL) {
		if (IS_P2ALIGNED(input->cd_miscdata, sizeof (uint64_t))) {
			/* LINTED: pointer alignment */
			aes_ctx->ac_iv[0] = *(uint64_t *)input->cd_miscdata;
			/* LINTED: pointer alignment */
			aes_ctx->ac_iv[1] = *(uint64_t *)&input->cd_miscdata[8];
		} else {
			uint8_t *miscdata8 = (uint8_t *)&input->cd_miscdata[0];
			uint8_t *iv8 = (uint8_t *)&aes_ctx->ac_iv[0];

			AES_COPY_BLOCK(miscdata8, iv8);
		}
	}

	/*
	 * Jump to the first mblk_t containing data to be processed.
	 */
	for (mp = input->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont);
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now do the processing on the mblk chain.
	 */
	while (mp != NULL && length > 0) {
		cur_len = MIN(MBLKL(mp) - offset, length);
		(cipher)(aes_ctx, (char *)(mp->b_rptr + offset), cur_len,
		    (input == output) ? NULL : output);

		length -= cur_len;
		offset = 0;
		mp = mp->b_cont;
	}

	if (mp == NULL && length > 0) {
		/*
		 * The end of the mblk chain was reached but the length
		 * requested could not be processed, i.e. the caller requested
		 * to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

/* EXPORT DELETE END */

	return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
static int
aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	int ret = CRYPTO_FAILED;

/* EXPORT DELETE START */

	aes_ctx_t *aes_ctx;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	/*
	 * For block ciphers, plaintext must be a multiple of AES block size.
	 * This test is only valid for ciphers whose blocksize is a power of 2.
	 */
	if (((aes_ctx->ac_flags & AES_CTR_MODE) == 0) &&
	    (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
		return (CRYPTO_DATA_LEN_RANGE);

	AES_ARG_INPLACE(plaintext, ciphertext);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following case.
	 */
	if (ciphertext->cd_length < plaintext->cd_length) {
		ciphertext->cd_length = plaintext->cd_length;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do an update on the specified input data.
	 */
	ret = aes_encrypt_update(ctx, plaintext, ciphertext, req);
	ASSERT(aes_ctx->ac_remainder_len == 0);
	(void) aes_free_context(ctx);

/* EXPORT DELETE END */

	/* LINTED */
	return (ret);
}
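
/*
 * aes_encrypt() above and aes_decrypt() below are the single-part entry
 * points: each runs the whole operation as one update and then frees the
 * context before returning, whether or not the update succeeded.
 */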

/* ARGSUSED */
static int
aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	int ret = CRYPTO_FAILED;

/* EXPORT DELETE START */

	aes_ctx_t *aes_ctx;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	/*
	 * For block ciphers, ciphertext must be a multiple of AES block size.
	 * This test is only valid for ciphers whose blocksize is a power of 2.
	 */
	if (((aes_ctx->ac_flags & AES_CTR_MODE) == 0) &&
	    (ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
		return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);

	AES_ARG_INPLACE(ciphertext, plaintext);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following case.
	 */
	if (plaintext->cd_length < ciphertext->cd_length) {
		plaintext->cd_length = ciphertext->cd_length;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do an update on the specified input data.
	 */
	ret = aes_decrypt_update(ctx, ciphertext, plaintext, req);
	ASSERT(aes_ctx->ac_remainder_len == 0);
	(void) aes_free_context(ctx);

/* EXPORT DELETE END */

	/* LINTED */
	return (ret);
}

/* ARGSUSED */
static int
aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	off_t saved_offset;
	size_t saved_length, out_len;
	int ret = CRYPTO_SUCCESS;
	aes_ctx_t *aes_ctx;

	ASSERT(ctx->cc_provider_private != NULL);

	AES_ARG_INPLACE(plaintext, ciphertext);

	/* compute number of bytes that will hold the ciphertext */
	out_len = ((aes_ctx_t *)ctx->cc_provider_private)->ac_remainder_len;
	out_len += plaintext->cd_length;
	out_len &= ~(AES_BLOCK_LEN - 1);

	/* return length needed to store the output */
	if (ciphertext->cd_length < out_len) {
		ciphertext->cd_length = out_len;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	saved_offset = ciphertext->cd_offset;
	saved_length = ciphertext->cd_length;

	/*
	 * Do the AES update on the specified input data.
	 */
	switch (plaintext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = aes_cipher_update_iov(ctx->cc_provider_private,
		    plaintext, ciphertext, aes_encrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_UIO:
		ret = aes_cipher_update_uio(ctx->cc_provider_private,
		    plaintext, ciphertext, aes_encrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_MBLK:
		ret = aes_cipher_update_mp(ctx->cc_provider_private,
		    plaintext, ciphertext, aes_encrypt_contiguous_blocks);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/*
	 * Since AES counter mode is a stream cipher, we call
	 * aes_counter_final() to pick up any remaining bytes.
	 * It is an internal function that does not destroy
	 * the context like *normal* final routines.
	 */
	aes_ctx = ctx->cc_provider_private;
	if ((aes_ctx->ac_flags & AES_CTR_MODE) &&
	    (aes_ctx->ac_remainder_len > 0)) {
		ret = aes_counter_final(aes_ctx, ciphertext);
	}

	if (ret == CRYPTO_SUCCESS) {
		if (plaintext != ciphertext)
			ciphertext->cd_length =
			    ciphertext->cd_offset - saved_offset;
	} else {
		ciphertext->cd_length = saved_length;
	}
	ciphertext->cd_offset = saved_offset;

	return (ret);
}

/* ARGSUSED */
static int
aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	off_t saved_offset;
	size_t saved_length, out_len;
	int ret = CRYPTO_SUCCESS;
	aes_ctx_t *aes_ctx;

	ASSERT(ctx->cc_provider_private != NULL);

	AES_ARG_INPLACE(ciphertext, plaintext);

	/* compute number of bytes that will hold the plaintext */
	out_len = ((aes_ctx_t *)ctx->cc_provider_private)->ac_remainder_len;
	out_len += ciphertext->cd_length;
	out_len &= ~(AES_BLOCK_LEN - 1);

	/* return length needed to store the output */
	if (plaintext->cd_length < out_len) {
		plaintext->cd_length = out_len;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	saved_offset = plaintext->cd_offset;
	saved_length = plaintext->cd_length;

	/*
	 * Do the AES update on the specified input data.
	 */
	switch (ciphertext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = aes_cipher_update_iov(ctx->cc_provider_private,
		    ciphertext, plaintext, aes_decrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_UIO:
		ret = aes_cipher_update_uio(ctx->cc_provider_private,
		    ciphertext, plaintext, aes_decrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_MBLK:
		ret = aes_cipher_update_mp(ctx->cc_provider_private,
		    ciphertext, plaintext, aes_decrypt_contiguous_blocks);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/*
	 * Since AES counter mode is a stream cipher, we call
	 * aes_counter_final() to pick up any remaining bytes.
	 * It is an internal function that does not destroy
	 * the context like *normal* final routines.
	 */
	aes_ctx = ctx->cc_provider_private;
	if ((aes_ctx->ac_flags & AES_CTR_MODE) &&
	    (aes_ctx->ac_remainder_len > 0)) {
		ret = aes_counter_final(aes_ctx, plaintext);
	}

	if (ret == CRYPTO_SUCCESS) {
		if (ciphertext != plaintext)
			plaintext->cd_length =
			    plaintext->cd_offset - saved_offset;
	} else {
		plaintext->cd_length = saved_length;
	}
	plaintext->cd_offset = saved_offset;

	return (ret);
}

/* ARGSUSED */
static int
aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{

/* EXPORT DELETE START */

	aes_ctx_t *aes_ctx;
	int ret;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	if (data->cd_format != CRYPTO_DATA_RAW &&
	    data->cd_format != CRYPTO_DATA_UIO &&
	    data->cd_format != CRYPTO_DATA_MBLK) {
		return (CRYPTO_ARGUMENTS_BAD);
	}

	/*
	 * There must be no unprocessed plaintext.
	 * This happens if the length of the last data is
	 * not a multiple of the AES block length.
	 */
	if (aes_ctx->ac_remainder_len > 0) {
		if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
			return (CRYPTO_DATA_LEN_RANGE);
		else {
			ret = aes_counter_final(aes_ctx, data);
			if (ret != CRYPTO_SUCCESS)
				return (ret);
		}
	}

	if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
		data->cd_length = 0;

	(void) aes_free_context(ctx);

/* EXPORT DELETE END */

	return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
static int
aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{

/* EXPORT DELETE START */

	aes_ctx_t *aes_ctx;
	int ret;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	if (data->cd_format != CRYPTO_DATA_RAW &&
	    data->cd_format != CRYPTO_DATA_UIO &&
	    data->cd_format != CRYPTO_DATA_MBLK) {
		return (CRYPTO_ARGUMENTS_BAD);
	}

	/*
	 * There must be no unprocessed ciphertext.
	 * This happens if the length of the last ciphertext is
	 * not a multiple of the AES block length.
	 */
	if (aes_ctx->ac_remainder_len > 0) {
		if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
			return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
		else {
			ret = aes_counter_final(aes_ctx, data);
			if (ret != CRYPTO_SUCCESS)
				return (ret);
		}
	}

	if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
		data->cd_length = 0;

	(void) aes_free_context(ctx);

/* EXPORT DELETE END */

	return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
static int
aes_encrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
    crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
	aes_ctx_t aes_ctx;	/* on the stack */
	off_t saved_offset;
	size_t saved_length;
	int ret;

	AES_ARG_INPLACE(plaintext, ciphertext);

	if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
		/*
		 * Plaintext must be a multiple of AES block size.
		 * This test only works for non-padded mechanisms
		 * when blocksize is 2^N.
		 */
		if ((plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
			return (CRYPTO_DATA_LEN_RANGE);
	}

	/* return length needed to store the output */
	if (ciphertext->cd_length < plaintext->cd_length) {
		ciphertext->cd_length = plaintext->cd_length;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	if (!AES_VALID_MECH(mechanism))
		return (CRYPTO_MECHANISM_INVALID);

	if (mechanism->cm_param != NULL) {
		if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
			if (mechanism->cm_param_len !=
			    sizeof (CK_AES_CTR_PARAMS))
				return (CRYPTO_MECHANISM_PARAM_INVALID);
		} else {
			if (mechanism->cm_param_len != AES_BLOCK_LEN)
				return (CRYPTO_MECHANISM_PARAM_INVALID);
		}
	}

	bzero(&aes_ctx, sizeof (aes_ctx_t));

	ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
	    crypto_kmflag(req));
	if (ret != CRYPTO_SUCCESS)
		return (ret);

	saved_offset = ciphertext->cd_offset;
	saved_length = ciphertext->cd_length;

	/*
	 * Do an update on the specified input data.
	 */
	switch (plaintext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = aes_cipher_update_iov(&aes_ctx, plaintext, ciphertext,
		    aes_encrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_UIO:
		ret = aes_cipher_update_uio(&aes_ctx, plaintext, ciphertext,
		    aes_encrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_MBLK:
		ret = aes_cipher_update_mp(&aes_ctx, plaintext, ciphertext,
		    aes_encrypt_contiguous_blocks);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
			ASSERT(aes_ctx.ac_remainder_len == 0);
			if (plaintext != ciphertext)
				ciphertext->cd_length =
				    ciphertext->cd_offset - saved_offset;
		} else {
			if (aes_ctx.ac_remainder_len > 0) {
				ret = aes_counter_final(&aes_ctx, ciphertext);
				if (ret != CRYPTO_SUCCESS)
					goto out;
			}
			if (plaintext != ciphertext)
				ciphertext->cd_length =
				    ciphertext->cd_offset - saved_offset;
		}
	} else {
		ciphertext->cd_length = saved_length;
	}
	ciphertext->cd_offset = saved_offset;

out:
	if (aes_ctx.ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
		bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
		kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
	}

	return (ret);
}

/* ARGSUSED */
static int
aes_decrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
    crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
	aes_ctx_t aes_ctx;	/* on the stack */
	off_t saved_offset;
	size_t saved_length;
	int ret;

	AES_ARG_INPLACE(ciphertext, plaintext);

	if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
		/*
		 * Ciphertext must be a multiple of AES block size.
		 * This test only works for non-padded mechanisms
		 * when blocksize is 2^N.
		 */
		if ((ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
			return (CRYPTO_DATA_LEN_RANGE);
	}

	/* return length needed to store the output */
	if (plaintext->cd_length < ciphertext->cd_length) {
		plaintext->cd_length = ciphertext->cd_length;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	if (!AES_VALID_MECH(mechanism))
		return (CRYPTO_MECHANISM_INVALID);

	if (mechanism->cm_param != NULL) {
		if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
			if (mechanism->cm_param_len !=
			    sizeof (CK_AES_CTR_PARAMS))
				return (CRYPTO_MECHANISM_PARAM_INVALID);
		} else {
			if (mechanism->cm_param_len != AES_BLOCK_LEN)
				return (CRYPTO_MECHANISM_PARAM_INVALID);
		}
	}

	bzero(&aes_ctx, sizeof (aes_ctx_t));

	ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
	    crypto_kmflag(req));
	if (ret != CRYPTO_SUCCESS)
		return (ret);

	saved_offset = plaintext->cd_offset;
	saved_length = plaintext->cd_length;

	/*
	 * Do an update on the specified input data.
	 */
	switch (ciphertext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = aes_cipher_update_iov(&aes_ctx, ciphertext, plaintext,
		    aes_decrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_UIO:
		ret = aes_cipher_update_uio(&aes_ctx, ciphertext, plaintext,
		    aes_decrypt_contiguous_blocks);
		break;
	case CRYPTO_DATA_MBLK:
		ret = aes_cipher_update_mp(&aes_ctx, ciphertext, plaintext,
		    aes_decrypt_contiguous_blocks);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
			ASSERT(aes_ctx.ac_remainder_len == 0);
			if (ciphertext != plaintext)
				plaintext->cd_length =
				    plaintext->cd_offset - saved_offset;
		} else {
			if (aes_ctx.ac_remainder_len > 0) {
				ret = aes_counter_final(&aes_ctx, plaintext);
				if (ret != CRYPTO_SUCCESS)
					goto out;
			}
			if (ciphertext != plaintext)
				plaintext->cd_length =
				    plaintext->cd_offset - saved_offset;
		}
	} else {
		plaintext->cd_length = saved_length;
	}
	plaintext->cd_offset = saved_offset;

out:
	if (aes_ctx.ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
		bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
		kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
	}

	return (ret);
}

/*
 * KCF software provider context template entry points.
 */
/* ARGSUSED */
static int
aes_create_ctx_template(crypto_provider_handle_t provider,
    crypto_mechanism_t *mechanism, crypto_key_t *key,
    crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size, crypto_req_handle_t req)
{

/* EXPORT DELETE START */

	void *keysched;
	size_t size;
	int rv;

	if (!AES_VALID_MECH(mechanism))
		return (CRYPTO_MECHANISM_INVALID);

	if ((keysched = aes_alloc_keysched(&size,
	    crypto_kmflag(req))) == NULL) {
		return (CRYPTO_HOST_MEMORY);
	}

	/*
	 * Initialize key schedule. Key length information is stored
	 * in the key.
	 */
	if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
		bzero(keysched, size);
		kmem_free(keysched, size);
		return (rv);
	}

	*tmpl = keysched;
	*tmpl_size = size;

/* EXPORT DELETE END */

	return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
static int
aes_free_context(crypto_ctx_t *ctx)
{

/* EXPORT DELETE START */

	aes_ctx_t *aes_ctx = ctx->cc_provider_private;

	if (aes_ctx != NULL) {
		if (aes_ctx->ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
			ASSERT(aes_ctx->ac_keysched_len != 0);
			bzero(aes_ctx->ac_keysched, aes_ctx->ac_keysched_len);
			kmem_free(aes_ctx->ac_keysched,
			    aes_ctx->ac_keysched_len);
		}
		kmem_free(aes_ctx, sizeof (aes_ctx_t));
		ctx->cc_provider_private = NULL;
	}

/* EXPORT DELETE END */

	return (CRYPTO_SUCCESS);
}
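
/*
 * aes_common_init_ctx() records the mode (ECB, CBC or CTR) and any IV or
 * counter parameters in the context, then either borrows the caller's
 * pre-computed key schedule template or allocates and initializes its own.
 * In the latter case AES_PROVIDER_OWNS_KEY_SCHEDULE is set so the schedule
 * is zeroed and freed along with the context.
 */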

/* ARGSUSED */
static int
aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
    crypto_mechanism_t *mechanism, crypto_key_t *key, int kmflag)
{
	int rv = CRYPTO_SUCCESS;

/* EXPORT DELETE START */

	void *keysched;
	size_t size;

	aes_ctx->ac_flags = 0;

	if (mechanism->cm_type == AES_CBC_MECH_INFO_TYPE) {
		/*
		 * Copy 128-bit IV into context.
		 *
		 * If cm_param == NULL then the IV comes from the
		 * cd_miscdata field in the crypto_data structure.
		 */
		if (mechanism->cm_param != NULL) {
			ASSERT(mechanism->cm_param_len == AES_BLOCK_LEN);
			if (IS_P2ALIGNED(mechanism->cm_param,
			    sizeof (uint64_t))) {
				uint64_t *param64;
				param64 = (uint64_t *)mechanism->cm_param;

				aes_ctx->ac_iv[0] = *param64++;
				aes_ctx->ac_iv[1] = *param64;
			} else {
				uint8_t *iv8;
				uint8_t *p8;
				iv8 = (uint8_t *)&aes_ctx->ac_iv;
				p8 = (uint8_t *)&mechanism->cm_param[0];

				iv8[0] = p8[0];
				iv8[1] = p8[1];
				iv8[2] = p8[2];
				iv8[3] = p8[3];
				iv8[4] = p8[4];
				iv8[5] = p8[5];
				iv8[6] = p8[6];
				iv8[7] = p8[7];
				iv8[8] = p8[8];
				iv8[9] = p8[9];
				iv8[10] = p8[10];
				iv8[11] = p8[11];
				iv8[12] = p8[12];
				iv8[13] = p8[13];
				iv8[14] = p8[14];
				iv8[15] = p8[15];
			}
		}

		aes_ctx->ac_lastp = (uint8_t *)&aes_ctx->ac_iv[0];
		aes_ctx->ac_flags |= AES_CBC_MODE;

	} else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
		if (mechanism->cm_param != NULL) {
			CK_AES_CTR_PARAMS *pp;
			uint64_t mask = 0;
			ulong_t count;
			uint8_t *iv8;
			uint8_t *p8;

			pp = (CK_AES_CTR_PARAMS *)mechanism->cm_param;
			iv8 = (uint8_t *)&aes_ctx->ac_iv;
			p8 = (uint8_t *)&pp->cb[0];

			/* XXX what to do about miscdata */
			count = pp->ulCounterBits;
			if (count == 0 || count > 64) {
				return (CRYPTO_MECHANISM_PARAM_INVALID);
			}
			/* set the low-order ulCounterBits bits of the mask */
			while (count-- > 0)
				mask |= (1ULL << count);

			aes_ctx->ac_counter_mask = mask;

			iv8[0] = p8[0];
			iv8[1] = p8[1];
			iv8[2] = p8[2];
			iv8[3] = p8[3];
			iv8[4] = p8[4];
			iv8[5] = p8[5];
			iv8[6] = p8[6];
			iv8[7] = p8[7];
			iv8[8] = p8[8];
			iv8[9] = p8[9];
			iv8[10] = p8[10];
			iv8[11] = p8[11];
			iv8[12] = p8[12];
			iv8[13] = p8[13];
			iv8[14] = p8[14];
			iv8[15] = p8[15];
		} else {
			return (CRYPTO_MECHANISM_PARAM_INVALID);
		}

		aes_ctx->ac_lastp = (uint8_t *)&aes_ctx->ac_iv[0];
		aes_ctx->ac_flags |= AES_CTR_MODE;
	} else {
		aes_ctx->ac_flags |= AES_ECB_MODE;
	}

	if (template == NULL) {
		if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL)
			return (CRYPTO_HOST_MEMORY);
		/*
		 * Initialize key schedule.
		 * Key length is stored in the key.
		 */
		if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS)
			kmem_free(keysched, size);

		aes_ctx->ac_flags |= AES_PROVIDER_OWNS_KEY_SCHEDULE;
		aes_ctx->ac_keysched_len = size;
	} else {
		keysched = template;
	}
	aes_ctx->ac_keysched = keysched;

/* EXPORT DELETE END */

	return (rv);
}