/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * AES provider for the Kernel Cryptographic Framework (KCF)
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/spi.h>
#include <sys/sysmacros.h>
#include <sys/strsun.h>
#include <aes_impl.h>
#include <aes_cbc_crypt.h>

extern struct mod_ops mod_cryptoops;

/*
 * Module linkage information for the kernel.
 */
static struct modlcrypto modlcrypto = {
        &mod_cryptoops,
        "AES Kernel SW Provider %I%"
};

static struct modlinkage modlinkage = {
        MODREV_1,
        (void *)&modlcrypto,
        NULL
};

/*
 * CSPI information (entry points, provider info, etc.)
 */
typedef enum aes_mech_type {
        AES_ECB_MECH_INFO_TYPE,         /* SUN_CKM_AES_ECB */
        AES_CBC_MECH_INFO_TYPE,         /* SUN_CKM_AES_CBC */
        AES_CBC_PAD_MECH_INFO_TYPE,     /* SUN_CKM_AES_CBC_PAD */
        AES_CTR_MECH_INFO_TYPE          /* SUN_CKM_AES_CTR */
} aes_mech_type_t;

/*
 * The following definitions are to keep EXPORT_SRC happy.
 */
#ifndef AES_MIN_KEY_BYTES
#define	AES_MIN_KEY_BYTES	0
#endif

#ifndef AES_MAX_KEY_BYTES
#define	AES_MAX_KEY_BYTES	0
#endif

/*
 * Mechanism info structure passed to KCF during registration.
 */
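/*
 * Note: AES_CBC_PAD_MECH_INFO_TYPE is declared in the enum above but no
 * CKM_AES_CBC_PAD entry appears in this table, so only ECB, CBC and CTR
 * are actually registered with KCF.  Key sizes below are expressed in
 * bytes (CRYPTO_KEYSIZE_UNIT_IN_BYTES).
 */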
static crypto_mech_info_t aes_mech_info_tab[] = {
        /* AES_ECB */
        {SUN_CKM_AES_ECB, AES_ECB_MECH_INFO_TYPE,
            CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
            CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
            AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
        /* AES_CBC */
        {SUN_CKM_AES_CBC, AES_CBC_MECH_INFO_TYPE,
            CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
            CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
            AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
        /* AES_CTR */
        {SUN_CKM_AES_CTR, AES_CTR_MECH_INFO_TYPE,
            CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
            CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
            AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};

/* operations are in-place if the output buffer is NULL */
#define	AES_ARG_INPLACE(input, output)			\
        if ((output) == NULL)				\
                (output) = (input);

static void aes_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t aes_control_ops = {
        aes_provider_status
};

static int aes_common_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_common_init_ctx(aes_ctx_t *, crypto_spi_ctx_template_t *,
    crypto_mechanism_t *, crypto_key_t *, int);
static int aes_encrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_decrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);

static int aes_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_encrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int aes_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static int aes_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_decrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int aes_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_cipher_ops_t aes_cipher_ops = {
        aes_common_init,
        aes_encrypt,
        aes_encrypt_update,
        aes_encrypt_final,
        aes_encrypt_atomic,
        aes_common_init,
        aes_decrypt,
        aes_decrypt_update,
        aes_decrypt_final,
        aes_decrypt_atomic
};

static int aes_create_ctx_template(crypto_provider_handle_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
    size_t *, crypto_req_handle_t);
static int aes_free_context(crypto_ctx_t *);

static crypto_ctx_ops_t aes_ctx_ops = {
        aes_create_ctx_template,
        aes_free_context
};

static crypto_ops_t aes_crypto_ops = {
        &aes_control_ops,
        NULL,
        &aes_cipher_ops,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        &aes_ctx_ops
};

static crypto_provider_info_t aes_prov_info = {
        CRYPTO_SPI_VERSION_1,
        "AES Software Provider",
        CRYPTO_SW_PROVIDER,
        {&modlinkage},
        NULL,
        &aes_crypto_ops,
        sizeof (aes_mech_info_tab)/sizeof (crypto_mech_info_t),
        aes_mech_info_tab
};

static crypto_kcf_provider_handle_t aes_prov_handle = NULL;

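/*
 * Loadable module entry points.  The provider registers with KCF before
 * calling mod_install() so it is usable as soon as the module is loaded;
 * if mod_install() fails, the registration is undone, retrying while KCF
 * still reports the provider as busy.
 */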
int
_init(void)
{
        int ret;

        /*
         * Register with KCF. If the registration fails, return error.
         */
        if ((ret = crypto_register_provider(&aes_prov_info,
            &aes_prov_handle)) != CRYPTO_SUCCESS) {
                cmn_err(CE_WARN, "%s _init: crypto_register_provider() "
                    "failed (0x%x)", CRYPTO_PROVIDER_NAME, ret);
                return (EACCES);
        }

        if ((ret = mod_install(&modlinkage)) != 0) {
                int rv;

                ASSERT(aes_prov_handle != NULL);
                /* We should not return if the unregister returns busy. */
                while ((rv = crypto_unregister_provider(aes_prov_handle))
                    == CRYPTO_BUSY) {
                        cmn_err(CE_WARN,
                            "%s _init: crypto_unregister_provider() "
                            "failed (0x%x). Retrying.",
                            CRYPTO_PROVIDER_NAME, rv);
                        /* wait 10 seconds and try again. */
                        delay(10 * drv_usectohz(1000000));
                }
        }

        return (ret);
}

int
_fini(void)
{
        int ret;

        /*
         * Unregister from KCF if previous registration succeeded.
         */
        if (aes_prov_handle != NULL) {
                if ((ret = crypto_unregister_provider(aes_prov_handle)) !=
                    CRYPTO_SUCCESS) {
                        cmn_err(CE_WARN,
                            "%s _fini: crypto_unregister_provider() "
                            "failed (0x%x)", CRYPTO_PROVIDER_NAME, ret);
                        return (EBUSY);
                }
                aes_prov_handle = NULL;
        }

        return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}


static int
aes_check_mech_param(crypto_mechanism_t *mechanism)
{
        int rv = CRYPTO_SUCCESS;

        switch (mechanism->cm_type) {
        case AES_ECB_MECH_INFO_TYPE:
                /* no parameter */
                break;
        case AES_CBC_MECH_INFO_TYPE:
                if (mechanism->cm_param != NULL &&
                    mechanism->cm_param_len != AES_BLOCK_LEN)
                        rv = CRYPTO_MECHANISM_PARAM_INVALID;
                break;
        case AES_CTR_MECH_INFO_TYPE:
                if (mechanism->cm_param != NULL &&
                    mechanism->cm_param_len != sizeof (CK_AES_CTR_PARAMS))
                        rv = CRYPTO_MECHANISM_PARAM_INVALID;
                break;
        default:
                rv = CRYPTO_MECHANISM_INVALID;
        }
        return (rv);
}

/* EXPORT DELETE START */

/*
 * Initialize key schedules for AES
 */
static int
init_keysched(crypto_key_t *key, void *newbie)
{
        /*
         * Only keys by value are supported by this module.
         */
        switch (key->ck_format) {
        case CRYPTO_KEY_RAW:
                if (key->ck_length < AES_MINBITS ||
                    key->ck_length > AES_MAXBITS) {
                        return (CRYPTO_KEY_SIZE_RANGE);
                }

                /* key length must be either 128, 192, or 256 */
                if ((key->ck_length & 63) != 0)
                        return (CRYPTO_KEY_SIZE_RANGE);
                break;
        default:
                return (CRYPTO_KEY_TYPE_INCONSISTENT);
        }

        aes_init_keysched(key->ck_data, key->ck_length, newbie);
        return (CRYPTO_SUCCESS);
}

/* EXPORT DELETE END */

/*
 * KCF software provider control entry points.
 */
/* ARGSUSED */
static void
aes_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
        *status = CRYPTO_PROVIDER_READY;
}

/*
 * KCF software provider encrypt entry points.
 */
static int
aes_common_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t template,
    crypto_req_handle_t req)
{

/* EXPORT DELETE START */

        aes_ctx_t *aes_ctx;
        int rv;
        int kmflag;

        /*
         * Only keys by value are supported by this module.
         */
        if (key->ck_format != CRYPTO_KEY_RAW) {
                return (CRYPTO_KEY_TYPE_INCONSISTENT);
        }

        if ((rv = aes_check_mech_param(mechanism)) != CRYPTO_SUCCESS)
                return (rv);

        /*
         * Allocate an AES context.
         */
        kmflag = crypto_kmflag(req);
        if ((aes_ctx = kmem_zalloc(sizeof (aes_ctx_t), kmflag)) == NULL)
                return (CRYPTO_HOST_MEMORY);

        rv = aes_common_init_ctx(aes_ctx, template, mechanism, key, kmflag);
        if (rv != CRYPTO_SUCCESS) {
                kmem_free(aes_ctx, sizeof (aes_ctx_t));
                return (rv);
        }

        ctx->cc_provider_private = aes_ctx;

/* EXPORT DELETE END */

        return (CRYPTO_SUCCESS);
}

/*
 * Helper AES encrypt/decrypt update function for raw iovec input data.
 */
static int
aes_cipher_update_iov(aes_ctx_t *aes_ctx, crypto_data_t *input,
    crypto_data_t *output, int (*cipher)(aes_ctx_t *, caddr_t, size_t,
    crypto_data_t *))
{
        int rv;
/* EXPORT DELETE START */

        if (input->cd_miscdata != NULL) {
                if (IS_P2ALIGNED(input->cd_miscdata, sizeof (uint64_t))) {
                        /* LINTED: pointer alignment */
                        aes_ctx->ac_iv[0] = *(uint64_t *)input->cd_miscdata;
                        /* LINTED: pointer alignment */
                        aes_ctx->ac_iv[1] = *(uint64_t *)&input->cd_miscdata[8];
                } else {
                        uint8_t *miscdata8 = (uint8_t *)&input->cd_miscdata[0];
                        uint8_t *iv8 = (uint8_t *)&aes_ctx->ac_iv[0];

                        AES_COPY_BLOCK(miscdata8, iv8);
                }
        }

        if (input->cd_raw.iov_len < input->cd_length)
                return (CRYPTO_ARGUMENTS_BAD);

        rv = (cipher)(aes_ctx, input->cd_raw.iov_base + input->cd_offset,
            input->cd_length, (input == output) ? NULL : output);

/* EXPORT DELETE END */

        return (rv);
}

/*
 * Helper AES encrypt/decrypt update function for uio input data.
 */
static int
aes_cipher_update_uio(aes_ctx_t *aes_ctx, crypto_data_t *input,
    crypto_data_t *output, int (*cipher)(aes_ctx_t *, caddr_t, size_t,
    crypto_data_t *))
{
/* EXPORT DELETE START */
        uio_t *uiop = input->cd_uio;
        off_t offset = input->cd_offset;
        size_t length = input->cd_length;
        uint_t vec_idx;
        size_t cur_len;

        if (input->cd_miscdata != NULL) {
                if (IS_P2ALIGNED(input->cd_miscdata, sizeof (uint64_t))) {
                        /* LINTED: pointer alignment */
                        aes_ctx->ac_iv[0] = *(uint64_t *)input->cd_miscdata;
                        /* LINTED: pointer alignment */
                        aes_ctx->ac_iv[1] = *(uint64_t *)&input->cd_miscdata[8];
                } else {
                        uint8_t *miscdata8 = (uint8_t *)&input->cd_miscdata[0];
                        uint8_t *iv8 = (uint8_t *)&aes_ctx->ac_iv[0];

                        AES_COPY_BLOCK(miscdata8, iv8);
                }
        }

        if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
                return (CRYPTO_ARGUMENTS_BAD);
        }

        /*
         * Jump to the first iovec containing data to be
         * processed.
         */
        for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
            offset >= uiop->uio_iov[vec_idx].iov_len;
            offset -= uiop->uio_iov[vec_idx++].iov_len);
        if (vec_idx == uiop->uio_iovcnt) {
                /*
                 * The caller specified an offset that is larger than the
                 * total size of the buffers it provided.
                 */
                return (CRYPTO_DATA_LEN_RANGE);
        }

        /*
         * Now process the iovecs.
         */
        while (vec_idx < uiop->uio_iovcnt && length > 0) {
                cur_len = MIN(uiop->uio_iov[vec_idx].iov_len -
                    offset, length);

                (cipher)(aes_ctx, uiop->uio_iov[vec_idx].iov_base + offset,
                    cur_len, (input == output) ? NULL : output);

                length -= cur_len;
                vec_idx++;
                offset = 0;
        }

        if (vec_idx == uiop->uio_iovcnt && length > 0) {
                /*
                 * The end of the specified iovecs was reached but the
                 * length requested could not be processed, i.e. the
                 * caller requested to process more data than it provided.
                 */
                return (CRYPTO_DATA_LEN_RANGE);
        }

/* EXPORT DELETE END */

        return (CRYPTO_SUCCESS);
}

/*
 * Helper AES encrypt/decrypt update function for mblk input data.
 */
static int
aes_cipher_update_mp(aes_ctx_t *aes_ctx, crypto_data_t *input,
    crypto_data_t *output, int (*cipher)(aes_ctx_t *, caddr_t, size_t,
    crypto_data_t *))
{
/* EXPORT DELETE START */
        off_t offset = input->cd_offset;
        size_t length = input->cd_length;
        mblk_t *mp;
        size_t cur_len;

        if (input->cd_miscdata != NULL) {
                if (IS_P2ALIGNED(input->cd_miscdata, sizeof (uint64_t))) {
                        /* LINTED: pointer alignment */
                        aes_ctx->ac_iv[0] = *(uint64_t *)input->cd_miscdata;
                        /* LINTED: pointer alignment */
                        aes_ctx->ac_iv[1] = *(uint64_t *)&input->cd_miscdata[8];
                } else {
                        uint8_t *miscdata8 = (uint8_t *)&input->cd_miscdata[0];
                        uint8_t *iv8 = (uint8_t *)&aes_ctx->ac_iv[0];

                        AES_COPY_BLOCK(miscdata8, iv8);
                }
        }

        /*
         * Jump to the first mblk_t containing data to be processed.
         */
        for (mp = input->cd_mp; mp != NULL && offset >= MBLKL(mp);
            offset -= MBLKL(mp), mp = mp->b_cont);
        if (mp == NULL) {
                /*
                 * The caller specified an offset that is larger than the
                 * total size of the buffers it provided.
                 */
                return (CRYPTO_DATA_LEN_RANGE);
        }

        /*
         * Now do the processing on the mblk chain.
         */
        while (mp != NULL && length > 0) {
                cur_len = MIN(MBLKL(mp) - offset, length);
                (cipher)(aes_ctx, (char *)(mp->b_rptr + offset), cur_len,
                    (input == output) ? NULL : output);

                length -= cur_len;
                offset = 0;
                mp = mp->b_cont;
        }

        if (mp == NULL && length > 0) {
                /*
                 * The end of the mblk chain was reached but the length
                 * requested could not be processed, i.e. the caller
                 * requested to process more data than it provided.
                 */
                return (CRYPTO_DATA_LEN_RANGE);
        }

/* EXPORT DELETE END */

        return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
static int
aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
        int ret = CRYPTO_FAILED;

/* EXPORT DELETE START */

        aes_ctx_t *aes_ctx;

        ASSERT(ctx->cc_provider_private != NULL);
        aes_ctx = ctx->cc_provider_private;

        /*
         * For block ciphers, plaintext must be a multiple of AES block size.
         * This test is only valid for ciphers whose blocksize is a power of 2.
         */
        if (((aes_ctx->ac_flags & AES_CTR_MODE) == 0) &&
            (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
                return (CRYPTO_DATA_LEN_RANGE);

        AES_ARG_INPLACE(plaintext, ciphertext);

        /*
         * We need to just return the length needed to store the output.
         * We should not destroy the context for the following case.
         */
        if (ciphertext->cd_length < plaintext->cd_length) {
                ciphertext->cd_length = plaintext->cd_length;
                return (CRYPTO_BUFFER_TOO_SMALL);
        }

        /*
         * Do an update on the specified input data.
         */
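        /*
         * A single-part encrypt is implemented as one update followed by
         * an implicit final: the length checks above (or the CTR handling
         * inside the update) guarantee that no partial block can remain,
         * which is what the ASSERT below verifies before the context is
         * freed.
         */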
        ret = aes_encrypt_update(ctx, plaintext, ciphertext, req);
        ASSERT(aes_ctx->ac_remainder_len == 0);
        (void) aes_free_context(ctx);

/* EXPORT DELETE END */

        /* LINTED */
        return (ret);
}

/* ARGSUSED */
static int
aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
        int ret = CRYPTO_FAILED;

/* EXPORT DELETE START */

        aes_ctx_t *aes_ctx;

        ASSERT(ctx->cc_provider_private != NULL);
        aes_ctx = ctx->cc_provider_private;

        /*
         * For block ciphers, ciphertext must be a multiple of AES block size.
         * This test is only valid for ciphers whose blocksize is a power of 2.
         */
        if (((aes_ctx->ac_flags & AES_CTR_MODE) == 0) &&
            (ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
                return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);

        AES_ARG_INPLACE(ciphertext, plaintext);

        /*
         * We need to just return the length needed to store the output.
         * We should not destroy the context for the following case.
         */
        if (plaintext->cd_length < ciphertext->cd_length) {
                plaintext->cd_length = ciphertext->cd_length;
                return (CRYPTO_BUFFER_TOO_SMALL);
        }

        /*
         * Do an update on the specified input data.
         */
        ret = aes_decrypt_update(ctx, ciphertext, plaintext, req);
        ASSERT(aes_ctx->ac_remainder_len == 0);
        (void) aes_free_context(ctx);

/* EXPORT DELETE END */

        /* LINTED */
        return (ret);
}

/* ARGSUSED */
static int
aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
        off_t saved_offset;
        size_t saved_length, out_len;
        int ret = CRYPTO_SUCCESS;
        aes_ctx_t *aes_ctx;

        ASSERT(ctx->cc_provider_private != NULL);

        AES_ARG_INPLACE(plaintext, ciphertext);

        /* compute number of bytes that will hold the ciphertext */
        out_len = ((aes_ctx_t *)ctx->cc_provider_private)->ac_remainder_len;
        out_len += plaintext->cd_length;
        out_len &= ~(AES_BLOCK_LEN - 1);

        /* return length needed to store the output */
        if (ciphertext->cd_length < out_len) {
                ciphertext->cd_length = out_len;
                return (CRYPTO_BUFFER_TOO_SMALL);
        }

        saved_offset = ciphertext->cd_offset;
        saved_length = ciphertext->cd_length;

        /*
         * Do the AES update on the specified input data.
         */
        switch (plaintext->cd_format) {
        case CRYPTO_DATA_RAW:
                ret = aes_cipher_update_iov(ctx->cc_provider_private,
                    plaintext, ciphertext, aes_encrypt_contiguous_blocks);
                break;
        case CRYPTO_DATA_UIO:
                ret = aes_cipher_update_uio(ctx->cc_provider_private,
                    plaintext, ciphertext, aes_encrypt_contiguous_blocks);
                break;
        case CRYPTO_DATA_MBLK:
                ret = aes_cipher_update_mp(ctx->cc_provider_private,
                    plaintext, ciphertext, aes_encrypt_contiguous_blocks);
                break;
        default:
                ret = CRYPTO_ARGUMENTS_BAD;
        }

        /*
         * Since AES counter mode is a stream cipher, we call
         * aes_counter_final() to pick up any remaining bytes.
         * It is an internal function that does not destroy
         * the context like *normal* final routines.
         */
        aes_ctx = ctx->cc_provider_private;
        if ((aes_ctx->ac_flags & AES_CTR_MODE) &&
            (aes_ctx->ac_remainder_len > 0)) {
                ret = aes_counter_final(aes_ctx, ciphertext);
        }

        if (ret == CRYPTO_SUCCESS) {
                if (plaintext != ciphertext)
                        ciphertext->cd_length =
                            ciphertext->cd_offset - saved_offset;
        } else {
                ciphertext->cd_length = saved_length;
        }
        ciphertext->cd_offset = saved_offset;

        return (ret);
}

/* ARGSUSED */
static int
aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
        off_t saved_offset;
        size_t saved_length, out_len;
        int ret = CRYPTO_SUCCESS;
        aes_ctx_t *aes_ctx;

        ASSERT(ctx->cc_provider_private != NULL);

        AES_ARG_INPLACE(ciphertext, plaintext);

        /* compute number of bytes that will hold the plaintext */
        out_len = ((aes_ctx_t *)ctx->cc_provider_private)->ac_remainder_len;
        out_len += ciphertext->cd_length;
        out_len &= ~(AES_BLOCK_LEN - 1);

        /* return length needed to store the output */
        if (plaintext->cd_length < out_len) {
                plaintext->cd_length = out_len;
                return (CRYPTO_BUFFER_TOO_SMALL);
        }

        saved_offset = plaintext->cd_offset;
        saved_length = plaintext->cd_length;

        /*
         * Do the AES update on the specified input data.
         */
        switch (ciphertext->cd_format) {
        case CRYPTO_DATA_RAW:
                ret = aes_cipher_update_iov(ctx->cc_provider_private,
                    ciphertext, plaintext, aes_decrypt_contiguous_blocks);
                break;
        case CRYPTO_DATA_UIO:
                ret = aes_cipher_update_uio(ctx->cc_provider_private,
                    ciphertext, plaintext, aes_decrypt_contiguous_blocks);
                break;
        case CRYPTO_DATA_MBLK:
                ret = aes_cipher_update_mp(ctx->cc_provider_private,
                    ciphertext, plaintext, aes_decrypt_contiguous_blocks);
                break;
        default:
                ret = CRYPTO_ARGUMENTS_BAD;
        }

        /*
         * Since AES counter mode is a stream cipher, we call
         * aes_counter_final() to pick up any remaining bytes.
         * It is an internal function that does not destroy
         * the context like *normal* final routines.
         */
        aes_ctx = ctx->cc_provider_private;
        if ((aes_ctx->ac_flags & AES_CTR_MODE) &&
            (aes_ctx->ac_remainder_len > 0)) {
                ret = aes_counter_final(aes_ctx, plaintext);
        }

        if (ret == CRYPTO_SUCCESS) {
                if (ciphertext != plaintext)
                        plaintext->cd_length =
                            plaintext->cd_offset - saved_offset;
        } else {
                plaintext->cd_length = saved_length;
        }
        plaintext->cd_offset = saved_offset;

        return (ret);
}

/* ARGSUSED */
static int
aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{

/* EXPORT DELETE START */

        aes_ctx_t *aes_ctx;
        int ret;

        ASSERT(ctx->cc_provider_private != NULL);
        aes_ctx = ctx->cc_provider_private;

        if (data->cd_format != CRYPTO_DATA_RAW &&
            data->cd_format != CRYPTO_DATA_UIO &&
            data->cd_format != CRYPTO_DATA_MBLK) {
                return (CRYPTO_ARGUMENTS_BAD);
        }

        /*
         * There must be no unprocessed plaintext.
         * This happens if the length of the last data is
         * not a multiple of the AES block length.
         */
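        /*
         * CTR mode is the exception: it is a stream cipher, so a trailing
         * partial block is simply flushed here with aes_counter_final()
         * rather than treated as an error.
         */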
        if (aes_ctx->ac_remainder_len > 0) {
                if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
                        return (CRYPTO_DATA_LEN_RANGE);
                else {
                        ret = aes_counter_final(aes_ctx, data);
                        if (ret != CRYPTO_SUCCESS)
                                return (ret);
                }
        }

        if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
                data->cd_length = 0;

        (void) aes_free_context(ctx);

/* EXPORT DELETE END */

        return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
static int
aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{

/* EXPORT DELETE START */

        aes_ctx_t *aes_ctx;
        int ret;

        ASSERT(ctx->cc_provider_private != NULL);
        aes_ctx = ctx->cc_provider_private;

        if (data->cd_format != CRYPTO_DATA_RAW &&
            data->cd_format != CRYPTO_DATA_UIO &&
            data->cd_format != CRYPTO_DATA_MBLK) {
                return (CRYPTO_ARGUMENTS_BAD);
        }

        /*
         * There must be no unprocessed ciphertext.
         * This happens if the length of the last ciphertext is
         * not a multiple of the AES block length.
         */
        if (aes_ctx->ac_remainder_len > 0) {
                if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
                        return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
                else {
                        ret = aes_counter_final(aes_ctx, data);
                        if (ret != CRYPTO_SUCCESS)
                                return (ret);
                }
        }

        if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
                data->cd_length = 0;

        (void) aes_free_context(ctx);

/* EXPORT DELETE END */

        return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
static int
aes_encrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
    crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
        aes_ctx_t aes_ctx;	/* on the stack */
        off_t saved_offset;
        size_t saved_length;
        int ret;

        AES_ARG_INPLACE(plaintext, ciphertext);

        if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
                /*
                 * Plaintext must be a multiple of AES block size.
                 * This test only works for non-padded mechanisms
                 * when blocksize is 2^N.
                 */
                if ((plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
                        return (CRYPTO_DATA_LEN_RANGE);
        }

        /* return length needed to store the output */
        if (ciphertext->cd_length < plaintext->cd_length) {
                ciphertext->cd_length = plaintext->cd_length;
                return (CRYPTO_BUFFER_TOO_SMALL);
        }

        if ((ret = aes_check_mech_param(mechanism)) != CRYPTO_SUCCESS)
                return (ret);

        bzero(&aes_ctx, sizeof (aes_ctx_t));

        ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
            crypto_kmflag(req));
        if (ret != CRYPTO_SUCCESS)
                return (ret);

        saved_offset = ciphertext->cd_offset;
        saved_length = ciphertext->cd_length;

        /*
         * Do an update on the specified input data.
         */
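        /*
         * The input may be presented as a single raw iovec, a uio with an
         * array of iovecs, or a STREAMS mblk chain; dispatch to the
         * matching update helper above.
         */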
        switch (plaintext->cd_format) {
        case CRYPTO_DATA_RAW:
                ret = aes_cipher_update_iov(&aes_ctx, plaintext, ciphertext,
                    aes_encrypt_contiguous_blocks);
                break;
        case CRYPTO_DATA_UIO:
                ret = aes_cipher_update_uio(&aes_ctx, plaintext, ciphertext,
                    aes_encrypt_contiguous_blocks);
                break;
        case CRYPTO_DATA_MBLK:
                ret = aes_cipher_update_mp(&aes_ctx, plaintext, ciphertext,
                    aes_encrypt_contiguous_blocks);
                break;
        default:
                ret = CRYPTO_ARGUMENTS_BAD;
        }

        if (ret == CRYPTO_SUCCESS) {
                if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
                        ASSERT(aes_ctx.ac_remainder_len == 0);
                        if (plaintext != ciphertext)
                                ciphertext->cd_length =
                                    ciphertext->cd_offset - saved_offset;
                } else {
                        if (aes_ctx.ac_remainder_len > 0) {
                                ret = aes_counter_final(&aes_ctx, ciphertext);
                                if (ret != CRYPTO_SUCCESS)
                                        goto out;
                        }
                        if (plaintext != ciphertext)
                                ciphertext->cd_length =
                                    ciphertext->cd_offset - saved_offset;
                }
        } else {
                ciphertext->cd_length = saved_length;
        }
        ciphertext->cd_offset = saved_offset;

out:
        if (aes_ctx.ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
                bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
                kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
        }

        return (ret);
}

/* ARGSUSED */
static int
aes_decrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
    crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
        aes_ctx_t aes_ctx;	/* on the stack */
        off_t saved_offset;
        size_t saved_length;
        int ret;

        AES_ARG_INPLACE(ciphertext, plaintext);

        if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
                /*
                 * Ciphertext must be a multiple of AES block size.
                 * This test only works for non-padded mechanisms
                 * when blocksize is 2^N.
                 */
                if ((ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
                        return (CRYPTO_DATA_LEN_RANGE);
        }

        /* return length needed to store the output */
        if (plaintext->cd_length < ciphertext->cd_length) {
                plaintext->cd_length = ciphertext->cd_length;
                return (CRYPTO_BUFFER_TOO_SMALL);
        }

        if ((ret = aes_check_mech_param(mechanism)) != CRYPTO_SUCCESS)
                return (ret);

        bzero(&aes_ctx, sizeof (aes_ctx_t));

        ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
            crypto_kmflag(req));
        if (ret != CRYPTO_SUCCESS)
                return (ret);

        saved_offset = plaintext->cd_offset;
        saved_length = plaintext->cd_length;

        /*
         * Do an update on the specified input data.
         */
        switch (ciphertext->cd_format) {
        case CRYPTO_DATA_RAW:
                ret = aes_cipher_update_iov(&aes_ctx, ciphertext, plaintext,
                    aes_decrypt_contiguous_blocks);
                break;
        case CRYPTO_DATA_UIO:
                ret = aes_cipher_update_uio(&aes_ctx, ciphertext, plaintext,
                    aes_decrypt_contiguous_blocks);
                break;
        case CRYPTO_DATA_MBLK:
                ret = aes_cipher_update_mp(&aes_ctx, ciphertext, plaintext,
                    aes_decrypt_contiguous_blocks);
                break;
        default:
                ret = CRYPTO_ARGUMENTS_BAD;
        }

        if (ret == CRYPTO_SUCCESS) {
                if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
                        ASSERT(aes_ctx.ac_remainder_len == 0);
                        if (ciphertext != plaintext)
                                plaintext->cd_length =
                                    plaintext->cd_offset - saved_offset;
                } else {
                        if (aes_ctx.ac_remainder_len > 0) {
                                ret = aes_counter_final(&aes_ctx, plaintext);
                                if (ret != CRYPTO_SUCCESS)
                                        goto out;
                        }
                        if (ciphertext != plaintext)
                                plaintext->cd_length =
                                    plaintext->cd_offset - saved_offset;
                }
        } else {
                plaintext->cd_length = saved_length;
        }
        plaintext->cd_offset = saved_offset;

out:
        if (aes_ctx.ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
                bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
                kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
        }

        return (ret);
}

/*
 * KCF software provider context template entry points.
 */
/* ARGSUSED */
static int
aes_create_ctx_template(crypto_provider_handle_t provider,
    crypto_mechanism_t *mechanism, crypto_key_t *key,
    crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size, crypto_req_handle_t req)
{

/* EXPORT DELETE START */

        void *keysched;
        size_t size;
        int rv;

        if (mechanism->cm_type != AES_ECB_MECH_INFO_TYPE &&
            mechanism->cm_type != AES_CBC_MECH_INFO_TYPE &&
            mechanism->cm_type != AES_CTR_MECH_INFO_TYPE)
                return (CRYPTO_MECHANISM_INVALID);

        if ((keysched = aes_alloc_keysched(&size,
            crypto_kmflag(req))) == NULL) {
                return (CRYPTO_HOST_MEMORY);
        }

        /*
         * Initialize key schedule.  Key length information is stored
         * in the key.
         */
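        /*
         * The context template handed back to KCF is simply the expanded
         * key schedule; aes_common_init_ctx() reuses it directly, so a
         * caller that performs many operations with the same key avoids
         * re-expanding the key for each one.
         */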
        if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
                bzero(keysched, size);
                kmem_free(keysched, size);
                return (rv);
        }

        *tmpl = keysched;
        *tmpl_size = size;

/* EXPORT DELETE END */

        return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
static int
aes_free_context(crypto_ctx_t *ctx)
{

/* EXPORT DELETE START */

        aes_ctx_t *aes_ctx = ctx->cc_provider_private;

        if (aes_ctx != NULL) {
                if (aes_ctx->ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
                        ASSERT(aes_ctx->ac_keysched_len != 0);
                        bzero(aes_ctx->ac_keysched, aes_ctx->ac_keysched_len);
                        kmem_free(aes_ctx->ac_keysched,
                            aes_ctx->ac_keysched_len);
                }
                kmem_free(aes_ctx, sizeof (aes_ctx_t));
                ctx->cc_provider_private = NULL;
        }

/* EXPORT DELETE END */

        return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
static int
aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
    crypto_mechanism_t *mechanism, crypto_key_t *key, int kmflag)
{
        int rv = CRYPTO_SUCCESS;

/* EXPORT DELETE START */

        void *keysched;
        size_t size;

        aes_ctx->ac_flags = 0;

        if (mechanism->cm_type == AES_CBC_MECH_INFO_TYPE) {
                /*
                 * Copy 128-bit IV into context.
                 *
                 * If cm_param == NULL then the IV comes from the
                 * cd_miscdata field in the crypto_data structure.
                 */
                if (mechanism->cm_param != NULL) {
                        ASSERT(mechanism->cm_param_len == AES_BLOCK_LEN);
                        if (IS_P2ALIGNED(mechanism->cm_param,
                            sizeof (uint64_t))) {
                                uint64_t *param64;
                                param64 = (uint64_t *)mechanism->cm_param;

                                aes_ctx->ac_iv[0] = *param64++;
                                aes_ctx->ac_iv[1] = *param64;
                        } else {
                                uint8_t *iv8;
                                uint8_t *p8;
                                iv8 = (uint8_t *)&aes_ctx->ac_iv;
                                p8 = (uint8_t *)&mechanism->cm_param[0];

                                iv8[0] = p8[0];
                                iv8[1] = p8[1];
                                iv8[2] = p8[2];
                                iv8[3] = p8[3];
                                iv8[4] = p8[4];
                                iv8[5] = p8[5];
                                iv8[6] = p8[6];
                                iv8[7] = p8[7];
                                iv8[8] = p8[8];
                                iv8[9] = p8[9];
                                iv8[10] = p8[10];
                                iv8[11] = p8[11];
                                iv8[12] = p8[12];
                                iv8[13] = p8[13];
                                iv8[14] = p8[14];
                                iv8[15] = p8[15];
                        }
                }

                aes_ctx->ac_lastp = (uint8_t *)&aes_ctx->ac_iv[0];
                aes_ctx->ac_flags |= AES_CBC_MODE;

        } else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
                if (mechanism->cm_param != NULL) {
                        CK_AES_CTR_PARAMS *pp;
                        uint64_t mask = 0;
                        ulong_t count;
                        uint8_t *iv8;
                        uint8_t *p8;

                        /* XXX what to do about miscdata */
                        pp = (CK_AES_CTR_PARAMS *)mechanism->cm_param;
                        count = pp->ulCounterBits;
                        if (count == 0 || count > 64) {
                                return (CRYPTO_MECHANISM_PARAM_INVALID);
                        }
                        while (count-- > 0)
                                mask |= (1ULL << count);
#ifdef _LITTLE_ENDIAN
                        p8 = (uint8_t *)&mask;
                        mask = (((uint64_t)p8[0] << 56) |
                            ((uint64_t)p8[1] << 48) |
                            ((uint64_t)p8[2] << 40) |
                            ((uint64_t)p8[3] << 32) |
                            ((uint64_t)p8[4] << 24) |
                            ((uint64_t)p8[5] << 16) |
                            ((uint64_t)p8[6] << 8) |
                            (uint64_t)p8[7]);
#endif
                        aes_ctx->ac_counter_mask = mask;
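                        /*
                         * Example: ulCounterBits == 32 yields a mask of
                         * 0x00000000ffffffffULL; on little-endian hosts the
                         * byte swap above stores it so that it selects the
                         * same low-order bytes of the big-endian counter
                         * block kept in ac_iv.
                         */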
                        iv8 = (uint8_t *)&aes_ctx->ac_iv;
                        p8 = (uint8_t *)&pp->cb[0];

                        iv8[0] = p8[0];
                        iv8[1] = p8[1];
                        iv8[2] = p8[2];
                        iv8[3] = p8[3];
                        iv8[4] = p8[4];
                        iv8[5] = p8[5];
                        iv8[6] = p8[6];
                        iv8[7] = p8[7];
                        iv8[8] = p8[8];
                        iv8[9] = p8[9];
                        iv8[10] = p8[10];
                        iv8[11] = p8[11];
                        iv8[12] = p8[12];
                        iv8[13] = p8[13];
                        iv8[14] = p8[14];
                        iv8[15] = p8[15];
                } else {
                        return (CRYPTO_MECHANISM_PARAM_INVALID);
                }

                aes_ctx->ac_lastp = (uint8_t *)&aes_ctx->ac_iv[0];
                aes_ctx->ac_flags |= AES_CTR_MODE;
        } else {
                aes_ctx->ac_flags |= AES_ECB_MODE;
        }

        if (template == NULL) {
                if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL)
                        return (CRYPTO_HOST_MEMORY);
                /*
                 * Initialize key schedule.
                 * Key length is stored in the key.
                 */
                if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
                        /*
                         * Don't leave the context pointing at a freed
                         * key schedule.
                         */
                        kmem_free(keysched, size);
                        return (rv);
                }

                aes_ctx->ac_flags |= AES_PROVIDER_OWNS_KEY_SCHEDULE;
                aes_ctx->ac_keysched_len = size;
        } else {
                keysched = template;
        }
        aes_ctx->ac_keysched = keysched;

/* EXPORT DELETE END */

        return (rv);
}