/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

/*
 * AES provider for the Kernel Cryptographic Framework (KCF)
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/spi.h>
#include <sys/sysmacros.h>
#include <sys/strsun.h>
#include <aes_impl.h>
#include <aes_cbc_crypt.h>

/* mod_ops vector for crypto modules, supplied by the kernel. */
extern struct mod_ops mod_cryptoops;

/*
 * Module linkage information for the kernel.
 */
static struct modlcrypto modlcrypto = {
    &mod_cryptoops,
    "AES Kernel SW Provider %I%"
};

static struct modlinkage modlinkage = {
    MODREV_1,
    (void *)&modlcrypto,
    NULL
};

/*
 * CSPI information (entry points, provider info, etc.)
 */
typedef enum aes_mech_type {
    AES_ECB_MECH_INFO_TYPE,     /* SUN_CKM_AES_ECB */
    AES_CBC_MECH_INFO_TYPE,     /* SUN_CKM_AES_CBC */
    /* NOTE: declared but not registered in aes_mech_info_tab below */
    AES_CBC_PAD_MECH_INFO_TYPE, /* SUN_CKM_AES_CBC_PAD */
    AES_CTR_MECH_INFO_TYPE      /* SUN_CKM_AES_CTR */
} aes_mech_type_t;

/*
 * The following definitions are to keep EXPORT_SRC happy.
 */
#ifndef AES_MIN_KEY_LEN
#define AES_MIN_KEY_LEN     0
#endif

#ifndef AES_MAX_KEY_LEN
#define AES_MAX_KEY_LEN     0
#endif

/*
 * Mechanism info structure passed to KCF during registration.
 */
static crypto_mech_info_t aes_mech_info_tab[] = {
    /* AES_ECB */
    {SUN_CKM_AES_ECB, AES_ECB_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
        CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
        AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
    /* AES_CBC */
    {SUN_CKM_AES_CBC, AES_CBC_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
        CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
        AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
    /* AES_CTR */
    {SUN_CKM_AES_CTR, AES_CTR_MECH_INFO_TYPE,
        CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
        CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
        AES_MIN_KEY_LEN, AES_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};

/*
 * operations are in-place if the output buffer is NULL
 * NOTE(review): expands to an unbraced if; safe only where no dangling-else
 * ambiguity can arise — all current uses are at statement level.
 */
#define AES_ARG_INPLACE(input, output)              \
    if ((output) == NULL)                   \
        (output) = (input);

static void aes_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t aes_control_ops = {
    aes_provider_status
};

/* Forward declarations for the cipher entry points registered below. */
static int aes_common_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_common_init_ctx(aes_ctx_t *, crypto_spi_ctx_template_t *,
    crypto_mechanism_t *, crypto_key_t *, int);
static int aes_encrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static
int aes_decrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);

static int aes_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_encrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int aes_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static int aes_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_decrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int aes_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

/* aes_common_init serves as both the encrypt-init and decrypt-init entry. */
static crypto_cipher_ops_t aes_cipher_ops = {
    aes_common_init,
    aes_encrypt,
    aes_encrypt_update,
    aes_encrypt_final,
    aes_encrypt_atomic,
    aes_common_init,
    aes_decrypt,
    aes_decrypt_update,
    aes_decrypt_final,
    aes_decrypt_atomic
};

static int aes_create_ctx_template(crypto_provider_handle_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
    size_t *, crypto_req_handle_t);
static int aes_free_context(crypto_ctx_t *);

static crypto_ctx_ops_t aes_ctx_ops = {
    aes_create_ctx_template,
    aes_free_context
};

/* Only control, cipher and ctx ops are provided; all other ops are NULL. */
static crypto_ops_t aes_crypto_ops = {
    &aes_control_ops,
    NULL,
    &aes_cipher_ops,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    NULL,
    &aes_ctx_ops
};

static crypto_provider_info_t aes_prov_info = {
    CRYPTO_SPI_VERSION_1,
    "AES Software Provider",
    CRYPTO_SW_PROVIDER,
    {&modlinkage},
    NULL,
    &aes_crypto_ops,
    sizeof (aes_mech_info_tab)/sizeof
(crypto_mech_info_t), 187 aes_mech_info_tab 188 }; 189 190 static crypto_kcf_provider_handle_t aes_prov_handle = NULL; 191 192 int 193 _init(void) 194 { 195 int ret; 196 197 /* 198 * Register with KCF. If the registration fails, return error. 199 */ 200 if ((ret = crypto_register_provider(&aes_prov_info, 201 &aes_prov_handle)) != CRYPTO_SUCCESS) { 202 cmn_err(CE_WARN, "%s _init: crypto_register_provider()" 203 "failed (0x%x)", CRYPTO_PROVIDER_NAME, ret); 204 return (EACCES); 205 } 206 207 if ((ret = mod_install(&modlinkage)) != 0) { 208 int rv; 209 210 ASSERT(aes_prov_handle != NULL); 211 /* We should not return if the unregister returns busy. */ 212 while ((rv = crypto_unregister_provider(aes_prov_handle)) 213 == CRYPTO_BUSY) { 214 cmn_err(CE_WARN, 215 "%s _init: crypto_unregister_provider() " 216 "failed (0x%x). Retrying.", 217 CRYPTO_PROVIDER_NAME, rv); 218 /* wait 10 seconds and try again. */ 219 delay(10 * drv_usectohz(1000000)); 220 } 221 } 222 223 return (ret); 224 } 225 226 int 227 _fini(void) 228 { 229 int ret; 230 231 /* 232 * Unregister from KCF if previous registration succeeded. 
 */
    if (aes_prov_handle != NULL) {
        if ((ret = crypto_unregister_provider(aes_prov_handle)) !=
            CRYPTO_SUCCESS) {
            cmn_err(CE_WARN,
                "%s _fini: crypto_unregister_provider() "
                "failed (0x%x)", CRYPTO_PROVIDER_NAME, ret);
            /* still registered with KCF: refuse the unload */
            return (EBUSY);
        }
        aes_prov_handle = NULL;
    }

    return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}


/*
 * Validate the mechanism parameter length for the supported mechanisms.
 * A NULL parameter is accepted here; its absence is handled later
 * (CBC falls back to cd_miscdata, CTR rejects it in aes_common_init_ctx).
 */
static int
aes_check_mech_param(crypto_mechanism_t *mechanism)
{
    int rv = CRYPTO_SUCCESS;

    switch (mechanism->cm_type) {
    case AES_ECB_MECH_INFO_TYPE:
        /* no parameter */
        break;
    case AES_CBC_MECH_INFO_TYPE:
        if (mechanism->cm_param != NULL &&
            mechanism->cm_param_len != AES_BLOCK_LEN)
            rv = CRYPTO_MECHANISM_PARAM_INVALID;
        break;
    case AES_CTR_MECH_INFO_TYPE:
        if (mechanism->cm_param != NULL &&
            mechanism->cm_param_len != sizeof (CK_AES_CTR_PARAMS))
            rv = CRYPTO_MECHANISM_PARAM_INVALID;
        break;
    default:
        rv = CRYPTO_MECHANISM_INVALID;
    }
    return (rv);
}

/* EXPORT DELETE START */

/*
 * Initialize key schedules for AES
 *
 * key:    raw key material; ck_length is in bits and must be 128/192/256.
 * newbie: caller-allocated key schedule to be filled in.
 */
static int
init_keysched(crypto_key_t *key, void *newbie)
{
    /*
     * Only keys by value are supported by this module.
     */
    switch (key->ck_format) {
    case CRYPTO_KEY_RAW:
        if (key->ck_length < AES_MINBITS ||
            key->ck_length > AES_MAXBITS) {
            return (CRYPTO_KEY_SIZE_RANGE);
        }

        /* key length must be either 128, 192, or 256 */
        if ((key->ck_length & 63) != 0)
            return (CRYPTO_KEY_SIZE_RANGE);
        break;
    default:
        return (CRYPTO_KEY_TYPE_INCONSISTENT);
    }

    aes_init_keysched(key->ck_data, key->ck_length, newbie);
    return (CRYPTO_SUCCESS);
}

/* EXPORT DELETE END */

/*
 * KCF software provider control entry points.
 */
/* ARGSUSED */
static void
aes_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
    /* A software provider is always ready. */
    *status = CRYPTO_PROVIDER_READY;
}

/*
 * KCF software provider encrypt entry points.
 */
/*
 * Common encrypt-init/decrypt-init entry point: validate key and
 * mechanism, allocate and initialize an aes_ctx_t, and attach it to
 * the KCF context.  On failure the context is freed before returning.
 */
static int
aes_common_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t template,
    crypto_req_handle_t req)
{

/* EXPORT DELETE START */

    aes_ctx_t *aes_ctx;
    int rv;
    int kmflag;

    /*
     * Only keys by value are supported by this module.
     */
    if (key->ck_format != CRYPTO_KEY_RAW) {
        return (CRYPTO_KEY_TYPE_INCONSISTENT);
    }

    if ((rv = aes_check_mech_param(mechanism)) != CRYPTO_SUCCESS)
        return (rv);

    /*
     * Allocate an AES context.
     */
    kmflag = crypto_kmflag(req);
    if ((aes_ctx = kmem_zalloc(sizeof (aes_ctx_t), kmflag)) == NULL)
        return (CRYPTO_HOST_MEMORY);

    rv = aes_common_init_ctx(aes_ctx, template, mechanism, key, kmflag);
    if (rv != CRYPTO_SUCCESS) {
        kmem_free(aes_ctx, sizeof (aes_ctx_t));
        return (rv);
    }

    ctx->cc_provider_private = aes_ctx;

/* EXPORT DELETE END */

    return (CRYPTO_SUCCESS);
}

/*
 * Helper AES encrypt update function for iov input data.
 */
static int
aes_cipher_update_iov(aes_ctx_t *aes_ctx, crypto_data_t *input,
    crypto_data_t *output, int (*cipher)(aes_ctx_t *, caddr_t, size_t,
    crypto_data_t *))
{
    int rv;
/* EXPORT DELETE START */

    /*
     * cd_miscdata, when set, carries a 16-byte IV into the context
     * (used when no IV was passed via the mechanism parameter —
     * see aes_common_init_ctx()).
     */
    if (input->cd_miscdata != NULL) {
        if (IS_P2ALIGNED(input->cd_miscdata, sizeof (uint64_t))) {
            /* LINTED: pointer alignment */
            aes_ctx->ac_iv[0] = *(uint64_t *)input->cd_miscdata;
            /* LINTED: pointer alignment */
            aes_ctx->ac_iv[1] = *(uint64_t *)&input->cd_miscdata[8];
        } else {
            /* unaligned: copy byte-wise */
            uint8_t *miscdata8 = (uint8_t *)&input->cd_miscdata[0];
            uint8_t *iv8 = (uint8_t *)&aes_ctx->ac_iv[0];

            AES_COPY_BLOCK(miscdata8, iv8);
        }
    }

    if (input->cd_raw.iov_len < input->cd_length)
        return (CRYPTO_ARGUMENTS_BAD);

    /* NULL output signals an in-place operation to the cipher routine */
    rv = (cipher)(aes_ctx, input->cd_raw.iov_base + input->cd_offset,
        input->cd_length, (input == output) ? NULL : output);

/* EXPORT DELETE END */

    return (rv);
}

/*
 * Helper AES encrypt update function for uio input data.
 */
static int
aes_cipher_update_uio(aes_ctx_t *aes_ctx, crypto_data_t *input,
    crypto_data_t *output, int (*cipher)(aes_ctx_t *, caddr_t, size_t,
    crypto_data_t *))
{
/* EXPORT DELETE START */
    uio_t *uiop = input->cd_uio;
    off_t offset = input->cd_offset;
    size_t length = input->cd_length;
    uint_t vec_idx;
    size_t cur_len;

    /* optional 16-byte IV carried in cd_miscdata (see iov helper) */
    if (input->cd_miscdata != NULL) {
        if (IS_P2ALIGNED(input->cd_miscdata, sizeof (uint64_t))) {
            /* LINTED: pointer alignment */
            aes_ctx->ac_iv[0] = *(uint64_t *)input->cd_miscdata;
            /* LINTED: pointer alignment */
            aes_ctx->ac_iv[1] = *(uint64_t *)&input->cd_miscdata[8];
        } else {
            uint8_t *miscdata8 = (uint8_t *)&input->cd_miscdata[0];
            uint8_t *iv8 = (uint8_t *)&aes_ctx->ac_iv[0];

            AES_COPY_BLOCK(miscdata8, iv8);
        }
    }

    /* only kernel-space uios are supported */
    if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
        return (CRYPTO_ARGUMENTS_BAD);
    }

    /*
     * Jump to the first iovec containing data to be
     * processed.
     */
    for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
        offset >= uiop->uio_iov[vec_idx].iov_len;
        offset -= uiop->uio_iov[vec_idx++].iov_len)
        ;
    if (vec_idx == uiop->uio_iovcnt) {
        /*
         * The caller specified an offset that is larger than the
         * total size of the buffers it provided.
         */
        return (CRYPTO_DATA_LEN_RANGE);
    }

    /*
     * Now process the iovecs.
     */
    while (vec_idx < uiop->uio_iovcnt && length > 0) {
        cur_len = MIN(uiop->uio_iov[vec_idx].iov_len -
            offset, length);

        /*
         * NOTE(review): the cipher return value is discarded here
         * (the iov helper propagates it) — confirm errors from the
         * contiguous-blocks routines are intentionally ignored.
         */
        (cipher)(aes_ctx, uiop->uio_iov[vec_idx].iov_base + offset,
            cur_len, (input == output) ? NULL : output);

        length -= cur_len;
        vec_idx++;
        offset = 0;
    }

    if (vec_idx == uiop->uio_iovcnt && length > 0) {
        /*
         * The end of the specified iovec's was reached but
         * the length requested could not be processed, i.e.
         * The caller requested to digest more data than it provided.
         */

        return (CRYPTO_DATA_LEN_RANGE);
    }

/* EXPORT DELETE END */

    return (CRYPTO_SUCCESS);
}

/*
 * Helper AES encrypt update function for mblk input data.
 */
static int
aes_cipher_update_mp(aes_ctx_t *aes_ctx, crypto_data_t *input,
    crypto_data_t *output, int (*cipher)(aes_ctx_t *, caddr_t, size_t,
    crypto_data_t *))
{
/* EXPORT DELETE START */
    off_t offset = input->cd_offset;
    size_t length = input->cd_length;
    mblk_t *mp;
    size_t cur_len;

    /* optional 16-byte IV carried in cd_miscdata (see iov helper) */
    if (input->cd_miscdata != NULL) {
        if (IS_P2ALIGNED(input->cd_miscdata, sizeof (uint64_t))) {
            /* LINTED: pointer alignment */
            aes_ctx->ac_iv[0] = *(uint64_t *)input->cd_miscdata;
            /* LINTED: pointer alignment */
            aes_ctx->ac_iv[1] = *(uint64_t *)&input->cd_miscdata[8];
        } else {
            uint8_t *miscdata8 = (uint8_t *)&input->cd_miscdata[0];
            uint8_t *iv8 = (uint8_t *)&aes_ctx->ac_iv[0];

            AES_COPY_BLOCK(miscdata8, iv8);
        }
    }

    /*
     * Jump to the first mblk_t containing data to be processed.
     */
    for (mp = input->cd_mp; mp != NULL && offset >= MBLKL(mp);
        offset -= MBLKL(mp), mp = mp->b_cont)
        ;
    if (mp == NULL) {
        /*
         * The caller specified an offset that is larger than the
         * total size of the buffers it provided.
         */
        return (CRYPTO_DATA_LEN_RANGE);
    }

    /*
     * Now do the processing on the mblk chain.
     */
    while (mp != NULL && length > 0) {
        cur_len = MIN(MBLKL(mp) - offset, length);
        /*
         * NOTE(review): cipher return value discarded, as in the
         * uio helper — confirm this is intentional.
         */
        (cipher)(aes_ctx, (char *)(mp->b_rptr + offset), cur_len,
            (input == output) ? NULL : output);

        length -= cur_len;
        offset = 0;
        mp = mp->b_cont;
    }

    if (mp == NULL && length > 0) {
        /*
         * The end of the mblk was reached but the length requested
         * could not be processed, i.e. The caller requested
         * to digest more data than it provided.
         */
        return (CRYPTO_DATA_LEN_RANGE);
    }

/* EXPORT DELETE END */

    return (CRYPTO_SUCCESS);
}

/*
 * Single-part encrypt entry point.  Implemented as one update call;
 * note the context is freed before returning, even on failure.
 */
/* ARGSUSED */
static int
aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
    int ret = CRYPTO_FAILED;

/* EXPORT DELETE START */

    aes_ctx_t *aes_ctx;

    ASSERT(ctx->cc_provider_private != NULL);
    aes_ctx = ctx->cc_provider_private;

    /*
     * For block ciphers, plaintext must be a multiple of AES block size.
     * This test is only valid for ciphers whose blocksize is a power of 2.
     */
    if (((aes_ctx->ac_flags & AES_CTR_MODE) == 0) &&
        (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
        return (CRYPTO_DATA_LEN_RANGE);

    AES_ARG_INPLACE(plaintext, ciphertext);

    /*
     * We need to just return the length needed to store the output.
     * We should not destroy the context for the following case.
     */
    if (ciphertext->cd_length < plaintext->cd_length) {
        ciphertext->cd_length = plaintext->cd_length;
        return (CRYPTO_BUFFER_TOO_SMALL);
    }

    /*
     * Do an update on the specified input data.
     */
    ret = aes_encrypt_update(ctx, plaintext, ciphertext, req);
    ASSERT(aes_ctx->ac_remainder_len == 0);
    (void) aes_free_context(ctx);

/* EXPORT DELETE END */

    /* LINTED */
    return (ret);
}

/*
 * Single-part decrypt entry point; mirror image of aes_encrypt().
 */
/* ARGSUSED */
static int
aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
    int ret = CRYPTO_FAILED;

/* EXPORT DELETE START */

    aes_ctx_t *aes_ctx;

    ASSERT(ctx->cc_provider_private != NULL);
    aes_ctx = ctx->cc_provider_private;

    /*
     * For block ciphers, ciphertext must be a multiple of AES block size.
     * This test is only valid for ciphers whose blocksize is a power of 2.
     */
    if (((aes_ctx->ac_flags & AES_CTR_MODE) == 0) &&
        (ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
        return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);

    AES_ARG_INPLACE(ciphertext, plaintext);

    /*
     * We need to just return the length needed to store the output.
     * We should not destroy the context for the following case.
     */
    if (plaintext->cd_length < ciphertext->cd_length) {
        plaintext->cd_length = ciphertext->cd_length;
        return (CRYPTO_BUFFER_TOO_SMALL);
    }

    /*
     * Do an update on the specified input data.
     */
    ret = aes_decrypt_update(ctx, ciphertext, plaintext, req);
    ASSERT(aes_ctx->ac_remainder_len == 0);
    (void) aes_free_context(ctx);

/* EXPORT DELETE END */

    /* LINTED */
    return (ret);
}

/*
 * Multi-part encrypt update: dispatch on the input data format and
 * run complete blocks through aes_encrypt_contiguous_blocks().
 */
/* ARGSUSED */
static int
aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
    off_t saved_offset;
    size_t saved_length, out_len;
    int ret = CRYPTO_SUCCESS;
    aes_ctx_t *aes_ctx;

    ASSERT(ctx->cc_provider_private != NULL);

    AES_ARG_INPLACE(plaintext, ciphertext);

    /* compute number of bytes that will hold the ciphertext */
    out_len = ((aes_ctx_t *)ctx->cc_provider_private)->ac_remainder_len;
    out_len += plaintext->cd_length;
    out_len &= ~(AES_BLOCK_LEN - 1);    /* round down to full blocks */

    /* return length needed to store the output */
    if (ciphertext->cd_length < out_len) {
        ciphertext->cd_length = out_len;
        return (CRYPTO_BUFFER_TOO_SMALL);
    }

    saved_offset = ciphertext->cd_offset;
    saved_length = ciphertext->cd_length;

    /*
     * Do the AES update on the specified input data.
     */
    switch (plaintext->cd_format) {
    case CRYPTO_DATA_RAW:
        ret = aes_cipher_update_iov(ctx->cc_provider_private,
            plaintext, ciphertext, aes_encrypt_contiguous_blocks);
        break;
    case CRYPTO_DATA_UIO:
        ret = aes_cipher_update_uio(ctx->cc_provider_private,
            plaintext, ciphertext, aes_encrypt_contiguous_blocks);
        break;
    case CRYPTO_DATA_MBLK:
        ret = aes_cipher_update_mp(ctx->cc_provider_private,
            plaintext, ciphertext, aes_encrypt_contiguous_blocks);
        break;
    default:
        ret = CRYPTO_ARGUMENTS_BAD;
    }

    /*
     * Since AES counter mode is a stream cipher, we call
     * aes_counter_final() to pick up any remaining bytes.
     * It is an internal function that does not destroy
     * the context like *normal* final routines.
     */
    aes_ctx = ctx->cc_provider_private;
    if ((aes_ctx->ac_flags & AES_CTR_MODE) &&
        (aes_ctx->ac_remainder_len > 0)) {
        ret = aes_counter_final(aes_ctx, ciphertext);
    }

    /* on success report bytes produced; on failure restore the length */
    if (ret == CRYPTO_SUCCESS) {
        if (plaintext != ciphertext)
            ciphertext->cd_length =
                ciphertext->cd_offset - saved_offset;
    } else {
        ciphertext->cd_length = saved_length;
    }
    ciphertext->cd_offset = saved_offset;

    return (ret);
}

/*
 * Multi-part decrypt update; mirror image of aes_encrypt_update().
 */
/* ARGSUSED */
static int
aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
    off_t saved_offset;
    size_t saved_length, out_len;
    int ret = CRYPTO_SUCCESS;
    aes_ctx_t *aes_ctx;

    ASSERT(ctx->cc_provider_private != NULL);

    AES_ARG_INPLACE(ciphertext, plaintext);

    /* compute number of bytes that will hold the plaintext */
    out_len = ((aes_ctx_t *)ctx->cc_provider_private)->ac_remainder_len;
    out_len += ciphertext->cd_length;
    out_len &= ~(AES_BLOCK_LEN - 1);    /* round down to full blocks */

    /* return length needed to store the output */
    if (plaintext->cd_length < out_len) {
        plaintext->cd_length = out_len;
        return
 (CRYPTO_BUFFER_TOO_SMALL);
    }

    saved_offset = plaintext->cd_offset;
    saved_length = plaintext->cd_length;

    /*
     * Do the AES update on the specified input data.
     */
    switch (ciphertext->cd_format) {
    case CRYPTO_DATA_RAW:
        ret = aes_cipher_update_iov(ctx->cc_provider_private,
            ciphertext, plaintext, aes_decrypt_contiguous_blocks);
        break;
    case CRYPTO_DATA_UIO:
        ret = aes_cipher_update_uio(ctx->cc_provider_private,
            ciphertext, plaintext, aes_decrypt_contiguous_blocks);
        break;
    case CRYPTO_DATA_MBLK:
        ret = aes_cipher_update_mp(ctx->cc_provider_private,
            ciphertext, plaintext, aes_decrypt_contiguous_blocks);
        break;
    default:
        ret = CRYPTO_ARGUMENTS_BAD;
    }

    /*
     * Since AES counter mode is a stream cipher, we call
     * aes_counter_final() to pick up any remaining bytes.
     * It is an internal function that does not destroy
     * the context like *normal* final routines.
     */
    aes_ctx = ctx->cc_provider_private;
    if ((aes_ctx->ac_flags & AES_CTR_MODE) &&
        (aes_ctx->ac_remainder_len > 0)) {
        ret = aes_counter_final(aes_ctx, plaintext);
    }

    /* on success report bytes produced; on failure restore the length */
    if (ret == CRYPTO_SUCCESS) {
        if (ciphertext != plaintext)
            plaintext->cd_length =
                plaintext->cd_offset - saved_offset;
    } else {
        plaintext->cd_length = saved_length;
    }
    plaintext->cd_offset = saved_offset;


    return (ret);
}

/*
 * Multi-part encrypt final: flush any CTR remainder and destroy the
 * context.  Block modes must have no unprocessed input at this point.
 */
/* ARGSUSED */
static int
aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{

/* EXPORT DELETE START */

    aes_ctx_t *aes_ctx;
    int ret;

    ASSERT(ctx->cc_provider_private != NULL);
    aes_ctx = ctx->cc_provider_private;

    if (data->cd_format != CRYPTO_DATA_RAW &&
        data->cd_format != CRYPTO_DATA_UIO &&
        data->cd_format != CRYPTO_DATA_MBLK) {
        return (CRYPTO_ARGUMENTS_BAD);
    }

    /*
     * There must be no unprocessed plaintext.
     * This happens if the length of the last data is
     * not a multiple of the AES block length.
     */
    if (aes_ctx->ac_remainder_len > 0) {
        if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
            return (CRYPTO_DATA_LEN_RANGE);
        else {
            /* CTR is a stream cipher: emit the trailing bytes */
            ret = aes_counter_final(aes_ctx, data);
            if (ret != CRYPTO_SUCCESS)
                return (ret);
        }
    }

    /* block modes produce no output in the final call */
    if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
        data->cd_length = 0;

    (void) aes_free_context(ctx);

/* EXPORT DELETE END */

    return (CRYPTO_SUCCESS);
}

/*
 * Multi-part decrypt final; mirror image of aes_encrypt_final().
 */
/* ARGSUSED */
static int
aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{

/* EXPORT DELETE START */

    aes_ctx_t *aes_ctx;
    int ret;

    ASSERT(ctx->cc_provider_private != NULL);
    aes_ctx = ctx->cc_provider_private;

    if (data->cd_format != CRYPTO_DATA_RAW &&
        data->cd_format != CRYPTO_DATA_UIO &&
        data->cd_format != CRYPTO_DATA_MBLK) {
        return (CRYPTO_ARGUMENTS_BAD);
    }

    /*
     * There must be no unprocessed ciphertext.
     * This happens if the length of the last ciphertext is
     * not a multiple of the AES block length.
     */
    if (aes_ctx->ac_remainder_len > 0) {
        if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
            return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
        else {
            /* CTR is a stream cipher: emit the trailing bytes */
            ret = aes_counter_final(aes_ctx, data);
            if (ret != CRYPTO_SUCCESS)
                return (ret);
        }
    }

    /* block modes produce no output in the final call */
    if ((aes_ctx->ac_flags & AES_CTR_MODE) == 0)
        data->cd_length = 0;

    (void) aes_free_context(ctx);

/* EXPORT DELETE END */

    return (CRYPTO_SUCCESS);
}

/*
 * One-shot encrypt entry point.  Uses a stack-allocated context so no
 * KCF context object is needed; the key schedule (if provider-owned)
 * is zeroized and freed before returning.
 */
/* ARGSUSED */
static int
aes_encrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
    crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
    aes_ctx_t aes_ctx;  /* on the stack */
    off_t saved_offset;
    size_t saved_length;
    int ret;

    AES_ARG_INPLACE(plaintext, ciphertext);

    if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
        /*
         * Plaintext must be a multiple of AES block size.
         * This test only works for non-padded mechanisms
         * when blocksize is 2^N.
         */
        if ((plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
            return (CRYPTO_DATA_LEN_RANGE);
    }

    /* return length needed to store the output */
    if (ciphertext->cd_length < plaintext->cd_length) {
        ciphertext->cd_length = plaintext->cd_length;
        return (CRYPTO_BUFFER_TOO_SMALL);
    }

    if ((ret = aes_check_mech_param(mechanism)) != CRYPTO_SUCCESS)
        return (ret);

    bzero(&aes_ctx, sizeof (aes_ctx_t));

    ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
        crypto_kmflag(req));
    if (ret != CRYPTO_SUCCESS)
        return (ret);

    saved_offset = ciphertext->cd_offset;
    saved_length = ciphertext->cd_length;

    /*
     * Do an update on the specified input data.
     */
    switch (plaintext->cd_format) {
    case CRYPTO_DATA_RAW:
        ret = aes_cipher_update_iov(&aes_ctx, plaintext, ciphertext,
            aes_encrypt_contiguous_blocks);
        break;
    case CRYPTO_DATA_UIO:
        ret = aes_cipher_update_uio(&aes_ctx, plaintext, ciphertext,
            aes_encrypt_contiguous_blocks);
        break;
    case CRYPTO_DATA_MBLK:
        ret = aes_cipher_update_mp(&aes_ctx, plaintext, ciphertext,
            aes_encrypt_contiguous_blocks);
        break;
    default:
        ret = CRYPTO_ARGUMENTS_BAD;
    }

    if (ret == CRYPTO_SUCCESS) {
        if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
            /* block mode: input was validated as whole blocks */
            ASSERT(aes_ctx.ac_remainder_len == 0);
            if (plaintext != ciphertext)
                ciphertext->cd_length =
                    ciphertext->cd_offset - saved_offset;
        } else {
            /* CTR: flush any partial-block remainder */
            if (aes_ctx.ac_remainder_len > 0) {
                ret = aes_counter_final(&aes_ctx, ciphertext);
                if (ret != CRYPTO_SUCCESS)
                    goto out;
            }
            if (plaintext != ciphertext)
                ciphertext->cd_length =
                    ciphertext->cd_offset - saved_offset;
        }
    } else {
        ciphertext->cd_length = saved_length;
    }
    ciphertext->cd_offset = saved_offset;

out:
    /* zeroize and release the key schedule if this call allocated it */
    if (aes_ctx.ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
        bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
        kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
    }

    return (ret);
}

/*
 * One-shot decrypt entry point; mirror image of aes_encrypt_atomic().
 */
/* ARGSUSED */
static int
aes_decrypt_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
    crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
    aes_ctx_t aes_ctx;  /* on the stack */
    off_t saved_offset;
    size_t saved_length;
    int ret;

    AES_ARG_INPLACE(ciphertext, plaintext);

    if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
        /*
         * Ciphertext must be a multiple of AES block size.
         * This test only works for non-padded mechanisms
         * when blocksize is 2^N.
         */
        if ((ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
            return (CRYPTO_DATA_LEN_RANGE);
    }

    /* return length needed to store the output */
    if (plaintext->cd_length < ciphertext->cd_length) {
        plaintext->cd_length = ciphertext->cd_length;
        return (CRYPTO_BUFFER_TOO_SMALL);
    }

    if ((ret = aes_check_mech_param(mechanism)) != CRYPTO_SUCCESS)
        return (ret);

    bzero(&aes_ctx, sizeof (aes_ctx_t));

    ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
        crypto_kmflag(req));
    if (ret != CRYPTO_SUCCESS)
        return (ret);

    saved_offset = plaintext->cd_offset;
    saved_length = plaintext->cd_length;

    /*
     * Do an update on the specified input data.
     */
    switch (ciphertext->cd_format) {
    case CRYPTO_DATA_RAW:
        ret = aes_cipher_update_iov(&aes_ctx, ciphertext, plaintext,
            aes_decrypt_contiguous_blocks);
        break;
    case CRYPTO_DATA_UIO:
        ret = aes_cipher_update_uio(&aes_ctx, ciphertext, plaintext,
            aes_decrypt_contiguous_blocks);
        break;
    case CRYPTO_DATA_MBLK:
        ret = aes_cipher_update_mp(&aes_ctx, ciphertext, plaintext,
            aes_decrypt_contiguous_blocks);
        break;
    default:
        ret = CRYPTO_ARGUMENTS_BAD;
    }

    if (ret == CRYPTO_SUCCESS) {
        if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
            /* block mode: input was validated as whole blocks */
            ASSERT(aes_ctx.ac_remainder_len == 0);
            if (ciphertext != plaintext)
                plaintext->cd_length =
                    plaintext->cd_offset - saved_offset;
        } else {
            /* CTR: flush any partial-block remainder */
            if (aes_ctx.ac_remainder_len > 0) {
                ret = aes_counter_final(&aes_ctx, plaintext);
                if (ret != CRYPTO_SUCCESS)
                    goto out;
            }
            if (ciphertext != plaintext)
                plaintext->cd_length =
                    plaintext->cd_offset - saved_offset;
        }
    } else {
        plaintext->cd_length = saved_length;
    }
    plaintext->cd_offset = saved_offset;

out:
    /* zeroize and release the key schedule if this call allocated it */
    if (aes_ctx.ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
        bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
        kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
    }

    return (ret);
}

/*
 * KCF software provider context template entry points.
 */
/*
 * Build a reusable context template (a pre-expanded key schedule) so
 * repeated operations with the same key skip key expansion.  The caller
 * owns the returned template.
 */
/* ARGSUSED */
static int
aes_create_ctx_template(crypto_provider_handle_t provider,
    crypto_mechanism_t *mechanism, crypto_key_t *key,
    crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size, crypto_req_handle_t req)
{

/* EXPORT DELETE START */

    void *keysched;
    size_t size;
    int rv;

    if (mechanism->cm_type != AES_ECB_MECH_INFO_TYPE &&
        mechanism->cm_type != AES_CBC_MECH_INFO_TYPE &&
        mechanism->cm_type != AES_CTR_MECH_INFO_TYPE)
        return (CRYPTO_MECHANISM_INVALID);

    if ((keysched = aes_alloc_keysched(&size,
        crypto_kmflag(req))) == NULL) {
        return (CRYPTO_HOST_MEMORY);
    }

    /*
     * Initialize key schedule. Key length information is stored
     * in the key.
     */
    if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
        /* scrub any partial key material before freeing */
        bzero(keysched, size);
        kmem_free(keysched, size);
        return (rv);
    }

    *tmpl = keysched;
    *tmpl_size = size;

/* EXPORT DELETE END */

    return (CRYPTO_SUCCESS);
}

/*
 * Destroy a context: zeroize and free the provider-owned key schedule
 * (if any) and free the aes_ctx_t itself.
 */
/* ARGSUSED */
static int
aes_free_context(crypto_ctx_t *ctx)
{

/* EXPORT DELETE START */

    aes_ctx_t *aes_ctx = ctx->cc_provider_private;

    if (aes_ctx != NULL) {
        if (aes_ctx->ac_flags & AES_PROVIDER_OWNS_KEY_SCHEDULE) {
            ASSERT(aes_ctx->ac_keysched_len != 0);
            bzero(aes_ctx->ac_keysched, aes_ctx->ac_keysched_len);
            kmem_free(aes_ctx->ac_keysched,
                aes_ctx->ac_keysched_len);
        }
        kmem_free(aes_ctx, sizeof (aes_ctx_t));
        ctx->cc_provider_private = NULL;
    }

/* EXPORT DELETE END */

    return (CRYPTO_SUCCESS);
}

/*
 * Common context setup: record the mode, capture the IV / counter
 * parameters, and install a key schedule (from the template if one
 * was supplied, otherwise freshly allocated and initialized).
 */
/* ARGSUSED */
static int
aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
    crypto_mechanism_t *mechanism, crypto_key_t *key, int kmflag)
{
    int rv = CRYPTO_SUCCESS;

/* EXPORT DELETE START */

    void *keysched;
    size_t size;

    aes_ctx->ac_flags = 0;

    if (mechanism->cm_type == AES_CBC_MECH_INFO_TYPE) {
        /*
         * Copy 128-bit IV into context.
         *
         * If cm_param == NULL then the IV comes from the
         * cd_miscdata field in the crypto_data structure.
         */
        if (mechanism->cm_param != NULL) {
            ASSERT(mechanism->cm_param_len == AES_BLOCK_LEN);
            if (IS_P2ALIGNED(mechanism->cm_param,
                sizeof (uint64_t))) {
                uint64_t *param64;
                param64 = (uint64_t *)mechanism->cm_param;

                aes_ctx->ac_iv[0] = *param64++;
                aes_ctx->ac_iv[1] = *param64;
            } else {
                /* unaligned parameter: copy the IV byte-wise */
                uint8_t *iv8;
                uint8_t *p8;
                iv8 = (uint8_t *)&aes_ctx->ac_iv;
                p8 = (uint8_t *)&mechanism->cm_param[0];

                iv8[0] = p8[0];
                iv8[1] = p8[1];
                iv8[2] = p8[2];
                iv8[3] = p8[3];
                iv8[4] = p8[4];
                iv8[5] = p8[5];
                iv8[6] = p8[6];
                iv8[7] = p8[7];
                iv8[8] = p8[8];
                iv8[9] = p8[9];
                iv8[10] = p8[10];
                iv8[11] = p8[11];
                iv8[12] = p8[12];
                iv8[13] = p8[13];
                iv8[14] = p8[14];
                iv8[15] = p8[15];
            }
        }

        aes_ctx->ac_lastp = (uint8_t *)&aes_ctx->ac_iv[0];
        aes_ctx->ac_flags |= AES_CBC_MODE;

    } else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
        if (mechanism->cm_param != NULL) {
            CK_AES_CTR_PARAMS *pp;
            uint64_t mask = 0;
            ulong_t count;
            uint8_t *iv8;
            uint8_t *p8;

            pp = (CK_AES_CTR_PARAMS *)mechanism->cm_param;
            iv8 = (uint8_t *)&aes_ctx->ac_iv;
            p8 = (uint8_t *)&pp->cb[0];

            /* XXX what to do about miscdata */
            /* only counter widths of 1..64 bits are supported */
            count = pp->ulCounterBits;
            if (count == 0 || count > 64) {
                return (CRYPTO_MECHANISM_PARAM_INVALID);
            }
            /* build a mask with the low ulCounterBits bits set */
            while (count-- > 0)
                mask |= (1ULL << count);

            aes_ctx->ac_counter_mask = mask;

            /* copy the 16-byte counter block byte-wise */
            iv8[0] = p8[0];
            iv8[1] = p8[1];
            iv8[2] = p8[2];
            iv8[3] = p8[3];
            iv8[4] = p8[4];
            iv8[5] = p8[5];
            iv8[6] = p8[6];
            iv8[7] = p8[7];
            iv8[8] = p8[8];
            iv8[9] = p8[9];
            iv8[10] = p8[10];
            iv8[11] = p8[11];
            iv8[12] = p8[12];
            iv8[13] = p8[13];
            iv8[14] = p8[14];
            iv8[15] = p8[15];
        } else {
            /* CTR requires an explicit CK_AES_CTR_PARAMS */
            return (CRYPTO_MECHANISM_PARAM_INVALID);
        }

        aes_ctx->ac_lastp = (uint8_t
*)&aes_ctx->ac_iv[0]; 1232 aes_ctx->ac_flags |= AES_CTR_MODE; 1233 } else { 1234 aes_ctx->ac_flags |= AES_ECB_MODE; 1235 } 1236 1237 if (template == NULL) { 1238 if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL) 1239 return (CRYPTO_HOST_MEMORY); 1240 /* 1241 * Initialize key schedule. 1242 * Key length is stored in the key. 1243 */ 1244 if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) 1245 kmem_free(keysched, size); 1246 1247 aes_ctx->ac_flags |= AES_PROVIDER_OWNS_KEY_SCHEDULE; 1248 aes_ctx->ac_keysched_len = size; 1249 } else { 1250 keysched = template; 1251 } 1252 aes_ctx->ac_keysched = keysched; 1253 1254 /* EXPORT DELETE END */ 1255 1256 return (rv); 1257 } 1258