1 /* 2 * Copyright 2004-2023 The OpenSSL Project Authors. All Rights Reserved. 3 * 4 * Licensed under the Apache License 2.0 (the "License"). You may not use 5 * this file except in compliance with the License. You can obtain a copy 6 * in the file LICENSE in the source distribution or at 7 * https://www.openssl.org/source/license.html 8 */ 9 10 /* 11 * This file uses the low level AES and engine functions (which are deprecated 12 * for non-internal use) in order to implement the padlock engine AES ciphers. 13 */ 14 #define OPENSSL_SUPPRESS_DEPRECATED 15 16 #include <stdio.h> 17 #include <string.h> 18 19 #include <openssl/opensslconf.h> 20 #include <openssl/crypto.h> 21 #include <openssl/engine.h> 22 #include <openssl/evp.h> 23 #include <openssl/aes.h> 24 #include <openssl/rand.h> 25 #include <openssl/err.h> 26 #include <openssl/modes.h> 27 28 #ifndef OPENSSL_NO_PADLOCKENG 29 30 /* 31 * VIA PadLock AES is available *ONLY* on some x86 CPUs. Not only that it 32 * doesn't exist elsewhere, but it even can't be compiled on other platforms! 33 */ 34 35 #undef COMPILE_PADLOCKENG 36 #if defined(PADLOCK_ASM) 37 #define COMPILE_PADLOCKENG 38 #ifdef OPENSSL_NO_DYNAMIC_ENGINE 39 static ENGINE *ENGINE_padlock(void); 40 #endif 41 #endif 42 43 #ifdef OPENSSL_NO_DYNAMIC_ENGINE 44 void engine_load_padlock_int(void); 45 void engine_load_padlock_int(void) 46 { 47 /* On non-x86 CPUs it just returns. */ 48 #ifdef COMPILE_PADLOCKENG 49 ENGINE *toadd = ENGINE_padlock(); 50 if (!toadd) 51 return; 52 ERR_set_mark(); 53 ENGINE_add(toadd); 54 /* 55 * If the "add" worked, it gets a structural reference. So either way, we 56 * release our just-created reference. 57 */ 58 ENGINE_free(toadd); 59 /* 60 * If the "add" didn't work, it was probably a conflict because it was 61 * already added (eg. someone calling ENGINE_load_blah then calling 62 * ENGINE_load_builtin_engines() perhaps). 63 */ 64 ERR_pop_to_mark(); 65 #endif 66 } 67 68 #endif 69 70 #ifdef COMPILE_PADLOCKENG 71 72 /* Function for ENGINE detection and control */ 73 static int padlock_available(void); 74 static int padlock_init(ENGINE *e); 75 76 /* RNG Stuff */ 77 static RAND_METHOD padlock_rand; 78 79 /* Cipher Stuff */ 80 static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, 81 const int **nids, int nid); 82 83 /* Engine names */ 84 static const char *padlock_id = "padlock"; 85 static char padlock_name[100]; 86 87 /* Available features */ 88 static int padlock_use_ace = 0; /* Advanced Cryptography Engine */ 89 static int padlock_use_rng = 0; /* Random Number Generator */ 90 91 /* ===== Engine "management" functions ===== */ 92 93 /* Prepare the ENGINE structure for registration */ 94 static int padlock_bind_helper(ENGINE *e) 95 { 96 /* Check available features */ 97 padlock_available(); 98 99 /* 100 * RNG is currently disabled for reasons discussed in commentary just 101 * before padlock_rand_bytes function. 102 */ 103 padlock_use_rng = 0; 104 105 /* Generate a nice engine name with available features */ 106 BIO_snprintf(padlock_name, sizeof(padlock_name), 107 "VIA PadLock (%s, %s)", 108 padlock_use_rng ? "RNG" : "no-RNG", 109 padlock_use_ace ? "ACE" : "no-ACE"); 110 111 /* Register everything or return with an error */ 112 if (!ENGINE_set_id(e, padlock_id) || !ENGINE_set_name(e, padlock_name) || !ENGINE_set_init_function(e, padlock_init) || (padlock_use_ace && !ENGINE_set_ciphers(e, padlock_ciphers)) || (padlock_use_rng && !ENGINE_set_RAND(e, &padlock_rand))) { 113 return 0; 114 } 115 116 /* Everything looks good */ 117 return 1; 118 } 119 120 #ifdef OPENSSL_NO_DYNAMIC_ENGINE 121 /* Constructor */ 122 static ENGINE *ENGINE_padlock(void) 123 { 124 ENGINE *eng = ENGINE_new(); 125 126 if (eng == NULL) { 127 return NULL; 128 } 129 130 if (!padlock_bind_helper(eng)) { 131 ENGINE_free(eng); 132 return NULL; 133 } 134 135 return eng; 136 } 137 #endif 138 139 /* Check availability of the engine */ 140 static int padlock_init(ENGINE *e) 141 { 142 return (padlock_use_rng || padlock_use_ace); 143 } 144 145 #ifndef AES_ASM 146 static int padlock_aes_set_encrypt_key(const unsigned char *userKey, 147 const int bits, 148 AES_KEY *key); 149 static int padlock_aes_set_decrypt_key(const unsigned char *userKey, 150 const int bits, 151 AES_KEY *key); 152 #define AES_ASM 153 #define AES_set_encrypt_key padlock_aes_set_encrypt_key 154 #define AES_set_decrypt_key padlock_aes_set_decrypt_key 155 /* clang-format off */ 156 # include "../crypto/aes/aes_core.c" 157 /* clang-format on */ 158 #endif 159 160 /* 161 * This stuff is needed if this ENGINE is being compiled into a 162 * self-contained shared-library. 163 */ 164 #ifndef OPENSSL_NO_DYNAMIC_ENGINE 165 static int padlock_bind_fn(ENGINE *e, const char *id) 166 { 167 if (id && (strcmp(id, padlock_id) != 0)) { 168 return 0; 169 } 170 171 if (!padlock_bind_helper(e)) { 172 return 0; 173 } 174 175 return 1; 176 } 177 178 IMPLEMENT_DYNAMIC_CHECK_FN() 179 IMPLEMENT_DYNAMIC_BIND_FN(padlock_bind_fn) 180 #endif /* !OPENSSL_NO_DYNAMIC_ENGINE */ 181 /* ===== Here comes the "real" engine ===== */ 182 183 /* Some AES-related constants */ 184 #define AES_BLOCK_SIZE 16 185 #define AES_KEY_SIZE_128 16 186 #define AES_KEY_SIZE_192 24 187 #define AES_KEY_SIZE_256 32 188 /* 189 * Here we store the status information relevant to the current context. 190 */ 191 /* 192 * BIG FAT WARNING: Inline assembler in PADLOCK_XCRYPT_ASM() depends on 193 * the order of items in this structure. Don't blindly modify, reorder, 194 * etc! 195 */ 196 struct padlock_cipher_data { 197 unsigned char iv[AES_BLOCK_SIZE]; /* Initialization vector */ 198 union { 199 unsigned int pad[4]; 200 struct { 201 int rounds : 4; 202 int dgst : 1; /* n/a in C3 */ 203 int align : 1; /* n/a in C3 */ 204 int ciphr : 1; /* n/a in C3 */ /* codespell:ignore */ 205 unsigned int keygen : 1; 206 int interm : 1; /* codespell:ignore */ 207 unsigned int encdec : 1; 208 int ksize : 2; 209 } b; 210 } cword; /* Control word */ 211 AES_KEY ks; /* Encryption key */ 212 }; 213 214 /* Interface to assembler module */ 215 unsigned int padlock_capability(void); 216 void padlock_key_bswap(AES_KEY *key); 217 void padlock_verify_context(struct padlock_cipher_data *ctx); 218 void padlock_reload_key(void); 219 void padlock_aes_block(void *out, const void *inp, 220 struct padlock_cipher_data *ctx); 221 int padlock_ecb_encrypt(void *out, const void *inp, 222 struct padlock_cipher_data *ctx, size_t len); 223 int padlock_cbc_encrypt(void *out, const void *inp, 224 struct padlock_cipher_data *ctx, size_t len); 225 int padlock_cfb_encrypt(void *out, const void *inp, 226 struct padlock_cipher_data *ctx, size_t len); 227 int padlock_ofb_encrypt(void *out, const void *inp, 228 struct padlock_cipher_data *ctx, size_t len); 229 int padlock_ctr32_encrypt(void *out, const void *inp, 230 struct padlock_cipher_data *ctx, size_t len); 231 int padlock_xstore(void *out, int edx); 232 void padlock_sha1_oneshot(void *ctx, const void *inp, size_t len); 233 void padlock_sha1(void *ctx, const void *inp, size_t len); 234 void padlock_sha256_oneshot(void *ctx, const void *inp, size_t len); 235 void padlock_sha256(void *ctx, const void *inp, size_t len); 236 237 /* 238 * Load supported features of the CPU to see if the PadLock is available. 239 */ 240 static int padlock_available(void) 241 { 242 unsigned int edx = padlock_capability(); 243 244 /* Fill up some flags */ 245 padlock_use_ace = ((edx & (0x3 << 6)) == (0x3 << 6)); 246 padlock_use_rng = ((edx & (0x3 << 2)) == (0x3 << 2)); 247 248 return padlock_use_ace + padlock_use_rng; 249 } 250 251 /* ===== AES encryption/decryption ===== */ 252 253 #if defined(NID_aes_128_cfb128) && !defined(NID_aes_128_cfb) 254 #define NID_aes_128_cfb NID_aes_128_cfb128 255 #endif 256 257 #if defined(NID_aes_128_ofb128) && !defined(NID_aes_128_ofb) 258 #define NID_aes_128_ofb NID_aes_128_ofb128 259 #endif 260 261 #if defined(NID_aes_192_cfb128) && !defined(NID_aes_192_cfb) 262 #define NID_aes_192_cfb NID_aes_192_cfb128 263 #endif 264 265 #if defined(NID_aes_192_ofb128) && !defined(NID_aes_192_ofb) 266 #define NID_aes_192_ofb NID_aes_192_ofb128 267 #endif 268 269 #if defined(NID_aes_256_cfb128) && !defined(NID_aes_256_cfb) 270 #define NID_aes_256_cfb NID_aes_256_cfb128 271 #endif 272 273 #if defined(NID_aes_256_ofb128) && !defined(NID_aes_256_ofb) 274 #define NID_aes_256_ofb NID_aes_256_ofb128 275 #endif 276 277 /* List of supported ciphers. */ 278 static const int padlock_cipher_nids[] = { 279 NID_aes_128_ecb, 280 NID_aes_128_cbc, 281 NID_aes_128_cfb, 282 NID_aes_128_ofb, 283 NID_aes_128_ctr, 284 285 NID_aes_192_ecb, 286 NID_aes_192_cbc, 287 NID_aes_192_cfb, 288 NID_aes_192_ofb, 289 NID_aes_192_ctr, 290 291 NID_aes_256_ecb, 292 NID_aes_256_cbc, 293 NID_aes_256_cfb, 294 NID_aes_256_ofb, 295 NID_aes_256_ctr 296 }; 297 298 static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids) / sizeof(padlock_cipher_nids[0])); 299 300 /* Function prototypes ... */ 301 static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, 302 const unsigned char *iv, int enc); 303 304 #define NEAREST_ALIGNED(ptr) ((unsigned char *)(ptr) + ((0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F)) 305 #define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *) \ 306 NEAREST_ALIGNED(EVP_CIPHER_CTX_get_cipher_data(ctx))) 307 308 static int 309 padlock_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, 310 const unsigned char *in_arg, size_t nbytes) 311 { 312 return padlock_ecb_encrypt(out_arg, in_arg, 313 ALIGNED_CIPHER_DATA(ctx), nbytes); 314 } 315 316 static int 317 padlock_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, 318 const unsigned char *in_arg, size_t nbytes) 319 { 320 struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx); 321 int ret; 322 323 memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE); 324 if ((ret = padlock_cbc_encrypt(out_arg, in_arg, cdata, nbytes))) 325 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE); 326 return ret; 327 } 328 329 static int 330 padlock_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, 331 const unsigned char *in_arg, size_t nbytes) 332 { 333 struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx); 334 size_t chunk; 335 336 if ((chunk = EVP_CIPHER_CTX_get_num(ctx))) { /* borrow chunk variable */ 337 unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx); 338 339 if (chunk >= AES_BLOCK_SIZE) 340 return 0; /* bogus value */ 341 342 if (EVP_CIPHER_CTX_is_encrypting(ctx)) 343 while (chunk < AES_BLOCK_SIZE && nbytes != 0) { 344 ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk]; 345 chunk++, nbytes--; 346 } 347 else 348 while (chunk < AES_BLOCK_SIZE && nbytes != 0) { 349 unsigned char c = *(in_arg++); 350 *(out_arg++) = c ^ ivp[chunk]; 351 ivp[chunk++] = c, nbytes--; 352 } 353 354 EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE); 355 } 356 357 if (nbytes == 0) 358 return 1; 359 360 memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE); 361 362 if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) { 363 if (!padlock_cfb_encrypt(out_arg, in_arg, cdata, chunk)) 364 return 0; 365 nbytes -= chunk; 366 } 367 368 if (nbytes) { 369 unsigned char *ivp = cdata->iv; 370 371 out_arg += chunk; 372 in_arg += chunk; 373 EVP_CIPHER_CTX_set_num(ctx, nbytes); 374 if (cdata->cword.b.encdec) { 375 cdata->cword.b.encdec = 0; 376 padlock_reload_key(); 377 padlock_aes_block(ivp, ivp, cdata); 378 cdata->cword.b.encdec = 1; 379 padlock_reload_key(); 380 while (nbytes) { 381 unsigned char c = *(in_arg++); 382 *(out_arg++) = c ^ *ivp; 383 *(ivp++) = c, nbytes--; 384 } 385 } else { 386 padlock_reload_key(); 387 padlock_aes_block(ivp, ivp, cdata); 388 padlock_reload_key(); 389 while (nbytes) { 390 *ivp = *(out_arg++) = *(in_arg++) ^ *ivp; 391 ivp++, nbytes--; 392 } 393 } 394 } 395 396 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE); 397 398 return 1; 399 } 400 401 static int 402 padlock_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, 403 const unsigned char *in_arg, size_t nbytes) 404 { 405 struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx); 406 size_t chunk; 407 408 /* 409 * ctx->num is maintained in byte-oriented modes, such as CFB and OFB... 410 */ 411 if ((chunk = EVP_CIPHER_CTX_get_num(ctx))) { /* borrow chunk variable */ 412 unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx); 413 414 if (chunk >= AES_BLOCK_SIZE) 415 return 0; /* bogus value */ 416 417 while (chunk < AES_BLOCK_SIZE && nbytes != 0) { 418 *(out_arg++) = *(in_arg++) ^ ivp[chunk]; 419 chunk++, nbytes--; 420 } 421 422 EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE); 423 } 424 425 if (nbytes == 0) 426 return 1; 427 428 memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE); 429 430 if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) { 431 if (!padlock_ofb_encrypt(out_arg, in_arg, cdata, chunk)) 432 return 0; 433 nbytes -= chunk; 434 } 435 436 if (nbytes) { 437 unsigned char *ivp = cdata->iv; 438 439 out_arg += chunk; 440 in_arg += chunk; 441 EVP_CIPHER_CTX_set_num(ctx, nbytes); 442 padlock_reload_key(); /* empirically found */ 443 padlock_aes_block(ivp, ivp, cdata); 444 padlock_reload_key(); /* empirically found */ 445 while (nbytes) { 446 *(out_arg++) = *(in_arg++) ^ *ivp; 447 ivp++, nbytes--; 448 } 449 } 450 451 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE); 452 453 return 1; 454 } 455 456 static void padlock_ctr32_encrypt_glue(const unsigned char *in, 457 unsigned char *out, size_t blocks, 458 struct padlock_cipher_data *ctx, 459 const unsigned char *ivec) 460 { 461 memcpy(ctx->iv, ivec, AES_BLOCK_SIZE); 462 padlock_ctr32_encrypt(out, in, ctx, AES_BLOCK_SIZE * blocks); 463 } 464 465 static int 466 padlock_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg, 467 const unsigned char *in_arg, size_t nbytes) 468 { 469 struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx); 470 int n = EVP_CIPHER_CTX_get_num(ctx); 471 unsigned int num; 472 473 if (n < 0) 474 return 0; 475 num = (unsigned int)n; 476 477 CRYPTO_ctr128_encrypt_ctr32(in_arg, out_arg, nbytes, 478 cdata, EVP_CIPHER_CTX_iv_noconst(ctx), 479 EVP_CIPHER_CTX_buf_noconst(ctx), &num, 480 (ctr128_f)padlock_ctr32_encrypt_glue); 481 482 EVP_CIPHER_CTX_set_num(ctx, (size_t)num); 483 return 1; 484 } 485 486 #define EVP_CIPHER_block_size_ECB AES_BLOCK_SIZE 487 #define EVP_CIPHER_block_size_CBC AES_BLOCK_SIZE 488 #define EVP_CIPHER_block_size_OFB 1 489 #define EVP_CIPHER_block_size_CFB 1 490 #define EVP_CIPHER_block_size_CTR 1 491 492 /* 493 * Declaring so many ciphers by hand would be a pain. Instead introduce a bit 494 * of preprocessor magic :-) 495 */ 496 #define DECLARE_AES_EVP(ksize, lmode, umode) \ 497 static EVP_CIPHER *_hidden_aes_##ksize##_##lmode = NULL; \ 498 static const EVP_CIPHER *padlock_aes_##ksize##_##lmode(void) \ 499 { \ 500 if (_hidden_aes_##ksize##_##lmode == NULL \ 501 && ((_hidden_aes_##ksize##_##lmode = EVP_CIPHER_meth_new(NID_aes_##ksize##_##lmode, \ 502 EVP_CIPHER_block_size_##umode, \ 503 AES_KEY_SIZE_##ksize)) \ 504 == NULL \ 505 || !EVP_CIPHER_meth_set_iv_length(_hidden_aes_##ksize##_##lmode, \ 506 AES_BLOCK_SIZE) \ 507 || !EVP_CIPHER_meth_set_flags(_hidden_aes_##ksize##_##lmode, \ 508 0 | EVP_CIPH_##umode##_MODE) \ 509 || !EVP_CIPHER_meth_set_init(_hidden_aes_##ksize##_##lmode, \ 510 padlock_aes_init_key) \ 511 || !EVP_CIPHER_meth_set_do_cipher(_hidden_aes_##ksize##_##lmode, \ 512 padlock_##lmode##_cipher) \ 513 || !EVP_CIPHER_meth_set_impl_ctx_size(_hidden_aes_##ksize##_##lmode, \ 514 sizeof(struct padlock_cipher_data) + 16) \ 515 || !EVP_CIPHER_meth_set_set_asn1_params(_hidden_aes_##ksize##_##lmode, \ 516 EVP_CIPHER_set_asn1_iv) \ 517 || !EVP_CIPHER_meth_set_get_asn1_params(_hidden_aes_##ksize##_##lmode, \ 518 EVP_CIPHER_get_asn1_iv))) { \ 519 EVP_CIPHER_meth_free(_hidden_aes_##ksize##_##lmode); \ 520 _hidden_aes_##ksize##_##lmode = NULL; \ 521 } \ 522 return _hidden_aes_##ksize##_##lmode; \ 523 } 524 525 DECLARE_AES_EVP(128, ecb, ECB) 526 DECLARE_AES_EVP(128, cbc, CBC) 527 DECLARE_AES_EVP(128, cfb, CFB) 528 DECLARE_AES_EVP(128, ofb, OFB) 529 DECLARE_AES_EVP(128, ctr, CTR) 530 531 DECLARE_AES_EVP(192, ecb, ECB) 532 DECLARE_AES_EVP(192, cbc, CBC) 533 DECLARE_AES_EVP(192, cfb, CFB) 534 DECLARE_AES_EVP(192, ofb, OFB) 535 DECLARE_AES_EVP(192, ctr, CTR) 536 537 DECLARE_AES_EVP(256, ecb, ECB) 538 DECLARE_AES_EVP(256, cbc, CBC) 539 DECLARE_AES_EVP(256, cfb, CFB) 540 DECLARE_AES_EVP(256, ofb, OFB) 541 DECLARE_AES_EVP(256, ctr, CTR) 542 543 static int 544 padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, 545 int nid) 546 { 547 /* No specific cipher => return a list of supported nids ... */ 548 if (!cipher) { 549 *nids = padlock_cipher_nids; 550 return padlock_cipher_nids_num; 551 } 552 553 /* ... or the requested "cipher" otherwise */ 554 switch (nid) { 555 case NID_aes_128_ecb: 556 *cipher = padlock_aes_128_ecb(); 557 break; 558 case NID_aes_128_cbc: 559 *cipher = padlock_aes_128_cbc(); 560 break; 561 case NID_aes_128_cfb: 562 *cipher = padlock_aes_128_cfb(); 563 break; 564 case NID_aes_128_ofb: 565 *cipher = padlock_aes_128_ofb(); 566 break; 567 case NID_aes_128_ctr: 568 *cipher = padlock_aes_128_ctr(); 569 break; 570 571 case NID_aes_192_ecb: 572 *cipher = padlock_aes_192_ecb(); 573 break; 574 case NID_aes_192_cbc: 575 *cipher = padlock_aes_192_cbc(); 576 break; 577 case NID_aes_192_cfb: 578 *cipher = padlock_aes_192_cfb(); 579 break; 580 case NID_aes_192_ofb: 581 *cipher = padlock_aes_192_ofb(); 582 break; 583 case NID_aes_192_ctr: 584 *cipher = padlock_aes_192_ctr(); 585 break; 586 587 case NID_aes_256_ecb: 588 *cipher = padlock_aes_256_ecb(); 589 break; 590 case NID_aes_256_cbc: 591 *cipher = padlock_aes_256_cbc(); 592 break; 593 case NID_aes_256_cfb: 594 *cipher = padlock_aes_256_cfb(); 595 break; 596 case NID_aes_256_ofb: 597 *cipher = padlock_aes_256_ofb(); 598 break; 599 case NID_aes_256_ctr: 600 *cipher = padlock_aes_256_ctr(); 601 break; 602 603 default: 604 /* Sorry, we don't support this NID */ 605 *cipher = NULL; 606 return 0; 607 } 608 609 return 1; 610 } 611 612 /* Prepare the encryption key for PadLock usage */ 613 static int 614 padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, 615 const unsigned char *iv, int enc) 616 { 617 struct padlock_cipher_data *cdata; 618 int key_len = EVP_CIPHER_CTX_get_key_length(ctx) * 8; 619 unsigned long mode = EVP_CIPHER_CTX_get_mode(ctx); 620 621 if (key == NULL) 622 return 0; /* ERROR */ 623 624 cdata = ALIGNED_CIPHER_DATA(ctx); 625 memset(cdata, 0, sizeof(*cdata)); 626 627 /* Prepare Control word. */ 628 if (mode == EVP_CIPH_OFB_MODE || mode == EVP_CIPH_CTR_MODE) 629 cdata->cword.b.encdec = 0; 630 else 631 cdata->cword.b.encdec = (EVP_CIPHER_CTX_is_encrypting(ctx) == 0); 632 cdata->cword.b.rounds = 10 + (key_len - 128) / 32; 633 cdata->cword.b.ksize = (key_len - 128) / 64; 634 635 switch (key_len) { 636 case 128: 637 /* 638 * PadLock can generate an extended key for AES128 in hardware 639 */ 640 memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128); 641 cdata->cword.b.keygen = 0; 642 break; 643 644 case 192: 645 case 256: 646 /* 647 * Generate an extended AES key in software. Needed for AES192/AES256 648 */ 649 /* 650 * Well, the above applies to Stepping 8 CPUs and is listed as 651 * hardware errata. They most likely will fix it at some point and 652 * then a check for stepping would be due here. 653 */ 654 if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) 655 && !enc) 656 AES_set_decrypt_key(key, key_len, &cdata->ks); 657 else 658 AES_set_encrypt_key(key, key_len, &cdata->ks); 659 /* 660 * OpenSSL C functions use byte-swapped extended key. 661 */ 662 padlock_key_bswap(&cdata->ks); 663 cdata->cword.b.keygen = 1; 664 break; 665 666 default: 667 /* ERROR */ 668 return 0; 669 } 670 671 /* 672 * This is done to cover for cases when user reuses the 673 * context for new key. The catch is that if we don't do 674 * this, padlock_eas_cipher might proceed with old key... 675 */ 676 padlock_reload_key(); 677 678 return 1; 679 } 680 681 /* ===== Random Number Generator ===== */ 682 /* 683 * This code is not engaged. The reason is that it does not comply 684 * with recommendations for VIA RNG usage for secure applications 685 * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it 686 * provide meaningful error control... 687 */ 688 /* 689 * Wrapper that provides an interface between the API and the raw PadLock 690 * RNG 691 */ 692 static int padlock_rand_bytes(unsigned char *output, int count) 693 { 694 unsigned int eax, buf; 695 696 while (count >= 8) { 697 eax = padlock_xstore(output, 0); 698 if (!(eax & (1 << 6))) 699 return 0; /* RNG disabled */ 700 /* this ---vv--- covers DC bias, Raw Bits and String Filter */ 701 if (eax & (0x1F << 10)) 702 return 0; 703 if ((eax & 0x1F) == 0) 704 continue; /* no data, retry... */ 705 if ((eax & 0x1F) != 8) 706 return 0; /* fatal failure... */ 707 output += 8; 708 count -= 8; 709 } 710 while (count > 0) { 711 eax = padlock_xstore(&buf, 3); 712 if (!(eax & (1 << 6))) 713 return 0; /* RNG disabled */ 714 /* this ---vv--- covers DC bias, Raw Bits and String Filter */ 715 if (eax & (0x1F << 10)) 716 return 0; 717 if ((eax & 0x1F) == 0) 718 continue; /* no data, retry... */ 719 if ((eax & 0x1F) != 1) 720 return 0; /* fatal failure... */ 721 *output++ = (unsigned char)buf; 722 count--; 723 } 724 OPENSSL_cleanse(&buf, sizeof(buf)); 725 726 return 1; 727 } 728 729 /* Dummy but necessary function */ 730 static int padlock_rand_status(void) 731 { 732 return 1; 733 } 734 735 /* Prepare structure for registration */ 736 static RAND_METHOD padlock_rand = { 737 NULL, /* seed */ 738 padlock_rand_bytes, /* bytes */ 739 NULL, /* cleanup */ 740 NULL, /* add */ 741 padlock_rand_bytes, /* pseudorand */ 742 padlock_rand_status, /* rand status */ 743 }; 744 745 #endif /* COMPILE_PADLOCKENG */ 746 #endif /* !OPENSSL_NO_PADLOCKENG */ 747 748 #if defined(OPENSSL_NO_PADLOCKENG) || !defined(COMPILE_PADLOCKENG) 749 #ifndef OPENSSL_NO_DYNAMIC_ENGINE 750 OPENSSL_EXPORT 751 int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns); 752 OPENSSL_EXPORT 753 int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns) 754 { 755 return 0; 756 } 757 758 IMPLEMENT_DYNAMIC_CHECK_FN() 759 #endif 760 #endif 761