/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
 *
 * Key expansion routine taken from crypto/aes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * ---------------------------------------------------------------------------
 * Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
 * All rights reserved.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 *   1. distributions of this source code include the above copyright
 *      notice, this list of conditions and the following disclaimer;
 *
 *   2. distributions in binary form include the above copyright
 *      notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other associated materials;
 *
 *   3. the copyright holder's name is not used to endorse products
 *      built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include "padlock.h"

#define AES_MIN_KEY_SIZE	16	/* in uint8_t units */
#define AES_MAX_KEY_SIZE	32	/* ditto */
#define AES_BLOCK_SIZE		16	/* ditto */
#define AES_EXTENDED_KEY_SIZE	64	/* in uint32_t units */
#define AES_EXTENDED_KEY_SIZE_B	(AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
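/*
 * How this driver fills in the control word (see aes_set_key() below);
 * the algo and interm fields are left at zero here:
 *   rounds - number of AES rounds: 10/12/14 for 128/192/256-bit keys
 *   keygen - set when the expanded key schedule is supplied by software,
 *            clear when the raw key is supplied and the ACE expands it itself
 *   encdec - clear for encryption, set for decryption
 *   ksize  - key size selector: 0 = 128, 1 = 192, 2 = 256 bits
 */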
/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries!!!
 */
struct aes_ctx {
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
	int key_length;
	u32 E[AES_EXTENDED_KEY_SIZE]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_EXTENDED_KEY_SIZE]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
};

/* ====== Key management routines ====== */

static inline uint32_t
generic_rotr32 (const uint32_t x, const unsigned bits)
{
	const unsigned n = bits % 32;
	return (x >> n) | (x << (32 - n));
}

static inline uint32_t
generic_rotl32 (const uint32_t x, const unsigned bits)
{
	const unsigned n = bits % 32;
	return (x << n) | (x >> (32 - n));
}

#define rotl generic_rotl32
#define rotr generic_rotr32

/*
 * #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
 */
static inline uint8_t
byte(const uint32_t x, const unsigned n)
{
	return x >> (n << 3);
}

#define E_KEY ctx->E
#define D_KEY ctx->D

static uint8_t pow_tab[256];
static uint8_t log_tab[256];
static uint8_t sbx_tab[256];
static uint8_t isb_tab[256];
static uint32_t rco_tab[10];
static uint32_t ft_tab[4][256];
static uint32_t it_tab[4][256];

static uint32_t fl_tab[4][256];
static uint32_t il_tab[4][256];

static inline uint8_t
f_mult (uint8_t a, uint8_t b)
{
	uint8_t aa = log_tab[a], cc = aa + log_tab[b];

	return pow_tab[cc + (cc < aa ? 1 : 0)];
}

#define ff_mult(a,b) (a && b ? f_mult(a, b) : 0)

#define f_rn(bo, bi, n, k)					\
	bo[n] = ft_tab[0][byte(bi[n],0)] ^			\
		ft_tab[1][byte(bi[(n + 1) & 3],1)] ^		\
		ft_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
		ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)

#define i_rn(bo, bi, n, k)					\
	bo[n] = it_tab[0][byte(bi[n],0)] ^			\
		it_tab[1][byte(bi[(n + 3) & 3],1)] ^		\
		it_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
		it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)

#define ls_box(x)				\
	( fl_tab[0][byte(x, 0)] ^		\
	  fl_tab[1][byte(x, 1)] ^		\
	  fl_tab[2][byte(x, 2)] ^		\
	  fl_tab[3][byte(x, 3)] )

#define f_rl(bo, bi, n, k)					\
	bo[n] = fl_tab[0][byte(bi[n],0)] ^			\
		fl_tab[1][byte(bi[(n + 1) & 3],1)] ^		\
		fl_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
		fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)

#define i_rl(bo, bi, n, k)					\
	bo[n] = il_tab[0][byte(bi[n],0)] ^			\
		il_tab[1][byte(bi[(n + 3) & 3],1)] ^		\
		il_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
		il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
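/*
 * gen_tabs() fills in the lookup tables used by the software key schedule:
 *   pow_tab/log_tab - exp/log tables for GF(2^8) with the AES polynomial 0x11b
 *   sbx_tab/isb_tab - forward and inverse S-boxes
 *   rco_tab         - key schedule round constants
 *   ft_tab/it_tab   - combined (Inv)SubBytes + (Inv)MixColumns round tables
 *   fl_tab/il_tab   - last-round tables (S-box only, no column mixing);
 *                     fl_tab also backs ls_box() in the key schedule
 */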
static void
gen_tabs (void)
{
	uint32_t i, t;
	uint8_t p, q;

	/* log and power tables for GF(2**8) finite field with
	   0x011b as modular polynomial - the simplest primitive
	   root is 0x03, used here to generate the tables */

	for (i = 0, p = 1; i < 256; ++i) {
		pow_tab[i] = (uint8_t) p;
		log_tab[p] = (uint8_t) i;

		p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0);
	}

	log_tab[1] = 0;

	for (i = 0, p = 1; i < 10; ++i) {
		rco_tab[i] = p;

		p = (p << 1) ^ (p & 0x80 ? 0x01b : 0);
	}

	for (i = 0; i < 256; ++i) {
		p = (i ? pow_tab[255 - log_tab[i]] : 0);
		q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2));
		p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2));
		sbx_tab[i] = p;
		isb_tab[p] = (uint8_t) i;
	}

	for (i = 0; i < 256; ++i) {
		p = sbx_tab[i];

		t = p;
		fl_tab[0][i] = t;
		fl_tab[1][i] = rotl (t, 8);
		fl_tab[2][i] = rotl (t, 16);
		fl_tab[3][i] = rotl (t, 24);

		t = ((uint32_t) ff_mult (2, p)) |
		    ((uint32_t) p << 8) |
		    ((uint32_t) p << 16) | ((uint32_t) ff_mult (3, p) << 24);

		ft_tab[0][i] = t;
		ft_tab[1][i] = rotl (t, 8);
		ft_tab[2][i] = rotl (t, 16);
		ft_tab[3][i] = rotl (t, 24);

		p = isb_tab[i];

		t = p;
		il_tab[0][i] = t;
		il_tab[1][i] = rotl (t, 8);
		il_tab[2][i] = rotl (t, 16);
		il_tab[3][i] = rotl (t, 24);

		t = ((uint32_t) ff_mult (14, p)) |
		    ((uint32_t) ff_mult (9, p) << 8) |
		    ((uint32_t) ff_mult (13, p) << 16) |
		    ((uint32_t) ff_mult (11, p) << 24);

		it_tab[0][i] = t;
		it_tab[1][i] = rotl (t, 8);
		it_tab[2][i] = rotl (t, 16);
		it_tab[3][i] = rotl (t, 24);
	}
}

#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)

#define imix_col(y,x)			\
	u = star_x(x);			\
	v = star_x(u);			\
	w = star_x(v);			\
	t = w ^ (x);			\
	(y) = u ^ v ^ w;		\
	(y) ^= rotr(u ^ t, 8) ^		\
	       rotr(v ^ t, 16) ^	\
	       rotr(t, 24)

/* initialise the key schedule from the user supplied key */

#define loop4(i)					\
{	t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i];	\
	t ^= E_KEY[4 * i];     E_KEY[4 * i + 4] = t;	\
	t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t;	\
	t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t;	\
	t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t;	\
}

#define loop6(i)					\
{	t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i];	\
	t ^= E_KEY[6 * i];     E_KEY[6 * i + 6] = t;	\
	t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t;	\
	t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t;	\
	t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t;	\
	t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t;	\
	t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t;	\
}

#define loop8(i)					\
{	t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i];	\
	t ^= E_KEY[8 * i];     E_KEY[8 * i + 8] = t;	\
	t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t;	\
	t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t;	\
	t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t;	\
	t  = E_KEY[8 * i + 4] ^ ls_box(t);		\
	E_KEY[8 * i + 12] = t;				\
	t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t;	\
	t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t;	\
	t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t;	\
}

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	         as it's possible that the capability will be
	         added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}
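/*
 * Software key expansion, used when the ACE cannot expand the key itself:
 * loop4/loop6/loop8 above are one iteration of the FIPS-197 key expansion
 * for 128/192/256-bit keys and are run 10, 8 and 7 times respectively.
 * The decryption schedule is derived from the encryption one by running
 * each round key through imix_col() (InvMixColumns, the "equivalent
 * inverse cipher" construction) and then reversing the round-key order,
 * which is the layout the PadLock engine expects.
 */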
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len, u32 *flags)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	uint32_t i, t, u, v, w;
	uint32_t P[AES_EXTENDED_KEY_SIZE];
	uint32_t rounds;

	if (key_len != 16 && key_len != 24 && key_len != 32) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	ctx->key_length = key_len;

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	E_KEY[0] = le32_to_cpu(key[0]);
	E_KEY[1] = le32_to_cpu(key[1]);
	E_KEY[2] = le32_to_cpu(key[2]);
	E_KEY[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		return 0;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	switch (key_len) {
	case 16:
		t = E_KEY[3];
		for (i = 0; i < 10; ++i)
			loop4 (i);
		break;

	case 24:
		E_KEY[4] = le32_to_cpu(key[4]);
		t = E_KEY[5] = le32_to_cpu(key[5]);
		for (i = 0; i < 8; ++i)
			loop6 (i);
		break;

	case 32:
		E_KEY[4] = le32_to_cpu(key[4]);
		E_KEY[5] = le32_to_cpu(key[5]);
		E_KEY[6] = le32_to_cpu(key[6]);
		t = E_KEY[7] = le32_to_cpu(key[7]);
		for (i = 0; i < 7; ++i)
			loop8 (i);
		break;
	}

	D_KEY[0] = E_KEY[0];
	D_KEY[1] = E_KEY[1];
	D_KEY[2] = E_KEY[2];
	D_KEY[3] = E_KEY[3];

	for (i = 4; i < key_len + 24; ++i) {
		imix_col (D_KEY[i], E_KEY[i]);
	}

	/* PadLock needs a different format of the decryption key. */
	rounds = 10 + (key_len - 16) / 4;

	for (i = 0; i < rounds; i++) {
		P[((i + 1) * 4) + 0] = D_KEY[((rounds - i - 1) * 4) + 0];
		P[((i + 1) * 4) + 1] = D_KEY[((rounds - i - 1) * 4) + 1];
		P[((i + 1) * 4) + 2] = D_KEY[((rounds - i - 1) * 4) + 2];
		P[((i + 1) * 4) + 3] = D_KEY[((rounds - i - 1) * 4) + 3];
	}

	P[0] = E_KEY[(rounds * 4) + 0];
	P[1] = E_KEY[(rounds * 4) + 1];
	P[2] = E_KEY[(rounds * 4) + 2];
	P[3] = E_KEY[(rounds * 4) + 3];

	memcpy(D_KEY, P, AES_EXTENDED_KEY_SIZE_B);

	return 0;
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
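/*
 * Register interface of the REP XCRYPT instructions (encoded below as raw
 * opcode bytes, presumably because contemporary assemblers did not know the
 * mnemonics): ESI = source, EDI = destination, EBX = key, EDX = control word,
 * ECX = block count, plus EAX = IV for the CBC variant.  The driver keeps the
 * key schedules, control words and data buffers 16-byte aligned
 * (PADLOCK_ALIGNMENT), which appears to be a requirement of the engine.
 * The "pushfl; popfl" before each call writes EFLAGS, which per the
 * "enforce key reload" comments makes the engine re-read the key material
 * from memory.  The CBC variant leaves EAX pointing at the IV to chain into
 * the next call; aes_encrypt_cbc() copies it back into desc->info.
 */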
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	asm volatile ("pushfl; popfl");		/* enforce key reload. */
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	/* Enforce key reload. */
	asm volatile ("pushfl; popfl");
	/* rep xcryptcbc */
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
}

static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
				    const u8 *in, unsigned int nbytes)
{
	struct aes_ctx *ctx = aes_ctx(desc->tfm);
	padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt,
			   nbytes / AES_BLOCK_SIZE);
	return nbytes & ~(AES_BLOCK_SIZE - 1);
}

static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
				    const u8 *in, unsigned int nbytes)
{
	struct aes_ctx *ctx = aes_ctx(desc->tfm);
	padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt,
			   nbytes / AES_BLOCK_SIZE);
	return nbytes & ~(AES_BLOCK_SIZE - 1);
}

static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
				    const u8 *in, unsigned int nbytes)
{
	struct aes_ctx *ctx = aes_ctx(desc->tfm);
	u8 *iv;

	iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info,
				&ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE);
	memcpy(desc->info, iv, AES_BLOCK_SIZE);

	return nbytes & ~(AES_BLOCK_SIZE - 1);
}

static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
				    const u8 *in, unsigned int nbytes)
{
	struct aes_ctx *ctx = aes_ctx(desc->tfm);
	padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt,
			   nbytes / AES_BLOCK_SIZE);
	return nbytes & ~(AES_BLOCK_SIZE - 1);
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
			.cia_encrypt_ecb	= aes_encrypt_ecb,
			.cia_decrypt_ecb	= aes_decrypt_ecb,
			.cia_encrypt_cbc	= aes_encrypt_cbc,
			.cia_decrypt_cbc	= aes_decrypt_cbc,
		}
	}
};
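/*
 * Module init: probe for the PadLock ACE via the CPU feature flags, build
 * the software S-box/round tables (needed whenever the key schedule has to
 * be generated in software), and register the cipher with the crypto API.
 */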
static int __init padlock_init(void)
{
	int ret;

	if (!cpu_has_xcrypt) {
		printk(KERN_ERR PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	gen_tabs();
	if ((ret = crypto_register_alg(&aes_alg))) {
		printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
		return ret;
	}

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	return ret;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes-padlock");

/* This module used to be called padlock. */
MODULE_ALIAS("padlock");