/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
 *
 * Key expansion routine taken from crypto/aes_generic.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * ---------------------------------------------------------------------------
 * Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
 * All rights reserved.
 *
 * LICENSE TERMS
 *
 * The free distribution and use of this software in both source and binary
 * form is allowed (with or without changes) provided that:
 *
 *   1. distributions of this source code include the above copyright
 *      notice, this list of conditions and the following disclaimer;
 *
 *   2. distributions in binary form include the above copyright
 *      notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other associated materials;
 *
 *   3. the copyright holder's name is not used to endorse products
 *      built using this software without specific written permission.
 *
 * ALTERNATIVELY, provided that this notice is retained in full, this product
 * may be distributed under the terms of the GNU General Public License (GPL),
 * in which case the provisions of the GPL apply INSTEAD OF those given above.
 *
 * DISCLAIMER
 *
 * This software is provided 'as is' with no explicit or implied warranties
 * in respect of its properties, including, but not limited to, correctness
 * and/or fitness for purpose.
 * ---------------------------------------------------------------------------
 */

#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include "padlock.h"

#define AES_MIN_KEY_SIZE	16	/* in uint8_t units */
#define AES_MAX_KEY_SIZE	32	/* ditto */
#define AES_BLOCK_SIZE		16	/* ditto */
#define AES_EXTENDED_KEY_SIZE	64	/* in uint32_t units */
#define AES_EXTENDED_KEY_SIZE_B	(AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))

/* Control word. */
struct cword {
    unsigned int __attribute__ ((__packed__))
        rounds:4,
        algo:3,
        keygen:1,
        interm:1,
        encdec:1,
        ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
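/*
 * How the fields are used in this driver (a reading of the code below,
 * not an authoritative description of the hardware): rounds holds the
 * AES round count (10/12/14), encdec selects decryption when set, ksize
 * encodes the key length (0/1/2 for 128/192/256-bit keys), and keygen is
 * set when the expanded key is supplied by software instead of being
 * derived by the engine itself.
 */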
/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries!!!
 */
struct aes_ctx {
    struct {
        struct cword encrypt;
        struct cword decrypt;
    } cword;
    u32 *D;
    int key_length;
    u32 E[AES_EXTENDED_KEY_SIZE]
        __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
    u32 d_data[AES_EXTENDED_KEY_SIZE]
        __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
};

/* ====== Key management routines ====== */

static inline uint32_t
generic_rotr32(const uint32_t x, const unsigned bits)
{
    const unsigned n = bits % 32;
    return (x >> n) | (x << (32 - n));
}

static inline uint32_t
generic_rotl32(const uint32_t x, const unsigned bits)
{
    const unsigned n = bits % 32;
    return (x << n) | (x >> (32 - n));
}

#define rotl generic_rotl32
#define rotr generic_rotr32

/*
 * #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
 */
static inline uint8_t
byte(const uint32_t x, const unsigned n)
{
    return x >> (n << 3);
}

#define E_KEY ctx->E
#define D_KEY ctx->D

static uint8_t pow_tab[256];
static uint8_t log_tab[256];
static uint8_t sbx_tab[256];
static uint8_t isb_tab[256];
static uint32_t rco_tab[10];
static uint32_t ft_tab[4][256];
static uint32_t it_tab[4][256];

static uint32_t fl_tab[4][256];
static uint32_t il_tab[4][256];

static inline uint8_t
f_mult(uint8_t a, uint8_t b)
{
    uint8_t aa = log_tab[a], cc = aa + log_tab[b];

    return pow_tab[cc + (cc < aa ? 1 : 0)];
}

#define ff_mult(a,b)	(a && b ? f_mult(a, b) : 0)
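/*
 * A note on the multiply above (an editorial aside, not part of the
 * original sources): f_mult() multiplies in GF(2^8) by adding discrete
 * logarithms, pow_tab[(log_tab[a] + log_tab[b]) mod 255], with the
 * mod-255 wraparound folded into the "cc < aa" carry test. For example,
 * with the AES polynomial 0x11b, ff_mult(2, 0x80) = 0x1b, since
 * 0x80 << 1 = 0x100 and 0x100 ^ 0x11b = 0x1b.
 */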
#define f_rn(bo, bi, n, k)				\
    bo[n] = ft_tab[0][byte(bi[n],0)] ^			\
        ft_tab[1][byte(bi[(n + 1) & 3],1)] ^		\
        ft_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
        ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)

#define i_rn(bo, bi, n, k)				\
    bo[n] = it_tab[0][byte(bi[n],0)] ^			\
        it_tab[1][byte(bi[(n + 3) & 3],1)] ^		\
        it_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
        it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)

#define ls_box(x)			\
    (fl_tab[0][byte(x, 0)] ^		\
     fl_tab[1][byte(x, 1)] ^		\
     fl_tab[2][byte(x, 2)] ^		\
     fl_tab[3][byte(x, 3)])

#define f_rl(bo, bi, n, k)				\
    bo[n] = fl_tab[0][byte(bi[n],0)] ^			\
        fl_tab[1][byte(bi[(n + 1) & 3],1)] ^		\
        fl_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
        fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)

#define i_rl(bo, bi, n, k)				\
    bo[n] = il_tab[0][byte(bi[n],0)] ^			\
        il_tab[1][byte(bi[(n + 3) & 3],1)] ^		\
        il_tab[2][byte(bi[(n + 2) & 3],2)] ^		\
        il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)

static void
gen_tabs(void)
{
    uint32_t i, t;
    uint8_t p, q;

    /* log and power tables for GF(2**8) finite field with
       0x011b as modular polynomial - the simplest primitive
       root is 0x03, used here to generate the tables */

    for (i = 0, p = 1; i < 256; ++i) {
        pow_tab[i] = (uint8_t) p;
        log_tab[p] = (uint8_t) i;

        p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0);
    }

    log_tab[1] = 0;

    for (i = 0, p = 1; i < 10; ++i) {
        rco_tab[i] = p;

        p = (p << 1) ^ (p & 0x80 ? 0x01b : 0);
    }

    for (i = 0; i < 256; ++i) {
        p = (i ? pow_tab[255 - log_tab[i]] : 0);
        q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2));
        p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2));
        sbx_tab[i] = p;
        isb_tab[p] = (uint8_t) i;
    }

    for (i = 0; i < 256; ++i) {
        p = sbx_tab[i];

        t = p;
        fl_tab[0][i] = t;
        fl_tab[1][i] = rotl(t, 8);
        fl_tab[2][i] = rotl(t, 16);
        fl_tab[3][i] = rotl(t, 24);

        t = ((uint32_t) ff_mult(2, p)) |
            ((uint32_t) p << 8) |
            ((uint32_t) p << 16) | ((uint32_t) ff_mult(3, p) << 24);

        ft_tab[0][i] = t;
        ft_tab[1][i] = rotl(t, 8);
        ft_tab[2][i] = rotl(t, 16);
        ft_tab[3][i] = rotl(t, 24);

        p = isb_tab[i];

        t = p;
        il_tab[0][i] = t;
        il_tab[1][i] = rotl(t, 8);
        il_tab[2][i] = rotl(t, 16);
        il_tab[3][i] = rotl(t, 24);

        t = ((uint32_t) ff_mult(14, p)) |
            ((uint32_t) ff_mult(9, p) << 8) |
            ((uint32_t) ff_mult(13, p) << 16) |
            ((uint32_t) ff_mult(11, p) << 24);

        it_tab[0][i] = t;
        it_tab[1][i] = rotl(t, 8);
        it_tab[2][i] = rotl(t, 16);
        it_tab[3][i] = rotl(t, 24);
    }
}
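/*
 * The star_x()/imix_col() macros below implement the AES InvMixColumns
 * transform on a packed 32-bit column. aes_set_key() applies them to
 * every round key except the first and last when deriving the software
 * decryption schedule (this note is an editorial summary of the code
 * that follows, not an original comment).
 */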
#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)

#define imix_col(y,x)		\
    u = star_x(x);		\
    v = star_x(u);		\
    w = star_x(v);		\
    t = w ^ (x);		\
    (y) = u ^ v ^ w;		\
    (y) ^= rotr(u ^ t, 8) ^	\
        rotr(v ^ t, 16) ^	\
        rotr(t, 24)

/* initialise the key schedule from the user supplied key */

#define loop4(i)						\
{   t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i];			\
    t ^= E_KEY[4 * i];     E_KEY[4 * i + 4] = t;		\
    t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t;		\
    t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t;		\
    t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t;		\
}

#define loop6(i)						\
{   t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i];			\
    t ^= E_KEY[6 * i];     E_KEY[6 * i + 6] = t;		\
    t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t;		\
    t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t;		\
    t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t;		\
    t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t;		\
    t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t;		\
}

#define loop8(i)						\
{   t = rotr(t, 8); t = ls_box(t) ^ rco_tab[i];			\
    t ^= E_KEY[8 * i];     E_KEY[8 * i + 8] = t;		\
    t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t;		\
    t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t;		\
    t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t;		\
    t = E_KEY[8 * i + 4] ^ ls_box(t);				\
    E_KEY[8 * i + 12] = t;					\
    t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t;		\
    t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t;		\
    t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t;		\
}

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
    /* TODO: We should check the actual CPU model/stepping
       as it's possible that the capability will be
       added in the next CPU revisions. */
    if (key_len == 16)
        return 1;
    return 0;
}

static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
    unsigned long addr = (unsigned long)ctx;
    unsigned long align = PADLOCK_ALIGNMENT;

    if (align <= crypto_tfm_ctx_alignment())
        align = 1;
    return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
    return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
    return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
               unsigned int key_len)
{
    struct aes_ctx *ctx = aes_ctx(tfm);
    const __le32 *key = (const __le32 *)in_key;
    u32 *flags = &tfm->crt_flags;
    uint32_t i, t, u, v, w;
    uint32_t P[AES_EXTENDED_KEY_SIZE];
    uint32_t rounds;

    if (key_len % 8) {
        *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
        return -EINVAL;
    }

    ctx->key_length = key_len;

    /*
     * If the hardware is capable of generating the extended key
     * itself we must supply the plain key for both encryption
     * and decryption.
     */
    ctx->D = ctx->E;

    E_KEY[0] = le32_to_cpu(key[0]);
    E_KEY[1] = le32_to_cpu(key[1]);
    E_KEY[2] = le32_to_cpu(key[2]);
    E_KEY[3] = le32_to_cpu(key[3]);

    /* Prepare control words. */
    memset(&ctx->cword, 0, sizeof(ctx->cword));

    ctx->cword.decrypt.encdec = 1;
    ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
    ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
    ctx->cword.encrypt.ksize = (key_len - 16) / 8;
    ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

    /* Don't generate extended keys if the hardware can do it. */
    if (aes_hw_extkey_available(key_len))
        return 0;

    ctx->D = ctx->d_data;
    ctx->cword.encrypt.keygen = 1;
    ctx->cword.decrypt.keygen = 1;

    switch (key_len) {
    case 16:
        t = E_KEY[3];
        for (i = 0; i < 10; ++i)
            loop4(i);
        break;

    case 24:
        E_KEY[4] = le32_to_cpu(key[4]);
        t = E_KEY[5] = le32_to_cpu(key[5]);
        for (i = 0; i < 8; ++i)
            loop6(i);
        break;

    case 32:
        E_KEY[4] = le32_to_cpu(key[4]);
        E_KEY[5] = le32_to_cpu(key[5]);
        E_KEY[6] = le32_to_cpu(key[6]);
        t = E_KEY[7] = le32_to_cpu(key[7]);
        for (i = 0; i < 7; ++i)
            loop8(i);
        break;
    }

    D_KEY[0] = E_KEY[0];
    D_KEY[1] = E_KEY[1];
    D_KEY[2] = E_KEY[2];
    D_KEY[3] = E_KEY[3];

    for (i = 4; i < key_len + 24; ++i) {
        imix_col(D_KEY[i], E_KEY[i]);
    }

    /* PadLock needs a different format of the decryption key. */
    rounds = 10 + (key_len - 16) / 4;

    for (i = 0; i < rounds; i++) {
        P[((i + 1) * 4) + 0] = D_KEY[((rounds - i - 1) * 4) + 0];
        P[((i + 1) * 4) + 1] = D_KEY[((rounds - i - 1) * 4) + 1];
        P[((i + 1) * 4) + 2] = D_KEY[((rounds - i - 1) * 4) + 2];
        P[((i + 1) * 4) + 3] = D_KEY[((rounds - i - 1) * 4) + 3];
    }

    P[0] = E_KEY[(rounds * 4) + 0];
    P[1] = E_KEY[(rounds * 4) + 1];
    P[2] = E_KEY[(rounds * 4) + 2];
    P[3] = E_KEY[(rounds * 4) + 3];

    memcpy(D_KEY, P, AES_EXTENDED_KEY_SIZE_B);

    return 0;
}

/* ====== Encryption/decryption routines ====== */
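/*
 * Register conventions for the xcrypt instructions, as encoded in the
 * asm constraints below (an editorial note, not from the original
 * comments): ESI points at the source, EDI at the destination, EDX at
 * the control word, EBX at the key material, and ECX holds the block
 * count. The 0xf3,0x0f,0xa7,0xc8 byte sequence is REP XCRYPTECB,
 * presumably emitted as raw bytes so the file also builds with
 * assemblers that lack the PadLock mnemonics.
 */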
/* This is the real call to PadLock. */
static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
                  void *control_word)
{
    asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
              : "+S"(input), "+D"(output)
              : "d"(control_word), "b"(key), "c"(1));
}

static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
{
    u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
    u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

    memcpy(tmp, in, AES_BLOCK_SIZE);
    padlock_xcrypt(tmp, out, key, cword);
}

static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
                 struct cword *cword)
{
    /* Rewriting EFLAGS enforces a reload of the control word
       and key on the next xcrypt. */
    asm volatile ("pushfl; popfl");

    /* padlock_xcrypt requires at least two blocks of data;
       if the single input block is the last one on its page,
       the engine's read-ahead could fault, so crypt it from a
       bounce buffer instead. */
    if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
               (PAGE_SIZE - 1)))) {
        aes_crypt_copy(in, out, key, cword);
        return;
    }

    padlock_xcrypt(in, out, key, cword);
}

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
                      void *control_word, u32 count)
{
    if (count == 1) {
        aes_crypt(input, output, key, control_word);
        return;
    }

    asm volatile ("pushfl; popfl");		/* enforce key reload. */
    /* If the block count is odd, crypt one block first, then the
       remaining even number of blocks in a single rep. */
    asm volatile ("test $1, %%cl;"
              "je 1f;"
              "lea -1(%%ecx), %%eax;"
              "mov $1, %%ecx;"
              ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
              "mov %%eax, %%ecx;"
              "1:"
              ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
              : "+S"(input), "+D"(output)
              : "d"(control_word), "b"(key), "c"(count)
              : "ax");
}
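/*
 * REP XCRYPTCBC (0xf3,0x0f,0xa7,0xd0) takes the IV pointer in EAX and
 * leaves EAX pointing at the last ciphertext block when it finishes,
 * which is why the helper below returns the updated iv for the caller
 * to copy back into walk.iv (an editorial note derived from the
 * "+a" (iv) constraint and its use in cbc_aes_encrypt()).
 */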
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
                     u8 *iv, void *control_word, u32 count)
{
    /* Enforce key reload. */
    asm volatile ("pushfl; popfl");
    /* rep xcryptcbc */
    asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
              : "+S" (input), "+D" (output), "+a" (iv)
              : "d" (control_word), "b" (key), "c" (count));
    return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
    struct aes_ctx *ctx = aes_ctx(tfm);
    aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
    struct aes_ctx *ctx = aes_ctx(tfm);
    aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
}

static struct crypto_alg aes_alg = {
    .cra_name		= "aes",
    .cra_driver_name	= "aes-padlock",
    .cra_priority		= PADLOCK_CRA_PRIORITY,
    .cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
    .cra_blocksize		= AES_BLOCK_SIZE,
    .cra_ctxsize		= sizeof(struct aes_ctx),
    .cra_alignmask		= PADLOCK_ALIGNMENT - 1,
    .cra_module		= THIS_MODULE,
    .cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
    .cra_u			= {
        .cipher = {
            .cia_min_keysize	= AES_MIN_KEY_SIZE,
            .cia_max_keysize	= AES_MAX_KEY_SIZE,
            .cia_setkey		= aes_set_key,
            .cia_encrypt		= aes_encrypt,
            .cia_decrypt		= aes_decrypt,
        }
    }
};

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
               struct scatterlist *dst, struct scatterlist *src,
               unsigned int nbytes)
{
    struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
    struct blkcipher_walk walk;
    int err;

    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt(desc, &walk);

    while ((nbytes = walk.nbytes)) {
        padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
                   ctx->E, &ctx->cword.encrypt,
                   nbytes / AES_BLOCK_SIZE);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = blkcipher_walk_done(desc, &walk, nbytes);
    }

    return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
               struct scatterlist *dst, struct scatterlist *src,
               unsigned int nbytes)
{
    struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
    struct blkcipher_walk walk;
    int err;

    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt(desc, &walk);

    while ((nbytes = walk.nbytes)) {
        padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
                   ctx->D, &ctx->cword.decrypt,
                   nbytes / AES_BLOCK_SIZE);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = blkcipher_walk_done(desc, &walk, nbytes);
    }

    return err;
}

static struct crypto_alg ecb_aes_alg = {
    .cra_name		= "ecb(aes)",
    .cra_driver_name	= "ecb-aes-padlock",
    .cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
    .cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
    .cra_blocksize		= AES_BLOCK_SIZE,
    .cra_ctxsize		= sizeof(struct aes_ctx),
    .cra_alignmask		= PADLOCK_ALIGNMENT - 1,
    .cra_type		= &crypto_blkcipher_type,
    .cra_module		= THIS_MODULE,
    .cra_list		= LIST_HEAD_INIT(ecb_aes_alg.cra_list),
    .cra_u			= {
        .blkcipher = {
            .min_keysize	= AES_MIN_KEY_SIZE,
            .max_keysize	= AES_MAX_KEY_SIZE,
            .setkey		= aes_set_key,
            .encrypt	= ecb_aes_encrypt,
            .decrypt	= ecb_aes_decrypt,
        }
    }
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
               struct scatterlist *dst, struct scatterlist *src,
               unsigned int nbytes)
{
    struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
    struct blkcipher_walk walk;
    int err;

    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt(desc, &walk);

    while ((nbytes = walk.nbytes)) {
        u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
                        walk.dst.virt.addr, ctx->E,
                        walk.iv, &ctx->cword.encrypt,
                        nbytes / AES_BLOCK_SIZE);
        memcpy(walk.iv, iv, AES_BLOCK_SIZE);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = blkcipher_walk_done(desc, &walk, nbytes);
    }

    return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
               struct scatterlist *dst, struct scatterlist *src,
               unsigned int nbytes)
{
    struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
    struct blkcipher_walk walk;
    int err;

    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt(desc, &walk);

    while ((nbytes = walk.nbytes)) {
        padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
                   ctx->D, walk.iv, &ctx->cword.decrypt,
                   nbytes / AES_BLOCK_SIZE);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = blkcipher_walk_done(desc, &walk, nbytes);
    }

    return err;
}

static struct crypto_alg cbc_aes_alg = {
    .cra_name		= "cbc(aes)",
    .cra_driver_name	= "cbc-aes-padlock",
    .cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
    .cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
    .cra_blocksize		= AES_BLOCK_SIZE,
    .cra_ctxsize		= sizeof(struct aes_ctx),
    .cra_alignmask		= PADLOCK_ALIGNMENT - 1,
    .cra_type		= &crypto_blkcipher_type,
    .cra_module		= THIS_MODULE,
    .cra_list		= LIST_HEAD_INIT(cbc_aes_alg.cra_list),
    .cra_u			= {
        .blkcipher = {
            .min_keysize	= AES_MIN_KEY_SIZE,
            .max_keysize	= AES_MAX_KEY_SIZE,
            .ivsize		= AES_BLOCK_SIZE,
            .setkey		= aes_set_key,
            .encrypt	= cbc_aes_encrypt,
            .decrypt	= cbc_aes_decrypt,
        }
    }
};
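/*
 * Module plumbing. The cra_priority values above (a note on intent, not
 * taken from the original comments) let the crypto core prefer these
 * hardware-backed implementations over the plain-C "aes" generic driver
 * whenever both are registered.
 */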
static int __init padlock_init(void)
{
    int ret;

    if (!cpu_has_xcrypt) {
        printk(KERN_ERR PFX "VIA PadLock not detected.\n");
        return -ENODEV;
    }

    if (!cpu_has_xcrypt_enabled) {
        printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
        return -ENODEV;
    }

    gen_tabs();
    if ((ret = crypto_register_alg(&aes_alg)))
        goto aes_err;

    if ((ret = crypto_register_alg(&ecb_aes_alg)))
        goto ecb_aes_err;

    if ((ret = crypto_register_alg(&cbc_aes_alg)))
        goto cbc_aes_err;

    printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

out:
    return ret;

cbc_aes_err:
    crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
    crypto_unregister_alg(&aes_alg);
aes_err:
    printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
    goto out;
}

static void __exit padlock_fini(void)
{
    crypto_unregister_alg(&cbc_aes_alg);
    crypto_unregister_alg(&ecb_aes_alg);
    crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");