/*
 * Glue Code for AVX assembler version of Twofish Cipher
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *	<Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>
#include <asm/crypto/twofish.h>
#include <asm/crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#include <crypto/scatterwalk.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

/* Number of blocks processed in one call to the 8-way AVX assembler code. */
#define TWOFISH_PARALLEL_BLOCKS 8

/*
 * Encrypt three blocks with the 3-way assembler implementation.
 * The 'false' argument selects plain encryption (no XOR of dst into the
 * result, i.e. not CBC-style chaining).
 */
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
					const u8 *src)
{
	__twofish_enc_blk_3way(ctx, dst, src, false);
}

/* 8-way parallel cipher functions (implemented in AVX assembler) */
asmlinkage void __twofish_enc_blk_8way(struct twofish_ctx *ctx, u8 *dst,
				       const u8 *src, bool xor);
asmlinkage void twofish_dec_blk_8way(struct twofish_ctx *ctx, u8 *dst,
				     const u8 *src);
/* Encrypt TWOFISH_PARALLEL_BLOCKS blocks (plain ECB-style, no XOR). */
static inline void twofish_enc_blk_xway(struct twofish_ctx *ctx, u8 *dst,
					const u8 *src)
{
	__twofish_enc_blk_8way(ctx, dst, src, false);
}

/*
 * Encrypt TWOFISH_PARALLEL_BLOCKS blocks and XOR the result into dst.
 * Used by the CTR path, where dst already holds the counter keystream input.
 */
static inline void twofish_enc_blk_xway_xor(struct twofish_ctx *ctx, u8 *dst,
					    const u8 *src)
{
	__twofish_enc_blk_8way(ctx, dst, src, true);
}

/* Decrypt TWOFISH_PARALLEL_BLOCKS blocks. */
static inline void twofish_dec_blk_xway(struct twofish_ctx *ctx, u8 *dst,
					const u8 *src)
{
	twofish_dec_blk_8way(ctx, dst, src);
}

/*
 * CBC decryption for a full parallel batch.
 *
 * The previous ciphertext blocks are saved to 'ivs' before the in-place
 * 8-way decrypt, because src and dst may alias: once the decrypt runs,
 * the ciphertext needed for the chaining XOR would be gone.  Block 0's
 * chaining XOR is done by the glue_helper framework using the IV.
 */
static void twofish_dec_blk_cbc_xway(void *ctx, u128 *dst, const u128 *src)
{
	u128 ivs[TWOFISH_PARALLEL_BLOCKS - 1];
	unsigned int j;

	for (j = 0; j < TWOFISH_PARALLEL_BLOCKS - 1; j++)
		ivs[j] = src[j];

	twofish_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);

	for (j = 0; j < TWOFISH_PARALLEL_BLOCKS - 1; j++)
		u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
}

/*
 * CTR mode for a full parallel batch: build the big-endian counter blocks
 * (incrementing *iv once per block), then encrypt them and XOR the
 * keystream into dst in one 8-way call.  If src and dst differ, the
 * plaintext is copied to dst first so the XOR variant can work in place.
 */
static void twofish_enc_blk_ctr_xway(void *ctx, u128 *dst, const u128 *src,
				     u128 *iv)
{
	be128 ctrblks[TWOFISH_PARALLEL_BLOCKS];
	unsigned int i;

	for (i = 0; i < TWOFISH_PARALLEL_BLOCKS; i++) {
		if (dst != src)
			dst[i] = src[i];

		u128_to_be128(&ctrblks[i], iv);
		u128_inc(iv);
	}

	twofish_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks);
}

/*
 * glue_helper dispatch tables: each lists implementations from widest to
 * narrowest (8-way AVX, 3-way, single block); the framework picks the
 * widest one that fits the remaining data.  fpu_blocks_limit is the
 * minimum batch size for which enabling the FPU/AVX state is worthwhile.
 */
static const struct common_glue_ctx twofish_enc = {
	.num_funcs = 3,
	.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = TWOFISH_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_xway) }
	}, {
		.num_blocks = 3,
		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) }
	} }
};

static const struct common_glue_ctx twofish_ctr = {
	.num_funcs = 3,
	.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = TWOFISH_PARALLEL_BLOCKS,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_xway) }
	}, {
		.num_blocks = 3,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) }
	} }
};

static const struct common_glue_ctx twofish_dec = {
	.num_funcs = 3,
	.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = TWOFISH_PARALLEL_BLOCKS,
		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_xway) }
	}, {
		.num_blocks = 3,
		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) }
	}, {
		.num_blocks = 1,
		.fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) }
	} }
};

static const struct common_glue_ctx twofish_dec_cbc = {
	.num_funcs = 3,
	.fpu_blocks_limit = TWOFISH_PARALLEL_BLOCKS,

	.funcs = { {
		.num_blocks = TWOFISH_PARALLEL_BLOCKS,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_xway) }
	}, {
		.num_blocks = 3,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) }
	}, {
		.num_blocks = 1,
		.fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) }
	} }
};

/* blkcipher entry points: thin shims into the glue_helper framework. */

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes);
}

/*
 * CBC encryption is inherently sequential (each block depends on the
 * previous ciphertext), so only the single-block cipher is used.
 */
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc,
				       dst, src, nbytes);
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src,
				       nbytes);
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes);
}

/*
 * Lazily enable kernel FPU/AVX state; a no-op if already enabled or if
 * nbytes is below the parallel threshold.  Returns the new enabled state,
 * which the caller must thread through to twofish_fpu_end().
 */
static inline bool twofish_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	return glue_fpu_begin(TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS, NULL,
			      fpu_enabled, nbytes);
}

static inline void twofish_fpu_end(bool fpu_enabled)
{
	glue_fpu_end(fpu_enabled);
}

/* Per-request state passed to the LRW/XTS crypt callbacks. */
struct crypt_priv {
	struct twofish_ctx *ctx;
	bool fpu_enabled;	/* tracks lazy FPU activation across calls */
};

/*
 * LRW/XTS in-place encryption callback.  Processes a full 8-block batch
 * with the AVX code when possible, otherwise falls back to 3-way and then
 * single-block processing for the tail.
 */
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = TF_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
		twofish_enc_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
		twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst);

	nbytes %= bsize * 3;

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		twofish_enc_blk(ctx->ctx, srcdst, srcdst);
}

/* LRW/XTS in-place decryption callback; mirrors encrypt_callback(). */
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = TF_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
		twofish_dec_blk_xway(ctx->ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
		twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst);

	nbytes %= bsize * 3;

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		twofish_dec_blk(ctx->ctx, srcdst, srcdst);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[TWOFISH_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->twofish_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	/* Must not sleep while the FPU state is held by the callbacks. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	twofish_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[TWOFISH_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->twofish_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	/* Must not sleep while the FPU state is held by the callbacks. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	twofish_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[TWOFISH_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	/* Must not sleep while the FPU state is held by the callbacks. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	twofish_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[TWOFISH_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->crypt_ctx,
		.fpu_enabled = false,
	};
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		/* The tweak is always encrypted with the second (tweak) key. */
		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	/* Must not sleep while the FPU state is held by the callbacks. */
	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = xts_crypt(desc, dst, src, nbytes, &req);
	twofish_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

/*
 * Algorithm registrations.  The first five "__"-prefixed entries are the
 * internal synchronous blkcipher implementations; the last five are the
 * public async (ABLKCIPHER) wrappers built on the ablk_helper, which
 * defer to cryptd when the FPU is not usable in the caller's context.
 */
static struct crypto_alg twofish_algs[10] = { {
	.cra_name		= "__ecb-twofish-avx",
	.cra_driver_name	= "__driver-ecb-twofish-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "__cbc-twofish-avx",
	.cra_driver_name	= "__driver-cbc-twofish-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "__ctr-twofish-avx",
	.cra_driver_name	= "__driver-ctr-twofish-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	/* CTR is a stream mode: blocksize 1 allows arbitrary lengths. */
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "__lrw-twofish-avx",
	.cra_driver_name	= "__driver-lrw-twofish-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_twofish_exit_tfm,
	.cra_u = {
		.blkcipher = {
			/* LRW key = cipher key + one block of tweak key. */
			.min_keysize	= TF_MIN_KEY_SIZE +
					  TF_BLOCK_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE +
					  TF_BLOCK_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= lrw_twofish_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-twofish-avx",
	.cra_driver_name	= "__driver-xts-twofish-avx",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			/* XTS uses two full keys: data key + tweak key. */
			.min_keysize	= TF_MIN_KEY_SIZE * 2,
			.max_keysize	= TF_MAX_KEY_SIZE * 2,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= xts_twofish_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
}, {
	.cra_name		= "ecb(twofish)",
	.cra_driver_name	= "ecb-twofish-avx",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(twofish)",
	.cra_driver_name	= "cbc-twofish-avx",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			/*
			 * CBC encryption is serial anyway, so skip the
			 * async/cryptd round-trip and run it directly.
			 */
			.encrypt	= __ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "ctr(twofish)",
	.cra_driver_name	= "ctr-twofish-avx",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			/* CTR en-/decryption are the same operation. */
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_encrypt,
			.geniv		= "chainiv",
		},
	},
}, {
	.cra_name		= "lrw(twofish)",
	.cra_driver_name	= "lrw-twofish-avx",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE +
					  TF_BLOCK_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE +
					  TF_BLOCK_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(twofish)",
	.cra_driver_name	= "xts-twofish-avx",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE * 2,
			.max_keysize	= TF_MAX_KEY_SIZE * 2,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
} };

/*
 * Module init: refuse to load unless the CPU supports AVX and the OS has
 * enabled SSE+YMM state saving in XCR0 (otherwise AVX registers would not
 * be preserved across context switches).
 */
static int __init twofish_init(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave) {
		printk(KERN_INFO "AVX instructions are not detected.\n");
		return -ENODEV;
	}

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		printk(KERN_INFO "AVX detected but unusable.\n");
		return -ENODEV;
	}

	return crypto_register_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
}

static void __exit twofish_exit(void)
{
	crypto_unregister_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
}

module_init(twofish_init);
module_exit(twofish_exit);

MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS("twofish");