/*
 * Glue Code for 3-way parallel assembler optimized version of Twofish
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <asm/processor.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>

/* regular block cipher functions from twofish_x86_64 module */
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
				const u8 *src);
asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
				const u8 *src);

/* 3-way parallel cipher functions */
asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
				       const u8 *src, bool xor);
asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
				     const u8 *src);

static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
					const u8 *src)
{
	__twofish_enc_blk_3way(ctx, dst, src, false);
}

static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst,
					    const u8 *src)
{
	__twofish_enc_blk_3way(ctx, dst, src, true);
}

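/*
 * Generic ECB walker: processes the data in batches of three blocks
 * with fn_3way and falls back to the single-block fn for the remaining
 * one or two blocks of each walk step.
 */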
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     void (*fn)(struct twofish_ctx *, u8 *, const u8 *),
		     void (*fn_3way)(struct twofish_ctx *, u8 *, const u8 *))
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		/* Process three block batch */
		if (nbytes >= bsize * 3) {
			do {
				fn_3way(ctx, wdst, wsrc);

				wsrc += bsize * 3;
				wdst += bsize * 3;
				nbytes -= bsize * 3;
			} while (nbytes >= bsize * 3);

			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers */
		do {
			fn(ctx, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	return err;
}

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, twofish_enc_blk, twofish_enc_blk_3way);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, twofish_dec_blk, twofish_dec_blk_3way);
}

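/*
 * CBC encryption is inherently serial (each block is chained to the
 * previous ciphertext), so only the one-block-at-a-time path exists
 * here; the last ciphertext block is stored back as the IV for the
 * next walk step.
 */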
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		twofish_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	*(u128 *)walk->iv = *iv;
	return nbytes;
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __cbc_encrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

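/*
 * CBC decryption can run in parallel. The function walks backwards
 * from the last block so that in-place decryption works, saving the
 * ciphertext blocks needed for chaining in ivs[] before they may be
 * overwritten, and finally stores the original last ciphertext block
 * as the next IV.
 */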
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ivs[3 - 1];
	u128 last_iv;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	/* Process three block batch */
	if (nbytes >= bsize * 3) {
		do {
			nbytes -= bsize * (3 - 1);
			src -= 3 - 1;
			dst -= 3 - 1;

			ivs[0] = src[0];
			ivs[1] = src[1];

			twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);

			u128_xor(dst + 1, dst + 1, ivs + 0);
			u128_xor(dst + 2, dst + 2, ivs + 1);

			nbytes -= bsize;
			if (nbytes < bsize)
				goto done;

			u128_xor(dst, dst, src - 1);
			src -= 1;
			dst -= 1;
		} while (nbytes >= bsize * 3);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	for (;;) {
		twofish_dec_blk(ctx, (u8 *)dst, (u8 *)src);

		nbytes -= bsize;
		if (nbytes < bsize)
			break;

		u128_xor(dst, dst, src - 1);
		src -= 1;
		dst -= 1;
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __cbc_decrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

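/*
 * The CTR counter lives in walk->iv in big-endian byte order; these
 * helpers convert it to and from host byte order so it can be
 * incremented as a 128-bit integer.
 */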
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
	dst->a = cpu_to_be64(src->a);
	dst->b = cpu_to_be64(src->b);
}

static inline void be128_to_u128(u128 *dst, const be128 *src)
{
	dst->a = be64_to_cpu(src->a);
	dst->b = be64_to_cpu(src->b);
}

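/* 128-bit increment: bump the low word and carry into the high word. */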
static inline void u128_inc(u128 *i)
{
	i->b++;
	if (!i->b)
		i->a++;
}

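/*
 * CTR mode acts as a stream cipher, so a trailing partial block is
 * handled by encrypting the counter block and XORing only nbytes of
 * the resulting keystream into the output.
 */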
static void ctr_crypt_final(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *ctrblk = walk->iv;
	u8 keystream[TF_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	twofish_enc_blk(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, TF_BLOCK_SIZE);
}

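/*
 * Main CTR loop: for full three-block batches, the counter blocks are
 * encrypted with the xor-variant of the 3-way assembler routine, which
 * XORs the generated keystream directly into dst (src is copied to dst
 * first when operating out of place).
 */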
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ctrblk;
	be128 ctrblocks[3];

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	/* Process three block batch */
	if (nbytes >= bsize * 3) {
		do {
			if (dst != src) {
				dst[0] = src[0];
				dst[1] = src[1];
				dst[2] = src[2];
			}

			/* create ctrblks for parallel encrypt */
			u128_to_be128(&ctrblocks[0], &ctrblk);
			u128_inc(&ctrblk);
			u128_to_be128(&ctrblocks[1], &ctrblk);
			u128_inc(&ctrblk);
			u128_to_be128(&ctrblocks[2], &ctrblk);
			u128_inc(&ctrblk);

			twofish_enc_blk_xor_3way(ctx, (u8 *)dst,
						 (u8 *)ctrblocks);

			src += 3;
			dst += 3;
			nbytes -= bsize * 3;
		} while (nbytes >= bsize * 3);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	do {
		if (dst != src)
			*dst = *src;

		u128_to_be128(&ctrblocks[0], &ctrblk);
		u128_inc(&ctrblk);

		twofish_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
		u128_xor(dst, dst, (u128 *)ctrblocks);

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

done:
	u128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, TF_BLOCK_SIZE);

	while ((nbytes = walk.nbytes) >= TF_BLOCK_SIZE) {
		nbytes = __ctr_crypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	if (walk.nbytes) {
		ctr_crypt_final(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}

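/*
 * Callbacks for the generic LRW/XTS helpers. They receive data that
 * has already been tweaked and process it in place, at most three
 * blocks (tbuflen / TF_BLOCK_SIZE) per call.
 */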
369 
370 static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
371 {
372 	const unsigned int bsize = TF_BLOCK_SIZE;
373 	struct twofish_ctx *ctx = priv;
374 	int i;
375 
376 	if (nbytes == 3 * bsize) {
377 		twofish_enc_blk_3way(ctx, srcdst, srcdst);
378 		return;
379 	}
380 
381 	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
382 		twofish_enc_blk(ctx, srcdst, srcdst);
383 }
384 
385 static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
386 {
387 	const unsigned int bsize = TF_BLOCK_SIZE;
388 	struct twofish_ctx *ctx = priv;
389 	int i;
390 
391 	if (nbytes == 3 * bsize) {
392 		twofish_dec_blk_3way(ctx, srcdst, srcdst);
393 		return;
394 	}
395 
396 	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
397 		twofish_dec_blk(ctx, srcdst, srcdst);
398 }
399 
400 struct twofish_lrw_ctx {
401 	struct lrw_table_ctx lrw_table;
402 	struct twofish_ctx twofish_ctx;
403 };
404 
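/*
 * For LRW the supplied key is the Twofish key with the 16-byte tweak
 * key appended, so the cipher key proper is keylen - TF_BLOCK_SIZE
 * bytes long.
 */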
static int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __twofish_setkey(&ctx->twofish_ctx, key, keylen - TF_BLOCK_SIZE,
			       &tfm->crt_flags);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &ctx->twofish_ctx,
		.crypt_fn = encrypt_callback,
	};

	return lrw_crypt(desc, dst, src, nbytes, &req);
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &ctx->twofish_ctx,
		.crypt_fn = decrypt_callback,
	};

	return lrw_crypt(desc, dst, src, nbytes, &req);
}

static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

struct twofish_xts_ctx {
	struct twofish_ctx tweak_ctx;
	struct twofish_ctx crypt_ctx;
};

static int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/*
	 * The key consists of two keys of equal size concatenated,
	 * therefore the total length must be even.
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
				flags);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
		.crypt_ctx = &ctx->crypt_ctx,
		.crypt_fn = encrypt_callback,
	};

	return xts_crypt(desc, dst, src, nbytes, &req);
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
		.crypt_ctx = &ctx->crypt_ctx,
		.crypt_fn = decrypt_callback,
	};

	return xts_crypt(desc, dst, src, nbytes, &req);
}

static struct crypto_alg tf_algs[5] = { {
	.cra_name		= "ecb(twofish)",
	.cra_driver_name	= "ecb-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(tf_algs[0].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(twofish)",
	.cra_driver_name	= "cbc-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(tf_algs[1].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	.cra_name		= "ctr(twofish)",
	.cra_driver_name	= "ctr-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(tf_algs[2].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "lrw(twofish)",
	.cra_driver_name	= "lrw-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(tf_algs[3].cra_list),
	.cra_exit		= lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE + TF_BLOCK_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE + TF_BLOCK_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= lrw_twofish_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "xts(twofish)",
	.cra_driver_name	= "xts-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(tf_algs[4].cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE * 2,
			.max_keysize	= TF_MAX_KEY_SIZE * 2,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= xts_twofish_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
} };

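/*
 * Illustrative sketch (not part of this module): a kernel-side user of
 * the legacy blkcipher API of this era could reach these
 * implementations by name, e.g. for CTR mode. Error handling is
 * omitted, and "key", "iv", "sg_dst" and "sg_src" are assumed to be
 * set up by the caller:
 *
 *	struct crypto_blkcipher *tfm =
 *		crypto_alloc_blkcipher("ctr(twofish)", 0, 0);
 *	struct blkcipher_desc desc = { .tfm = tfm };
 *
 *	crypto_blkcipher_setkey(tfm, key, keylen);
 *	crypto_blkcipher_set_iv(tfm, iv, TF_BLOCK_SIZE);
 *	crypto_blkcipher_encrypt(&desc, sg_dst, sg_src, nbytes);
 *	crypto_free_blkcipher(tfm);
 */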
static bool is_blacklisted_cpu(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return false;

	if (boot_cpu_data.x86 == 0x06 &&
		(boot_cpu_data.x86_model == 0x1c ||
		 boot_cpu_data.x86_model == 0x26 ||
		 boot_cpu_data.x86_model == 0x36)) {
		/*
		 * On Atom, twofish-3way is slower than the original
		 * assembler implementation. Twofish-3way trades off some
		 * performance by storing blocks in 64-bit registers so
		 * that three blocks can be processed in parallel. On
		 * out-of-order CPUs the parallel operation gains back more
		 * performance than was traded off, but Atom does not
		 * benefit from this parallelism and should be blacklisted.
		 */
		return true;
	}

	if (boot_cpu_data.x86 == 0x0f) {
		/*
		 * On Pentium 4, twofish-3way is slower than the original
		 * assembler implementation because of the excessive use of
		 * 64-bit rotates and left-shifts (which are really slow on
		 * P4) needed to store and handle a 128-bit block in two
		 * 64-bit registers.
		 */
		return true;
	}

	return false;
}

static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");

static int __init init(void)
{
	if (!force && is_blacklisted_cpu()) {
		printk(KERN_INFO
			"twofish-x86_64-3way: performance on this CPU "
			"would be suboptimal: disabling "
			"twofish-x86_64-3way.\n");
		return -ENODEV;
	}

	return crypto_register_algs(tf_algs, ARRAY_SIZE(tf_algs));
}

static void __exit fini(void)
{
	crypto_unregister_algs(tf_algs, ARRAY_SIZE(tf_algs));
}

module_init(init);
module_exit(fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
MODULE_ALIAS("twofish");
MODULE_ALIAS("twofish-asm");