/*
 * Glue Code for 3-way parallel assembler-optimized version of Twofish
 *
 * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c, crypto/ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <asm/processor.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>

/* regular block cipher functions from twofish_x86_64 module */
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
				const u8 *src);
asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
				const u8 *src);

/* 3-way parallel cipher functions */
asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
				       const u8 *src, bool xor);
asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
				     const u8 *src);

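/*
 * The 'xor' flag of __twofish_enc_blk_3way() selects between two outputs:
 * false writes the three encrypted blocks to dst, true XORs them into the
 * data already at dst.  The XOR variant is what the CTR path below uses to
 * apply the keystream in a single pass.
 */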
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
					const u8 *src)
{
	__twofish_enc_blk_3way(ctx, dst, src, false);
}

static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst,
					    const u8 *src)
{
	__twofish_enc_blk_3way(ctx, dst, src, true);
}

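/*
 * Shared ECB loop: walk the scatterlists, push full three-block batches
 * through the 3-way routine, then fall back to the one-block routine for
 * whatever remains (at most two full blocks per walk chunk).
 */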
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     void (*fn)(struct twofish_ctx *, u8 *, const u8 *),
		     void (*fn_3way)(struct twofish_ctx *, u8 *, const u8 *))
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		/* Process three block batch */
		if (nbytes >= bsize * 3) {
			do {
				fn_3way(ctx, wdst, wsrc);

				wsrc += bsize * 3;
				wdst += bsize * 3;
				nbytes -= bsize * 3;
			} while (nbytes >= bsize * 3);

			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers */
		do {
			fn(ctx, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	return err;
}

static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, twofish_enc_blk, twofish_enc_blk_3way);
}

static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_crypt(desc, &walk, twofish_dec_blk, twofish_dec_blk_3way);
}

static struct crypto_alg blk_ecb_alg = {
	.cra_name		= "ecb(twofish)",
	.cra_driver_name	= "ecb-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
};

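/*
 * Usage sketch (not part of this module): callers reach these algorithms
 * through the generic crypto API by name.  A minimal, hypothetical kernel
 * user might do:
 *
 *	struct crypto_blkcipher *tfm;
 *
 *	tfm = crypto_alloc_blkcipher("ecb(twofish)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_blkcipher_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_blkcipher(tfm);
 *
 * The priority of 300 is meant to make this driver win over the generic C
 * and plain twofish-x86_64 implementations when it is loaded.
 */

/*
 * CBC encryption is inherently serial (each block's input depends on the
 * previous ciphertext block), so no 3-way batching is possible here; only
 * CBC decryption below is parallelized.
 */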
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		twofish_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	/*
	 * Carry the last ciphertext block over as the IV for the next walk
	 * chunk; XORing it into walk->iv would corrupt the chaining value.
	 */
	*(u128 *)walk->iv = *iv;
	return nbytes;
}

static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __cbc_encrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

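/*
 * CBC decryption can run three blocks at a time because each output needs
 * only the matching ciphertext block and its predecessor.  The loop works
 * backwards from the end of the chunk so that, for in-place operation
 * (src == dst), the previous ciphertext blocks are still intact when they
 * are needed; ivs[] snapshots the two blocks the 3-way decrypt would
 * otherwise overwrite.
 */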
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ivs[3 - 1];
	u128 last_iv;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	/* Process three block batch */
	if (nbytes >= bsize * 3) {
		do {
			nbytes -= bsize * (3 - 1);
			src -= 3 - 1;
			dst -= 3 - 1;

			ivs[0] = src[0];
			ivs[1] = src[1];

			twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);

			u128_xor(dst + 1, dst + 1, ivs + 0);
			u128_xor(dst + 2, dst + 2, ivs + 1);

			nbytes -= bsize;
			if (nbytes < bsize)
				goto done;

			u128_xor(dst, dst, src - 1);
			src -= 1;
			dst -= 1;
		} while (nbytes >= bsize * 3);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	for (;;) {
		twofish_dec_blk(ctx, (u8 *)dst, (u8 *)src);

		nbytes -= bsize;
		if (nbytes < bsize)
			break;

		u128_xor(dst, dst, src - 1);
		src -= 1;
		dst -= 1;
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}

static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __cbc_decrypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg blk_cbc_alg = {
	.cra_name		= "cbc(twofish)",
	.cra_driver_name	= "cbc-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
};

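/*
 * CTR keeps the counter in native-endian u128 form for arithmetic and
 * converts it to big-endian be128 only when a block is fed to the cipher;
 * the IV itself stays big-endian in walk->iv.  u128_inc() implements a
 * 128-bit increment with carry from the low (b) into the high (a) word.
 */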
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
	dst->a = cpu_to_be64(src->a);
	dst->b = cpu_to_be64(src->b);
}

static inline void be128_to_u128(u128 *dst, const be128 *src)
{
	dst->a = be64_to_cpu(src->a);
	dst->b = be64_to_cpu(src->b);
}

static inline void u128_inc(u128 *i)
{
	i->b++;
	if (!i->b)
		i->a++;
}

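/*
 * Handle the final sub-block-sized piece of a CTR request: encrypt the
 * counter into a keystream buffer and XOR only the remaining nbytes of it
 * into the output, so nothing past the requested length is written.
 */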
static void ctr_crypt_final(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *ctrblk = walk->iv;
	u8 keystream[TF_BLOCK_SIZE];
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	twofish_enc_blk(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	crypto_inc(ctrblk, TF_BLOCK_SIZE);
}

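/*
 * The 3-way CTR fast path first copies the plaintext to dst (when not
 * already operating in place), builds three consecutive counter blocks,
 * and then uses the XOR variant of the 3-way encrypt so the keystream is
 * applied in the same pass that generates it.
 */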
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct twofish_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = TF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 ctrblk;
	be128 ctrblocks[3];

	be128_to_u128(&ctrblk, (be128 *)walk->iv);

	/* Process three block batch */
	if (nbytes >= bsize * 3) {
		do {
			if (dst != src) {
				dst[0] = src[0];
				dst[1] = src[1];
				dst[2] = src[2];
			}

			/* create ctrblks for parallel encrypt */
			u128_to_be128(&ctrblocks[0], &ctrblk);
			u128_inc(&ctrblk);
			u128_to_be128(&ctrblocks[1], &ctrblk);
			u128_inc(&ctrblk);
			u128_to_be128(&ctrblocks[2], &ctrblk);
			u128_inc(&ctrblk);

			twofish_enc_blk_xor_3way(ctx, (u8 *)dst,
						 (u8 *)ctrblocks);

			src += 3;
			dst += 3;
			nbytes -= bsize * 3;
		} while (nbytes >= bsize * 3);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	do {
		if (dst != src)
			*dst = *src;

		u128_to_be128(&ctrblocks[0], &ctrblk);
		u128_inc(&ctrblk);

		twofish_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
		u128_xor(dst, dst, (u128 *)ctrblocks);

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

done:
	u128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}

static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		     struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, TF_BLOCK_SIZE);

	while ((nbytes = walk.nbytes) >= TF_BLOCK_SIZE) {
		nbytes = __ctr_crypt(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	if (walk.nbytes) {
		ctr_crypt_final(desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}

static struct crypto_alg blk_ctr_alg = {
	.cra_name		= "ctr(twofish)",
	.cra_driver_name	= "ctr-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= twofish_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
};

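/*
 * LRW and XTS callbacks: the generic lrw_crypt()/xts_crypt() helpers hand
 * back pre-whitened data in chunks no larger than the caller's tweak
 * buffer, which is sized at three blocks below, so a single 3-way batch
 * check is all the fast path needs.
 */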
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = TF_BLOCK_SIZE;
	struct twofish_ctx *ctx = priv;
	int i;

	if (nbytes == 3 * bsize) {
		twofish_enc_blk_3way(ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		twofish_enc_blk(ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = TF_BLOCK_SIZE;
	struct twofish_ctx *ctx = priv;
	int i;

	if (nbytes == 3 * bsize) {
		twofish_dec_blk_3way(ctx, srcdst, srcdst);
		return;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		twofish_dec_blk(ctx, srcdst, srcdst);
}

struct twofish_lrw_ctx {
	struct lrw_table_ctx lrw_table;
	struct twofish_ctx twofish_ctx;
};

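/*
 * An LRW key is the Twofish key followed by one cipher block of tweak
 * material: the cipher key length is keylen - TF_BLOCK_SIZE, and the last
 * block seeds the GF(2^128) multiplication table (hence the keysize
 * limits of TF_{MIN,MAX}_KEY_SIZE + TF_BLOCK_SIZE below).
 */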
static int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	err = __twofish_setkey(&ctx->twofish_ctx, key, keylen - TF_BLOCK_SIZE,
			       &tfm->crt_flags);
	if (err)
		return err;

	return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE);
}

static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &ctx->twofish_ctx,
		.crypt_fn = encrypt_callback,
	};

	return lrw_crypt(desc, dst, src, nbytes, &req);
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &ctx->twofish_ctx,
		.crypt_fn = decrypt_callback,
	};

	return lrw_crypt(desc, dst, src, nbytes, &req);
}

static void lrw_exit_tfm(struct crypto_tfm *tfm)
{
	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);

	lrw_free_table(&ctx->lrw_table);
}

static struct crypto_alg blk_lrw_alg = {
	.cra_name		= "lrw(twofish)",
	.cra_driver_name	= "lrw-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_lrw_alg.cra_list),
	.cra_exit		= lrw_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE + TF_BLOCK_SIZE,
			.max_keysize	= TF_MAX_KEY_SIZE + TF_BLOCK_SIZE,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= lrw_twofish_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
};

struct twofish_xts_ctx {
	struct twofish_ctx tweak_ctx;
	struct twofish_ctx crypt_ctx;
};

static int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	int err;

	/* key consists of keys of equal size concatenated, therefore
	 * the length must be even
	 */
	if (keylen % 2) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* first half of xts-key is for crypt */
	err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
	if (err)
		return err;

	/* second half of xts-key is for tweak */
	return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
				flags);
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
		.crypt_ctx = &ctx->crypt_ctx,
		.crypt_fn = encrypt_callback,
	};

	return xts_crypt(desc, dst, src, nbytes, &req);
}

static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[3];
	struct xts_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),

		.tweak_ctx = &ctx->tweak_ctx,
		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
		.crypt_ctx = &ctx->crypt_ctx,
		.crypt_fn = decrypt_callback,
	};

	return xts_crypt(desc, dst, src, nbytes, &req);
}

static struct crypto_alg blk_xts_alg = {
	.cra_name		= "xts(twofish)",
	.cra_driver_name	= "xts-twofish-3way",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_xts_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(blk_xts_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize	= TF_MIN_KEY_SIZE * 2,
			.max_keysize	= TF_MAX_KEY_SIZE * 2,
			.ivsize		= TF_BLOCK_SIZE,
			.setkey		= xts_twofish_setkey,
			.encrypt	= xts_encrypt,
			.decrypt	= xts_decrypt,
		},
	},
};

static bool is_blacklisted_cpu(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return false;

	if (boot_cpu_data.x86 == 0x06 &&
		(boot_cpu_data.x86_model == 0x1c ||
		 boot_cpu_data.x86_model == 0x26 ||
		 boot_cpu_data.x86_model == 0x36)) {
		/*
		 * On Atom, twofish-3way is slower than the original
		 * assembler implementation.  Twofish-3way trades off some
		 * performance in storing blocks in 64-bit registers to
		 * allow three blocks to be processed in parallel.  On
		 * out-of-order CPUs the parallelism gains back more than
		 * was traded off, but in-order Atom does not benefit from
		 * it and should be blacklisted.
		 */
		return true;
	}

	if (boot_cpu_data.x86 == 0x0f) {
		/*
		 * On Pentium 4, twofish-3way is slower than the original
		 * assembler implementation because of the excessive use of
		 * 64-bit rotates and left-shifts (which are really slow on
		 * P4) needed to store and handle a 128-bit block in two
		 * 64-bit registers.
		 */
		return true;
	}

	return false;
}

static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");

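/*
 * Registration happens in a fixed order with a matching unwind: if any
 * crypto_register_alg() call fails, every algorithm registered before it
 * is unregistered again before the error is returned.  To load on a
 * blacklisted CPU anyway (module name assumed from the printk string
 * below), something like:
 *
 *	modprobe twofish-x86_64-3way force=1
 */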
static int __init init(void)
{
	int err;

	if (!force && is_blacklisted_cpu()) {
		printk(KERN_INFO
			"twofish-x86_64-3way: performance on this CPU "
			"would be suboptimal: disabling "
			"twofish-x86_64-3way.\n");
		return -ENODEV;
	}

	err = crypto_register_alg(&blk_ecb_alg);
	if (err)
		goto ecb_err;
	err = crypto_register_alg(&blk_cbc_alg);
	if (err)
		goto cbc_err;
	err = crypto_register_alg(&blk_ctr_alg);
	if (err)
		goto ctr_err;
	err = crypto_register_alg(&blk_lrw_alg);
	if (err)
		goto blk_lrw_err;
	err = crypto_register_alg(&blk_xts_alg);
	if (err)
		goto blk_xts_err;

	return 0;

blk_xts_err:
	crypto_unregister_alg(&blk_lrw_alg);
blk_lrw_err:
	crypto_unregister_alg(&blk_ctr_alg);
ctr_err:
	crypto_unregister_alg(&blk_cbc_alg);
cbc_err:
	crypto_unregister_alg(&blk_ecb_alg);
ecb_err:
	return err;
}

static void __exit fini(void)
{
	crypto_unregister_alg(&blk_xts_alg);
	crypto_unregister_alg(&blk_lrw_alg);
	crypto_unregister_alg(&blk_ctr_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
}

module_init(init);
module_exit(fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
MODULE_ALIAS("twofish");
MODULE_ALIAS("twofish-asm");