/***************************************************************************
*   Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de>        *
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
*   This program is distributed in the hope that it will be useful,       *
*   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
*   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
*   GNU General Public License for more details.                          *
*                                                                         *
*   You should have received a copy of the GNU General Public License     *
*   along with this program; if not, write to the                         *
*   Free Software Foundation, Inc.,                                       *
*   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
***************************************************************************/

.file "twofish-x86_64-asm.S"
.text

#include <linux/linkage.h>
#include <asm/asm-offsets.h>

#define a_offset	0
#define b_offset	4
#define c_offset	8
#define d_offset	12

/* Structure of the crypto context struct */

#define s0	0	/* S0 array, 256 words each */
#define s1	1024	/* S1 array */
#define s2	2048	/* S2 array */
#define s3	3072	/* S3 array */
#define w	4096	/* 8 whitening keys (words) */
#define k	4128	/* 32 round subkeys (words) */

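/*
 * These offsets correspond to a context layout like the kernel's
 * struct twofish_ctx from <crypto/twofish.h>, sketched here for
 * reference (u32 is 4 bytes, so s[1] starts at 4*256 = 1024, w at
 * 4*4*256 = 4096 and k at 4096 + 4*8 = 4128):
 *
 *	struct twofish_ctx {
 *		u32 s[4][256], w[8], k[32];
 *	};
 */
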
/* define a few register aliases to allow macro substitution */

#define R0     %rax
#define R0D    %eax
#define R0B    %al
#define R0H    %ah

#define R1     %rbx
#define R1D    %ebx
#define R1B    %bl
#define R1H    %bh

#define R2     %rcx
#define R2D    %ecx
#define R2B    %cl
#define R2H    %ch

#define R3     %rdx
#define R3D    %edx
#define R3B    %dl
#define R3H    %dh


/* performs input whitening */
#define input_whitening(src,context,offset)\
	xor	w+offset(context),	src;

/* performs output whitening */
#define output_whitening(src,context,offset)\
	xor	w+16+offset(context),	src;

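/*
 * Because src is a 64-bit register, one xor whitens two 32-bit words
 * at once. For example (an illustrative expansion; R1 is %rbx per the
 * aliases above):
 *
 *	input_whitening(R1,%r11,a_offset)
 *
 * expands to
 *
 *	xor	w+0(%r11),	%rbx
 *
 * which applies whitening words 0 and 1 to the a/b half of the block.
 */
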
/*
 * a input register containing a (rotated 16)
 * b input register containing b
 * c input register containing c
 * d input register containing d (already rol $1)
 * operations on a and b are interleaved to increase performance
 */
#define encrypt_round(a,b,c,d,round)\
	movzx	b ## B,		%edi;\
	mov	s1(%r11,%rdi,4),%r8d;\
	movzx	a ## B,		%edi;\
	mov	s2(%r11,%rdi,4),%r9d;\
	movzx	b ## H,		%edi;\
	ror	$16,		b ## D;\
	xor	s2(%r11,%rdi,4),%r8d;\
	movzx	a ## H,		%edi;\
	ror	$16,		a ## D;\
	xor	s3(%r11,%rdi,4),%r9d;\
	movzx	b ## B,		%edi;\
	xor	s3(%r11,%rdi,4),%r8d;\
	movzx	a ## B,		%edi;\
	xor	(%r11,%rdi,4),	%r9d;\
	movzx	b ## H,		%edi;\
	ror	$15,		b ## D;\
	xor	(%r11,%rdi,4),	%r8d;\
	movzx	a ## H,		%edi;\
	xor	s1(%r11,%rdi,4),%r9d;\
	add	%r8d,		%r9d;\
	add	%r9d,		%r8d;\
	add	k+round(%r11),	%r9d;\
	xor	%r9d,		c ## D;\
	rol	$15,		c ## D;\
	add	k+4+round(%r11),%r8d;\
	xor	%r8d,		d ## D;

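/*
 * For reference, one encryption round as a C sketch (t0 and t1 are
 * illustrative names; s0..s3 are the key-dependent tables above, with
 * the MDS matrix already folded in by the key schedule, and k[] is the
 * 32-entry round-subkey array at offset 4128):
 *
 *	t0 = s0[a & 0xff] ^ s1[(a >> 8) & 0xff]
 *	   ^ s2[(a >> 16) & 0xff] ^ s3[a >> 24];
 *	t1 = s1[b & 0xff] ^ s2[(b >> 8) & 0xff]
 *	   ^ s3[(b >> 16) & 0xff] ^ s0[b >> 24];
 *	c = ror32(c ^ (t0 + t1 + k[2*r]), 1);
 *	d = rol32(d, 1) ^ (t0 + 2*t1 + k[2*r + 1]);
 *
 * The macro differs only in how it keeps values pre-rotated between
 * rounds: c leaves rotated left by 16 for its next use as a (rol $15
 * == ror $1 followed by rol $16), and d's rol $1 is already done on
 * entry.
 */
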
/*
 * a input register containing a (rotated 16)
 * b input register containing b
 * c input register containing c
 * d input register containing d (already rol $1)
 * operations on a and b are interleaved to increase performance
 * during the round a and b are prepared for the output whitening
 */
#define encrypt_last_round(a,b,c,d,round)\
	mov	b ## D,		%r10d;\
	shl	$32,		%r10;\
	movzx	b ## B,		%edi;\
	mov	s1(%r11,%rdi,4),%r8d;\
	movzx	a ## B,		%edi;\
	mov	s2(%r11,%rdi,4),%r9d;\
	movzx	b ## H,		%edi;\
	ror	$16,		b ## D;\
	xor	s2(%r11,%rdi,4),%r8d;\
	movzx	a ## H,		%edi;\
	ror	$16,		a ## D;\
	xor	s3(%r11,%rdi,4),%r9d;\
	movzx	b ## B,		%edi;\
	xor	s3(%r11,%rdi,4),%r8d;\
	movzx	a ## B,		%edi;\
	xor	(%r11,%rdi,4),	%r9d;\
	xor	a,		%r10;\
	movzx	b ## H,		%edi;\
	xor	(%r11,%rdi,4),	%r8d;\
	movzx	a ## H,		%edi;\
	xor	s1(%r11,%rdi,4),%r9d;\
	add	%r8d,		%r9d;\
	add	%r9d,		%r8d;\
	add	k+round(%r11),	%r9d;\
	xor	%r9d,		c ## D;\
	ror	$1,		c ## D;\
	add	k+4+round(%r11),%r8d;\
	xor	%r8d,		d ## D

/*
 * a input register containing a
 * b input register containing b (rotated 16)
 * c input register containing c (already rol $1)
 * d input register containing d
 * operations on a and b are interleaved to increase performance
 */
#define decrypt_round(a,b,c,d,round)\
	movzx	a ## B,		%edi;\
	mov	(%r11,%rdi,4),	%r9d;\
	movzx	b ## B,		%edi;\
	mov	s3(%r11,%rdi,4),%r8d;\
	movzx	a ## H,		%edi;\
	ror	$16,		a ## D;\
	xor	s1(%r11,%rdi,4),%r9d;\
	movzx	b ## H,		%edi;\
	ror	$16,		b ## D;\
	xor	(%r11,%rdi,4),	%r8d;\
	movzx	a ## B,		%edi;\
	xor	s2(%r11,%rdi,4),%r9d;\
	movzx	b ## B,		%edi;\
	xor	s1(%r11,%rdi,4),%r8d;\
	movzx	a ## H,		%edi;\
	ror	$15,		a ## D;\
	xor	s3(%r11,%rdi,4),%r9d;\
	movzx	b ## H,		%edi;\
	xor	s2(%r11,%rdi,4),%r8d;\
	add	%r8d,		%r9d;\
	add	%r9d,		%r8d;\
	add	k+round(%r11),	%r9d;\
	xor	%r9d,		c ## D;\
	add	k+4+round(%r11),%r8d;\
	xor	%r8d,		d ## D;\
	rol	$15,		d ## D;

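/*
 * For reference, one decryption round as a C sketch, the exact inverse
 * of the encryption round above (same illustrative names):
 *
 *	t0 = s0[a & 0xff] ^ s1[(a >> 8) & 0xff]
 *	   ^ s2[(a >> 16) & 0xff] ^ s3[a >> 24];
 *	t1 = s1[b & 0xff] ^ s2[(b >> 8) & 0xff]
 *	   ^ s3[(b >> 16) & 0xff] ^ s0[b >> 24];
 *	c = rol32(c, 1) ^ (t0 + t1 + k[2*r]);
 *	d = ror32(d ^ (t0 + 2*t1 + k[2*r + 1]), 1);
 *
 * Here d leaves rotated left by 16 for its next use as b (rol $15 ==
 * ror $1 followed by rol $16), and c's rol $1 is already done on
 * entry.
 */
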
/*
 * a input register containing a
 * b input register containing b (rotated 16)
 * c input register containing c (already rol $1)
 * d input register containing d
 * operations on a and b are interleaved to increase performance
 * during the round a and b are prepared for the output whitening
 */
#define decrypt_last_round(a,b,c,d,round)\
	movzx	a ## B,		%edi;\
	mov	(%r11,%rdi,4),	%r9d;\
	movzx	b ## B,		%edi;\
	mov	s3(%r11,%rdi,4),%r8d;\
	movzx	b ## H,		%edi;\
	ror	$16,		b ## D;\
	xor	(%r11,%rdi,4),	%r8d;\
	movzx	a ## H,		%edi;\
	mov	b ## D,		%r10d;\
	shl	$32,		%r10;\
	xor	a,		%r10;\
	ror	$16,		a ## D;\
	xor	s1(%r11,%rdi,4),%r9d;\
	movzx	b ## B,		%edi;\
	xor	s1(%r11,%rdi,4),%r8d;\
	movzx	a ## B,		%edi;\
	xor	s2(%r11,%rdi,4),%r9d;\
	movzx	b ## H,		%edi;\
	xor	s2(%r11,%rdi,4),%r8d;\
	movzx	a ## H,		%edi;\
	xor	s3(%r11,%rdi,4),%r9d;\
	add	%r8d,		%r9d;\
	add	%r9d,		%r8d;\
	add	k+round(%r11),	%r9d;\
	xor	%r9d,		c ## D;\
	add	k+4+round(%r11),%r8d;\
	xor	%r8d,		d ## D;\
	ror	$1,		d ## D;

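/*
 * The C glue code (twofish_glue.c) declares these functions roughly as:
 *
 *	asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
 *					const u8 *src);
 *	asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
 *					const u8 *src);
 *
 * i.e. ctx in %rdi, dst in %rsi, src in %rdx per the x86_64 calling
 * convention; the constant 1 left in %eax on return is not used by the
 * callers.
 */
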
ENTRY(twofish_enc_blk)
	pushq    R1

	/* %rdi contains the ctx address */
	/* %rsi contains the output address */
	/* %rdx contains the input address */
	/* the ctx address is moved to %r11 to free %rdi, a non-REX
	   register, as the target for the 8-bit high operations */
	mov	%rdi,		%r11

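	/*
	 * Load the 128-bit block as two 64-bit halves (R3 is %rdx, which
	 * still holds the input address), whiten them, then split them into
	 * the four 32-bit words a..d expected by encrypt_round: a ends up
	 * in R0 rotated by 16 and d in R3 rotated left by 1.
	 */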
	movq	(R3),	R1
	movq	8(R3),	R3
	input_whitening(R1,%r11,a_offset)
	input_whitening(R3,%r11,c_offset)
	mov	R1D,	R0D
	rol	$16,	R0D
	shr	$32,	R1
	mov	R3D,	R2D
	shr	$32,	R3
	rol	$1,	R3D

	encrypt_round(R0,R1,R2,R3,0);
	encrypt_round(R2,R3,R0,R1,8);
	encrypt_round(R0,R1,R2,R3,2*8);
	encrypt_round(R2,R3,R0,R1,3*8);
	encrypt_round(R0,R1,R2,R3,4*8);
	encrypt_round(R2,R3,R0,R1,5*8);
	encrypt_round(R0,R1,R2,R3,6*8);
	encrypt_round(R2,R3,R0,R1,7*8);
	encrypt_round(R0,R1,R2,R3,8*8);
	encrypt_round(R2,R3,R0,R1,9*8);
	encrypt_round(R0,R1,R2,R3,10*8);
	encrypt_round(R2,R3,R0,R1,11*8);
	encrypt_round(R0,R1,R2,R3,12*8);
	encrypt_round(R2,R3,R0,R1,13*8);
	encrypt_round(R0,R1,R2,R3,14*8);
	encrypt_last_round(R2,R3,R0,R1,15*8);

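	/* %r10 was packed by encrypt_last_round with b in its upper and a
	   in its lower 32 bits, so one 64-bit whitening/store pair handles
	   the first output half */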
	output_whitening(%r10,%r11,a_offset)
	movq	%r10,	(%rsi)

	shl	$32,	R1
	xor	R0,	R1

	output_whitening(R1,%r11,c_offset)
	movq	R1,	8(%rsi)

	popq	R1
	movl	$1,%eax
	ret
ENDPROC(twofish_enc_blk)

ENTRY(twofish_dec_blk)
	pushq    R1

	/* %rdi contains the ctx address */
	/* %rsi contains the output address */
	/* %rdx contains the input address */
	/* the ctx address is moved to %r11 to free %rdi, a non-REX
	   register, as the target for the 8-bit high operations */
	mov	%rdi,		%r11

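	/*
	 * Load the block and undo the output whitening first (R3 is %rdx,
	 * still the input address), then split into the four 32-bit words
	 * expected by decrypt_round: b ends up in R1 rotated by 16 and c
	 * in R2 rotated left by 1.
	 */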
	movq	(R3),	R1
	movq	8(R3),	R3
	output_whitening(R1,%r11,a_offset)
	output_whitening(R3,%r11,c_offset)
	mov	R1D,	R0D
	shr	$32,	R1
	rol	$16,	R1D
	mov	R3D,	R2D
	shr	$32,	R3
	rol	$1,	R2D

	decrypt_round(R0,R1,R2,R3,15*8);
	decrypt_round(R2,R3,R0,R1,14*8);
	decrypt_round(R0,R1,R2,R3,13*8);
	decrypt_round(R2,R3,R0,R1,12*8);
	decrypt_round(R0,R1,R2,R3,11*8);
	decrypt_round(R2,R3,R0,R1,10*8);
	decrypt_round(R0,R1,R2,R3,9*8);
	decrypt_round(R2,R3,R0,R1,8*8);
	decrypt_round(R0,R1,R2,R3,7*8);
	decrypt_round(R2,R3,R0,R1,6*8);
	decrypt_round(R0,R1,R2,R3,5*8);
	decrypt_round(R2,R3,R0,R1,4*8);
	decrypt_round(R0,R1,R2,R3,3*8);
	decrypt_round(R2,R3,R0,R1,2*8);
	decrypt_round(R0,R1,R2,R3,1*8);
	decrypt_last_round(R2,R3,R0,R1,0);

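	/* %r10 was packed by decrypt_last_round with b in its upper and a
	   in its lower 32 bits, ready for the final whitening and store */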
	input_whitening(%r10,%r11,a_offset)
	movq	%r10,	(%rsi)

	shl	$32,	R1
	xor	R0,	R1

	input_whitening(R1,%r11,c_offset)
	movq	R1,	8(%rsi)

	popq	R1
	movl	$1,%eax
	ret
ENDPROC(twofish_dec_blk)