#!/usr/bin/perl -w
#
# MD5 optimized for AMD64.
#
# Author: Marc Bevand <bevand_m (at) epita.fr>
# Licence: I hereby disclaim the copyright on this code and place it
# in the public domain.
#
# (Provenance: /titanic_50/usr/src/common/crypto/md5/amd64/md5_amd64.pl,
#  revision 84ba300aaa958c8e8427c2ec66a932d86bee71c4)
#

#
# The following is Marc Bevand's MD5 implementation optimized for
# AMD64.  It has been lifted intact, except for changing the comment
# character and adding comments.
#
# typedef struct {
#	uint32_t state[4];	/* state (ABCD) */
#	uint32_t count[2];	/* number of bits, modulo 2^64 (lsb first) */
#	union	{
#		uint8_t		buf8[64];	/* undigested input */
#		uint32_t	buf32[16];	/* realigned input */
#		} buf_un;
#	} MD5_CTX;
#
# void md5_block_asm_host_order(MD5_CTX *ctx, const void *inpp,
#        unsigned int input_length_in_blocks);
#
# Registers used:
#	rax  A		r8  old A
#	rbx  B		r9  old B
#	rcx  C		r10 tmp
#	rdx  D		r11 tmp
#	rsi  ptr	r12 tmp
#	rdi  end	r13 -
#	rbp  -		r14 old C
#	rsp  stack	r15 old D
#
use strict;
use warnings;

# $code accumulates the generated assembler source; it is written to the
# output file in one shot at the end of the script.
my $code;

# round1_step() does:
#   dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
#   %r10d = X[k_next]
#   %r11d = z' (copy of z for the next step)
# Each round1_step() takes about 5.3 clocks (9 instructions, 1.7 IPC)
#
# Appends the assembler text for one MD5 round-1 step to the file-scoped
# $code buffer.  Arguments:
#   $pos     -1 for the first step of the round (emits the extra setup
#            that primes %r10d/%r11d), 0 for middle steps, 1 for the last
#   $dst, $x, $y, $z  register names holding the rotating A/B/C/D values
#   $k_next  index of the message word X[k] to prefetch for the NEXT step
#   $T_i     the round constant T[i], as a hex string
#   $s       left-rotate count for this step
sub round1_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= "	mov	0*4(%rsi),	%r10d		/* (NEXT STEP) X[0] */\n" if ($pos == -1);
    $code .= "	mov	%edx,		%r11d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    # lea's displacement is a signed 32-bit value, so constants with the
    # high bit set are rewritten as their negative two's-complement form.
    $T_i = sprintf("-0x%08x", (0xffffffff ^ hex($T_i))+1)
        if (hex($T_i) >= 0x80000000);

    $code .= <<EOF;
	xor	$y,		%r11d		/* y ^ ... */
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... r1 */
	and	$x,		%r11d		/* x & ... */
	xor	$z,		%r11d		/* z ^ ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	add	%r11d,		$dst		/* dst += ... */
	rol	\$$s,		$dst		/* dst <<< s */
	mov	$y,		%r11d		/* (NEXT STEP) z' = $y */
	add	$x,		$dst		/* dst += x */
EOF
}

# round2_step() does:
#   dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
#   %r10d = X[k_next]
#   %r11d = z' (copy of z for the next step)
#   %r12d = z' (copy of z for the next step)
# Each round2_step() takes about 5.4 clocks (11 instructions, 2.0 IPC)
#
# Appends the assembler text for one MD5 round-2 step to the file-scoped
# $code buffer.  Argument meanings match round1_step(): $pos selects
# first (-1) / middle (0) / last (1) step, $dst/$x/$y/$z are register
# names, $k_next prefetches the next message word, $T_i is the round
# constant (hex string) and $s the rotate count.
sub round2_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= "	mov	1*4(%rsi),	%r10d		/* (NEXT STEP) X[1] */\n" if ($pos == -1);
    $code .= "	mov	%edx,		%r11d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= "	mov	%edx,		%r12d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    # lea's displacement is a signed 32-bit value, so constants with the
    # high bit set are rewritten as their negative two's-complement form.
    $T_i = sprintf("-0x%08x", (0xffffffff ^ hex($T_i))+1)
        if (hex($T_i) >= 0x80000000);

    $code .= <<EOF;
	not	%r11d				/* not z */
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... r2 */
	and	$x,		%r12d		/* x & z */
	and	$y,		%r11d		/* y & (not z) */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	or	%r11d,		%r12d		/* (y & (not z)) | (x & z) */
	mov	$y,		%r11d		/* (NEXT STEP) z' = $y */
	add	%r12d,		$dst		/* dst += ... */
	mov	$y,		%r12d		/* (NEXT STEP) z' = $y */
	rol	\$$s,		$dst		/* dst <<< s */
	add	$x,		$dst		/* dst += x */
EOF
}

# round3_step() does:
#   dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
#   %r10d = X[k_next]
#   %r11d = y' (copy of y for the next step)
# Each round3_step() takes about 4.2 clocks (8 instructions, 1.9 IPC)
#
# Appends the assembler text for one MD5 round-3 step to the file-scoped
# $code buffer.  Argument meanings match round1_step(): $pos selects
# first (-1) / middle (0) / last (1) step, $dst/$x/$y/$z are register
# names, $k_next prefetches the next message word, $T_i is the round
# constant (hex string) and $s the rotate count.
sub round3_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= "	mov	5*4(%rsi),	%r10d		/* (NEXT STEP) X[5] */\n" if ($pos == -1);
    $code .= "	mov	%ecx,		%r11d		/* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);
    # lea's displacement is a signed 32-bit value, so constants with the
    # high bit set are rewritten as their negative two's-complement form.
    $T_i = sprintf("-0x%08x", (0xffffffff ^ hex($T_i))+1)
        if (hex($T_i) >= 0x80000000);

    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... r3 */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	xor	$z,		%r11d		/* z ^ ... */
	xor	$x,		%r11d		/* x ^ ... */
	add	%r11d,		$dst		/* dst += ... */
	rol	\$$s,		$dst		/* dst <<< s */
	mov	$x,		%r11d		/* (NEXT STEP) y' = $x */
	add	$x,		$dst		/* dst += x */
EOF
}

# round4_step() does:
#   dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
#   %r10d = X[k_next]
#   %r11d = not z' (copy of not z for the next step)
# Each round4_step() takes about 5.2 clocks (9 instructions, 1.7 IPC)
#
# Appends the assembler text for one MD5 round-4 step to the file-scoped
# $code buffer.  Argument meanings match round1_step(): $pos selects
# first (-1) / middle (0) / last (1) step, $dst/$x/$y/$z are register
# names, $k_next prefetches the next message word, $T_i is the round
# constant (hex string) and $s the rotate count.
sub round4_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    $code .= "	mov	0*4(%rsi),	%r10d		/* (NEXT STEP) X[0] */\n" if ($pos == -1);
    $code .= "	mov	\$0xffffffff,	%r11d\n" if ($pos == -1);
    $code .= "	xor	%edx,		%r11d		/* (NEXT STEP) not z' = not %edx*/\n"
        if ($pos == -1);
    # lea's displacement is a signed 32-bit value, so constants with the
    # high bit set are rewritten as their negative two's-complement form.
    $T_i = sprintf("-0x%08x", (0xffffffff ^ hex($T_i))+1)
        if (hex($T_i) >= 0x80000000);

    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... r4 */
	or	$x,		%r11d		/* x | ... */
	xor	$y,		%r11d		/* y ^ ... */
	add	%r11d,		$dst		/* dst += ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	mov	\$0xffffffff,	%r11d
	rol	\$$s,		$dst		/* dst <<< s */
	xor	$y,		%r11d		/* (NEXT STEP) not z' = not $y */
	add	$x,		$dst		/* dst += x */
EOF
}


#
# Execution begins here.
#

# The sole command-line argument names the output file; STDOUT is
# re-opened onto it so a single print at the end emits everything.
# Three-argument open is used so a hostile filename cannot inject a mode.
my $output = shift;
open STDOUT, '>', $output or die "can't open $output: $!";

# Emit the lint stub and the function prologue: save callee-saved
# registers, compute the end pointer from the block count (64 bytes per
# block), and load the chaining state A/B/C/D from the MD5_CTX.
$code .= <<EOF;
#if defined(lint) || defined(__lint)
#include <sys/md5.h>

/* ARGSUSED */
void md5_block_asm_host_order(MD5_CTX *ctx, const void *inpp,
    unsigned int input_length_in_blocks)
{
}

#else
#include <sys/asm_linkage.h>

	ENTRY_NP(md5_block_asm_host_order)
	push	%rbp
	push	%rbx
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	/ rdi = arg #1 (ctx, MD5_CTX pointer)
	/ rsi = arg #2 (ptr, data pointer)
	/ rdx = arg #3 (nbr, number of 64-byte blocks to process)
	mov	%rdi,		%rbp	/ rbp = ctx
	shl	\$6,		%rdx	/ rdx = nbr in bytes
	lea	(%rsi,%rdx),	%rdi	/ rdi = end
	mov	0*4(%rbp),	%eax	/ eax = ctx->A
	mov	1*4(%rbp),	%ebx	/ ebx = ctx->B
	mov	2*4(%rbp),	%ecx	/ ecx = ctx->C
	mov	3*4(%rbp),	%edx	/ edx = ctx->D
	push	%rbp			/ save ctx
	/ end is 'rdi'
	/ ptr is 'rsi'
	/ A is 'eax'
	/ B is 'ebx'
	/ C is 'ecx'
	/ D is 'edx'

	cmp	%rdi,		%rsi		/ cmp end with ptr
	je	1f				/ jmp if ptr == end

	/ BEGIN of loop over 64-byte blocks
2:	/ save old values of A, B, C, D
	mov	%eax,		%r8d
	mov	%ebx,		%r9d
	mov	%ecx,		%r14d
	mov	%edx,		%r15d
EOF
# The four MD5 rounds, 16 steps each.  Message-word order, constants
# T[i] and rotate counts follow RFC 1321; each step also prefetches the
# NEXT step's message word (second-to-last argument).
round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
round1_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x49b40821','22');

round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
round2_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x8d2a4c8a','20');

round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');

round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');
# Epilogue: fold the old chaining values back in, advance to the next
# 64-byte block, and on completion store A/B/C/D back into the MD5_CTX
# and restore the callee-saved registers.
$code .= <<EOF;
	/ add old values of A, B, C, D
	add	%r8d,	%eax
	add	%r9d,	%ebx
	add	%r14d,	%ecx
	add	%r15d,	%edx

	/ loop control
	add	\$64,		%rsi		/ ptr += 64
	cmp	%rdi,		%rsi		/ cmp end with ptr
	jb	2b				/ jmp if ptr < end
	/ END of loop over 64-byte blocks

1:	pop	%rbp				/ restore ctx
	mov	%eax,		0*4(%rbp)	/ ctx->A = A
	mov	%ebx,		1*4(%rbp)	/ ctx->B = B
	mov	%ecx,		2*4(%rbp)	/ ctx->C = C
	mov	%edx,		3*4(%rbp)	/ ctx->D = D

	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbx
	pop	%rbp
	ret
	SET_SIZE(md5_block_asm_host_order)

#endif /* lint || __lint */
EOF

print $code;
