1#! /usr/bin/env perl
2# Copyright 2011-2020 The OpenSSL Project Authors. All Rights Reserved.
3#
4# Licensed under the OpenSSL license (the "License").  You may not use
5# this file except in compliance with the License.  You can obtain a copy
6# in the file LICENSE in the source distribution or at
7# https://www.openssl.org/source/license.html
8
9
10# ====================================================================
11# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12# project. The module is, however, dual licensed under OpenSSL and
13# CRYPTOGAMS licenses depending on where you obtain it. For further
14# details see http://www.openssl.org/~appro/cryptogams/.
15# ====================================================================
16
17# August 2011.
18#
19# Companion to x86_64-mont.pl that optimizes cache-timing attack
20# countermeasures. The subroutines are produced by replacing bp[i]
21# references in their x86_64-mont.pl counterparts with cache-neutral
22# references to the powers table computed in BN_mod_exp_mont_consttime.
23# In addition, a subroutine that scatters elements of the powers table
24# is implemented, so that scattering/gathering can be tuned without
25# modifying bn_exp.c.
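#
# For illustration only, a hedged C sketch (not part of this module) of
# how a pre-computed power would be scattered into the interleaved table
# that these subroutines later gather from. The helper name and the use
# of BN_ULONG (a 64-bit limb) are assumptions based on the comments in
# this file; the real scatter lives in bn_exp.c / bn_scatter5.
#
#	/* store limb j of power i at table[32*j + i], window size 5 */
#	static void scatter5(BN_ULONG *table, const BN_ULONG *b,
#	                     int num, int i)
#	{
#	    int j;
#	    for (j = 0; j < num; j++)
#	        table[32 * j + i] = b[j];
#	}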
26
27# August 2013.
28#
29# Add MULX/AD*X code paths and additional interfaces to optimize for
30# the branch prediction unit. For input lengths that are multiples of 8,
31# the np argument is not just the modulus value, but the modulus
32# interleaved with 0. This is done to optimize the post-condition...
33
34$flavour = shift;
35$output  = shift;
36if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
37
38$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
39
40$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
41( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
42( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
43die "can't locate x86_64-xlate.pl";
44
45open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
46*STDOUT=*OUT;
47
48if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
49		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
50	$addx = ($1>=2.23);
51}
52
53if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
54	    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
55	$addx = ($1>=2.10);
56}
57
58if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
59	    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
60	$addx = ($1>=12);
61}
62
63if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([0-9]+)\.([0-9]+)/) {
64	my $ver = $2 + $3/100.0;	# 3.1->3.01, 3.10->3.10
65	$addx = ($ver>=3.03);
66}
67
68# int bn_mul_mont_gather5(
69$rp="%rdi";	# BN_ULONG *rp,
70$ap="%rsi";	# const BN_ULONG *ap,
71$bp="%rdx";	# const BN_ULONG *bp,
72$np="%rcx";	# const BN_ULONG *np,
73$n0="%r8";	# const BN_ULONG *n0,
74$num="%r9";	# int num,
75		# int idx);	# 0 to 2^5-1, "index" in $bp holding
76				# pre-computed powers of a', interlaced
77				# in such a manner that b[0] is $bp[idx],
78				# b[1] is $bp[2^5+idx], etc.
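#
# For illustration only, a hedged C sketch (not part of this module) of a
# constant-time gather matching the layout above: every table element is
# read and the wanted one is selected with a mask, so the memory access
# pattern does not depend on idx. The helper name is an assumption.
#
#	/* b[j] = table[32*j + idx], without idx-dependent addressing */
#	static void gather5(BN_ULONG *b, const BN_ULONG *table,
#	                    int num, int idx)
#	{
#	    int i, j;
#	    for (j = 0; j < num; j++) {
#	        BN_ULONG acc = 0;
#	        for (i = 0; i < 32; i++) {
#	            BN_ULONG mask = 0 - (BN_ULONG)(i == idx); /* all-ones or 0 */
#	            acc |= table[32 * j + i] & mask;
#	        }
#	        b[j] = acc;
#	    }
#	}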
79$lo0="%r10";
80$hi0="%r11";
81$hi1="%r13";
82$i="%r14";
83$j="%r15";
84$m0="%rbx";
85$m1="%rbp";
86
87$code=<<___;
88.text
89
90.extern	OPENSSL_ia32cap_P
91
92.globl	bn_mul_mont_gather5
93.type	bn_mul_mont_gather5,\@function,6
94.align	64
95bn_mul_mont_gather5:
96.cfi_startproc
97	mov	${num}d,${num}d
98	mov	%rsp,%rax
99.cfi_def_cfa_register	%rax
100	test	\$7,${num}d
101	jnz	.Lmul_enter
102___
103$code.=<<___ if ($addx);
104	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
105___
106$code.=<<___;
107	jmp	.Lmul4x_enter
108
109.align	16
110.Lmul_enter:
111	movd	`($win64?56:8)`(%rsp),%xmm5	# load 7th argument
112	push	%rbx
113.cfi_push	%rbx
114	push	%rbp
115.cfi_push	%rbp
116	push	%r12
117.cfi_push	%r12
118	push	%r13
119.cfi_push	%r13
120	push	%r14
121.cfi_push	%r14
122	push	%r15
123.cfi_push	%r15
124
125	neg	$num
126	mov	%rsp,%r11
127	lea	-280(%rsp,$num,8),%r10	# future alloca(8*(num+2)+256+8)
128	neg	$num			# restore $num
129	and	\$-1024,%r10		# minimize TLB usage
130
131	# An OS-agnostic version of __chkstk.
132	#
133	# Some OSes (Windows) insist on the stack being "wired" to
134	# physical memory in a strictly sequential manner, i.e. if a stack
135	# allocation spans two pages, then a reference to the farthest one
136	# can be punished with SEGV. But page walking does good even on
137	# other OSes, because it guarantees that a villain thread hits
138	# the guard page before it can do damage to an innocent one...
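	#
	# For illustration only, a rough C equivalent of the probe loop below
	# (the helper is made up, not part of OpenSSL): touch one word per 4K
	# page while moving down towards the new stack pointer.
	#
	#	void page_walk(volatile char *old_sp, volatile char *new_sp)
	#	{
	#	    volatile char *p = old_sp;
	#	    while (p > new_sp) {
	#	        (void)*p;              /* probe the current page     */
	#	        p -= 4096;             /* step to the previous page  */
	#	    }
	#	    (void)*new_sp;             /* probe the final page       */
	#	}
	#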
139	sub	%r10,%r11
140	and	\$-4096,%r11
141	lea	(%r10,%r11),%rsp
142	mov	(%rsp),%r11
143	cmp	%r10,%rsp
144	ja	.Lmul_page_walk
145	jmp	.Lmul_page_walk_done
146
147.Lmul_page_walk:
148	lea	-4096(%rsp),%rsp
149	mov	(%rsp),%r11
150	cmp	%r10,%rsp
151	ja	.Lmul_page_walk
152.Lmul_page_walk_done:
153
154	lea	.Linc(%rip),%r10
155	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
156.cfi_cfa_expression	%rsp+8,$num,8,mul,plus,deref,+8
157.Lmul_body:
158
159	lea	128($bp),%r12		# reassign $bp (+size optimization)
160___
161		$bp="%r12";
162		$STRIDE=2**5*8;		# 5 is "window size"
163		$N=$STRIDE/4;		# should match cache line size
164$code.=<<___;
165	movdqa	0(%r10),%xmm0		# 00000001000000010000000000000000
166	movdqa	16(%r10),%xmm1		# 00000002000000020000000200000002
167	lea	24-112(%rsp,$num,8),%r10# place the mask after tp[num+3] (+ICache optimization)
168	and	\$-16,%r10
169
170	pshufd	\$0,%xmm5,%xmm5		# broadcast index
171	movdqa	%xmm1,%xmm4
172	movdqa	%xmm1,%xmm2
173___
174########################################################################
175# calculate mask by comparing 0..31 to index and save result to stack
176#
177$code.=<<___;
178	paddd	%xmm0,%xmm1
179	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
180	.byte	0x67
181	movdqa	%xmm4,%xmm3
182___
183for($k=0;$k<$STRIDE/16-4;$k+=4) {
184$code.=<<___;
185	paddd	%xmm1,%xmm2
186	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
187	movdqa	%xmm0,`16*($k+0)+112`(%r10)
188	movdqa	%xmm4,%xmm0
189
190	paddd	%xmm2,%xmm3
191	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
192	movdqa	%xmm1,`16*($k+1)+112`(%r10)
193	movdqa	%xmm4,%xmm1
194
195	paddd	%xmm3,%xmm0
196	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
197	movdqa	%xmm2,`16*($k+2)+112`(%r10)
198	movdqa	%xmm4,%xmm2
199
200	paddd	%xmm0,%xmm1
201	pcmpeqd	%xmm5,%xmm0
202	movdqa	%xmm3,`16*($k+3)+112`(%r10)
203	movdqa	%xmm4,%xmm3
204___
205}
206$code.=<<___;				# last iteration can be optimized
207	paddd	%xmm1,%xmm2
208	pcmpeqd	%xmm5,%xmm1
209	movdqa	%xmm0,`16*($k+0)+112`(%r10)
210
211	paddd	%xmm2,%xmm3
212	.byte	0x67
213	pcmpeqd	%xmm5,%xmm2
214	movdqa	%xmm1,`16*($k+1)+112`(%r10)
215
216	pcmpeqd	%xmm5,%xmm3
217	movdqa	%xmm2,`16*($k+2)+112`(%r10)
218	pand	`16*($k+0)-128`($bp),%xmm0	# while it's still in register
219
220	pand	`16*($k+1)-128`($bp),%xmm1
221	pand	`16*($k+2)-128`($bp),%xmm2
222	movdqa	%xmm3,`16*($k+3)+112`(%r10)
223	pand	`16*($k+3)-128`($bp),%xmm3
224	por	%xmm2,%xmm0
225	por	%xmm3,%xmm1
226___
227for($k=0;$k<$STRIDE/16-4;$k+=4) {
228$code.=<<___;
229	movdqa	`16*($k+0)-128`($bp),%xmm4
230	movdqa	`16*($k+1)-128`($bp),%xmm5
231	movdqa	`16*($k+2)-128`($bp),%xmm2
232	pand	`16*($k+0)+112`(%r10),%xmm4
233	movdqa	`16*($k+3)-128`($bp),%xmm3
234	pand	`16*($k+1)+112`(%r10),%xmm5
235	por	%xmm4,%xmm0
236	pand	`16*($k+2)+112`(%r10),%xmm2
237	por	%xmm5,%xmm1
238	pand	`16*($k+3)+112`(%r10),%xmm3
239	por	%xmm2,%xmm0
240	por	%xmm3,%xmm1
241___
242}
243$code.=<<___;
244	por	%xmm1,%xmm0
245	pshufd	\$0x4e,%xmm0,%xmm1
246	por	%xmm1,%xmm0
247	lea	$STRIDE($bp),$bp
248	movq	%xmm0,$m0		# m0=bp[0]
249
250	mov	($n0),$n0		# pull n0[0] value
251	mov	($ap),%rax
252
253	xor	$i,$i			# i=0
254	xor	$j,$j			# j=0
255
256	mov	$n0,$m1
257	mulq	$m0			# ap[0]*bp[0]
258	mov	%rax,$lo0
259	mov	($np),%rax
260
261	imulq	$lo0,$m1		# "tp[0]"*n0
262	mov	%rdx,$hi0
263
264	mulq	$m1			# np[0]*m1
265	add	%rax,$lo0		# discarded
266	mov	8($ap),%rax
267	adc	\$0,%rdx
268	mov	%rdx,$hi1
269
270	lea	1($j),$j		# j++
271	jmp	.L1st_enter
272
273.align	16
274.L1st:
275	add	%rax,$hi1
276	mov	($ap,$j,8),%rax
277	adc	\$0,%rdx
278	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
279	mov	$lo0,$hi0
280	adc	\$0,%rdx
281	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
282	mov	%rdx,$hi1
283
284.L1st_enter:
285	mulq	$m0			# ap[j]*bp[0]
286	add	%rax,$hi0
287	mov	($np,$j,8),%rax
288	adc	\$0,%rdx
289	lea	1($j),$j		# j++
290	mov	%rdx,$lo0
291
292	mulq	$m1			# np[j]*m1
293	cmp	$num,$j
294	jne	.L1st			# note that upon exit $j==$num, so
295					# they can be used interchangeably
296
297	add	%rax,$hi1
298	adc	\$0,%rdx
299	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
300	adc	\$0,%rdx
301	mov	$hi1,-16(%rsp,$num,8)	# tp[num-1]
302	mov	%rdx,$hi1
303	mov	$lo0,$hi0
304
305	xor	%rdx,%rdx
306	add	$hi0,$hi1
307	adc	\$0,%rdx
308	mov	$hi1,-8(%rsp,$num,8)
309	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
310
311	lea	1($i),$i		# i++
312	jmp	.Louter
313.align	16
314.Louter:
315	lea	24+128(%rsp,$num,8),%rdx	# where 256-byte mask is (+size optimization)
316	and	\$-16,%rdx
317	pxor	%xmm4,%xmm4
318	pxor	%xmm5,%xmm5
319___
320for($k=0;$k<$STRIDE/16;$k+=4) {
321$code.=<<___;
322	movdqa	`16*($k+0)-128`($bp),%xmm0
323	movdqa	`16*($k+1)-128`($bp),%xmm1
324	movdqa	`16*($k+2)-128`($bp),%xmm2
325	movdqa	`16*($k+3)-128`($bp),%xmm3
326	pand	`16*($k+0)-128`(%rdx),%xmm0
327	pand	`16*($k+1)-128`(%rdx),%xmm1
328	por	%xmm0,%xmm4
329	pand	`16*($k+2)-128`(%rdx),%xmm2
330	por	%xmm1,%xmm5
331	pand	`16*($k+3)-128`(%rdx),%xmm3
332	por	%xmm2,%xmm4
333	por	%xmm3,%xmm5
334___
335}
336$code.=<<___;
337	por	%xmm5,%xmm4
338	pshufd	\$0x4e,%xmm4,%xmm0
339	por	%xmm4,%xmm0
340	lea	$STRIDE($bp),$bp
341
342	mov	($ap),%rax		# ap[0]
343	movq	%xmm0,$m0		# m0=bp[i]
344
345	xor	$j,$j			# j=0
346	mov	$n0,$m1
347	mov	(%rsp),$lo0
348
349	mulq	$m0			# ap[0]*bp[i]
350	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
351	mov	($np),%rax
352	adc	\$0,%rdx
353
354	imulq	$lo0,$m1		# tp[0]*n0
355	mov	%rdx,$hi0
356
357	mulq	$m1			# np[0]*m1
358	add	%rax,$lo0		# discarded
359	mov	8($ap),%rax
360	adc	\$0,%rdx
361	mov	8(%rsp),$lo0		# tp[1]
362	mov	%rdx,$hi1
363
364	lea	1($j),$j		# j++
365	jmp	.Linner_enter
366
367.align	16
368.Linner:
369	add	%rax,$hi1
370	mov	($ap,$j,8),%rax
371	adc	\$0,%rdx
372	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
373	mov	(%rsp,$j,8),$lo0
374	adc	\$0,%rdx
375	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
376	mov	%rdx,$hi1
377
378.Linner_enter:
379	mulq	$m0			# ap[j]*bp[i]
380	add	%rax,$hi0
381	mov	($np,$j,8),%rax
382	adc	\$0,%rdx
383	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
384	mov	%rdx,$hi0
385	adc	\$0,$hi0
386	lea	1($j),$j		# j++
387
388	mulq	$m1			# np[j]*m1
389	cmp	$num,$j
390	jne	.Linner			# note that upon exit $j==$num, so
391					# they can be used interchangeably
392	add	%rax,$hi1
393	adc	\$0,%rdx
394	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
395	mov	(%rsp,$num,8),$lo0
396	adc	\$0,%rdx
397	mov	$hi1,-16(%rsp,$num,8)	# tp[num-1]
398	mov	%rdx,$hi1
399
400	xor	%rdx,%rdx
401	add	$hi0,$hi1
402	adc	\$0,%rdx
403	add	$lo0,$hi1		# pull upmost overflow bit
404	adc	\$0,%rdx
405	mov	$hi1,-8(%rsp,$num,8)
406	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
407
408	lea	1($i),$i		# i++
409	cmp	$num,$i
410	jb	.Louter
411
412	xor	$i,$i			# i=0 and clear CF!
413	mov	(%rsp),%rax		# tp[0]
414	lea	(%rsp),$ap		# borrow ap for tp
415	mov	$num,$j			# j=num
416	jmp	.Lsub
417.align	16
418.Lsub:	sbb	($np,$i,8),%rax
419	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
420	mov	8($ap,$i,8),%rax	# tp[i+1]
421	lea	1($i),$i		# i++
422	dec	$j			# doesn't affect CF!
423	jnz	.Lsub
424
425	sbb	\$0,%rax		# handle upmost overflow bit
426	mov	\$-1,%rbx
427	xor	%rax,%rbx
428	xor	$i,$i
429	mov	$num,$j			# j=num
430
431.Lcopy:					# conditional copy
432	mov	($rp,$i,8),%rcx
433	mov	(%rsp,$i,8),%rdx
434	and	%rbx,%rcx
435	and	%rax,%rdx
436	mov	$i,(%rsp,$i,8)		# zap temporary vector
437	or	%rcx,%rdx
438	mov	%rdx,($rp,$i,8)		# rp[i]=tp[i]
439	lea	1($i),$i
440	sub	\$1,$j
441	jnz	.Lcopy
442
443	mov	8(%rsp,$num,8),%rsi	# restore %rsp
444.cfi_def_cfa	%rsi,8
445	mov	\$1,%rax
446
447	mov	-48(%rsi),%r15
448.cfi_restore	%r15
449	mov	-40(%rsi),%r14
450.cfi_restore	%r14
451	mov	-32(%rsi),%r13
452.cfi_restore	%r13
453	mov	-24(%rsi),%r12
454.cfi_restore	%r12
455	mov	-16(%rsi),%rbp
456.cfi_restore	%rbp
457	mov	-8(%rsi),%rbx
458.cfi_restore	%rbx
459	lea	(%rsi),%rsp
460.cfi_def_cfa_register	%rsp
461.Lmul_epilogue:
462	ret
463.cfi_endproc
464.size	bn_mul_mont_gather5,.-bn_mul_mont_gather5
465___
466{{{
467my @A=("%r10","%r11");
468my @N=("%r13","%rdi");
469$code.=<<___;
470.type	bn_mul4x_mont_gather5,\@function,6
471.align	32
472bn_mul4x_mont_gather5:
473.cfi_startproc
474	.byte	0x67
475	mov	%rsp,%rax
476.cfi_def_cfa_register	%rax
477.Lmul4x_enter:
478___
479$code.=<<___ if ($addx);
480	and	\$0x80108,%r11d
481	cmp	\$0x80108,%r11d		# check for AD*X+BMI2+BMI1
482	je	.Lmulx4x_enter
483___
484$code.=<<___;
485	push	%rbx
486.cfi_push	%rbx
487	push	%rbp
488.cfi_push	%rbp
489	push	%r12
490.cfi_push	%r12
491	push	%r13
492.cfi_push	%r13
493	push	%r14
494.cfi_push	%r14
495	push	%r15
496.cfi_push	%r15
497.Lmul4x_prologue:
498
499	.byte	0x67
500	shl	\$3,${num}d		# convert $num to bytes
501	lea	($num,$num,2),%r10	# 3*$num in bytes
502	neg	$num			# -$num
503
504	##############################################################
505	# Ensure that stack frame doesn't alias with $rptr+3*$num
506	# modulo 4096, which covers ret[num], am[num] and n[num]
507	# (see bn_exp.c). This is done to allow the memory disambiguation
508	# logic to do its magic. [An extra [num] is allocated in order
509	# to align with bn_power5's frame, which is cleansed after
510	# completing exponentiation. An extra 256 bytes is for the power
511	# mask calculated from the 7th argument, the index.]
512	#
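	#
	# For illustration only, a rough and hedged C sketch of the main path
	# below (the helper name is made up; the code additionally handles
	# the case where the slide would not fit):
	#
	#	/* slide the frame so it lines up with rp modulo 4096, then
	#	 * round down to 64-byte alignment                            */
	#	uintptr_t place_frame(uintptr_t sp, uintptr_t rp, size_t need)
	#	{
	#	    uintptr_t frame = sp - need;
	#	    size_t    skew  = (frame - rp) & 4095;
	#	    frame -= skew;
	#	    return frame & ~(uintptr_t)63;
	#	}
	#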
513	lea	-320(%rsp,$num,2),%r11
514	mov	%rsp,%rbp
515	sub	$rp,%r11
516	and	\$4095,%r11
517	cmp	%r11,%r10
518	jb	.Lmul4xsp_alt
519	sub	%r11,%rbp		# align with $rp
520	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
521	jmp	.Lmul4xsp_done
522
523.align	32
524.Lmul4xsp_alt:
525	lea	4096-320(,$num,2),%r10
526	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
527	sub	%r10,%r11
528	mov	\$0,%r10
529	cmovc	%r10,%r11
530	sub	%r11,%rbp
531.Lmul4xsp_done:
532	and	\$-64,%rbp
533	mov	%rsp,%r11
534	sub	%rbp,%r11
535	and	\$-4096,%r11
536	lea	(%rbp,%r11),%rsp
537	mov	(%rsp),%r10
538	cmp	%rbp,%rsp
539	ja	.Lmul4x_page_walk
540	jmp	.Lmul4x_page_walk_done
541
542.Lmul4x_page_walk:
543	lea	-4096(%rsp),%rsp
544	mov	(%rsp),%r10
545	cmp	%rbp,%rsp
546	ja	.Lmul4x_page_walk
547.Lmul4x_page_walk_done:
548
549	neg	$num
550
551	mov	%rax,40(%rsp)
552.cfi_cfa_expression	%rsp+40,deref,+8
553.Lmul4x_body:
554
555	call	mul4x_internal
556
557	mov	40(%rsp),%rsi		# restore %rsp
558.cfi_def_cfa	%rsi,8
559	mov	\$1,%rax
560
561	mov	-48(%rsi),%r15
562.cfi_restore	%r15
563	mov	-40(%rsi),%r14
564.cfi_restore	%r14
565	mov	-32(%rsi),%r13
566.cfi_restore	%r13
567	mov	-24(%rsi),%r12
568.cfi_restore	%r12
569	mov	-16(%rsi),%rbp
570.cfi_restore	%rbp
571	mov	-8(%rsi),%rbx
572.cfi_restore	%rbx
573	lea	(%rsi),%rsp
574.cfi_def_cfa_register	%rsp
575.Lmul4x_epilogue:
576	ret
577.cfi_endproc
578.size	bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
579
580.type	mul4x_internal,\@abi-omnipotent
581.align	32
582mul4x_internal:
583.cfi_startproc
584	shl	\$5,$num		# $num was in bytes
585	movd	`($win64?56:8)`(%rax),%xmm5	# load 7th argument, index
586	lea	.Linc(%rip),%rax
587	lea	128(%rdx,$num),%r13	# end of powers table (+size optimization)
588	shr	\$5,$num		# restore $num
589___
590		$bp="%r12";
591		$STRIDE=2**5*8;		# 5 is "window size"
592		$N=$STRIDE/4;		# should match cache line size
593		$tp=$i;
594$code.=<<___;
595	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
596	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
597	lea	88-112(%rsp,$num),%r10	# place the mask after tp[num+1] (+ICache optimization)
598	lea	128(%rdx),$bp		# size optimization
599
600	pshufd	\$0,%xmm5,%xmm5		# broadcast index
601	movdqa	%xmm1,%xmm4
602	.byte	0x67,0x67
603	movdqa	%xmm1,%xmm2
604___
605########################################################################
606# calculate mask by comparing 0..31 to index and save result to stack
607#
608$code.=<<___;
609	paddd	%xmm0,%xmm1
610	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
611	.byte	0x67
612	movdqa	%xmm4,%xmm3
613___
614for($i=0;$i<$STRIDE/16-4;$i+=4) {
615$code.=<<___;
616	paddd	%xmm1,%xmm2
617	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
618	movdqa	%xmm0,`16*($i+0)+112`(%r10)
619	movdqa	%xmm4,%xmm0
620
621	paddd	%xmm2,%xmm3
622	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
623	movdqa	%xmm1,`16*($i+1)+112`(%r10)
624	movdqa	%xmm4,%xmm1
625
626	paddd	%xmm3,%xmm0
627	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
628	movdqa	%xmm2,`16*($i+2)+112`(%r10)
629	movdqa	%xmm4,%xmm2
630
631	paddd	%xmm0,%xmm1
632	pcmpeqd	%xmm5,%xmm0
633	movdqa	%xmm3,`16*($i+3)+112`(%r10)
634	movdqa	%xmm4,%xmm3
635___
636}
637$code.=<<___;				# last iteration can be optimized
638	paddd	%xmm1,%xmm2
639	pcmpeqd	%xmm5,%xmm1
640	movdqa	%xmm0,`16*($i+0)+112`(%r10)
641
642	paddd	%xmm2,%xmm3
643	.byte	0x67
644	pcmpeqd	%xmm5,%xmm2
645	movdqa	%xmm1,`16*($i+1)+112`(%r10)
646
647	pcmpeqd	%xmm5,%xmm3
648	movdqa	%xmm2,`16*($i+2)+112`(%r10)
649	pand	`16*($i+0)-128`($bp),%xmm0	# while it's still in register
650
651	pand	`16*($i+1)-128`($bp),%xmm1
652	pand	`16*($i+2)-128`($bp),%xmm2
653	movdqa	%xmm3,`16*($i+3)+112`(%r10)
654	pand	`16*($i+3)-128`($bp),%xmm3
655	por	%xmm2,%xmm0
656	por	%xmm3,%xmm1
657___
658for($i=0;$i<$STRIDE/16-4;$i+=4) {
659$code.=<<___;
660	movdqa	`16*($i+0)-128`($bp),%xmm4
661	movdqa	`16*($i+1)-128`($bp),%xmm5
662	movdqa	`16*($i+2)-128`($bp),%xmm2
663	pand	`16*($i+0)+112`(%r10),%xmm4
664	movdqa	`16*($i+3)-128`($bp),%xmm3
665	pand	`16*($i+1)+112`(%r10),%xmm5
666	por	%xmm4,%xmm0
667	pand	`16*($i+2)+112`(%r10),%xmm2
668	por	%xmm5,%xmm1
669	pand	`16*($i+3)+112`(%r10),%xmm3
670	por	%xmm2,%xmm0
671	por	%xmm3,%xmm1
672___
673}
674$code.=<<___;
675	por	%xmm1,%xmm0
676	pshufd	\$0x4e,%xmm0,%xmm1
677	por	%xmm1,%xmm0
678	lea	$STRIDE($bp),$bp
679	movq	%xmm0,$m0		# m0=bp[0]
680
681	mov	%r13,16+8(%rsp)		# save end of b[num]
682	mov	$rp, 56+8(%rsp)		# save $rp
683
684	mov	($n0),$n0		# pull n0[0] value
685	mov	($ap),%rax
686	lea	($ap,$num),$ap		# end of a[num]
687	neg	$num
688
689	mov	$n0,$m1
690	mulq	$m0			# ap[0]*bp[0]
691	mov	%rax,$A[0]
692	mov	($np),%rax
693
694	imulq	$A[0],$m1		# "tp[0]"*n0
695	lea	64+8(%rsp),$tp
696	mov	%rdx,$A[1]
697
698	mulq	$m1			# np[0]*m1
699	add	%rax,$A[0]		# discarded
700	mov	8($ap,$num),%rax
701	adc	\$0,%rdx
702	mov	%rdx,$N[1]
703
704	mulq	$m0
705	add	%rax,$A[1]
706	mov	8*1($np),%rax
707	adc	\$0,%rdx
708	mov	%rdx,$A[0]
709
710	mulq	$m1
711	add	%rax,$N[1]
712	mov	16($ap,$num),%rax
713	adc	\$0,%rdx
714	add	$A[1],$N[1]
715	lea	4*8($num),$j		# j=4
716	lea	8*4($np),$np
717	adc	\$0,%rdx
718	mov	$N[1],($tp)
719	mov	%rdx,$N[0]
720	jmp	.L1st4x
721
722.align	32
723.L1st4x:
724	mulq	$m0			# ap[j]*bp[0]
725	add	%rax,$A[0]
726	mov	-8*2($np),%rax
727	lea	32($tp),$tp
728	adc	\$0,%rdx
729	mov	%rdx,$A[1]
730
731	mulq	$m1			# np[j]*m1
732	add	%rax,$N[0]
733	mov	-8($ap,$j),%rax
734	adc	\$0,%rdx
735	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
736	adc	\$0,%rdx
737	mov	$N[0],-24($tp)		# tp[j-1]
738	mov	%rdx,$N[1]
739
740	mulq	$m0			# ap[j]*bp[0]
741	add	%rax,$A[1]
742	mov	-8*1($np),%rax
743	adc	\$0,%rdx
744	mov	%rdx,$A[0]
745
746	mulq	$m1			# np[j]*m1
747	add	%rax,$N[1]
748	mov	($ap,$j),%rax
749	adc	\$0,%rdx
750	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
751	adc	\$0,%rdx
752	mov	$N[1],-16($tp)		# tp[j-1]
753	mov	%rdx,$N[0]
754
755	mulq	$m0			# ap[j]*bp[0]
756	add	%rax,$A[0]
757	mov	8*0($np),%rax
758	adc	\$0,%rdx
759	mov	%rdx,$A[1]
760
761	mulq	$m1			# np[j]*m1
762	add	%rax,$N[0]
763	mov	8($ap,$j),%rax
764	adc	\$0,%rdx
765	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
766	adc	\$0,%rdx
767	mov	$N[0],-8($tp)		# tp[j-1]
768	mov	%rdx,$N[1]
769
770	mulq	$m0			# ap[j]*bp[0]
771	add	%rax,$A[1]
772	mov	8*1($np),%rax
773	adc	\$0,%rdx
774	mov	%rdx,$A[0]
775
776	mulq	$m1			# np[j]*m1
777	add	%rax,$N[1]
778	mov	16($ap,$j),%rax
779	adc	\$0,%rdx
780	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
781	lea	8*4($np),$np
782	adc	\$0,%rdx
783	mov	$N[1],($tp)		# tp[j-1]
784	mov	%rdx,$N[0]
785
786	add	\$32,$j			# j+=4
787	jnz	.L1st4x
788
789	mulq	$m0			# ap[j]*bp[0]
790	add	%rax,$A[0]
791	mov	-8*2($np),%rax
792	lea	32($tp),$tp
793	adc	\$0,%rdx
794	mov	%rdx,$A[1]
795
796	mulq	$m1			# np[j]*m1
797	add	%rax,$N[0]
798	mov	-8($ap),%rax
799	adc	\$0,%rdx
800	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
801	adc	\$0,%rdx
802	mov	$N[0],-24($tp)		# tp[j-1]
803	mov	%rdx,$N[1]
804
805	mulq	$m0			# ap[j]*bp[0]
806	add	%rax,$A[1]
807	mov	-8*1($np),%rax
808	adc	\$0,%rdx
809	mov	%rdx,$A[0]
810
811	mulq	$m1			# np[j]*m1
812	add	%rax,$N[1]
813	mov	($ap,$num),%rax		# ap[0]
814	adc	\$0,%rdx
815	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
816	adc	\$0,%rdx
817	mov	$N[1],-16($tp)		# tp[j-1]
818	mov	%rdx,$N[0]
819
820	lea	($np,$num),$np		# rewind $np
821
822	xor	$N[1],$N[1]
823	add	$A[0],$N[0]
824	adc	\$0,$N[1]
825	mov	$N[0],-8($tp)
826
827	jmp	.Louter4x
828
829.align	32
830.Louter4x:
831	lea	16+128($tp),%rdx	# where 256-byte mask is (+size optimization)
832	pxor	%xmm4,%xmm4
833	pxor	%xmm5,%xmm5
834___
835for($i=0;$i<$STRIDE/16;$i+=4) {
836$code.=<<___;
837	movdqa	`16*($i+0)-128`($bp),%xmm0
838	movdqa	`16*($i+1)-128`($bp),%xmm1
839	movdqa	`16*($i+2)-128`($bp),%xmm2
840	movdqa	`16*($i+3)-128`($bp),%xmm3
841	pand	`16*($i+0)-128`(%rdx),%xmm0
842	pand	`16*($i+1)-128`(%rdx),%xmm1
843	por	%xmm0,%xmm4
844	pand	`16*($i+2)-128`(%rdx),%xmm2
845	por	%xmm1,%xmm5
846	pand	`16*($i+3)-128`(%rdx),%xmm3
847	por	%xmm2,%xmm4
848	por	%xmm3,%xmm5
849___
850}
851$code.=<<___;
852	por	%xmm5,%xmm4
853	pshufd	\$0x4e,%xmm4,%xmm0
854	por	%xmm4,%xmm0
855	lea	$STRIDE($bp),$bp
856	movq	%xmm0,$m0		# m0=bp[i]
857
858	mov	($tp,$num),$A[0]
859	mov	$n0,$m1
860	mulq	$m0			# ap[0]*bp[i]
861	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
862	mov	($np),%rax
863	adc	\$0,%rdx
864
865	imulq	$A[0],$m1		# tp[0]*n0
866	mov	%rdx,$A[1]
867	mov	$N[1],($tp)		# store upmost overflow bit
868
869	lea	($tp,$num),$tp		# rewind $tp
870
871	mulq	$m1			# np[0]*m1
872	add	%rax,$A[0]		# "$N[0]", discarded
873	mov	8($ap,$num),%rax
874	adc	\$0,%rdx
875	mov	%rdx,$N[1]
876
877	mulq	$m0			# ap[j]*bp[i]
878	add	%rax,$A[1]
879	mov	8*1($np),%rax
880	adc	\$0,%rdx
881	add	8($tp),$A[1]		# +tp[1]
882	adc	\$0,%rdx
883	mov	%rdx,$A[0]
884
885	mulq	$m1			# np[j]*m1
886	add	%rax,$N[1]
887	mov	16($ap,$num),%rax
888	adc	\$0,%rdx
889	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
890	lea	4*8($num),$j		# j=4
891	lea	8*4($np),$np
892	adc	\$0,%rdx
893	mov	%rdx,$N[0]
894	jmp	.Linner4x
895
896.align	32
897.Linner4x:
898	mulq	$m0			# ap[j]*bp[i]
899	add	%rax,$A[0]
900	mov	-8*2($np),%rax
901	adc	\$0,%rdx
902	add	16($tp),$A[0]		# ap[j]*bp[i]+tp[j]
903	lea	32($tp),$tp
904	adc	\$0,%rdx
905	mov	%rdx,$A[1]
906
907	mulq	$m1			# np[j]*m1
908	add	%rax,$N[0]
909	mov	-8($ap,$j),%rax
910	adc	\$0,%rdx
911	add	$A[0],$N[0]
912	adc	\$0,%rdx
913	mov	$N[1],-32($tp)		# tp[j-1]
914	mov	%rdx,$N[1]
915
916	mulq	$m0			# ap[j]*bp[i]
917	add	%rax,$A[1]
918	mov	-8*1($np),%rax
919	adc	\$0,%rdx
920	add	-8($tp),$A[1]
921	adc	\$0,%rdx
922	mov	%rdx,$A[0]
923
924	mulq	$m1			# np[j]*m1
925	add	%rax,$N[1]
926	mov	($ap,$j),%rax
927	adc	\$0,%rdx
928	add	$A[1],$N[1]
929	adc	\$0,%rdx
930	mov	$N[0],-24($tp)		# tp[j-1]
931	mov	%rdx,$N[0]
932
933	mulq	$m0			# ap[j]*bp[i]
934	add	%rax,$A[0]
935	mov	8*0($np),%rax
936	adc	\$0,%rdx
937	add	($tp),$A[0]		# ap[j]*bp[i]+tp[j]
938	adc	\$0,%rdx
939	mov	%rdx,$A[1]
940
941	mulq	$m1			# np[j]*m1
942	add	%rax,$N[0]
943	mov	8($ap,$j),%rax
944	adc	\$0,%rdx
945	add	$A[0],$N[0]
946	adc	\$0,%rdx
947	mov	$N[1],-16($tp)		# tp[j-1]
948	mov	%rdx,$N[1]
949
950	mulq	$m0			# ap[j]*bp[i]
951	add	%rax,$A[1]
952	mov	8*1($np),%rax
953	adc	\$0,%rdx
954	add	8($tp),$A[1]
955	adc	\$0,%rdx
956	mov	%rdx,$A[0]
957
958	mulq	$m1			# np[j]*m1
959	add	%rax,$N[1]
960	mov	16($ap,$j),%rax
961	adc	\$0,%rdx
962	add	$A[1],$N[1]
963	lea	8*4($np),$np
964	adc	\$0,%rdx
965	mov	$N[0],-8($tp)		# tp[j-1]
966	mov	%rdx,$N[0]
967
968	add	\$32,$j			# j+=4
969	jnz	.Linner4x
970
971	mulq	$m0			# ap[j]*bp[i]
972	add	%rax,$A[0]
973	mov	-8*2($np),%rax
974	adc	\$0,%rdx
975	add	16($tp),$A[0]		# ap[j]*bp[i]+tp[j]
976	lea	32($tp),$tp
977	adc	\$0,%rdx
978	mov	%rdx,$A[1]
979
980	mulq	$m1			# np[j]*m1
981	add	%rax,$N[0]
982	mov	-8($ap),%rax
983	adc	\$0,%rdx
984	add	$A[0],$N[0]
985	adc	\$0,%rdx
986	mov	$N[1],-32($tp)		# tp[j-1]
987	mov	%rdx,$N[1]
988
989	mulq	$m0			# ap[j]*bp[i]
990	add	%rax,$A[1]
991	mov	$m1,%rax
992	mov	-8*1($np),$m1
993	adc	\$0,%rdx
994	add	-8($tp),$A[1]
995	adc	\$0,%rdx
996	mov	%rdx,$A[0]
997
998	mulq	$m1			# np[j]*m1
999	add	%rax,$N[1]
1000	mov	($ap,$num),%rax		# ap[0]
1001	adc	\$0,%rdx
1002	add	$A[1],$N[1]
1003	adc	\$0,%rdx
1004	mov	$N[0],-24($tp)		# tp[j-1]
1005	mov	%rdx,$N[0]
1006
1007	mov	$N[1],-16($tp)		# tp[j-1]
1008	lea	($np,$num),$np		# rewind $np
1009
1010	xor	$N[1],$N[1]
1011	add	$A[0],$N[0]
1012	adc	\$0,$N[1]
1013	add	($tp),$N[0]		# pull upmost overflow bit
1014	adc	\$0,$N[1]		# upmost overflow bit
1015	mov	$N[0],-8($tp)
1016
1017	cmp	16+8(%rsp),$bp
1018	jb	.Louter4x
1019___
1020if (1) {
1021$code.=<<___;
1022	xor	%rax,%rax
1023	sub	$N[0],$m1		# compare top-most words
1024	adc	$j,$j			# $j is zero
1025	or	$j,$N[1]
1026	sub	$N[1],%rax		# %rax=-$N[1]
1027	lea	($tp,$num),%rbx		# tptr in .sqr4x_sub
1028	mov	($np),%r12
1029	lea	($np),%rbp		# nptr in .sqr4x_sub
1030	mov	%r9,%rcx
1031	sar	\$3+2,%rcx
1032	mov	56+8(%rsp),%rdi		# rptr in .sqr4x_sub
1033	dec	%r12			# so that after 'not' we get -n[0]
1034	xor	%r10,%r10
1035	mov	8*1(%rbp),%r13
1036	mov	8*2(%rbp),%r14
1037	mov	8*3(%rbp),%r15
1038	jmp	.Lsqr4x_sub_entry
1039___
1040} else {
1041my @ri=("%rax",$bp,$m0,$m1);
1042my $rp="%rdx";
1043$code.=<<___
1044	xor	\$1,$N[1]
1045	lea	($tp,$num),$tp		# rewind $tp
1046	sar	\$5,$num		# cf=0
1047	lea	($np,$N[1],8),$np
1048	mov	56+8(%rsp),$rp		# restore $rp
1049	jmp	.Lsub4x
1050
1051.align	32
1052.Lsub4x:
1053	.byte	0x66
1054	mov	8*0($tp),@ri[0]
1055	mov	8*1($tp),@ri[1]
1056	.byte	0x66
1057	sbb	16*0($np),@ri[0]
1058	mov	8*2($tp),@ri[2]
1059	sbb	16*1($np),@ri[1]
1060	mov	3*8($tp),@ri[3]
1061	lea	4*8($tp),$tp
1062	sbb	16*2($np),@ri[2]
1063	mov	@ri[0],8*0($rp)
1064	sbb	16*3($np),@ri[3]
1065	lea	16*4($np),$np
1066	mov	@ri[1],8*1($rp)
1067	mov	@ri[2],8*2($rp)
1068	mov	@ri[3],8*3($rp)
1069	lea	8*4($rp),$rp
1070
1071	inc	$num
1072	jnz	.Lsub4x
1073
1074	ret
1075___
1076}
1077$code.=<<___;
1078.cfi_endproc
1079.size	mul4x_internal,.-mul4x_internal
1080___
1081}}}
1082{{{
1083######################################################################
1084# void bn_power5(
1085my $rptr="%rdi";	# BN_ULONG *rptr,
1086my $aptr="%rsi";	# const BN_ULONG *aptr,
1087my $bptr="%rdx";	# const void *table,
1088my $nptr="%rcx";	# const BN_ULONG *nptr,
1089my $n0  ="%r8";		# const BN_ULONG *n0);
1090my $num ="%r9";		# int num, has to be divisible by 8
1091			# int pwr
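#
# For illustration only, a hedged C-level sketch of what one bn_power5
# call amounts to in BN_mod_exp_mont_consttime's fixed-window loop: five
# Montgomery squarings followed by one Montgomery multiplication by the
# power gathered at index pwr. mont_sqr, mont_mul, gather5 and the
# scratch argument are assumed helpers, not the real API.
#
#	/* acc = acc^(2^5) * table[pwr] mod n, all in Montgomery form */
#	void power5(BN_ULONG *acc, BN_ULONG *scratch, const BN_ULONG *table,
#	            const BN_ULONG *np, const BN_ULONG *n0, int num, int pwr)
#	{
#	    int i;
#	    for (i = 0; i < 5; i++)
#	        mont_sqr(acc, acc, np, n0, num);    /* 5 squarings        */
#	    gather5(scratch, table, num, pwr);      /* cache-neutral read */
#	    mont_mul(acc, acc, scratch, np, n0, num);
#	}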
1092
1093my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
1094my @A0=("%r10","%r11");
1095my @A1=("%r12","%r13");
1096my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
1097
1098$code.=<<___;
1099.globl	bn_power5
1100.type	bn_power5,\@function,6
1101.align	32
1102bn_power5:
1103.cfi_startproc
1104	mov	%rsp,%rax
1105.cfi_def_cfa_register	%rax
1106___
1107$code.=<<___ if ($addx);
1108	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
1109	and	\$0x80108,%r11d
1110	cmp	\$0x80108,%r11d		# check for AD*X+BMI2+BMI1
1111	je	.Lpowerx5_enter
1112___
1113$code.=<<___;
1114	push	%rbx
1115.cfi_push	%rbx
1116	push	%rbp
1117.cfi_push	%rbp
1118	push	%r12
1119.cfi_push	%r12
1120	push	%r13
1121.cfi_push	%r13
1122	push	%r14
1123.cfi_push	%r14
1124	push	%r15
1125.cfi_push	%r15
1126.Lpower5_prologue:
1127
1128	shl	\$3,${num}d		# convert $num to bytes
1129	lea	($num,$num,2),%r10d	# 3*$num
1130	neg	$num
1131	mov	($n0),$n0		# *n0
1132
1133	##############################################################
1134	# Ensure that stack frame doesn't alias with $rptr+3*$num
1135	# modulo 4096, which covers ret[num], am[num] and n[num]
1136	# (see bn_exp.c). This is done to allow the memory disambiguation
1137	# logic to do its magic. [An extra 256 bytes is for the power mask
1138	# calculated from the 7th argument, the index.]
1139	#
1140	lea	-320(%rsp,$num,2),%r11
1141	mov	%rsp,%rbp
1142	sub	$rptr,%r11
1143	and	\$4095,%r11
1144	cmp	%r11,%r10
1145	jb	.Lpwr_sp_alt
1146	sub	%r11,%rbp		# align with $aptr
1147	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
1148	jmp	.Lpwr_sp_done
1149
1150.align	32
1151.Lpwr_sp_alt:
1152	lea	4096-320(,$num,2),%r10
1153	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
1154	sub	%r10,%r11
1155	mov	\$0,%r10
1156	cmovc	%r10,%r11
1157	sub	%r11,%rbp
1158.Lpwr_sp_done:
1159	and	\$-64,%rbp
1160	mov	%rsp,%r11
1161	sub	%rbp,%r11
1162	and	\$-4096,%r11
1163	lea	(%rbp,%r11),%rsp
1164	mov	(%rsp),%r10
1165	cmp	%rbp,%rsp
1166	ja	.Lpwr_page_walk
1167	jmp	.Lpwr_page_walk_done
1168
1169.Lpwr_page_walk:
1170	lea	-4096(%rsp),%rsp
1171	mov	(%rsp),%r10
1172	cmp	%rbp,%rsp
1173	ja	.Lpwr_page_walk
1174.Lpwr_page_walk_done:
1175
1176	mov	$num,%r10
1177	neg	$num
1178
1179	##############################################################
1180	# Stack layout
1181	#
1182	# +0	saved $num, used in reduction section
1183	# +8	&t[2*$num], used in reduction section
1184	# +32	saved *n0
1185	# +40	saved %rsp
1186	# +48	t[2*$num]
1187	#
1188	mov	$n0,  32(%rsp)
1189	mov	%rax, 40(%rsp)		# save original %rsp
1190.cfi_cfa_expression	%rsp+40,deref,+8
1191.Lpower5_body:
1192	movq	$rptr,%xmm1		# save $rptr, used in sqr8x
1193	movq	$nptr,%xmm2		# save $nptr
1194	movq	%r10, %xmm3		# -$num, used in sqr8x
1195	movq	$bptr,%xmm4
1196
1197	call	__bn_sqr8x_internal
1198	call	__bn_post4x_internal
1199	call	__bn_sqr8x_internal
1200	call	__bn_post4x_internal
1201	call	__bn_sqr8x_internal
1202	call	__bn_post4x_internal
1203	call	__bn_sqr8x_internal
1204	call	__bn_post4x_internal
1205	call	__bn_sqr8x_internal
1206	call	__bn_post4x_internal
1207
1208	movq	%xmm2,$nptr
1209	movq	%xmm4,$bptr
1210	mov	$aptr,$rptr
1211	mov	40(%rsp),%rax
1212	lea	32(%rsp),$n0
1213
1214	call	mul4x_internal
1215
1216	mov	40(%rsp),%rsi		# restore %rsp
1217.cfi_def_cfa	%rsi,8
1218	mov	\$1,%rax
1219	mov	-48(%rsi),%r15
1220.cfi_restore	%r15
1221	mov	-40(%rsi),%r14
1222.cfi_restore	%r14
1223	mov	-32(%rsi),%r13
1224.cfi_restore	%r13
1225	mov	-24(%rsi),%r12
1226.cfi_restore	%r12
1227	mov	-16(%rsi),%rbp
1228.cfi_restore	%rbp
1229	mov	-8(%rsi),%rbx
1230.cfi_restore	%rbx
1231	lea	(%rsi),%rsp
1232.cfi_def_cfa_register	%rsp
1233.Lpower5_epilogue:
1234	ret
1235.cfi_endproc
1236.size	bn_power5,.-bn_power5
1237
1238.globl	bn_sqr8x_internal
1239.hidden	bn_sqr8x_internal
1240.type	bn_sqr8x_internal,\@abi-omnipotent
1241.align	32
1242bn_sqr8x_internal:
1243__bn_sqr8x_internal:
1244.cfi_startproc
1245	##############################################################
1246	# Squaring part:
1247	#
1248	# a) multiply-n-add everything but a[i]*a[i];
1249	# b) shift result of a) by 1 to the left and accumulate
1250	#    a[i]*a[i] products;
1251	#
1252	##############################################################
1253	#                                                     a[1]a[0]
1254	#                                                 a[2]a[0]
1255	#                                             a[3]a[0]
1256	#                                             a[2]a[1]
1257	#                                         a[4]a[0]
1258	#                                         a[3]a[1]
1259	#                                     a[5]a[0]
1260	#                                     a[4]a[1]
1261	#                                     a[3]a[2]
1262	#                                 a[6]a[0]
1263	#                                 a[5]a[1]
1264	#                                 a[4]a[2]
1265	#                             a[7]a[0]
1266	#                             a[6]a[1]
1267	#                             a[5]a[2]
1268	#                             a[4]a[3]
1269	#                         a[7]a[1]
1270	#                         a[6]a[2]
1271	#                         a[5]a[3]
1272	#                     a[7]a[2]
1273	#                     a[6]a[3]
1274	#                     a[5]a[4]
1275	#                 a[7]a[3]
1276	#                 a[6]a[4]
1277	#             a[7]a[4]
1278	#             a[6]a[5]
1279	#         a[7]a[5]
1280	#     a[7]a[6]
1281	#                                                     a[1]a[0]
1282	#                                                 a[2]a[0]
1283	#                                             a[3]a[0]
1284	#                                         a[4]a[0]
1285	#                                     a[5]a[0]
1286	#                                 a[6]a[0]
1287	#                             a[7]a[0]
1288	#                                             a[2]a[1]
1289	#                                         a[3]a[1]
1290	#                                     a[4]a[1]
1291	#                                 a[5]a[1]
1292	#                             a[6]a[1]
1293	#                         a[7]a[1]
1294	#                                     a[3]a[2]
1295	#                                 a[4]a[2]
1296	#                             a[5]a[2]
1297	#                         a[6]a[2]
1298	#                     a[7]a[2]
1299	#                             a[4]a[3]
1300	#                         a[5]a[3]
1301	#                     a[6]a[3]
1302	#                 a[7]a[3]
1303	#                     a[5]a[4]
1304	#                 a[6]a[4]
1305	#             a[7]a[4]
1306	#             a[6]a[5]
1307	#         a[7]a[5]
1308	#     a[7]a[6]
1309	#                                                         a[0]a[0]
1310	#                                                 a[1]a[1]
1311	#                                         a[2]a[2]
1312	#                                 a[3]a[3]
1313	#                         a[4]a[4]
1314	#                 a[5]a[5]
1315	#         a[6]a[6]
1316	# a[7]a[7]
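	#
	# For illustration only, a hedged C sketch of the strategy above,
	# ignoring the actual register scheduling: accumulate every cross
	# product once, double the whole result with a left shift, then add
	# the diagonal a[i]*a[i] terms. acc_add, widemul and shift_left1 are
	# assumed helpers operating on double-word products.
	#
	#	/* t[0 .. 2*num-1] = a*a */
	#	for (i = 0; i < num; i++)                  /* step a)          */
	#	    for (j = i + 1; j < num; j++)
	#	        acc_add(t, i + j, widemul(a[i], a[j]));
	#	shift_left1(t, 2 * num);                   /* double all of it */
	#	for (i = 0; i < num; i++)                  /* step b)          */
	#	    acc_add(t, 2 * i, widemul(a[i], a[i]));
	#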
1317
1318	lea	32(%r10),$i		# $i=-($num-32)
1319	lea	($aptr,$num),$aptr	# end of a[] buffer, ($aptr,$i)=&ap[2]
1320
1321	mov	$num,$j			# $j=$num
1322
1323					# comments apply to $num==8 case
1324	mov	-32($aptr,$i),$a0	# a[0]
1325	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
1326	mov	-24($aptr,$i),%rax	# a[1]
1327	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
1328	mov	-16($aptr,$i),$ai	# a[2]
1329	mov	%rax,$a1
1330
1331	mul	$a0			# a[1]*a[0]
1332	mov	%rax,$A0[0]		# a[1]*a[0]
1333	 mov	$ai,%rax		# a[2]
1334	mov	%rdx,$A0[1]
1335	mov	$A0[0],-24($tptr,$i)	# t[1]
1336
1337	mul	$a0			# a[2]*a[0]
1338	add	%rax,$A0[1]
1339	 mov	$ai,%rax
1340	adc	\$0,%rdx
1341	mov	$A0[1],-16($tptr,$i)	# t[2]
1342	mov	%rdx,$A0[0]
1343
1344
1345	 mov	-8($aptr,$i),$ai	# a[3]
1346	mul	$a1			# a[2]*a[1]
1347	mov	%rax,$A1[0]		# a[2]*a[1]+t[3]
1348	 mov	$ai,%rax
1349	mov	%rdx,$A1[1]
1350
1351	 lea	($i),$j
1352	mul	$a0			# a[3]*a[0]
1353	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
1354	 mov	$ai,%rax
1355	mov	%rdx,$A0[1]
1356	adc	\$0,$A0[1]
1357	add	$A1[0],$A0[0]
1358	adc	\$0,$A0[1]
1359	mov	$A0[0],-8($tptr,$j)	# t[3]
1360	jmp	.Lsqr4x_1st
1361
1362.align	32
1363.Lsqr4x_1st:
1364	 mov	($aptr,$j),$ai		# a[4]
1365	mul	$a1			# a[3]*a[1]
1366	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
1367	 mov	$ai,%rax
1368	mov	%rdx,$A1[0]
1369	adc	\$0,$A1[0]
1370
1371	mul	$a0			# a[4]*a[0]
1372	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
1373	 mov	$ai,%rax		# a[3]
1374	 mov	8($aptr,$j),$ai		# a[5]
1375	mov	%rdx,$A0[0]
1376	adc	\$0,$A0[0]
1377	add	$A1[1],$A0[1]
1378	adc	\$0,$A0[0]
1379
1380
1381	mul	$a1			# a[4]*a[3]
1382	add	%rax,$A1[0]		# a[4]*a[3]+t[5]
1383	 mov	$ai,%rax
1384	 mov	$A0[1],($tptr,$j)	# t[4]
1385	mov	%rdx,$A1[1]
1386	adc	\$0,$A1[1]
1387
1388	mul	$a0			# a[5]*a[2]
1389	add	%rax,$A0[0]		# a[5]*a[2]+a[4]*a[3]+t[5]
1390	 mov	$ai,%rax
1391	 mov	16($aptr,$j),$ai	# a[6]
1392	mov	%rdx,$A0[1]
1393	adc	\$0,$A0[1]
1394	add	$A1[0],$A0[0]
1395	adc	\$0,$A0[1]
1396
1397	mul	$a1			# a[5]*a[3]
1398	add	%rax,$A1[1]		# a[5]*a[3]+t[6]
1399	 mov	$ai,%rax
1400	 mov	$A0[0],8($tptr,$j)	# t[5]
1401	mov	%rdx,$A1[0]
1402	adc	\$0,$A1[0]
1403
1404	mul	$a0			# a[6]*a[2]
1405	add	%rax,$A0[1]		# a[6]*a[2]+a[5]*a[3]+t[6]
1406	 mov	$ai,%rax		# a[3]
1407	 mov	24($aptr,$j),$ai	# a[7]
1408	mov	%rdx,$A0[0]
1409	adc	\$0,$A0[0]
1410	add	$A1[1],$A0[1]
1411	adc	\$0,$A0[0]
1412
1413
1414	mul	$a1			# a[6]*a[5]
1415	add	%rax,$A1[0]		# a[6]*a[5]+t[7]
1416	 mov	$ai,%rax
1417	 mov	$A0[1],16($tptr,$j)	# t[6]
1418	mov	%rdx,$A1[1]
1419	adc	\$0,$A1[1]
1420	 lea	32($j),$j
1421
1422	mul	$a0			# a[7]*a[4]
1423	add	%rax,$A0[0]		# a[7]*a[4]+a[6]*a[5]+t[6]
1424	 mov	$ai,%rax
1425	mov	%rdx,$A0[1]
1426	adc	\$0,$A0[1]
1427	add	$A1[0],$A0[0]
1428	adc	\$0,$A0[1]
1429	mov	$A0[0],-8($tptr,$j)	# t[7]
1430
1431	cmp	\$0,$j
1432	jne	.Lsqr4x_1st
1433
1434	mul	$a1			# a[7]*a[5]
1435	add	%rax,$A1[1]
1436	lea	16($i),$i
1437	adc	\$0,%rdx
1438	add	$A0[1],$A1[1]
1439	adc	\$0,%rdx
1440
1441	mov	$A1[1],($tptr)		# t[8]
1442	mov	%rdx,$A1[0]
1443	mov	%rdx,8($tptr)		# t[9]
1444	jmp	.Lsqr4x_outer
1445
1446.align	32
1447.Lsqr4x_outer:				# comments apply to $num==6 case
1448	mov	-32($aptr,$i),$a0	# a[0]
1449	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
1450	mov	-24($aptr,$i),%rax	# a[1]
1451	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
1452	mov	-16($aptr,$i),$ai	# a[2]
1453	mov	%rax,$a1
1454
1455	mul	$a0			# a[1]*a[0]
1456	mov	-24($tptr,$i),$A0[0]	# t[1]
1457	add	%rax,$A0[0]		# a[1]*a[0]+t[1]
1458	 mov	$ai,%rax		# a[2]
1459	adc	\$0,%rdx
1460	mov	$A0[0],-24($tptr,$i)	# t[1]
1461	mov	%rdx,$A0[1]
1462
1463	mul	$a0			# a[2]*a[0]
1464	add	%rax,$A0[1]
1465	 mov	$ai,%rax
1466	adc	\$0,%rdx
1467	add	-16($tptr,$i),$A0[1]	# a[2]*a[0]+t[2]
1468	mov	%rdx,$A0[0]
1469	adc	\$0,$A0[0]
1470	mov	$A0[1],-16($tptr,$i)	# t[2]
1471
1472	xor	$A1[0],$A1[0]
1473
1474	 mov	-8($aptr,$i),$ai	# a[3]
1475	mul	$a1			# a[2]*a[1]
1476	add	%rax,$A1[0]		# a[2]*a[1]+t[3]
1477	 mov	$ai,%rax
1478	adc	\$0,%rdx
1479	add	-8($tptr,$i),$A1[0]
1480	mov	%rdx,$A1[1]
1481	adc	\$0,$A1[1]
1482
1483	mul	$a0			# a[3]*a[0]
1484	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
1485	 mov	$ai,%rax
1486	adc	\$0,%rdx
1487	add	$A1[0],$A0[0]
1488	mov	%rdx,$A0[1]
1489	adc	\$0,$A0[1]
1490	mov	$A0[0],-8($tptr,$i)	# t[3]
1491
1492	lea	($i),$j
1493	jmp	.Lsqr4x_inner
1494
1495.align	32
1496.Lsqr4x_inner:
1497	 mov	($aptr,$j),$ai		# a[4]
1498	mul	$a1			# a[3]*a[1]
1499	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
1500	 mov	$ai,%rax
1501	mov	%rdx,$A1[0]
1502	adc	\$0,$A1[0]
1503	add	($tptr,$j),$A1[1]
1504	adc	\$0,$A1[0]
1505
1506	.byte	0x67
1507	mul	$a0			# a[4]*a[0]
1508	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
1509	 mov	$ai,%rax		# a[3]
1510	 mov	8($aptr,$j),$ai		# a[5]
1511	mov	%rdx,$A0[0]
1512	adc	\$0,$A0[0]
1513	add	$A1[1],$A0[1]
1514	adc	\$0,$A0[0]
1515
1516	mul	$a1			# a[4]*a[3]
1517	add	%rax,$A1[0]		# a[4]*a[3]+t[5]
1518	mov	$A0[1],($tptr,$j)	# t[4]
1519	 mov	$ai,%rax
1520	mov	%rdx,$A1[1]
1521	adc	\$0,$A1[1]
1522	add	8($tptr,$j),$A1[0]
1523	lea	16($j),$j		# j++
1524	adc	\$0,$A1[1]
1525
1526	mul	$a0			# a[5]*a[2]
1527	add	%rax,$A0[0]		# a[5]*a[2]+a[4]*a[3]+t[5]
1528	 mov	$ai,%rax
1529	adc	\$0,%rdx
1530	add	$A1[0],$A0[0]
1531	mov	%rdx,$A0[1]
1532	adc	\$0,$A0[1]
1533	mov	$A0[0],-8($tptr,$j)	# t[5], "preloaded t[1]" below
1534
1535	cmp	\$0,$j
1536	jne	.Lsqr4x_inner
1537
1538	.byte	0x67
1539	mul	$a1			# a[5]*a[3]
1540	add	%rax,$A1[1]
1541	adc	\$0,%rdx
1542	add	$A0[1],$A1[1]
1543	adc	\$0,%rdx
1544
1545	mov	$A1[1],($tptr)		# t[6], "preloaded t[2]" below
1546	mov	%rdx,$A1[0]
1547	mov	%rdx,8($tptr)		# t[7], "preloaded t[3]" below
1548
1549	add	\$16,$i
1550	jnz	.Lsqr4x_outer
1551
1552					# comments apply to $num==4 case
1553	mov	-32($aptr),$a0		# a[0]
1554	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
1555	mov	-24($aptr),%rax		# a[1]
1556	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
1557	mov	-16($aptr),$ai		# a[2]
1558	mov	%rax,$a1
1559
1560	mul	$a0			# a[1]*a[0]
1561	add	%rax,$A0[0]		# a[1]*a[0]+t[1], preloaded t[1]
1562	 mov	$ai,%rax		# a[2]
1563	mov	%rdx,$A0[1]
1564	adc	\$0,$A0[1]
1565
1566	mul	$a0			# a[2]*a[0]
1567	add	%rax,$A0[1]
1568	 mov	$ai,%rax
1569	 mov	$A0[0],-24($tptr)	# t[1]
1570	mov	%rdx,$A0[0]
1571	adc	\$0,$A0[0]
1572	add	$A1[1],$A0[1]		# a[2]*a[0]+t[2], preloaded t[2]
1573	 mov	-8($aptr),$ai		# a[3]
1574	adc	\$0,$A0[0]
1575
1576	mul	$a1			# a[2]*a[1]
1577	add	%rax,$A1[0]		# a[2]*a[1]+t[3], preloaded t[3]
1578	 mov	$ai,%rax
1579	 mov	$A0[1],-16($tptr)	# t[2]
1580	mov	%rdx,$A1[1]
1581	adc	\$0,$A1[1]
1582
1583	mul	$a0			# a[3]*a[0]
1584	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
1585	 mov	$ai,%rax
1586	mov	%rdx,$A0[1]
1587	adc	\$0,$A0[1]
1588	add	$A1[0],$A0[0]
1589	adc	\$0,$A0[1]
1590	mov	$A0[0],-8($tptr)	# t[3]
1591
1592	mul	$a1			# a[3]*a[1]
1593	add	%rax,$A1[1]
1594	 mov	-16($aptr),%rax		# a[2]
1595	adc	\$0,%rdx
1596	add	$A0[1],$A1[1]
1597	adc	\$0,%rdx
1598
1599	mov	$A1[1],($tptr)		# t[4]
1600	mov	%rdx,$A1[0]
1601	mov	%rdx,8($tptr)		# t[5]
1602
1603	mul	$ai			# a[2]*a[3]
1604___
1605{
1606my ($shift,$carry)=($a0,$a1);
1607my @S=(@A1,$ai,$n0);
1608$code.=<<___;
1609	 add	\$16,$i
1610	 xor	$shift,$shift
1611	 sub	$num,$i			# $i=16-$num
1612	 xor	$carry,$carry
1613
1614	add	$A1[0],%rax		# t[5]
1615	adc	\$0,%rdx
1616	mov	%rax,8($tptr)		# t[5]
1617	mov	%rdx,16($tptr)		# t[6]
1618	mov	$carry,24($tptr)	# t[7]
1619
1620	 mov	-16($aptr,$i),%rax	# a[0]
1621	lea	48+8(%rsp),$tptr
1622	 xor	$A0[0],$A0[0]		# t[0]
1623	 mov	8($tptr),$A0[1]		# t[1]
1624
1625	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
1626	shr	\$63,$A0[0]
1627	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
1628	shr	\$63,$A0[1]
1629	or	$A0[0],$S[1]		# | t[2*i]>>63
1630	 mov	16($tptr),$A0[0]	# t[2*i+2]	# prefetch
1631	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1632	mul	%rax			# a[i]*a[i]
1633	neg	$carry			# mov $carry,cf
1634	 mov	24($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
1635	adc	%rax,$S[0]
1636	 mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
1637	mov	$S[0],($tptr)
1638	adc	%rdx,$S[1]
1639
1640	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
1641	 mov	$S[1],8($tptr)
1642	 sbb	$carry,$carry		# mov cf,$carry
1643	shr	\$63,$A0[0]
1644	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
1645	shr	\$63,$A0[1]
1646	or	$A0[0],$S[3]		# | t[2*i]>>63
1647	 mov	32($tptr),$A0[0]	# t[2*i+2]	# prefetch
1648	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1649	mul	%rax			# a[i]*a[i]
1650	neg	$carry			# mov $carry,cf
1651	 mov	40($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
1652	adc	%rax,$S[2]
1653	 mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
1654	mov	$S[2],16($tptr)
1655	adc	%rdx,$S[3]
1656	lea	16($i),$i
1657	mov	$S[3],24($tptr)
1658	sbb	$carry,$carry		# mov cf,$carry
1659	lea	64($tptr),$tptr
1660	jmp	.Lsqr4x_shift_n_add
1661
1662.align	32
1663.Lsqr4x_shift_n_add:
1664	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
1665	shr	\$63,$A0[0]
1666	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
1667	shr	\$63,$A0[1]
1668	or	$A0[0],$S[1]		# | t[2*i]>>63
1669	 mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
1670	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1671	mul	%rax			# a[i]*a[i]
1672	neg	$carry			# mov $carry,cf
1673	 mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
1674	adc	%rax,$S[0]
1675	 mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
1676	mov	$S[0],-32($tptr)
1677	adc	%rdx,$S[1]
1678
1679	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
1680	 mov	$S[1],-24($tptr)
1681	 sbb	$carry,$carry		# mov cf,$carry
1682	shr	\$63,$A0[0]
1683	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
1684	shr	\$63,$A0[1]
1685	or	$A0[0],$S[3]		# | t[2*i]>>63
1686	 mov	0($tptr),$A0[0]		# t[2*i+2]	# prefetch
1687	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1688	mul	%rax			# a[i]*a[i]
1689	neg	$carry			# mov $carry,cf
1690	 mov	8($tptr),$A0[1]		# t[2*i+2+1]	# prefetch
1691	adc	%rax,$S[2]
1692	 mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
1693	mov	$S[2],-16($tptr)
1694	adc	%rdx,$S[3]
1695
1696	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
1697	 mov	$S[3],-8($tptr)
1698	 sbb	$carry,$carry		# mov cf,$carry
1699	shr	\$63,$A0[0]
1700	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
1701	shr	\$63,$A0[1]
1702	or	$A0[0],$S[1]		# | t[2*i]>>63
1703	 mov	16($tptr),$A0[0]	# t[2*i+2]	# prefetch
1704	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1705	mul	%rax			# a[i]*a[i]
1706	neg	$carry			# mov $carry,cf
1707	 mov	24($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
1708	adc	%rax,$S[0]
1709	 mov	8($aptr,$i),%rax	# a[i+1]	# prefetch
1710	mov	$S[0],0($tptr)
1711	adc	%rdx,$S[1]
1712
1713	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
1714	 mov	$S[1],8($tptr)
1715	 sbb	$carry,$carry		# mov cf,$carry
1716	shr	\$63,$A0[0]
1717	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
1718	shr	\$63,$A0[1]
1719	or	$A0[0],$S[3]		# | t[2*i]>>63
1720	 mov	32($tptr),$A0[0]	# t[2*i+2]	# prefetch
1721	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1722	mul	%rax			# a[i]*a[i]
1723	neg	$carry			# mov $carry,cf
1724	 mov	40($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
1725	adc	%rax,$S[2]
1726	 mov	16($aptr,$i),%rax	# a[i+1]	# prefetch
1727	mov	$S[2],16($tptr)
1728	adc	%rdx,$S[3]
1729	mov	$S[3],24($tptr)
1730	sbb	$carry,$carry		# mov cf,$carry
1731	lea	64($tptr),$tptr
1732	add	\$32,$i
1733	jnz	.Lsqr4x_shift_n_add
1734
1735	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
1736	.byte	0x67
1737	shr	\$63,$A0[0]
1738	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
1739	shr	\$63,$A0[1]
1740	or	$A0[0],$S[1]		# | t[2*i]>>63
1741	 mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
1742	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
1743	mul	%rax			# a[i]*a[i]
1744	neg	$carry			# mov $carry,cf
1745	 mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
1746	adc	%rax,$S[0]
1747	 mov	-8($aptr),%rax		# a[i+1]	# prefetch
1748	mov	$S[0],-32($tptr)
1749	adc	%rdx,$S[1]
1750
1751	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1|shift
1752	 mov	$S[1],-24($tptr)
1753	 sbb	$carry,$carry		# mov cf,$carry
1754	shr	\$63,$A0[0]
1755	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
1756	shr	\$63,$A0[1]
1757	or	$A0[0],$S[3]		# | t[2*i]>>63
1758	mul	%rax			# a[i]*a[i]
1759	neg	$carry			# mov $carry,cf
1760	adc	%rax,$S[2]
1761	adc	%rdx,$S[3]
1762	mov	$S[2],-16($tptr)
1763	mov	$S[3],-8($tptr)
1764___
1765}
1766######################################################################
1767# Montgomery reduction part, "word-by-word" algorithm.
1768#
1769# This new path is inspired by multiple submissions from Intel, by
1770# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
1771# Vinodh Gopal...
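#
# For illustration only, a hedged C sketch of word-by-word Montgomery
# reduction of a 2*num-word value t[]. addmul_word and propagate are
# assumed helpers, not OpenSSL API; the final conditional subtraction is
# left to the post-condition code.
#
#	/* np[] is the modulus, n0 = -np[0]^-1 mod 2^64 */
#	for (i = 0; i < num; i++) {
#	    BN_ULONG m = t[i] * n0;               /* makes t[i] vanish    */
#	    BN_ULONG c = addmul_word(t + i, np, num, m); /* t += m*np<<64i */
#	    propagate(t + i + num, c);            /* carry into upper half */
#	}
#	/* result = t[num .. 2*num-1], minus np if it is still >= np */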
1772{
1773my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
1774
1775$code.=<<___;
1776	movq	%xmm2,$nptr
1777__bn_sqr8x_reduction:
1778	xor	%rax,%rax
1779	lea	($nptr,$num),%rcx	# end of n[]
1780	lea	48+8(%rsp,$num,2),%rdx	# end of t[] buffer
1781	mov	%rcx,0+8(%rsp)
1782	lea	48+8(%rsp,$num),$tptr	# end of initial t[] window
1783	mov	%rdx,8+8(%rsp)
1784	neg	$num
1785	jmp	.L8x_reduction_loop
1786
1787.align	32
1788.L8x_reduction_loop:
1789	lea	($tptr,$num),$tptr	# start of current t[] window
1790	.byte	0x66
1791	mov	8*0($tptr),$m0
1792	mov	8*1($tptr),%r9
1793	mov	8*2($tptr),%r10
1794	mov	8*3($tptr),%r11
1795	mov	8*4($tptr),%r12
1796	mov	8*5($tptr),%r13
1797	mov	8*6($tptr),%r14
1798	mov	8*7($tptr),%r15
1799	mov	%rax,(%rdx)		# store top-most carry bit
1800	lea	8*8($tptr),$tptr
1801
1802	.byte	0x67
1803	mov	$m0,%r8
1804	imulq	32+8(%rsp),$m0		# n0*a[0]
1805	mov	8*0($nptr),%rax		# n[0]
1806	mov	\$8,%ecx
1807	jmp	.L8x_reduce
1808
1809.align	32
1810.L8x_reduce:
1811	mulq	$m0
1812	 mov	8*1($nptr),%rax		# n[1]
1813	neg	%r8
1814	mov	%rdx,%r8
1815	adc	\$0,%r8
1816
1817	mulq	$m0
1818	add	%rax,%r9
1819	 mov	8*2($nptr),%rax
1820	adc	\$0,%rdx
1821	add	%r9,%r8
1822	 mov	$m0,48-8+8(%rsp,%rcx,8)	# put aside n0*a[i]
1823	mov	%rdx,%r9
1824	adc	\$0,%r9
1825
1826	mulq	$m0
1827	add	%rax,%r10
1828	 mov	8*3($nptr),%rax
1829	adc	\$0,%rdx
1830	add	%r10,%r9
1831	 mov	32+8(%rsp),$carry	# pull n0, borrow $carry
1832	mov	%rdx,%r10
1833	adc	\$0,%r10
1834
1835	mulq	$m0
1836	add	%rax,%r11
1837	 mov	8*4($nptr),%rax
1838	adc	\$0,%rdx
1839	 imulq	%r8,$carry		# modulo-scheduled
1840	add	%r11,%r10
1841	mov	%rdx,%r11
1842	adc	\$0,%r11
1843
1844	mulq	$m0
1845	add	%rax,%r12
1846	 mov	8*5($nptr),%rax
1847	adc	\$0,%rdx
1848	add	%r12,%r11
1849	mov	%rdx,%r12
1850	adc	\$0,%r12
1851
1852	mulq	$m0
1853	add	%rax,%r13
1854	 mov	8*6($nptr),%rax
1855	adc	\$0,%rdx
1856	add	%r13,%r12
1857	mov	%rdx,%r13
1858	adc	\$0,%r13
1859
1860	mulq	$m0
1861	add	%rax,%r14
1862	 mov	8*7($nptr),%rax
1863	adc	\$0,%rdx
1864	add	%r14,%r13
1865	mov	%rdx,%r14
1866	adc	\$0,%r14
1867
1868	mulq	$m0
1869	 mov	$carry,$m0		# n0*a[i]
1870	add	%rax,%r15
1871	 mov	8*0($nptr),%rax		# n[0]
1872	adc	\$0,%rdx
1873	add	%r15,%r14
1874	mov	%rdx,%r15
1875	adc	\$0,%r15
1876
1877	dec	%ecx
1878	jnz	.L8x_reduce
1879
1880	lea	8*8($nptr),$nptr
1881	xor	%rax,%rax
1882	mov	8+8(%rsp),%rdx		# pull end of t[]
1883	cmp	0+8(%rsp),$nptr		# end of n[]?
1884	jae	.L8x_no_tail
1885
1886	.byte	0x66
1887	add	8*0($tptr),%r8
1888	adc	8*1($tptr),%r9
1889	adc	8*2($tptr),%r10
1890	adc	8*3($tptr),%r11
1891	adc	8*4($tptr),%r12
1892	adc	8*5($tptr),%r13
1893	adc	8*6($tptr),%r14
1894	adc	8*7($tptr),%r15
1895	sbb	$carry,$carry		# top carry
1896
1897	mov	48+56+8(%rsp),$m0	# pull n0*a[0]
1898	mov	\$8,%ecx
1899	mov	8*0($nptr),%rax
1900	jmp	.L8x_tail
1901
1902.align	32
1903.L8x_tail:
1904	mulq	$m0
1905	add	%rax,%r8
1906	 mov	8*1($nptr),%rax
1907	 mov	%r8,($tptr)		# save result
1908	mov	%rdx,%r8
1909	adc	\$0,%r8
1910
1911	mulq	$m0
1912	add	%rax,%r9
1913	 mov	8*2($nptr),%rax
1914	adc	\$0,%rdx
1915	add	%r9,%r8
1916	 lea	8($tptr),$tptr		# $tptr++
1917	mov	%rdx,%r9
1918	adc	\$0,%r9
1919
1920	mulq	$m0
1921	add	%rax,%r10
1922	 mov	8*3($nptr),%rax
1923	adc	\$0,%rdx
1924	add	%r10,%r9
1925	mov	%rdx,%r10
1926	adc	\$0,%r10
1927
1928	mulq	$m0
1929	add	%rax,%r11
1930	 mov	8*4($nptr),%rax
1931	adc	\$0,%rdx
1932	add	%r11,%r10
1933	mov	%rdx,%r11
1934	adc	\$0,%r11
1935
1936	mulq	$m0
1937	add	%rax,%r12
1938	 mov	8*5($nptr),%rax
1939	adc	\$0,%rdx
1940	add	%r12,%r11
1941	mov	%rdx,%r12
1942	adc	\$0,%r12
1943
1944	mulq	$m0
1945	add	%rax,%r13
1946	 mov	8*6($nptr),%rax
1947	adc	\$0,%rdx
1948	add	%r13,%r12
1949	mov	%rdx,%r13
1950	adc	\$0,%r13
1951
1952	mulq	$m0
1953	add	%rax,%r14
1954	 mov	8*7($nptr),%rax
1955	adc	\$0,%rdx
1956	add	%r14,%r13
1957	mov	%rdx,%r14
1958	adc	\$0,%r14
1959
1960	mulq	$m0
1961	 mov	48-16+8(%rsp,%rcx,8),$m0# pull n0*a[i]
1962	add	%rax,%r15
1963	adc	\$0,%rdx
1964	add	%r15,%r14
1965	 mov	8*0($nptr),%rax		# pull n[0]
1966	mov	%rdx,%r15
1967	adc	\$0,%r15
1968
1969	dec	%ecx
1970	jnz	.L8x_tail
1971
1972	lea	8*8($nptr),$nptr
1973	mov	8+8(%rsp),%rdx		# pull end of t[]
1974	cmp	0+8(%rsp),$nptr		# end of n[]?
1975	jae	.L8x_tail_done		# break out of loop
1976
1977	 mov	48+56+8(%rsp),$m0	# pull n0*a[0]
1978	neg	$carry
1979	 mov	8*0($nptr),%rax		# pull n[0]
1980	adc	8*0($tptr),%r8
1981	adc	8*1($tptr),%r9
1982	adc	8*2($tptr),%r10
1983	adc	8*3($tptr),%r11
1984	adc	8*4($tptr),%r12
1985	adc	8*5($tptr),%r13
1986	adc	8*6($tptr),%r14
1987	adc	8*7($tptr),%r15
1988	sbb	$carry,$carry		# top carry
1989
1990	mov	\$8,%ecx
1991	jmp	.L8x_tail
1992
1993.align	32
1994.L8x_tail_done:
1995	xor	%rax,%rax
1996	add	(%rdx),%r8		# can this overflow?
1997	adc	\$0,%r9
1998	adc	\$0,%r10
1999	adc	\$0,%r11
2000	adc	\$0,%r12
2001	adc	\$0,%r13
2002	adc	\$0,%r14
2003	adc	\$0,%r15
2004	adc	\$0,%rax
2005
2006	neg	$carry
2007.L8x_no_tail:
2008	adc	8*0($tptr),%r8
2009	adc	8*1($tptr),%r9
2010	adc	8*2($tptr),%r10
2011	adc	8*3($tptr),%r11
2012	adc	8*4($tptr),%r12
2013	adc	8*5($tptr),%r13
2014	adc	8*6($tptr),%r14
2015	adc	8*7($tptr),%r15
2016	adc	\$0,%rax		# top-most carry
2017	 mov	-8($nptr),%rcx		# np[num-1]
2018	 xor	$carry,$carry
2019
2020	movq	%xmm2,$nptr		# restore $nptr
2021
2022	mov	%r8,8*0($tptr)		# store top 512 bits
2023	mov	%r9,8*1($tptr)
2024	 movq	%xmm3,$num		# $num is %r9, can't be moved upwards
2025	mov	%r10,8*2($tptr)
2026	mov	%r11,8*3($tptr)
2027	mov	%r12,8*4($tptr)
2028	mov	%r13,8*5($tptr)
2029	mov	%r14,8*6($tptr)
2030	mov	%r15,8*7($tptr)
2031	lea	8*8($tptr),$tptr
2032
2033	cmp	%rdx,$tptr		# end of t[]?
2034	jb	.L8x_reduction_loop
2035	ret
2036.cfi_endproc
2037.size	bn_sqr8x_internal,.-bn_sqr8x_internal
2038___
2039}
2040##############################################################
2041# Post-condition, 4x unrolled
2042#
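#
# For illustration only, a hedged C sketch of the idea behind the
# post-condition: subtract the modulus only when the reduction produced a
# top carry, and do so with a mask rather than a secret-dependent branch.
# The assembly below folds the carry in differently (via complemented np
# words and the carry chain), but computes the same thing.
#
#	/* rp = tp - (top_carry ? np : 0), branch-free */
#	BN_ULONG mask = 0 - top_carry;        /* all-ones if carry, else 0 */
#	BN_ULONG borrow = 0;
#	for (i = 0; i < num; i++) {
#	    BN_ULONG d  = np[i] & mask;
#	    BN_ULONG t1 = tp[i] - borrow;
#	    BN_ULONG b1 = (BN_ULONG)(tp[i] < borrow);
#	    rp[i]  = t1 - d;
#	    borrow = b1 | (BN_ULONG)(t1 < d);
#	}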
2043{
2044my ($tptr,$nptr)=("%rbx","%rbp");
2045$code.=<<___;
2046.type	__bn_post4x_internal,\@abi-omnipotent
2047.align	32
2048__bn_post4x_internal:
2049.cfi_startproc
2050	mov	8*0($nptr),%r12
2051	lea	(%rdi,$num),$tptr	# %rdi was $tptr above
2052	mov	$num,%rcx
2053	movq	%xmm1,$rptr		# restore $rptr
2054	neg	%rax
2055	movq	%xmm1,$aptr		# prepare for back-to-back call
2056	sar	\$3+2,%rcx
2057	dec	%r12			# so that after 'not' we get -n[0]
2058	xor	%r10,%r10
2059	mov	8*1($nptr),%r13
2060	mov	8*2($nptr),%r14
2061	mov	8*3($nptr),%r15
2062	jmp	.Lsqr4x_sub_entry
2063
2064.align	16
2065.Lsqr4x_sub:
2066	mov	8*0($nptr),%r12
2067	mov	8*1($nptr),%r13
2068	mov	8*2($nptr),%r14
2069	mov	8*3($nptr),%r15
2070.Lsqr4x_sub_entry:
2071	lea	8*4($nptr),$nptr
2072	not	%r12
2073	not	%r13
2074	not	%r14
2075	not	%r15
2076	and	%rax,%r12
2077	and	%rax,%r13
2078	and	%rax,%r14
2079	and	%rax,%r15
2080
2081	neg	%r10			# mov %r10,%cf
2082	adc	8*0($tptr),%r12
2083	adc	8*1($tptr),%r13
2084	adc	8*2($tptr),%r14
2085	adc	8*3($tptr),%r15
2086	mov	%r12,8*0($rptr)
2087	lea	8*4($tptr),$tptr
2088	mov	%r13,8*1($rptr)
2089	sbb	%r10,%r10		# mov %cf,%r10
2090	mov	%r14,8*2($rptr)
2091	mov	%r15,8*3($rptr)
2092	lea	8*4($rptr),$rptr
2093
2094	inc	%rcx			# pass %cf
2095	jnz	.Lsqr4x_sub
2096
2097	mov	$num,%r10		# prepare for back-to-back call
2098	neg	$num			# restore $num
2099	ret
2100.cfi_endproc
2101.size	__bn_post4x_internal,.-__bn_post4x_internal
2102___
2103}
2104{
2105$code.=<<___;
2106.globl	bn_from_montgomery
2107.type	bn_from_montgomery,\@abi-omnipotent
2108.align	32
2109bn_from_montgomery:
2110.cfi_startproc
2111	testl	\$7,`($win64?"48(%rsp)":"%r9d")`
2112	jz	bn_from_mont8x
2113	xor	%eax,%eax
2114	ret
2115.cfi_endproc
2116.size	bn_from_montgomery,.-bn_from_montgomery
2117
2118.type	bn_from_mont8x,\@function,6
2119.align	32
2120bn_from_mont8x:
2121.cfi_startproc
2122	.byte	0x67
2123	mov	%rsp,%rax
2124.cfi_def_cfa_register	%rax
2125	push	%rbx
2126.cfi_push	%rbx
2127	push	%rbp
2128.cfi_push	%rbp
2129	push	%r12
2130.cfi_push	%r12
2131	push	%r13
2132.cfi_push	%r13
2133	push	%r14
2134.cfi_push	%r14
2135	push	%r15
2136.cfi_push	%r15
2137.Lfrom_prologue:
2138
2139	shl	\$3,${num}d		# convert $num to bytes
2140	lea	($num,$num,2),%r10	# 3*$num in bytes
2141	neg	$num
2142	mov	($n0),$n0		# *n0
2143
2144	##############################################################
2145	# Ensure that stack frame doesn't alias with $rptr+3*$num
2146	# modulo 4096, which covers ret[num], am[num] and n[num]
2147	# (see bn_exp.c). The stack is allocated so as to align with
2148	# bn_power5's frame, and as bn_from_montgomery happens to be the
2149	# last operation, we use the opportunity to cleanse it.
2150	#
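	#
	# For illustration only, a hedged sketch of what this routine computes:
	# leaving Montgomery form is a Montgomery multiplication by 1, i.e. the
	# input is widened to 2*num words with a zero upper half and then run
	# through the same reduction as the squaring path. The helper names
	# below are assumptions, not OpenSSL API.
	#
	#	/* r = a * R^-1 mod n, where R = 2^(64*num) */
	#	copy_words(t, a, num);         /* the .Lmul_by_1 loop below  */
	#	zero_words(t + num, num);
	#	mont_reduce(t, np, n0, num);   /* sqr8x / sqrx8x reduction   */
	#	/* the reduced result ends up in the upper half of t[]       */
	#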
2151	lea	-320(%rsp,$num,2),%r11
2152	mov	%rsp,%rbp
2153	sub	$rptr,%r11
2154	and	\$4095,%r11
2155	cmp	%r11,%r10
2156	jb	.Lfrom_sp_alt
2157	sub	%r11,%rbp		# align with $aptr
2158	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
2159	jmp	.Lfrom_sp_done
2160
2161.align	32
2162.Lfrom_sp_alt:
2163	lea	4096-320(,$num,2),%r10
2164	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
2165	sub	%r10,%r11
2166	mov	\$0,%r10
2167	cmovc	%r10,%r11
2168	sub	%r11,%rbp
2169.Lfrom_sp_done:
2170	and	\$-64,%rbp
2171	mov	%rsp,%r11
2172	sub	%rbp,%r11
2173	and	\$-4096,%r11
2174	lea	(%rbp,%r11),%rsp
2175	mov	(%rsp),%r10
2176	cmp	%rbp,%rsp
2177	ja	.Lfrom_page_walk
2178	jmp	.Lfrom_page_walk_done
2179
2180.Lfrom_page_walk:
2181	lea	-4096(%rsp),%rsp
2182	mov	(%rsp),%r10
2183	cmp	%rbp,%rsp
2184	ja	.Lfrom_page_walk
2185.Lfrom_page_walk_done:
2186
2187	mov	$num,%r10
2188	neg	$num
2189
2190	##############################################################
2191	# Stack layout
2192	#
2193	# +0	saved $num, used in reduction section
2194	# +8	&t[2*$num], used in reduction section
2195	# +32	saved *n0
2196	# +40	saved %rsp
2197	# +48	t[2*$num]
2198	#
2199	mov	$n0,  32(%rsp)
2200	mov	%rax, 40(%rsp)		# save original %rsp
2201.cfi_cfa_expression	%rsp+40,deref,+8
2202.Lfrom_body:
2203	mov	$num,%r11
2204	lea	48(%rsp),%rax
2205	pxor	%xmm0,%xmm0
2206	jmp	.Lmul_by_1
2207
2208.align	32
2209.Lmul_by_1:
2210	movdqu	($aptr),%xmm1
2211	movdqu	16($aptr),%xmm2
2212	movdqu	32($aptr),%xmm3
2213	movdqa	%xmm0,(%rax,$num)
2214	movdqu	48($aptr),%xmm4
2215	movdqa	%xmm0,16(%rax,$num)
2216	.byte	0x48,0x8d,0xb6,0x40,0x00,0x00,0x00	# lea	64($aptr),$aptr
2217	movdqa	%xmm1,(%rax)
2218	movdqa	%xmm0,32(%rax,$num)
2219	movdqa	%xmm2,16(%rax)
2220	movdqa	%xmm0,48(%rax,$num)
2221	movdqa	%xmm3,32(%rax)
2222	movdqa	%xmm4,48(%rax)
2223	lea	64(%rax),%rax
2224	sub	\$64,%r11
2225	jnz	.Lmul_by_1
2226
2227	movq	$rptr,%xmm1
2228	movq	$nptr,%xmm2
2229	.byte	0x67
2230	mov	$nptr,%rbp
2231	movq	%r10, %xmm3		# -num
2232___
2233$code.=<<___ if ($addx);
2234	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
2235	and	\$0x80108,%r11d
2236	cmp	\$0x80108,%r11d		# check for AD*X+BMI2+BMI1
2237	jne	.Lfrom_mont_nox
2238
2239	lea	(%rax,$num),$rptr
2240	call	__bn_sqrx8x_reduction
2241	call	__bn_postx4x_internal
2242
2243	pxor	%xmm0,%xmm0
2244	lea	48(%rsp),%rax
2245	jmp	.Lfrom_mont_zero
2246
2247.align	32
2248.Lfrom_mont_nox:
2249___
2250$code.=<<___;
2251	call	__bn_sqr8x_reduction
2252	call	__bn_post4x_internal
2253
2254	pxor	%xmm0,%xmm0
2255	lea	48(%rsp),%rax
2256	jmp	.Lfrom_mont_zero
2257
2258.align	32
2259.Lfrom_mont_zero:
2260	mov	40(%rsp),%rsi		# restore %rsp
2261.cfi_def_cfa	%rsi,8
2262	movdqa	%xmm0,16*0(%rax)
2263	movdqa	%xmm0,16*1(%rax)
2264	movdqa	%xmm0,16*2(%rax)
2265	movdqa	%xmm0,16*3(%rax)
2266	lea	16*4(%rax),%rax
2267	sub	\$32,$num
2268	jnz	.Lfrom_mont_zero
2269
2270	mov	\$1,%rax
2271	mov	-48(%rsi),%r15
2272.cfi_restore	%r15
2273	mov	-40(%rsi),%r14
2274.cfi_restore	%r14
2275	mov	-32(%rsi),%r13
2276.cfi_restore	%r13
2277	mov	-24(%rsi),%r12
2278.cfi_restore	%r12
2279	mov	-16(%rsi),%rbp
2280.cfi_restore	%rbp
2281	mov	-8(%rsi),%rbx
2282.cfi_restore	%rbx
2283	lea	(%rsi),%rsp
2284.cfi_def_cfa_register	%rsp
2285.Lfrom_epilogue:
2286	ret
2287.cfi_endproc
2288.size	bn_from_mont8x,.-bn_from_mont8x
2289___
2290}
2291}}}
2292
2293if ($addx) {{{
2294my $bp="%rdx";	# restore original value
2295
2296$code.=<<___;
2297.type	bn_mulx4x_mont_gather5,\@function,6
2298.align	32
2299bn_mulx4x_mont_gather5:
2300.cfi_startproc
2301	mov	%rsp,%rax
2302.cfi_def_cfa_register	%rax
2303.Lmulx4x_enter:
2304	push	%rbx
2305.cfi_push	%rbx
2306	push	%rbp
2307.cfi_push	%rbp
2308	push	%r12
2309.cfi_push	%r12
2310	push	%r13
2311.cfi_push	%r13
2312	push	%r14
2313.cfi_push	%r14
2314	push	%r15
2315.cfi_push	%r15
2316.Lmulx4x_prologue:
2317
2318	shl	\$3,${num}d		# convert $num to bytes
2319	lea	($num,$num,2),%r10	# 3*$num in bytes
2320	neg	$num			# -$num
2321	mov	($n0),$n0		# *n0
2322
2323	##############################################################
2324	# Ensure that stack frame doesn't alias with $rptr+3*$num
2325	# modulo 4096, which covers ret[num], am[num] and n[num]
2326	# (see bn_exp.c). This is done to allow memory disambiguation
2327	# logic to do its magic. [Extra [num] is allocated in order
2328	# to align with bn_power5's frame, which is cleansed after
2329	# completing exponentiation. Extra 256 bytes are for the power mask
2330	# calculated from the 7th argument, the index.]
2331	#
2332	lea	-320(%rsp,$num,2),%r11
2333	mov	%rsp,%rbp
2334	sub	$rp,%r11
2335	and	\$4095,%r11
2336	cmp	%r11,%r10
2337	jb	.Lmulx4xsp_alt
2338	sub	%r11,%rbp		# align with $aptr
2339	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
2340	jmp	.Lmulx4xsp_done
2341
2342.Lmulx4xsp_alt:
2343	lea	4096-320(,$num,2),%r10
2344	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
2345	sub	%r10,%r11
2346	mov	\$0,%r10
2347	cmovc	%r10,%r11
2348	sub	%r11,%rbp
2349.Lmulx4xsp_done:
2350	and	\$-64,%rbp		# ensure alignment
2351	mov	%rsp,%r11
2352	sub	%rbp,%r11
2353	and	\$-4096,%r11
2354	lea	(%rbp,%r11),%rsp
2355	mov	(%rsp),%r10
2356	cmp	%rbp,%rsp
2357	ja	.Lmulx4x_page_walk
2358	jmp	.Lmulx4x_page_walk_done
2359
2360.Lmulx4x_page_walk:
2361	lea	-4096(%rsp),%rsp
2362	mov	(%rsp),%r10
2363	cmp	%rbp,%rsp
2364	ja	.Lmulx4x_page_walk
2365.Lmulx4x_page_walk_done:
2366
2367	##############################################################
2368	# Stack layout
2369	# +0	-num
2370	# +8	off-loaded &b[i]
2371	# +16	end of b[num]
2372	# +24	inner counter
2373	# +32	saved n0
2374	# +40	saved %rsp
2375	# +48
2376	# +56	saved rp
2377	# +64	tmp[num+1]
2378	#
2379	mov	$n0, 32(%rsp)		# save *n0
2380	mov	%rax,40(%rsp)		# save original %rsp
2381.cfi_cfa_expression	%rsp+40,deref,+8
2382.Lmulx4x_body:
2383	call	mulx4x_internal
2384
2385	mov	40(%rsp),%rsi		# restore %rsp
2386.cfi_def_cfa	%rsi,8
2387	mov	\$1,%rax
2388
2389	mov	-48(%rsi),%r15
2390.cfi_restore	%r15
2391	mov	-40(%rsi),%r14
2392.cfi_restore	%r14
2393	mov	-32(%rsi),%r13
2394.cfi_restore	%r13
2395	mov	-24(%rsi),%r12
2396.cfi_restore	%r12
2397	mov	-16(%rsi),%rbp
2398.cfi_restore	%rbp
2399	mov	-8(%rsi),%rbx
2400.cfi_restore	%rbx
2401	lea	(%rsi),%rsp
2402.cfi_def_cfa_register	%rsp
2403.Lmulx4x_epilogue:
2404	ret
2405.cfi_endproc
2406.size	bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
2407
2408.type	mulx4x_internal,\@abi-omnipotent
2409.align	32
2410mulx4x_internal:
2411.cfi_startproc
2412	mov	$num,8(%rsp)		# save -$num (it was in bytes)
2413	mov	$num,%r10
2414	neg	$num			# restore $num
2415	shl	\$5,$num
2416	neg	%r10			# restore $num
2417	lea	128($bp,$num),%r13	# end of powers table (+size optimization)
2418	shr	\$5+5,$num
2419	movd	`($win64?56:8)`(%rax),%xmm5	# load 7th argument
2420	sub	\$1,$num
2421	lea	.Linc(%rip),%rax
2422	mov	%r13,16+8(%rsp)		# end of b[num]
2423	mov	$num,24+8(%rsp)		# inner counter
2424	mov	$rp, 56+8(%rsp)		# save $rp
2425___
2426my ($aptr, $bptr, $nptr, $tptr, $mi,  $bi,  $zero, $num)=
2427   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
2428my $rptr=$bptr;
2429my $STRIDE=2**5*8;		# 5 is "window size"
2430my $N=$STRIDE/4;		# should match cache line size
2431$code.=<<___;
2432	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
2433	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
2434	lea	88-112(%rsp,%r10),%r10	# place the mask after tp[num+1] (+ICache optimization)
2435	lea	128($bp),$bptr		# size optimization
2436
2437	pshufd	\$0,%xmm5,%xmm5		# broadcast index
2438	movdqa	%xmm1,%xmm4
2439	.byte	0x67
2440	movdqa	%xmm1,%xmm2
2441___
2442########################################################################
2443# calculate mask by comparing 0..31 to index and save result to stack
2444#
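# In plain C the mask/pand/por sequence that follows amounts to the usual
# constant-time gather: every one of the 32 candidate entries is read and
# masked, so no load address ever depends on the secret index.  A sketch
# under illustrative names (gather32, tbl, idx; the real table is laid out
# interleaved across cache lines rather than flat):
#
#	#include <stdint.h>
#	static void gather32(uint64_t *out, const uint64_t *tbl,
#	                     int nwords, uint32_t idx)
#	{
#		int i, j;
#		for (j = 0; j < nwords; j++) out[j] = 0;
#		for (i = 0; i < 32; i++) {
#			uint64_t mask = 0 - (uint64_t)((uint32_t)i == idx);
#			for (j = 0; j < nwords; j++)
#				out[j] |= tbl[i*nwords + j] & mask;
#		}
#	}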
2445$code.=<<___;
2446	.byte	0x67
2447	paddd	%xmm0,%xmm1
2448	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
2449	movdqa	%xmm4,%xmm3
2450___
2451for($i=0;$i<$STRIDE/16-4;$i+=4) {
2452$code.=<<___;
2453	paddd	%xmm1,%xmm2
2454	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
2455	movdqa	%xmm0,`16*($i+0)+112`(%r10)
2456	movdqa	%xmm4,%xmm0
2457
2458	paddd	%xmm2,%xmm3
2459	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
2460	movdqa	%xmm1,`16*($i+1)+112`(%r10)
2461	movdqa	%xmm4,%xmm1
2462
2463	paddd	%xmm3,%xmm0
2464	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
2465	movdqa	%xmm2,`16*($i+2)+112`(%r10)
2466	movdqa	%xmm4,%xmm2
2467
2468	paddd	%xmm0,%xmm1
2469	pcmpeqd	%xmm5,%xmm0
2470	movdqa	%xmm3,`16*($i+3)+112`(%r10)
2471	movdqa	%xmm4,%xmm3
2472___
2473}
2474$code.=<<___;				# last iteration can be optimized
2475	.byte	0x67
2476	paddd	%xmm1,%xmm2
2477	pcmpeqd	%xmm5,%xmm1
2478	movdqa	%xmm0,`16*($i+0)+112`(%r10)
2479
2480	paddd	%xmm2,%xmm3
2481	pcmpeqd	%xmm5,%xmm2
2482	movdqa	%xmm1,`16*($i+1)+112`(%r10)
2483
2484	pcmpeqd	%xmm5,%xmm3
2485	movdqa	%xmm2,`16*($i+2)+112`(%r10)
2486
2487	pand	`16*($i+0)-128`($bptr),%xmm0	# while it's still in register
2488	pand	`16*($i+1)-128`($bptr),%xmm1
2489	pand	`16*($i+2)-128`($bptr),%xmm2
2490	movdqa	%xmm3,`16*($i+3)+112`(%r10)
2491	pand	`16*($i+3)-128`($bptr),%xmm3
2492	por	%xmm2,%xmm0
2493	por	%xmm3,%xmm1
2494___
2495for($i=0;$i<$STRIDE/16-4;$i+=4) {
2496$code.=<<___;
2497	movdqa	`16*($i+0)-128`($bptr),%xmm4
2498	movdqa	`16*($i+1)-128`($bptr),%xmm5
2499	movdqa	`16*($i+2)-128`($bptr),%xmm2
2500	pand	`16*($i+0)+112`(%r10),%xmm4
2501	movdqa	`16*($i+3)-128`($bptr),%xmm3
2502	pand	`16*($i+1)+112`(%r10),%xmm5
2503	por	%xmm4,%xmm0
2504	pand	`16*($i+2)+112`(%r10),%xmm2
2505	por	%xmm5,%xmm1
2506	pand	`16*($i+3)+112`(%r10),%xmm3
2507	por	%xmm2,%xmm0
2508	por	%xmm3,%xmm1
2509___
2510}
2511$code.=<<___;
2512	pxor	%xmm1,%xmm0
2513	pshufd	\$0x4e,%xmm0,%xmm1
2514	por	%xmm1,%xmm0
2515	lea	$STRIDE($bptr),$bptr
2516	movq	%xmm0,%rdx		# bp[0]
2517	lea	64+8*4+8(%rsp),$tptr
2518
2519	mov	%rdx,$bi
2520	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
2521	mulx	1*8($aptr),%r11,%r12	# a[1]*b[0]
2522	add	%rax,%r11
2523	mulx	2*8($aptr),%rax,%r13	# ...
2524	adc	%rax,%r12
2525	adc	\$0,%r13
2526	mulx	3*8($aptr),%rax,%r14
2527
2528	mov	$mi,%r15
2529	imulq	32+8(%rsp),$mi		# "t[0]"*n0
2530	xor	$zero,$zero		# cf=0, of=0
2531	mov	$mi,%rdx
2532
2533	mov	$bptr,8+8(%rsp)		# off-load &b[i]
2534
2535	lea	4*8($aptr),$aptr
2536	adcx	%rax,%r13
2537	adcx	$zero,%r14		# cf=0
2538
2539	mulx	0*8($nptr),%rax,%r10
2540	adcx	%rax,%r15		# discarded
2541	adox	%r11,%r10
2542	mulx	1*8($nptr),%rax,%r11
2543	adcx	%rax,%r10
2544	adox	%r12,%r11
2545	mulx	2*8($nptr),%rax,%r12
2546	mov	24+8(%rsp),$bptr	# counter value
2547	mov	%r10,-8*4($tptr)
2548	adcx	%rax,%r11
2549	adox	%r13,%r12
2550	mulx	3*8($nptr),%rax,%r15
2551	 mov	$bi,%rdx
2552	mov	%r11,-8*3($tptr)
2553	adcx	%rax,%r12
2554	adox	$zero,%r15		# of=0
2555	lea	4*8($nptr),$nptr
2556	mov	%r12,-8*2($tptr)
2557	jmp	.Lmulx4x_1st
2558
2559.align	32
2560.Lmulx4x_1st:
2561	adcx	$zero,%r15		# cf=0, modulo-scheduled
2562	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
2563	adcx	%r14,%r10
2564	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
2565	adcx	%rax,%r11
2566	mulx	2*8($aptr),%r12,%rax	# ...
2567	adcx	%r14,%r12
2568	mulx	3*8($aptr),%r13,%r14
2569	 .byte	0x67,0x67
2570	 mov	$mi,%rdx
2571	adcx	%rax,%r13
2572	adcx	$zero,%r14		# cf=0
2573	lea	4*8($aptr),$aptr
2574	lea	4*8($tptr),$tptr
2575
2576	adox	%r15,%r10
2577	mulx	0*8($nptr),%rax,%r15
2578	adcx	%rax,%r10
2579	adox	%r15,%r11
2580	mulx	1*8($nptr),%rax,%r15
2581	adcx	%rax,%r11
2582	adox	%r15,%r12
2583	mulx	2*8($nptr),%rax,%r15
2584	mov	%r10,-5*8($tptr)
2585	adcx	%rax,%r12
2586	mov	%r11,-4*8($tptr)
2587	adox	%r15,%r13
2588	mulx	3*8($nptr),%rax,%r15
2589	 mov	$bi,%rdx
2590	mov	%r12,-3*8($tptr)
2591	adcx	%rax,%r13
2592	adox	$zero,%r15
2593	lea	4*8($nptr),$nptr
2594	mov	%r13,-2*8($tptr)
2595
2596	dec	$bptr			# of=0, pass cf
2597	jnz	.Lmulx4x_1st
2598
2599	mov	8(%rsp),$num		# load -num
2600	adc	$zero,%r15		# modulo-scheduled
2601	lea	($aptr,$num),$aptr	# rewind $aptr
2602	add	%r15,%r14
2603	mov	8+8(%rsp),$bptr		# re-load &b[i]
2604	adc	$zero,$zero		# top-most carry
2605	mov	%r14,-1*8($tptr)
2606	jmp	.Lmulx4x_outer
2607
2608.align	32
2609.Lmulx4x_outer:
2610	lea	16-256($tptr),%r10	# where 256-byte mask is (+density control)
2611	pxor	%xmm4,%xmm4
2612	.byte	0x67,0x67
2613	pxor	%xmm5,%xmm5
2614___
2615for($i=0;$i<$STRIDE/16;$i+=4) {
2616$code.=<<___;
2617	movdqa	`16*($i+0)-128`($bptr),%xmm0
2618	movdqa	`16*($i+1)-128`($bptr),%xmm1
2619	movdqa	`16*($i+2)-128`($bptr),%xmm2
2620	pand	`16*($i+0)+256`(%r10),%xmm0
2621	movdqa	`16*($i+3)-128`($bptr),%xmm3
2622	pand	`16*($i+1)+256`(%r10),%xmm1
2623	por	%xmm0,%xmm4
2624	pand	`16*($i+2)+256`(%r10),%xmm2
2625	por	%xmm1,%xmm5
2626	pand	`16*($i+3)+256`(%r10),%xmm3
2627	por	%xmm2,%xmm4
2628	por	%xmm3,%xmm5
2629___
2630}
2631$code.=<<___;
2632	por	%xmm5,%xmm4
2633	pshufd	\$0x4e,%xmm4,%xmm0
2634	por	%xmm4,%xmm0
2635	lea	$STRIDE($bptr),$bptr
2636	movq	%xmm0,%rdx		# m0=bp[i]
2637
2638	mov	$zero,($tptr)		# save top-most carry
2639	lea	4*8($tptr,$num),$tptr	# rewind $tptr
2640	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
2641	xor	$zero,$zero		# cf=0, of=0
2642	mov	%rdx,$bi
2643	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
2644	adox	-4*8($tptr),$mi		# +t[0]
2645	adcx	%r14,%r11
2646	mulx	2*8($aptr),%r15,%r13	# ...
2647	adox	-3*8($tptr),%r11
2648	adcx	%r15,%r12
2649	mulx	3*8($aptr),%rdx,%r14
2650	adox	-2*8($tptr),%r12
2651	adcx	%rdx,%r13
2652	lea	($nptr,$num),$nptr	# rewind $nptr
2653	lea	4*8($aptr),$aptr
2654	adox	-1*8($tptr),%r13
2655	adcx	$zero,%r14
2656	adox	$zero,%r14
2657
2658	mov	$mi,%r15
2659	imulq	32+8(%rsp),$mi		# "t[0]"*n0
2660
2661	mov	$mi,%rdx
2662	xor	$zero,$zero		# cf=0, of=0
2663	mov	$bptr,8+8(%rsp)		# off-load &b[i]
2664
2665	mulx	0*8($nptr),%rax,%r10
2666	adcx	%rax,%r15		# discarded
2667	adox	%r11,%r10
2668	mulx	1*8($nptr),%rax,%r11
2669	adcx	%rax,%r10
2670	adox	%r12,%r11
2671	mulx	2*8($nptr),%rax,%r12
2672	adcx	%rax,%r11
2673	adox	%r13,%r12
2674	mulx	3*8($nptr),%rax,%r15
2675	 mov	$bi,%rdx
2676	mov	24+8(%rsp),$bptr	# counter value
2677	mov	%r10,-8*4($tptr)
2678	adcx	%rax,%r12
2679	mov	%r11,-8*3($tptr)
2680	adox	$zero,%r15		# of=0
2681	mov	%r12,-8*2($tptr)
2682	lea	4*8($nptr),$nptr
2683	jmp	.Lmulx4x_inner
2684
2685.align	32
2686.Lmulx4x_inner:
2687	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
2688	adcx	$zero,%r15		# cf=0, modulo-scheduled
2689	adox	%r14,%r10
2690	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
2691	adcx	0*8($tptr),%r10
2692	adox	%rax,%r11
2693	mulx	2*8($aptr),%r12,%rax	# ...
2694	adcx	1*8($tptr),%r11
2695	adox	%r14,%r12
2696	mulx	3*8($aptr),%r13,%r14
2697	 mov	$mi,%rdx
2698	adcx	2*8($tptr),%r12
2699	adox	%rax,%r13
2700	adcx	3*8($tptr),%r13
2701	adox	$zero,%r14		# of=0
2702	lea	4*8($aptr),$aptr
2703	lea	4*8($tptr),$tptr
2704	adcx	$zero,%r14		# cf=0
2705
2706	adox	%r15,%r10
2707	mulx	0*8($nptr),%rax,%r15
2708	adcx	%rax,%r10
2709	adox	%r15,%r11
2710	mulx	1*8($nptr),%rax,%r15
2711	adcx	%rax,%r11
2712	adox	%r15,%r12
2713	mulx	2*8($nptr),%rax,%r15
2714	mov	%r10,-5*8($tptr)
2715	adcx	%rax,%r12
2716	adox	%r15,%r13
2717	mov	%r11,-4*8($tptr)
2718	mulx	3*8($nptr),%rax,%r15
2719	 mov	$bi,%rdx
2720	lea	4*8($nptr),$nptr
2721	mov	%r12,-3*8($tptr)
2722	adcx	%rax,%r13
2723	adox	$zero,%r15
2724	mov	%r13,-2*8($tptr)
2725
2726	dec	$bptr			# of=0, pass cf
2727	jnz	.Lmulx4x_inner
2728
2729	mov	0+8(%rsp),$num		# load -num
2730	adc	$zero,%r15		# modulo-scheduled
2731	sub	0*8($tptr),$bptr	# pull top-most carry to %cf
2732	mov	8+8(%rsp),$bptr		# re-load &b[i]
2733	mov	16+8(%rsp),%r10
2734	adc	%r15,%r14
2735	lea	($aptr,$num),$aptr	# rewind $aptr
2736	adc	$zero,$zero		# top-most carry
2737	mov	%r14,-1*8($tptr)
2738
2739	cmp	%r10,$bptr
2740	jb	.Lmulx4x_outer
2741
2742	mov	-8($nptr),%r10
2743	mov	$zero,%r8
2744	mov	($nptr,$num),%r12
2745	lea	($nptr,$num),%rbp	# rewind $nptr
2746	mov	$num,%rcx
2747	lea	($tptr,$num),%rdi	# rewind $tptr
2748	xor	%eax,%eax
2749	xor	%r15,%r15
2750	sub	%r14,%r10		# compare top-most words
2751	adc	%r15,%r15
2752	or	%r15,%r8
2753	sar	\$3+2,%rcx
2754	sub	%r8,%rax		# %rax=-%r8
2755	mov	56+8(%rsp),%rdx		# restore rp
2756	dec	%r12			# so that after 'not' we get -n[0]
2757	mov	8*1(%rbp),%r13
2758	xor	%r8,%r8
2759	mov	8*2(%rbp),%r14
2760	mov	8*3(%rbp),%r15
2761	jmp	.Lsqrx4x_sub_entry	# common post-condition
2762.cfi_endproc
2763.size	mulx4x_internal,.-mulx4x_internal
2764___
2765}{
2766######################################################################
2767# void bn_power5(
2768my $rptr="%rdi";	# BN_ULONG *rptr,
2769my $aptr="%rsi";	# const BN_ULONG *aptr,
2770my $bptr="%rdx";	# const void *table,
2771my $nptr="%rcx";	# const BN_ULONG *nptr,
2772my $n0  ="%r8";		# const BN_ULONG *n0);
2773my $num ="%r9";		# int num, has to be divisible by 8
2774			# int pwr);
2775
2776my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
2777my @A0=("%r10","%r11");
2778my @A1=("%r12","%r13");
2779my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
2780
2781$code.=<<___;
2782.type	bn_powerx5,\@function,6
2783.align	32
2784bn_powerx5:
2785.cfi_startproc
2786	mov	%rsp,%rax
2787.cfi_def_cfa_register	%rax
2788.Lpowerx5_enter:
2789	push	%rbx
2790.cfi_push	%rbx
2791	push	%rbp
2792.cfi_push	%rbp
2793	push	%r12
2794.cfi_push	%r12
2795	push	%r13
2796.cfi_push	%r13
2797	push	%r14
2798.cfi_push	%r14
2799	push	%r15
2800.cfi_push	%r15
2801.Lpowerx5_prologue:
2802
2803	shl	\$3,${num}d		# convert $num to bytes
2804	lea	($num,$num,2),%r10	# 3*$num in bytes
2805	neg	$num
2806	mov	($n0),$n0		# *n0
2807
2808	##############################################################
2809	# Ensure that stack frame doesn't alias with $rptr+3*$num
2810	# modulo 4096, which covers ret[num], am[num] and n[num]
2811	# (see bn_exp.c). This is done to allow memory disambiguation
2812	# logic to do its magic. [Extra 256 bytes are for the power mask
2813	# calculated from the 7th argument, the index.]
2814	#
2815	lea	-320(%rsp,$num,2),%r11
2816	mov	%rsp,%rbp
2817	sub	$rptr,%r11
2818	and	\$4095,%r11
2819	cmp	%r11,%r10
2820	jb	.Lpwrx_sp_alt
2821	sub	%r11,%rbp		# align with $aptr
2822	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
2823	jmp	.Lpwrx_sp_done
2824
2825.align	32
2826.Lpwrx_sp_alt:
2827	lea	4096-320(,$num,2),%r10
2828	lea	-320(%rbp,$num,2),%rbp	# alloca(frame+2*$num*8+256)
2829	sub	%r10,%r11
2830	mov	\$0,%r10
2831	cmovc	%r10,%r11
2832	sub	%r11,%rbp
2833.Lpwrx_sp_done:
2834	and	\$-64,%rbp
2835	mov	%rsp,%r11
2836	sub	%rbp,%r11
2837	and	\$-4096,%r11
2838	lea	(%rbp,%r11),%rsp
2839	mov	(%rsp),%r10
2840	cmp	%rbp,%rsp
2841	ja	.Lpwrx_page_walk
2842	jmp	.Lpwrx_page_walk_done
2843
2844.Lpwrx_page_walk:
2845	lea	-4096(%rsp),%rsp
2846	mov	(%rsp),%r10
2847	cmp	%rbp,%rsp
2848	ja	.Lpwrx_page_walk
2849.Lpwrx_page_walk_done:
2850
2851	mov	$num,%r10
2852	neg	$num
2853
2854	##############################################################
2855	# Stack layout
2856	#
2857	# +0	saved $num, used in reduction section
2858	# +8	&t[2*$num], used in reduction section
2859	# +16	intermediate carry bit
2860	# +24	top-most carry bit, used in reduction section
2861	# +32	saved *n0
2862	# +40	saved %rsp
2863	# +48	t[2*$num]
2864	#
2865	pxor	%xmm0,%xmm0
2866	movq	$rptr,%xmm1		# save $rptr
2867	movq	$nptr,%xmm2		# save $nptr
2868	movq	%r10, %xmm3		# -$num
2869	movq	$bptr,%xmm4
2870	mov	$n0,  32(%rsp)
2871	mov	%rax, 40(%rsp)		# save original %rsp
2872.cfi_cfa_expression	%rsp+40,deref,+8
2873.Lpowerx5_body:
2874
2875	call	__bn_sqrx8x_internal
2876	call	__bn_postx4x_internal
2877	call	__bn_sqrx8x_internal
2878	call	__bn_postx4x_internal
2879	call	__bn_sqrx8x_internal
2880	call	__bn_postx4x_internal
2881	call	__bn_sqrx8x_internal
2882	call	__bn_postx4x_internal
2883	call	__bn_sqrx8x_internal
2884	call	__bn_postx4x_internal
2885
2886	mov	%r10,$num		# -num
2887	mov	$aptr,$rptr
2888	movq	%xmm2,$nptr
2889	movq	%xmm4,$bptr
2890	mov	40(%rsp),%rax
2891
2892	call	mulx4x_internal
2893
2894	mov	40(%rsp),%rsi		# restore %rsp
2895.cfi_def_cfa	%rsi,8
2896	mov	\$1,%rax
2897
2898	mov	-48(%rsi),%r15
2899.cfi_restore	%r15
2900	mov	-40(%rsi),%r14
2901.cfi_restore	%r14
2902	mov	-32(%rsi),%r13
2903.cfi_restore	%r13
2904	mov	-24(%rsi),%r12
2905.cfi_restore	%r12
2906	mov	-16(%rsi),%rbp
2907.cfi_restore	%rbp
2908	mov	-8(%rsi),%rbx
2909.cfi_restore	%rbx
2910	lea	(%rsi),%rsp
2911.cfi_def_cfa_register	%rsp
2912.Lpowerx5_epilogue:
2913	ret
2914.cfi_endproc
2915.size	bn_powerx5,.-bn_powerx5
2916
2917.globl	bn_sqrx8x_internal
2918.hidden	bn_sqrx8x_internal
2919.type	bn_sqrx8x_internal,\@abi-omnipotent
2920.align	32
2921bn_sqrx8x_internal:
2922__bn_sqrx8x_internal:
2923.cfi_startproc
2924	##################################################################
2925	# Squaring part:
2926	#
2927	# a) multiply-n-add everything but a[i]*a[i];
2928	# b) shift result of a) by 1 to the left and accumulate
2929	#    a[i]*a[i] products;
2930	#
2931	##################################################################
2932	# a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2933	#                                                     a[1]a[0]
2934	#                                                 a[2]a[0]
2935	#                                             a[3]a[0]
2936	#                                             a[2]a[1]
2937	#                                         a[3]a[1]
2938	#                                     a[3]a[2]
2939	#
2940	#                                         a[4]a[0]
2941	#                                     a[5]a[0]
2942	#                                 a[6]a[0]
2943	#                             a[7]a[0]
2944	#                                     a[4]a[1]
2945	#                                 a[5]a[1]
2946	#                             a[6]a[1]
2947	#                         a[7]a[1]
2948	#                                 a[4]a[2]
2949	#                             a[5]a[2]
2950	#                         a[6]a[2]
2951	#                     a[7]a[2]
2952	#                             a[4]a[3]
2953	#                         a[5]a[3]
2954	#                     a[6]a[3]
2955	#                 a[7]a[3]
2956	#
2957	#                     a[5]a[4]
2958	#                 a[6]a[4]
2959	#             a[7]a[4]
2960	#             a[6]a[5]
2961	#         a[7]a[5]
2962	#     a[7]a[6]
2963	# a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
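	# For reference, the same two-pass strategy in plain schoolbook C,
	# using the compiler's double-width type for carries; sqr_ref, a, t
	# and n are illustrative names and the code below is scheduled very
	# differently:
	#
	#	#include <stdint.h>
	#	static void sqr_ref(uint64_t *t, const uint64_t *a, int n)
	#	{
	#		int i, j;
	#		uint64_t c;
	#		unsigned __int128 acc;
	#		for (i = 0; i < 2*n; i++) t[i] = 0;
	#		for (i = 0; i < n; i++) {	/* a) cross products  */
	#			for (c = 0, j = i+1; j < n; j++) {
	#				acc = (unsigned __int128)a[i]*a[j] + t[i+j] + c;
	#				t[i+j] = (uint64_t)acc;
	#				c = (uint64_t)(acc >> 64);
	#			}
	#			t[i+n] = c;
	#		}
	#		for (c = 0, i = 0; i < 2*n; i++) {	/* b) double ...     */
	#			uint64_t hi = t[i] >> 63;
	#			t[i] = (t[i] << 1) | c;
	#			c = hi;
	#		}
	#		for (c = 0, i = 0; i < n; i++) {	/*    ... add a[i]^2 */
	#			acc = (unsigned __int128)a[i]*a[i] + t[2*i] + c;
	#			t[2*i] = (uint64_t)acc;
	#			acc = (acc >> 64) + t[2*i+1];
	#			t[2*i+1] = (uint64_t)acc;
	#			c = (uint64_t)(acc >> 64);
	#		}
	#	}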
2964___
2965{
2966my ($zero,$carry)=("%rbp","%rcx");
2967my $aaptr=$zero;
2968$code.=<<___;
2969	lea	48+8(%rsp),$tptr
2970	lea	($aptr,$num),$aaptr
2971	mov	$num,0+8(%rsp)			# save $num
2972	mov	$aaptr,8+8(%rsp)		# save end of $aptr
2973	jmp	.Lsqr8x_zero_start
2974
2975.align	32
2976.byte	0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
2977.Lsqrx8x_zero:
2978	.byte	0x3e
2979	movdqa	%xmm0,0*8($tptr)
2980	movdqa	%xmm0,2*8($tptr)
2981	movdqa	%xmm0,4*8($tptr)
2982	movdqa	%xmm0,6*8($tptr)
2983.Lsqr8x_zero_start:			# aligned at 32
2984	movdqa	%xmm0,8*8($tptr)
2985	movdqa	%xmm0,10*8($tptr)
2986	movdqa	%xmm0,12*8($tptr)
2987	movdqa	%xmm0,14*8($tptr)
2988	lea	16*8($tptr),$tptr
2989	sub	\$64,$num
2990	jnz	.Lsqrx8x_zero
2991
2992	mov	0*8($aptr),%rdx		# a[0], modulo-scheduled
2993	#xor	%r9,%r9			# t[1], ex-$num, zero already
2994	xor	%r10,%r10
2995	xor	%r11,%r11
2996	xor	%r12,%r12
2997	xor	%r13,%r13
2998	xor	%r14,%r14
2999	xor	%r15,%r15
3000	lea	48+8(%rsp),$tptr
3001	xor	$zero,$zero		# cf=0, of=0
3002	jmp	.Lsqrx8x_outer_loop
3003
3004.align	32
3005.Lsqrx8x_outer_loop:
3006	mulx	1*8($aptr),%r8,%rax	# a[1]*a[0]
3007	adcx	%r9,%r8			# a[1]*a[0]+=t[1]
3008	adox	%rax,%r10
3009	mulx	2*8($aptr),%r9,%rax	# a[2]*a[0]
3010	adcx	%r10,%r9
3011	adox	%rax,%r11
3012	.byte	0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00	# mulx	3*8($aptr),%r10,%rax	# ...
3013	adcx	%r11,%r10
3014	adox	%rax,%r12
3015	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00	# mulx	4*8($aptr),%r11,%rax
3016	adcx	%r12,%r11
3017	adox	%rax,%r13
3018	mulx	5*8($aptr),%r12,%rax
3019	adcx	%r13,%r12
3020	adox	%rax,%r14
3021	mulx	6*8($aptr),%r13,%rax
3022	adcx	%r14,%r13
3023	adox	%r15,%rax
3024	mulx	7*8($aptr),%r14,%r15
3025	 mov	1*8($aptr),%rdx		# a[1]
3026	adcx	%rax,%r14
3027	adox	$zero,%r15
3028	adc	8*8($tptr),%r15
3029	mov	%r8,1*8($tptr)		# t[1]
3030	mov	%r9,2*8($tptr)		# t[2]
3031	sbb	$carry,$carry		# mov %cf,$carry
3032	xor	$zero,$zero		# cf=0, of=0
3033
3034
3035	mulx	2*8($aptr),%r8,%rbx	# a[2]*a[1]
3036	mulx	3*8($aptr),%r9,%rax	# a[3]*a[1]
3037	adcx	%r10,%r8
3038	adox	%rbx,%r9
3039	mulx	4*8($aptr),%r10,%rbx	# ...
3040	adcx	%r11,%r9
3041	adox	%rax,%r10
3042	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00	# mulx	5*8($aptr),%r11,%rax
3043	adcx	%r12,%r10
3044	adox	%rbx,%r11
3045	.byte	0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00	# mulx	6*8($aptr),%r12,%rbx
3046	adcx	%r13,%r11
3047	adox	%r14,%r12
3048	.byte	0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00	# mulx	7*8($aptr),%r13,%r14
3049	 mov	2*8($aptr),%rdx		# a[2]
3050	adcx	%rax,%r12
3051	adox	%rbx,%r13
3052	adcx	%r15,%r13
3053	adox	$zero,%r14		# of=0
3054	adcx	$zero,%r14		# cf=0
3055
3056	mov	%r8,3*8($tptr)		# t[3]
3057	mov	%r9,4*8($tptr)		# t[4]
3058
3059	mulx	3*8($aptr),%r8,%rbx	# a[3]*a[2]
3060	mulx	4*8($aptr),%r9,%rax	# a[4]*a[2]
3061	adcx	%r10,%r8
3062	adox	%rbx,%r9
3063	mulx	5*8($aptr),%r10,%rbx	# ...
3064	adcx	%r11,%r9
3065	adox	%rax,%r10
3066	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00	# mulx	6*8($aptr),%r11,%rax
3067	adcx	%r12,%r10
3068	adox	%r13,%r11
3069	.byte	0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00	# mulx	7*8($aptr),%r12,%r13
3070	.byte	0x3e
3071	 mov	3*8($aptr),%rdx		# a[3]
3072	adcx	%rbx,%r11
3073	adox	%rax,%r12
3074	adcx	%r14,%r12
3075	mov	%r8,5*8($tptr)		# t[5]
3076	mov	%r9,6*8($tptr)		# t[6]
3077	 mulx	4*8($aptr),%r8,%rax	# a[4]*a[3]
3078	adox	$zero,%r13		# of=0
3079	adcx	$zero,%r13		# cf=0
3080
3081	mulx	5*8($aptr),%r9,%rbx	# a[5]*a[3]
3082	adcx	%r10,%r8
3083	adox	%rax,%r9
3084	mulx	6*8($aptr),%r10,%rax	# ...
3085	adcx	%r11,%r9
3086	adox	%r12,%r10
3087	mulx	7*8($aptr),%r11,%r12
3088	 mov	4*8($aptr),%rdx		# a[4]
3089	 mov	5*8($aptr),%r14		# a[5]
3090	adcx	%rbx,%r10
3091	adox	%rax,%r11
3092	 mov	6*8($aptr),%r15		# a[6]
3093	adcx	%r13,%r11
3094	adox	$zero,%r12		# of=0
3095	adcx	$zero,%r12		# cf=0
3096
3097	mov	%r8,7*8($tptr)		# t[7]
3098	mov	%r9,8*8($tptr)		# t[8]
3099
3100	mulx	%r14,%r9,%rax		# a[5]*a[4]
3101	 mov	7*8($aptr),%r8		# a[7]
3102	adcx	%r10,%r9
3103	mulx	%r15,%r10,%rbx		# a[6]*a[4]
3104	adox	%rax,%r10
3105	adcx	%r11,%r10
3106	mulx	%r8,%r11,%rax		# a[7]*a[4]
3107	 mov	%r14,%rdx		# a[5]
3108	adox	%rbx,%r11
3109	adcx	%r12,%r11
3110	#adox	$zero,%rax		# of=0
3111	adcx	$zero,%rax		# cf=0
3112
3113	mulx	%r15,%r14,%rbx		# a[6]*a[5]
3114	mulx	%r8,%r12,%r13		# a[7]*a[5]
3115	 mov	%r15,%rdx		# a[6]
3116	 lea	8*8($aptr),$aptr
3117	adcx	%r14,%r11
3118	adox	%rbx,%r12
3119	adcx	%rax,%r12
3120	adox	$zero,%r13
3121
3122	.byte	0x67,0x67
3123	mulx	%r8,%r8,%r14		# a[7]*a[6]
3124	adcx	%r8,%r13
3125	adcx	$zero,%r14
3126
3127	cmp	8+8(%rsp),$aptr
3128	je	.Lsqrx8x_outer_break
3129
3130	neg	$carry			# mov $carry,%cf
3131	mov	\$-8,%rcx
3132	mov	$zero,%r15
3133	mov	8*8($tptr),%r8
3134	adcx	9*8($tptr),%r9		# +=t[9]
3135	adcx	10*8($tptr),%r10	# ...
3136	adcx	11*8($tptr),%r11
3137	adc	12*8($tptr),%r12
3138	adc	13*8($tptr),%r13
3139	adc	14*8($tptr),%r14
3140	adc	15*8($tptr),%r15
3141	lea	($aptr),$aaptr
3142	lea	2*64($tptr),$tptr
3143	sbb	%rax,%rax		# mov %cf,$carry
3144
3145	mov	-64($aptr),%rdx		# a[0]
3146	mov	%rax,16+8(%rsp)		# offload $carry
3147	mov	$tptr,24+8(%rsp)
3148
3149	#lea	8*8($tptr),$tptr	# see 2*8*8($tptr) above
3150	xor	%eax,%eax		# cf=0, of=0
3151	jmp	.Lsqrx8x_loop
3152
3153.align	32
3154.Lsqrx8x_loop:
3155	mov	%r8,%rbx
3156	mulx	0*8($aaptr),%rax,%r8	# a[8]*a[i]
3157	adcx	%rax,%rbx		# +=t[8]
3158	adox	%r9,%r8
3159
3160	mulx	1*8($aaptr),%rax,%r9	# ...
3161	adcx	%rax,%r8
3162	adox	%r10,%r9
3163
3164	mulx	2*8($aaptr),%rax,%r10
3165	adcx	%rax,%r9
3166	adox	%r11,%r10
3167
3168	mulx	3*8($aaptr),%rax,%r11
3169	adcx	%rax,%r10
3170	adox	%r12,%r11
3171
3172	.byte	0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx	4*8($aaptr),%rax,%r12
3173	adcx	%rax,%r11
3174	adox	%r13,%r12
3175
3176	mulx	5*8($aaptr),%rax,%r13
3177	adcx	%rax,%r12
3178	adox	%r14,%r13
3179
3180	mulx	6*8($aaptr),%rax,%r14
3181	 mov	%rbx,($tptr,%rcx,8)	# store t[8+i]
3182	 mov	\$0,%ebx
3183	adcx	%rax,%r13
3184	adox	%r15,%r14
3185
3186	.byte	0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00	# mulx	7*8($aaptr),%rax,%r15
3187	 mov	8($aptr,%rcx,8),%rdx	# a[i]
3188	adcx	%rax,%r14
3189	adox	%rbx,%r15		# %rbx is 0, of=0
3190	adcx	%rbx,%r15		# cf=0
3191
3192	.byte	0x67
3193	inc	%rcx			# of=0
3194	jnz	.Lsqrx8x_loop
3195
3196	lea	8*8($aaptr),$aaptr
3197	mov	\$-8,%rcx
3198	cmp	8+8(%rsp),$aaptr	# done?
3199	je	.Lsqrx8x_break
3200
3201	sub	16+8(%rsp),%rbx		# mov 16(%rsp),%cf
3202	.byte	0x66
3203	mov	-64($aptr),%rdx
3204	adcx	0*8($tptr),%r8
3205	adcx	1*8($tptr),%r9
3206	adc	2*8($tptr),%r10
3207	adc	3*8($tptr),%r11
3208	adc	4*8($tptr),%r12
3209	adc	5*8($tptr),%r13
3210	adc	6*8($tptr),%r14
3211	adc	7*8($tptr),%r15
3212	lea	8*8($tptr),$tptr
3213	.byte	0x67
3214	sbb	%rax,%rax		# mov %cf,%rax
3215	xor	%ebx,%ebx		# cf=0, of=0
3216	mov	%rax,16+8(%rsp)		# offload carry
3217	jmp	.Lsqrx8x_loop
3218
3219.align	32
3220.Lsqrx8x_break:
3221	xor	$zero,$zero
3222	sub	16+8(%rsp),%rbx		# mov 16(%rsp),%cf
3223	adcx	$zero,%r8
3224	mov	24+8(%rsp),$carry	# initial $tptr, borrow $carry
3225	adcx	$zero,%r9
3226	mov	0*8($aptr),%rdx		# a[8], modulo-scheduled
3227	adc	\$0,%r10
3228	mov	%r8,0*8($tptr)
3229	adc	\$0,%r11
3230	adc	\$0,%r12
3231	adc	\$0,%r13
3232	adc	\$0,%r14
3233	adc	\$0,%r15
3234	cmp	$carry,$tptr		# cf=0, of=0
3235	je	.Lsqrx8x_outer_loop
3236
3237	mov	%r9,1*8($tptr)
3238	 mov	1*8($carry),%r9
3239	mov	%r10,2*8($tptr)
3240	 mov	2*8($carry),%r10
3241	mov	%r11,3*8($tptr)
3242	 mov	3*8($carry),%r11
3243	mov	%r12,4*8($tptr)
3244	 mov	4*8($carry),%r12
3245	mov	%r13,5*8($tptr)
3246	 mov	5*8($carry),%r13
3247	mov	%r14,6*8($tptr)
3248	 mov	6*8($carry),%r14
3249	mov	%r15,7*8($tptr)
3250	 mov	7*8($carry),%r15
3251	mov	$carry,$tptr
3252	jmp	.Lsqrx8x_outer_loop
3253
3254.align	32
3255.Lsqrx8x_outer_break:
3256	mov	%r9,9*8($tptr)		# t[9]
3257	 movq	%xmm3,%rcx		# -$num
3258	mov	%r10,10*8($tptr)	# ...
3259	mov	%r11,11*8($tptr)
3260	mov	%r12,12*8($tptr)
3261	mov	%r13,13*8($tptr)
3262	mov	%r14,14*8($tptr)
3263___
3264}{
3265my $i="%rcx";
3266$code.=<<___;
3267	lea	48+8(%rsp),$tptr
3268	mov	($aptr,$i),%rdx		# a[0]
3269
3270	mov	8($tptr),$A0[1]		# t[1]
3271	xor	$A0[0],$A0[0]		# t[0], of=0, cf=0
3272	mov	0+8(%rsp),$num		# restore $num
3273	adox	$A0[1],$A0[1]
3274	 mov	16($tptr),$A1[0]	# t[2]	# prefetch
3275	 mov	24($tptr),$A1[1]	# t[3]	# prefetch
3276	#jmp	.Lsqrx4x_shift_n_add	# happens to be aligned
3277
3278.align	32
3279.Lsqrx4x_shift_n_add:
3280	mulx	%rdx,%rax,%rbx
3281	 adox	$A1[0],$A1[0]
3282	adcx	$A0[0],%rax
3283	 .byte	0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00	# mov	8($aptr,$i),%rdx	# a[i+1]	# prefetch
3284	 .byte	0x4c,0x8b,0x97,0x20,0x00,0x00,0x00	# mov	32($tptr),$A0[0]	# t[2*i+4]	# prefetch
3285	 adox	$A1[1],$A1[1]
3286	adcx	$A0[1],%rbx
3287	 mov	40($tptr),$A0[1]		# t[2*i+4+1]	# prefetch
3288	mov	%rax,0($tptr)
3289	mov	%rbx,8($tptr)
3290
3291	mulx	%rdx,%rax,%rbx
3292	 adox	$A0[0],$A0[0]
3293	adcx	$A1[0],%rax
3294	 mov	16($aptr,$i),%rdx	# a[i+2]	# prefetch
3295	 mov	48($tptr),$A1[0]	# t[2*i+6]	# prefetch
3296	 adox	$A0[1],$A0[1]
3297	adcx	$A1[1],%rbx
3298	 mov	56($tptr),$A1[1]	# t[2*i+6+1]	# prefetch
3299	mov	%rax,16($tptr)
3300	mov	%rbx,24($tptr)
3301
3302	mulx	%rdx,%rax,%rbx
3303	 adox	$A1[0],$A1[0]
3304	adcx	$A0[0],%rax
3305	 mov	24($aptr,$i),%rdx	# a[i+3]	# prefetch
3306	 lea	32($i),$i
3307	 mov	64($tptr),$A0[0]	# t[2*i+8]	# prefetch
3308	 adox	$A1[1],$A1[1]
3309	adcx	$A0[1],%rbx
3310	 mov	72($tptr),$A0[1]	# t[2*i+8+1]	# prefetch
3311	mov	%rax,32($tptr)
3312	mov	%rbx,40($tptr)
3313
3314	mulx	%rdx,%rax,%rbx
3315	 adox	$A0[0],$A0[0]
3316	adcx	$A1[0],%rax
3317	jrcxz	.Lsqrx4x_shift_n_add_break
3318	 .byte	0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00	# mov	0($aptr,$i),%rdx	# a[i+4]	# prefetch
3319	 adox	$A0[1],$A0[1]
3320	adcx	$A1[1],%rbx
3321	 mov	80($tptr),$A1[0]	# t[2*i+10]	# prefetch
3322	 mov	88($tptr),$A1[1]	# t[2*i+10+1]	# prefetch
3323	mov	%rax,48($tptr)
3324	mov	%rbx,56($tptr)
3325	lea	64($tptr),$tptr
3326	nop
3327	jmp	.Lsqrx4x_shift_n_add
3328
3329.align	32
3330.Lsqrx4x_shift_n_add_break:
3331	adcx	$A1[1],%rbx
3332	mov	%rax,48($tptr)
3333	mov	%rbx,56($tptr)
3334	lea	64($tptr),$tptr		# end of t[] buffer
3335___
3336}
3337######################################################################
3338# Montgomery reduction part, "word-by-word" algorithm.
3339#
3340# This new path is inspired by multiple submissions from Intel, by
3341# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
3342# Vinodh Gopal...
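# For orientation, the same reduction in plain C (redc_ref, t, np, n0 and n
# are illustrative names; the code below interleaves this with the squaring
# schedule and keeps the top-most carry in a separate word):
#
#	#include <stdint.h>
#	/* t[] holds 2n words of a value below N*2^(64*n), np[] the n-word
#	 * modulus N, n0 = -N^(-1) mod 2^64; the result ends up in
#	 * t[n..2n-1] and may still need one subtraction of N, which is the
#	 * post-condition handled further down. */
#	static void redc_ref(uint64_t *t, const uint64_t *np, uint64_t n0, int n)
#	{
#		int i, j, k;
#		for (i = 0; i < n; i++) {
#			uint64_t m = t[i] * n0, c = 0;	/* zeroes word t[i] */
#			for (j = 0; j < n; j++) {
#				unsigned __int128 acc =
#					(unsigned __int128)m*np[j] + t[i+j] + c;
#				t[i+j] = (uint64_t)acc;
#				c = (uint64_t)(acc >> 64);
#			}
#			for (k = i+n; c != 0 && k < 2*n; k++) {
#				unsigned __int128 acc = (unsigned __int128)t[k] + c;
#				t[k] = (uint64_t)acc;
#				c = (uint64_t)(acc >> 64);
#			}
#		}
#	}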
3343{
3344my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
3345
3346$code.=<<___;
3347	movq	%xmm2,$nptr
3348__bn_sqrx8x_reduction:
3349	xor	%eax,%eax		# initial top-most carry bit
3350	mov	32+8(%rsp),%rbx		# n0
3351	mov	48+8(%rsp),%rdx		# "%r8", 8*0($tptr)
3352	lea	-8*8($nptr,$num),%rcx	# end of n[]
3353	#lea	48+8(%rsp,$num,2),$tptr	# end of t[] buffer
3354	mov	%rcx, 0+8(%rsp)		# save end of n[]
3355	mov	$tptr,8+8(%rsp)		# save end of t[]
3356
3357	lea	48+8(%rsp),$tptr		# initial t[] window
3358	jmp	.Lsqrx8x_reduction_loop
3359
3360.align	32
3361.Lsqrx8x_reduction_loop:
3362	mov	8*1($tptr),%r9
3363	mov	8*2($tptr),%r10
3364	mov	8*3($tptr),%r11
3365	mov	8*4($tptr),%r12
3366	mov	%rdx,%r8
3367	imulq	%rbx,%rdx		# n0*a[i]
3368	mov	8*5($tptr),%r13
3369	mov	8*6($tptr),%r14
3370	mov	8*7($tptr),%r15
3371	mov	%rax,24+8(%rsp)		# store top-most carry bit
3372
3373	lea	8*8($tptr),$tptr
3374	xor	$carry,$carry		# cf=0,of=0
3375	mov	\$-8,%rcx
3376	jmp	.Lsqrx8x_reduce
3377
3378.align	32
3379.Lsqrx8x_reduce:
3380	mov	%r8, %rbx
3381	mulx	8*0($nptr),%rax,%r8	# n[0]
3382	adcx	%rbx,%rax		# discarded
3383	adox	%r9,%r8
3384
3385	mulx	8*1($nptr),%rbx,%r9	# n[1]
3386	adcx	%rbx,%r8
3387	adox	%r10,%r9
3388
3389	mulx	8*2($nptr),%rbx,%r10
3390	adcx	%rbx,%r9
3391	adox	%r11,%r10
3392
3393	mulx	8*3($nptr),%rbx,%r11
3394	adcx	%rbx,%r10
3395	adox	%r12,%r11
3396
3397	.byte	0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx	8*4($nptr),%rbx,%r12
3398	 mov	%rdx,%rax
3399	 mov	%r8,%rdx
3400	adcx	%rbx,%r11
3401	adox	%r13,%r12
3402
3403	 mulx	32+8(%rsp),%rbx,%rdx	# %rdx discarded
3404	 mov	%rax,%rdx
3405	 mov	%rax,64+48+8(%rsp,%rcx,8)	# put aside n0*a[i]
3406
3407	mulx	8*5($nptr),%rax,%r13
3408	adcx	%rax,%r12
3409	adox	%r14,%r13
3410
3411	mulx	8*6($nptr),%rax,%r14
3412	adcx	%rax,%r13
3413	adox	%r15,%r14
3414
3415	mulx	8*7($nptr),%rax,%r15
3416	 mov	%rbx,%rdx
3417	adcx	%rax,%r14
3418	adox	$carry,%r15		# $carry is 0
3419	adcx	$carry,%r15		# cf=0
3420
3421	.byte	0x67,0x67,0x67
3422	inc	%rcx			# of=0
3423	jnz	.Lsqrx8x_reduce
3424
3425	mov	$carry,%rax		# xor	%rax,%rax
3426	cmp	0+8(%rsp),$nptr		# end of n[]?
3427	jae	.Lsqrx8x_no_tail
3428
3429	mov	48+8(%rsp),%rdx		# pull n0*a[0]
3430	add	8*0($tptr),%r8
3431	lea	8*8($nptr),$nptr
3432	mov	\$-8,%rcx
3433	adcx	8*1($tptr),%r9
3434	adcx	8*2($tptr),%r10
3435	adc	8*3($tptr),%r11
3436	adc	8*4($tptr),%r12
3437	adc	8*5($tptr),%r13
3438	adc	8*6($tptr),%r14
3439	adc	8*7($tptr),%r15
3440	lea	8*8($tptr),$tptr
3441	sbb	%rax,%rax		# top carry
3442
3443	xor	$carry,$carry		# of=0, cf=0
3444	mov	%rax,16+8(%rsp)
3445	jmp	.Lsqrx8x_tail
3446
3447.align	32
3448.Lsqrx8x_tail:
3449	mov	%r8,%rbx
3450	mulx	8*0($nptr),%rax,%r8
3451	adcx	%rax,%rbx
3452	adox	%r9,%r8
3453
3454	mulx	8*1($nptr),%rax,%r9
3455	adcx	%rax,%r8
3456	adox	%r10,%r9
3457
3458	mulx	8*2($nptr),%rax,%r10
3459	adcx	%rax,%r9
3460	adox	%r11,%r10
3461
3462	mulx	8*3($nptr),%rax,%r11
3463	adcx	%rax,%r10
3464	adox	%r12,%r11
3465
3466	.byte	0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx	8*4($nptr),%rax,%r12
3467	adcx	%rax,%r11
3468	adox	%r13,%r12
3469
3470	mulx	8*5($nptr),%rax,%r13
3471	adcx	%rax,%r12
3472	adox	%r14,%r13
3473
3474	mulx	8*6($nptr),%rax,%r14
3475	adcx	%rax,%r13
3476	adox	%r15,%r14
3477
3478	mulx	8*7($nptr),%rax,%r15
3479	 mov	72+48+8(%rsp,%rcx,8),%rdx	# pull n0*a[i]
3480	adcx	%rax,%r14
3481	adox	$carry,%r15
3482	 mov	%rbx,($tptr,%rcx,8)	# save result
3483	 mov	%r8,%rbx
3484	adcx	$carry,%r15		# cf=0
3485
3486	inc	%rcx			# of=0
3487	jnz	.Lsqrx8x_tail
3488
3489	cmp	0+8(%rsp),$nptr		# end of n[]?
3490	jae	.Lsqrx8x_tail_done	# break out of loop
3491
3492	sub	16+8(%rsp),$carry	# mov 16(%rsp),%cf
3493	 mov	48+8(%rsp),%rdx		# pull n0*a[0]
3494	 lea	8*8($nptr),$nptr
3495	adc	8*0($tptr),%r8
3496	adc	8*1($tptr),%r9
3497	adc	8*2($tptr),%r10
3498	adc	8*3($tptr),%r11
3499	adc	8*4($tptr),%r12
3500	adc	8*5($tptr),%r13
3501	adc	8*6($tptr),%r14
3502	adc	8*7($tptr),%r15
3503	lea	8*8($tptr),$tptr
3504	sbb	%rax,%rax
3505	sub	\$8,%rcx		# mov	\$-8,%rcx
3506
3507	xor	$carry,$carry		# of=0, cf=0
3508	mov	%rax,16+8(%rsp)
3509	jmp	.Lsqrx8x_tail
3510
3511.align	32
3512.Lsqrx8x_tail_done:
3513	xor	%rax,%rax
3514	add	24+8(%rsp),%r8		# can this overflow?
3515	adc	\$0,%r9
3516	adc	\$0,%r10
3517	adc	\$0,%r11
3518	adc	\$0,%r12
3519	adc	\$0,%r13
3520	adc	\$0,%r14
3521	adc	\$0,%r15
3522	adc	\$0,%rax
3523
3524	sub	16+8(%rsp),$carry	# mov 16(%rsp),%cf
3525.Lsqrx8x_no_tail:			# %cf is 0 if jumped here
3526	adc	8*0($tptr),%r8
3527	 movq	%xmm3,%rcx
3528	adc	8*1($tptr),%r9
3529	 mov	8*7($nptr),$carry
3530	 movq	%xmm2,$nptr		# restore $nptr
3531	adc	8*2($tptr),%r10
3532	adc	8*3($tptr),%r11
3533	adc	8*4($tptr),%r12
3534	adc	8*5($tptr),%r13
3535	adc	8*6($tptr),%r14
3536	adc	8*7($tptr),%r15
3537	adc	\$0,%rax		# top-most carry
3538
3539	mov	32+8(%rsp),%rbx		# n0
3540	mov	8*8($tptr,%rcx),%rdx	# modulo-scheduled "%r8"
3541
3542	mov	%r8,8*0($tptr)		# store top 512 bits
3543	 lea	8*8($tptr),%r8		# borrow %r8
3544	mov	%r9,8*1($tptr)
3545	mov	%r10,8*2($tptr)
3546	mov	%r11,8*3($tptr)
3547	mov	%r12,8*4($tptr)
3548	mov	%r13,8*5($tptr)
3549	mov	%r14,8*6($tptr)
3550	mov	%r15,8*7($tptr)
3551
3552	lea	8*8($tptr,%rcx),$tptr	# start of current t[] window
3553	cmp	8+8(%rsp),%r8		# end of t[]?
3554	jb	.Lsqrx8x_reduction_loop
3555	ret
3556.cfi_endproc
3557.size	bn_sqrx8x_internal,.-bn_sqrx8x_internal
3558___
3559}
3560##############################################################
3561# Post-condition, 4x unrolled
3562#
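# That is, the branch-free "subtract the modulus if the result is not yet
# fully reduced" step.  Roughly the following C, where mask is 0 or
# all-ones, derived from the top-most borrow computed above rather than
# from a secret-dependent branch, and r, t, np, n are illustrative names:
#
#	uint64_t borrow = 0;
#	for (int i = 0; i < n; i++) {
#		unsigned __int128 d =
#			(unsigned __int128)t[i] - (np[i] & mask) - borrow;
#		r[i] = (uint64_t)d;
#		borrow = (uint64_t)(d >> 64) & 1;
#	}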
3563{
3564my ($rptr,$nptr)=("%rdx","%rbp");
3565$code.=<<___;
3566.align	32
3567__bn_postx4x_internal:
3568.cfi_startproc
3569	mov	8*0($nptr),%r12
3570	mov	%rcx,%r10		# -$num
3571	mov	%rcx,%r9		# -$num
3572	neg	%rax
3573	sar	\$3+2,%rcx
3574	#lea	48+8(%rsp,%r9),$tptr
3575	movq	%xmm1,$rptr		# restore $rptr
3576	movq	%xmm1,$aptr		# prepare for back-to-back call
3577	dec	%r12			# so that after 'not' we get -n[0]
3578	mov	8*1($nptr),%r13
3579	xor	%r8,%r8
3580	mov	8*2($nptr),%r14
3581	mov	8*3($nptr),%r15
3582	jmp	.Lsqrx4x_sub_entry
3583
3584.align	16
3585.Lsqrx4x_sub:
3586	mov	8*0($nptr),%r12
3587	mov	8*1($nptr),%r13
3588	mov	8*2($nptr),%r14
3589	mov	8*3($nptr),%r15
3590.Lsqrx4x_sub_entry:
3591	andn	%rax,%r12,%r12
3592	lea	8*4($nptr),$nptr
3593	andn	%rax,%r13,%r13
3594	andn	%rax,%r14,%r14
3595	andn	%rax,%r15,%r15
3596
3597	neg	%r8			# mov %r8,%cf
3598	adc	8*0($tptr),%r12
3599	adc	8*1($tptr),%r13
3600	adc	8*2($tptr),%r14
3601	adc	8*3($tptr),%r15
3602	mov	%r12,8*0($rptr)
3603	lea	8*4($tptr),$tptr
3604	mov	%r13,8*1($rptr)
3605	sbb	%r8,%r8			# mov %cf,%r8
3606	mov	%r14,8*2($rptr)
3607	mov	%r15,8*3($rptr)
3608	lea	8*4($rptr),$rptr
3609
3610	inc	%rcx
3611	jnz	.Lsqrx4x_sub
3612
3613	neg	%r9			# restore $num
3614
3615	ret
3616.cfi_endproc
3617.size	__bn_postx4x_internal,.-__bn_postx4x_internal
3618___
3619}
3620}}}
3621{
3622my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") : # Win64 order
3623				("%rdi","%esi","%rdx","%ecx");  # Unix order
3624my $out=$inp;
3625my $STRIDE=2**5*8;
3626my $N=$STRIDE/4;
3627
3628$code.=<<___;
3629.globl	bn_get_bits5
3630.type	bn_get_bits5,\@abi-omnipotent
3631.align	16
3632bn_get_bits5:
3633.cfi_startproc
3634	lea	0($inp),%r10
3635	lea	1($inp),%r11
3636	mov	$num,%ecx
3637	shr	\$4,$num
3638	and	\$15,%ecx
3639	lea	-8(%ecx),%eax
3640	cmp	\$11,%ecx
3641	cmova	%r11,%r10
3642	cmova	%eax,%ecx
3643	movzw	(%r10,$num,2),%eax
3644	shrl	%cl,%eax
3645	and	\$31,%eax
3646	ret
3647.cfi_endproc
3648.size	bn_get_bits5,.-bn_get_bits5
3649
3650.globl	bn_scatter5
3651.type	bn_scatter5,\@abi-omnipotent
3652.align	16
3653bn_scatter5:
3654.cfi_startproc
3655	cmp	\$0, $num
3656	jz	.Lscatter_epilogue
3657	lea	($tbl,$idx,8),$tbl
3658.Lscatter:
3659	mov	($inp),%rax
3660	lea	8($inp),$inp
3661	mov	%rax,($tbl)
3662	lea	32*8($tbl),$tbl
3663	sub	\$1,$num
3664	jnz	.Lscatter
3665.Lscatter_epilogue:
3666	ret
3667.cfi_endproc
3668.size	bn_scatter5,.-bn_scatter5
3669
3670.globl	bn_gather5
3671.type	bn_gather5,\@abi-omnipotent
3672.align	32
3673bn_gather5:
3674.LSEH_begin_bn_gather5:			# Win64 thing, but harmless in other cases
3675.cfi_startproc
3676	# I can't trust assembler to use specific encoding:-(
3677	.byte	0x4c,0x8d,0x14,0x24			#lea    (%rsp),%r10
3678	.byte	0x48,0x81,0xec,0x08,0x01,0x00,0x00	#sub	$0x108,%rsp
3679	lea	.Linc(%rip),%rax
3680	and	\$-16,%rsp		# shouldn't be formally required
3681
3682	movd	$idx,%xmm5
3683	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
3684	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
3685	lea	128($tbl),%r11		# size optimization
3686	lea	128(%rsp),%rax		# size optimization
3687
3688	pshufd	\$0,%xmm5,%xmm5		# broadcast $idx
3689	movdqa	%xmm1,%xmm4
3690	movdqa	%xmm1,%xmm2
3691___
3692########################################################################
3693# calculate mask by comparing 0..31 to $idx and save result to stack
3694#
3695for($i=0;$i<$STRIDE/16;$i+=4) {
3696$code.=<<___;
3697	paddd	%xmm0,%xmm1
3698	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
3699___
3700$code.=<<___	if ($i);
3701	movdqa	%xmm3,`16*($i-1)-128`(%rax)
3702___
3703$code.=<<___;
3704	movdqa	%xmm4,%xmm3
3705
3706	paddd	%xmm1,%xmm2
3707	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
3708	movdqa	%xmm0,`16*($i+0)-128`(%rax)
3709	movdqa	%xmm4,%xmm0
3710
3711	paddd	%xmm2,%xmm3
3712	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
3713	movdqa	%xmm1,`16*($i+1)-128`(%rax)
3714	movdqa	%xmm4,%xmm1
3715
3716	paddd	%xmm3,%xmm0
3717	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
3718	movdqa	%xmm2,`16*($i+2)-128`(%rax)
3719	movdqa	%xmm4,%xmm2
3720___
3721}
3722$code.=<<___;
3723	movdqa	%xmm3,`16*($i-1)-128`(%rax)
3724	jmp	.Lgather
3725
3726.align	32
3727.Lgather:
3728	pxor	%xmm4,%xmm4
3729	pxor	%xmm5,%xmm5
3730___
3731for($i=0;$i<$STRIDE/16;$i+=4) {
3732$code.=<<___;
3733	movdqa	`16*($i+0)-128`(%r11),%xmm0
3734	movdqa	`16*($i+1)-128`(%r11),%xmm1
3735	movdqa	`16*($i+2)-128`(%r11),%xmm2
3736	pand	`16*($i+0)-128`(%rax),%xmm0
3737	movdqa	`16*($i+3)-128`(%r11),%xmm3
3738	pand	`16*($i+1)-128`(%rax),%xmm1
3739	por	%xmm0,%xmm4
3740	pand	`16*($i+2)-128`(%rax),%xmm2
3741	por	%xmm1,%xmm5
3742	pand	`16*($i+3)-128`(%rax),%xmm3
3743	por	%xmm2,%xmm4
3744	por	%xmm3,%xmm5
3745___
3746}
3747$code.=<<___;
3748	por	%xmm5,%xmm4
3749	lea	$STRIDE(%r11),%r11
3750	pshufd	\$0x4e,%xmm4,%xmm0
3751	por	%xmm4,%xmm0
3752	movq	%xmm0,($out)		# m0=bp[0]
3753	lea	8($out),$out
3754	sub	\$1,$num
3755	jnz	.Lgather
3756
3757	lea	(%r10),%rsp
3758	ret
3759.LSEH_end_bn_gather5:
3760.cfi_endproc
3761.size	bn_gather5,.-bn_gather5
3762___
3763}
3764$code.=<<___;
3765.align	64
3766.Linc:
3767	.long	0,0, 1,1
3768	.long	2,2, 2,2
3769.asciz	"Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
3770___
3771
3772# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3773#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
3774if ($win64) {
3775$rec="%rcx";
3776$frame="%rdx";
3777$context="%r8";
3778$disp="%r9";
3779
3780$code.=<<___;
3781.extern	__imp_RtlVirtualUnwind
3782.type	mul_handler,\@abi-omnipotent
3783.align	16
3784mul_handler:
3785	push	%rsi
3786	push	%rdi
3787	push	%rbx
3788	push	%rbp
3789	push	%r12
3790	push	%r13
3791	push	%r14
3792	push	%r15
3793	pushfq
3794	sub	\$64,%rsp
3795
3796	mov	120($context),%rax	# pull context->Rax
3797	mov	248($context),%rbx	# pull context->Rip
3798
3799	mov	8($disp),%rsi		# disp->ImageBase
3800	mov	56($disp),%r11		# disp->HandlerData
3801
3802	mov	0(%r11),%r10d		# HandlerData[0]
3803	lea	(%rsi,%r10),%r10	# end of prologue label
3804	cmp	%r10,%rbx		# context->Rip<end of prologue label
3805	jb	.Lcommon_seh_tail
3806
3807	mov	4(%r11),%r10d		# HandlerData[1]
3808	lea	(%rsi,%r10),%r10	# beginning of body label
3809	cmp	%r10,%rbx		# context->Rip<body label
3810	jb	.Lcommon_pop_regs
3811
3812	mov	152($context),%rax	# pull context->Rsp
3813
3814	mov	8(%r11),%r10d		# HandlerData[2]
3815	lea	(%rsi,%r10),%r10	# epilogue label
3816	cmp	%r10,%rbx		# context->Rip>=epilogue label
3817	jae	.Lcommon_seh_tail
3818
3819	lea	.Lmul_epilogue(%rip),%r10
3820	cmp	%r10,%rbx
3821	ja	.Lbody_40
3822
3823	mov	192($context),%r10	# pull $num
3824	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
3825
3826	jmp	.Lcommon_pop_regs
3827
3828.Lbody_40:
3829	mov	40(%rax),%rax		# pull saved stack pointer
3830.Lcommon_pop_regs:
3831	mov	-8(%rax),%rbx
3832	mov	-16(%rax),%rbp
3833	mov	-24(%rax),%r12
3834	mov	-32(%rax),%r13
3835	mov	-40(%rax),%r14
3836	mov	-48(%rax),%r15
3837	mov	%rbx,144($context)	# restore context->Rbx
3838	mov	%rbp,160($context)	# restore context->Rbp
3839	mov	%r12,216($context)	# restore context->R12
3840	mov	%r13,224($context)	# restore context->R13
3841	mov	%r14,232($context)	# restore context->R14
3842	mov	%r15,240($context)	# restore context->R15
3843
3844.Lcommon_seh_tail:
3845	mov	8(%rax),%rdi
3846	mov	16(%rax),%rsi
3847	mov	%rax,152($context)	# restore context->Rsp
3848	mov	%rsi,168($context)	# restore context->Rsi
3849	mov	%rdi,176($context)	# restore context->Rdi
3850
3851	mov	40($disp),%rdi		# disp->ContextRecord
3852	mov	$context,%rsi		# context
3853	mov	\$154,%ecx		# sizeof(CONTEXT)
3854	.long	0xa548f3fc		# cld; rep movsq
3855
3856	mov	$disp,%rsi
3857	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
3858	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
3859	mov	0(%rsi),%r8		# arg3, disp->ControlPc
3860	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
3861	mov	40(%rsi),%r10		# disp->ContextRecord
3862	lea	56(%rsi),%r11		# &disp->HandlerData
3863	lea	24(%rsi),%r12		# &disp->EstablisherFrame
3864	mov	%r10,32(%rsp)		# arg5
3865	mov	%r11,40(%rsp)		# arg6
3866	mov	%r12,48(%rsp)		# arg7
3867	mov	%rcx,56(%rsp)		# arg8, (NULL)
3868	call	*__imp_RtlVirtualUnwind(%rip)
3869
3870	mov	\$1,%eax		# ExceptionContinueSearch
3871	add	\$64,%rsp
3872	popfq
3873	pop	%r15
3874	pop	%r14
3875	pop	%r13
3876	pop	%r12
3877	pop	%rbp
3878	pop	%rbx
3879	pop	%rdi
3880	pop	%rsi
3881	ret
3882.size	mul_handler,.-mul_handler
3883
3884.section	.pdata
3885.align	4
3886	.rva	.LSEH_begin_bn_mul_mont_gather5
3887	.rva	.LSEH_end_bn_mul_mont_gather5
3888	.rva	.LSEH_info_bn_mul_mont_gather5
3889
3890	.rva	.LSEH_begin_bn_mul4x_mont_gather5
3891	.rva	.LSEH_end_bn_mul4x_mont_gather5
3892	.rva	.LSEH_info_bn_mul4x_mont_gather5
3893
3894	.rva	.LSEH_begin_bn_power5
3895	.rva	.LSEH_end_bn_power5
3896	.rva	.LSEH_info_bn_power5
3897
3898	.rva	.LSEH_begin_bn_from_mont8x
3899	.rva	.LSEH_end_bn_from_mont8x
3900	.rva	.LSEH_info_bn_from_mont8x
3901___
3902$code.=<<___ if ($addx);
3903	.rva	.LSEH_begin_bn_mulx4x_mont_gather5
3904	.rva	.LSEH_end_bn_mulx4x_mont_gather5
3905	.rva	.LSEH_info_bn_mulx4x_mont_gather5
3906
3907	.rva	.LSEH_begin_bn_powerx5
3908	.rva	.LSEH_end_bn_powerx5
3909	.rva	.LSEH_info_bn_powerx5
3910___
3911$code.=<<___;
3912	.rva	.LSEH_begin_bn_gather5
3913	.rva	.LSEH_end_bn_gather5
3914	.rva	.LSEH_info_bn_gather5
3915
3916.section	.xdata
3917.align	8
3918.LSEH_info_bn_mul_mont_gather5:
3919	.byte	9,0,0,0
3920	.rva	mul_handler
3921	.rva	.Lmul_body,.Lmul_body,.Lmul_epilogue		# HandlerData[]
3922.align	8
3923.LSEH_info_bn_mul4x_mont_gather5:
3924	.byte	9,0,0,0
3925	.rva	mul_handler
3926	.rva	.Lmul4x_prologue,.Lmul4x_body,.Lmul4x_epilogue		# HandlerData[]
3927.align	8
3928.LSEH_info_bn_power5:
3929	.byte	9,0,0,0
3930	.rva	mul_handler
3931	.rva	.Lpower5_prologue,.Lpower5_body,.Lpower5_epilogue	# HandlerData[]
3932.align	8
3933.LSEH_info_bn_from_mont8x:
3934	.byte	9,0,0,0
3935	.rva	mul_handler
3936	.rva	.Lfrom_prologue,.Lfrom_body,.Lfrom_epilogue		# HandlerData[]
3937___
3938$code.=<<___ if ($addx);
3939.align	8
3940.LSEH_info_bn_mulx4x_mont_gather5:
3941	.byte	9,0,0,0
3942	.rva	mul_handler
3943	.rva	.Lmulx4x_prologue,.Lmulx4x_body,.Lmulx4x_epilogue	# HandlerData[]
3944.align	8
3945.LSEH_info_bn_powerx5:
3946	.byte	9,0,0,0
3947	.rva	mul_handler
3948	.rva	.Lpowerx5_prologue,.Lpowerx5_body,.Lpowerx5_epilogue	# HandlerData[]
3949___
3950$code.=<<___;
3951.align	8
3952.LSEH_info_bn_gather5:
3953	.byte	0x01,0x0b,0x03,0x0a
3954	.byte	0x0b,0x01,0x21,0x00	# sub	rsp,0x108
3955	.byte	0x04,0xa3,0x00,0x00	# lea	r10,(rsp)
3956.align	8
3957___
3958}
3959
3960$code =~ s/\`([^\`]*)\`/eval($1)/gem;
3961
3962print $code;
3963close STDOUT or die "error closing STDOUT: $!";
3964