1#! /usr/bin/env perl 2# Copyright 2011-2022 The OpenSSL Project Authors. All Rights Reserved. 3# 4# Licensed under the OpenSSL license (the "License"). You may not use 5# this file except in compliance with the License. You can obtain a copy 6# in the file LICENSE in the source distribution or at 7# https://www.openssl.org/source/license.html 8 9 10# ==================================================================== 11# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL 12# project. The module is, however, dual licensed under OpenSSL and 13# CRYPTOGAMS licenses depending on where you obtain it. For further 14# details see http://www.openssl.org/~appro/cryptogams/. 15# ==================================================================== 16 17# August 2011. 18# 19# Companion to x86_64-mont.pl that optimizes cache-timing attack 20# countermeasures. The subroutines are produced by replacing bp[i] 21# references in their x86_64-mont.pl counterparts with cache-neutral 22# references to powers table computed in BN_mod_exp_mont_consttime. 23# In addition subroutine that scatters elements of the powers table 24# is implemented, so that scatter-/gathering can be tuned without 25# bn_exp.c modifications. 26 27# August 2013. 28# 29# Add MULX/AD*X code paths and additional interfaces to optimize for 30# branch prediction unit. For input lengths that are multiples of 8 31# the np argument is not just modulus value, but one interleaved 32# with 0. This is to optimize post-condition... 33 34$flavour = shift; 35$output = shift; 36if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } 37 38$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); 39 40$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; 41( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or 42( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or 43die "can't locate x86_64-xlate.pl"; 44 45open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""; 46*STDOUT=*OUT; 47 48if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` 49 =~ /GNU assembler version ([2-9]\.[0-9]+)/) { 50 $addx = ($1>=2.23); 51} 52 53if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) && 54 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) { 55 $addx = ($1>=2.10); 56} 57 58if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) && 59 `ml64 2>&1` =~ /Version ([0-9]+)\./) { 60 $addx = ($1>=12); 61} 62 63if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|.*based on LLVM) ([0-9]+)\.([0-9]+)/) { 64 my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10 65 $addx = ($ver>=3.03); 66} 67 68# int bn_mul_mont_gather5( 69$rp="%rdi"; # BN_ULONG *rp, 70$ap="%rsi"; # const BN_ULONG *ap, 71$bp="%rdx"; # const BN_ULONG *bp, 72$np="%rcx"; # const BN_ULONG *np, 73$n0="%r8"; # const BN_ULONG *n0, 74$num="%r9"; # int num, 75 # int idx); # 0 to 2^5-1, "index" in $bp holding 76 # pre-computed powers of a', interlaced 77 # in such manner that b[0] is $bp[idx], 78 # b[1] is [2^5+idx], etc. 79$lo0="%r10"; 80$hi0="%r11"; 81$hi1="%r13"; 82$i="%r14"; 83$j="%r15"; 84$m0="%rbx"; 85$m1="%rbp"; 86 87$code=<<___; 88.text 89 90.extern OPENSSL_ia32cap_P 91 92.globl bn_mul_mont_gather5 93.type bn_mul_mont_gather5,\@function,6 94.align 64 95bn_mul_mont_gather5: 96.cfi_startproc 97 mov ${num}d,${num}d 98 mov %rsp,%rax 99.cfi_def_cfa_register %rax 100 test \$7,${num}d 101 jnz .Lmul_enter 102___ 103$code.=<<___ if ($addx); 104 mov OPENSSL_ia32cap_P+8(%rip),%r11d 105___ 106$code.=<<___; 107 jmp .Lmul4x_enter 108 109.align 16 110.Lmul_enter: 111 movd `($win64?56:8)`(%rsp),%xmm5 # load 7th argument 112 push %rbx 113.cfi_push %rbx 114 push %rbp 115.cfi_push %rbp 116 push %r12 117.cfi_push %r12 118 push %r13 119.cfi_push %r13 120 push %r14 121.cfi_push %r14 122 push %r15 123.cfi_push %r15 124 125 neg $num 126 mov %rsp,%r11 127 lea -280(%rsp,$num,8),%r10 # future alloca(8*(num+2)+256+8) 128 neg $num # restore $num 129 and \$-1024,%r10 # minimize TLB usage 130 131 # An OS-agnostic version of __chkstk. 132 # 133 # Some OSes (Windows) insist on stack being "wired" to 134 # physical memory in strictly sequential manner, i.e. if stack 135 # allocation spans two pages, then reference to farmost one can 136 # be punishable by SEGV. But page walking can do good even on 137 # other OSes, because it guarantees that villain thread hits 138 # the guard page before it can make damage to innocent one... 139 sub %r10,%r11 140 and \$-4096,%r11 141 lea (%r10,%r11),%rsp 142 mov (%rsp),%r11 143 cmp %r10,%rsp 144 ja .Lmul_page_walk 145 jmp .Lmul_page_walk_done 146 147.Lmul_page_walk: 148 lea -4096(%rsp),%rsp 149 mov (%rsp),%r11 150 cmp %r10,%rsp 151 ja .Lmul_page_walk 152.Lmul_page_walk_done: 153 154 lea .Linc(%rip),%r10 155 mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp 156.cfi_cfa_expression %rsp+8,$num,8,mul,plus,deref,+8 157.Lmul_body: 158 159 lea 128($bp),%r12 # reassign $bp (+size optimization) 160___ 161 $bp="%r12"; 162 $STRIDE=2**5*8; # 5 is "window size" 163 $N=$STRIDE/4; # should match cache line size 164$code.=<<___; 165 movdqa 0(%r10),%xmm0 # 00000001000000010000000000000000 166 movdqa 16(%r10),%xmm1 # 00000002000000020000000200000002 167 lea 24-112(%rsp,$num,8),%r10# place the mask after tp[num+3] (+ICache optimization) 168 and \$-16,%r10 169 170 pshufd \$0,%xmm5,%xmm5 # broadcast index 171 movdqa %xmm1,%xmm4 172 movdqa %xmm1,%xmm2 173___ 174######################################################################## 175# calculate mask by comparing 0..31 to index and save result to stack 176# 177$code.=<<___; 178 paddd %xmm0,%xmm1 179 pcmpeqd %xmm5,%xmm0 # compare to 1,0 180 .byte 0x67 181 movdqa %xmm4,%xmm3 182___ 183for($k=0;$k<$STRIDE/16-4;$k+=4) { 184$code.=<<___; 185 paddd %xmm1,%xmm2 186 pcmpeqd %xmm5,%xmm1 # compare to 3,2 187 movdqa %xmm0,`16*($k+0)+112`(%r10) 188 movdqa %xmm4,%xmm0 189 190 paddd %xmm2,%xmm3 191 pcmpeqd %xmm5,%xmm2 # compare to 5,4 192 movdqa %xmm1,`16*($k+1)+112`(%r10) 193 movdqa %xmm4,%xmm1 194 195 paddd %xmm3,%xmm0 196 pcmpeqd %xmm5,%xmm3 # compare to 7,6 197 movdqa %xmm2,`16*($k+2)+112`(%r10) 198 movdqa %xmm4,%xmm2 199 200 paddd %xmm0,%xmm1 201 pcmpeqd %xmm5,%xmm0 202 movdqa %xmm3,`16*($k+3)+112`(%r10) 203 movdqa %xmm4,%xmm3 204___ 205} 206$code.=<<___; # last iteration can be optimized 207 paddd %xmm1,%xmm2 208 pcmpeqd %xmm5,%xmm1 209 movdqa %xmm0,`16*($k+0)+112`(%r10) 210 211 paddd %xmm2,%xmm3 212 .byte 0x67 213 pcmpeqd %xmm5,%xmm2 214 movdqa %xmm1,`16*($k+1)+112`(%r10) 215 216 pcmpeqd %xmm5,%xmm3 217 movdqa %xmm2,`16*($k+2)+112`(%r10) 218 pand `16*($k+0)-128`($bp),%xmm0 # while it's still in register 219 220 pand `16*($k+1)-128`($bp),%xmm1 221 pand `16*($k+2)-128`($bp),%xmm2 222 movdqa %xmm3,`16*($k+3)+112`(%r10) 223 pand `16*($k+3)-128`($bp),%xmm3 224 por %xmm2,%xmm0 225 por %xmm3,%xmm1 226___ 227for($k=0;$k<$STRIDE/16-4;$k+=4) { 228$code.=<<___; 229 movdqa `16*($k+0)-128`($bp),%xmm4 230 movdqa `16*($k+1)-128`($bp),%xmm5 231 movdqa `16*($k+2)-128`($bp),%xmm2 232 pand `16*($k+0)+112`(%r10),%xmm4 233 movdqa `16*($k+3)-128`($bp),%xmm3 234 pand `16*($k+1)+112`(%r10),%xmm5 235 por %xmm4,%xmm0 236 pand `16*($k+2)+112`(%r10),%xmm2 237 por %xmm5,%xmm1 238 pand `16*($k+3)+112`(%r10),%xmm3 239 por %xmm2,%xmm0 240 por %xmm3,%xmm1 241___ 242} 243$code.=<<___; 244 por %xmm1,%xmm0 245 pshufd \$0x4e,%xmm0,%xmm1 246 por %xmm1,%xmm0 247 lea $STRIDE($bp),$bp 248 movq %xmm0,$m0 # m0=bp[0] 249 250 mov ($n0),$n0 # pull n0[0] value 251 mov ($ap),%rax 252 253 xor $i,$i # i=0 254 xor $j,$j # j=0 255 256 mov $n0,$m1 257 mulq $m0 # ap[0]*bp[0] 258 mov %rax,$lo0 259 mov ($np),%rax 260 261 imulq $lo0,$m1 # "tp[0]"*n0 262 mov %rdx,$hi0 263 264 mulq $m1 # np[0]*m1 265 add %rax,$lo0 # discarded 266 mov 8($ap),%rax 267 adc \$0,%rdx 268 mov %rdx,$hi1 269 270 lea 1($j),$j # j++ 271 jmp .L1st_enter 272 273.align 16 274.L1st: 275 add %rax,$hi1 276 mov ($ap,$j,8),%rax 277 adc \$0,%rdx 278 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0] 279 mov $lo0,$hi0 280 adc \$0,%rdx 281 mov $hi1,-16(%rsp,$j,8) # tp[j-1] 282 mov %rdx,$hi1 283 284.L1st_enter: 285 mulq $m0 # ap[j]*bp[0] 286 add %rax,$hi0 287 mov ($np,$j,8),%rax 288 adc \$0,%rdx 289 lea 1($j),$j # j++ 290 mov %rdx,$lo0 291 292 mulq $m1 # np[j]*m1 293 cmp $num,$j 294 jne .L1st # note that upon exit $j==$num, so 295 # they can be used interchangeably 296 297 add %rax,$hi1 298 adc \$0,%rdx 299 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0] 300 adc \$0,%rdx 301 mov $hi1,-16(%rsp,$num,8) # tp[num-1] 302 mov %rdx,$hi1 303 mov $lo0,$hi0 304 305 xor %rdx,%rdx 306 add $hi0,$hi1 307 adc \$0,%rdx 308 mov $hi1,-8(%rsp,$num,8) 309 mov %rdx,(%rsp,$num,8) # store upmost overflow bit 310 311 lea 1($i),$i # i++ 312 jmp .Louter 313.align 16 314.Louter: 315 lea 24+128(%rsp,$num,8),%rdx # where 256-byte mask is (+size optimization) 316 and \$-16,%rdx 317 pxor %xmm4,%xmm4 318 pxor %xmm5,%xmm5 319___ 320for($k=0;$k<$STRIDE/16;$k+=4) { 321$code.=<<___; 322 movdqa `16*($k+0)-128`($bp),%xmm0 323 movdqa `16*($k+1)-128`($bp),%xmm1 324 movdqa `16*($k+2)-128`($bp),%xmm2 325 movdqa `16*($k+3)-128`($bp),%xmm3 326 pand `16*($k+0)-128`(%rdx),%xmm0 327 pand `16*($k+1)-128`(%rdx),%xmm1 328 por %xmm0,%xmm4 329 pand `16*($k+2)-128`(%rdx),%xmm2 330 por %xmm1,%xmm5 331 pand `16*($k+3)-128`(%rdx),%xmm3 332 por %xmm2,%xmm4 333 por %xmm3,%xmm5 334___ 335} 336$code.=<<___; 337 por %xmm5,%xmm4 338 pshufd \$0x4e,%xmm4,%xmm0 339 por %xmm4,%xmm0 340 lea $STRIDE($bp),$bp 341 342 mov ($ap),%rax # ap[0] 343 movq %xmm0,$m0 # m0=bp[i] 344 345 xor $j,$j # j=0 346 mov $n0,$m1 347 mov (%rsp),$lo0 348 349 mulq $m0 # ap[0]*bp[i] 350 add %rax,$lo0 # ap[0]*bp[i]+tp[0] 351 mov ($np),%rax 352 adc \$0,%rdx 353 354 imulq $lo0,$m1 # tp[0]*n0 355 mov %rdx,$hi0 356 357 mulq $m1 # np[0]*m1 358 add %rax,$lo0 # discarded 359 mov 8($ap),%rax 360 adc \$0,%rdx 361 mov 8(%rsp),$lo0 # tp[1] 362 mov %rdx,$hi1 363 364 lea 1($j),$j # j++ 365 jmp .Linner_enter 366 367.align 16 368.Linner: 369 add %rax,$hi1 370 mov ($ap,$j,8),%rax 371 adc \$0,%rdx 372 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j] 373 mov (%rsp,$j,8),$lo0 374 adc \$0,%rdx 375 mov $hi1,-16(%rsp,$j,8) # tp[j-1] 376 mov %rdx,$hi1 377 378.Linner_enter: 379 mulq $m0 # ap[j]*bp[i] 380 add %rax,$hi0 381 mov ($np,$j,8),%rax 382 adc \$0,%rdx 383 add $hi0,$lo0 # ap[j]*bp[i]+tp[j] 384 mov %rdx,$hi0 385 adc \$0,$hi0 386 lea 1($j),$j # j++ 387 388 mulq $m1 # np[j]*m1 389 cmp $num,$j 390 jne .Linner # note that upon exit $j==$num, so 391 # they can be used interchangeably 392 add %rax,$hi1 393 adc \$0,%rdx 394 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j] 395 mov (%rsp,$num,8),$lo0 396 adc \$0,%rdx 397 mov $hi1,-16(%rsp,$num,8) # tp[num-1] 398 mov %rdx,$hi1 399 400 xor %rdx,%rdx 401 add $hi0,$hi1 402 adc \$0,%rdx 403 add $lo0,$hi1 # pull upmost overflow bit 404 adc \$0,%rdx 405 mov $hi1,-8(%rsp,$num,8) 406 mov %rdx,(%rsp,$num,8) # store upmost overflow bit 407 408 lea 1($i),$i # i++ 409 cmp $num,$i 410 jb .Louter 411 412 xor $i,$i # i=0 and clear CF! 413 mov (%rsp),%rax # tp[0] 414 lea (%rsp),$ap # borrow ap for tp 415 mov $num,$j # j=num 416 jmp .Lsub 417.align 16 418.Lsub: sbb ($np,$i,8),%rax 419 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i] 420 mov 8($ap,$i,8),%rax # tp[i+1] 421 lea 1($i),$i # i++ 422 dec $j # doesn't affect CF! 423 jnz .Lsub 424 425 sbb \$0,%rax # handle upmost overflow bit 426 mov \$-1,%rbx 427 xor %rax,%rbx 428 xor $i,$i 429 mov $num,$j # j=num 430 431.Lcopy: # conditional copy 432 mov ($rp,$i,8),%rcx 433 mov (%rsp,$i,8),%rdx 434 and %rbx,%rcx 435 and %rax,%rdx 436 mov $i,(%rsp,$i,8) # zap temporary vector 437 or %rcx,%rdx 438 mov %rdx,($rp,$i,8) # rp[i]=tp[i] 439 lea 1($i),$i 440 sub \$1,$j 441 jnz .Lcopy 442 443 mov 8(%rsp,$num,8),%rsi # restore %rsp 444.cfi_def_cfa %rsi,8 445 mov \$1,%rax 446 447 mov -48(%rsi),%r15 448.cfi_restore %r15 449 mov -40(%rsi),%r14 450.cfi_restore %r14 451 mov -32(%rsi),%r13 452.cfi_restore %r13 453 mov -24(%rsi),%r12 454.cfi_restore %r12 455 mov -16(%rsi),%rbp 456.cfi_restore %rbp 457 mov -8(%rsi),%rbx 458.cfi_restore %rbx 459 lea (%rsi),%rsp 460.cfi_def_cfa_register %rsp 461.Lmul_epilogue: 462 ret 463.cfi_endproc 464.size bn_mul_mont_gather5,.-bn_mul_mont_gather5 465___ 466{{{ 467my @A=("%r10","%r11"); 468my @N=("%r13","%rdi"); 469$code.=<<___; 470.type bn_mul4x_mont_gather5,\@function,6 471.align 32 472bn_mul4x_mont_gather5: 473.cfi_startproc 474 .byte 0x67 475 mov %rsp,%rax 476.cfi_def_cfa_register %rax 477.Lmul4x_enter: 478___ 479$code.=<<___ if ($addx); 480 and \$0x80108,%r11d 481 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1 482 je .Lmulx4x_enter 483___ 484$code.=<<___; 485 push %rbx 486.cfi_push %rbx 487 push %rbp 488.cfi_push %rbp 489 push %r12 490.cfi_push %r12 491 push %r13 492.cfi_push %r13 493 push %r14 494.cfi_push %r14 495 push %r15 496.cfi_push %r15 497.Lmul4x_prologue: 498 499 .byte 0x67 500 shl \$3,${num}d # convert $num to bytes 501 lea ($num,$num,2),%r10 # 3*$num in bytes 502 neg $num # -$num 503 504 ############################################################## 505 # Ensure that stack frame doesn't alias with $rptr+3*$num 506 # modulo 4096, which covers ret[num], am[num] and n[num] 507 # (see bn_exp.c). This is done to allow memory disambiguation 508 # logic do its magic. [Extra [num] is allocated in order 509 # to align with bn_power5's frame, which is cleansed after 510 # completing exponentiation. Extra 256 bytes is for power mask 511 # calculated from 7th argument, the index.] 512 # 513 lea -320(%rsp,$num,2),%r11 514 mov %rsp,%rbp 515 sub $rp,%r11 516 and \$4095,%r11 517 cmp %r11,%r10 518 jb .Lmul4xsp_alt 519 sub %r11,%rbp # align with $rp 520 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256) 521 jmp .Lmul4xsp_done 522 523.align 32 524.Lmul4xsp_alt: 525 lea 4096-320(,$num,2),%r10 526 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256) 527 sub %r10,%r11 528 mov \$0,%r10 529 cmovc %r10,%r11 530 sub %r11,%rbp 531.Lmul4xsp_done: 532 and \$-64,%rbp 533 mov %rsp,%r11 534 sub %rbp,%r11 535 and \$-4096,%r11 536 lea (%rbp,%r11),%rsp 537 mov (%rsp),%r10 538 cmp %rbp,%rsp 539 ja .Lmul4x_page_walk 540 jmp .Lmul4x_page_walk_done 541 542.Lmul4x_page_walk: 543 lea -4096(%rsp),%rsp 544 mov (%rsp),%r10 545 cmp %rbp,%rsp 546 ja .Lmul4x_page_walk 547.Lmul4x_page_walk_done: 548 549 neg $num 550 551 mov %rax,40(%rsp) 552.cfi_cfa_expression %rsp+40,deref,+8 553.Lmul4x_body: 554 555 call mul4x_internal 556 557 mov 40(%rsp),%rsi # restore %rsp 558.cfi_def_cfa %rsi,8 559 mov \$1,%rax 560 561 mov -48(%rsi),%r15 562.cfi_restore %r15 563 mov -40(%rsi),%r14 564.cfi_restore %r14 565 mov -32(%rsi),%r13 566.cfi_restore %r13 567 mov -24(%rsi),%r12 568.cfi_restore %r12 569 mov -16(%rsi),%rbp 570.cfi_restore %rbp 571 mov -8(%rsi),%rbx 572.cfi_restore %rbx 573 lea (%rsi),%rsp 574.cfi_def_cfa_register %rsp 575.Lmul4x_epilogue: 576 ret 577.cfi_endproc 578.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5 579 580.type mul4x_internal,\@abi-omnipotent 581.align 32 582mul4x_internal: 583.cfi_startproc 584 shl \$5,$num # $num was in bytes 585 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument, index 586 lea .Linc(%rip),%rax 587 lea 128(%rdx,$num),%r13 # end of powers table (+size optimization) 588 shr \$5,$num # restore $num 589___ 590 $bp="%r12"; 591 $STRIDE=2**5*8; # 5 is "window size" 592 $N=$STRIDE/4; # should match cache line size 593 $tp=$i; 594$code.=<<___; 595 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000 596 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002 597 lea 88-112(%rsp,$num),%r10 # place the mask after tp[num+1] (+ICache optimization) 598 lea 128(%rdx),$bp # size optimization 599 600 pshufd \$0,%xmm5,%xmm5 # broadcast index 601 movdqa %xmm1,%xmm4 602 .byte 0x67,0x67 603 movdqa %xmm1,%xmm2 604___ 605######################################################################## 606# calculate mask by comparing 0..31 to index and save result to stack 607# 608$code.=<<___; 609 paddd %xmm0,%xmm1 610 pcmpeqd %xmm5,%xmm0 # compare to 1,0 611 .byte 0x67 612 movdqa %xmm4,%xmm3 613___ 614for($i=0;$i<$STRIDE/16-4;$i+=4) { 615$code.=<<___; 616 paddd %xmm1,%xmm2 617 pcmpeqd %xmm5,%xmm1 # compare to 3,2 618 movdqa %xmm0,`16*($i+0)+112`(%r10) 619 movdqa %xmm4,%xmm0 620 621 paddd %xmm2,%xmm3 622 pcmpeqd %xmm5,%xmm2 # compare to 5,4 623 movdqa %xmm1,`16*($i+1)+112`(%r10) 624 movdqa %xmm4,%xmm1 625 626 paddd %xmm3,%xmm0 627 pcmpeqd %xmm5,%xmm3 # compare to 7,6 628 movdqa %xmm2,`16*($i+2)+112`(%r10) 629 movdqa %xmm4,%xmm2 630 631 paddd %xmm0,%xmm1 632 pcmpeqd %xmm5,%xmm0 633 movdqa %xmm3,`16*($i+3)+112`(%r10) 634 movdqa %xmm4,%xmm3 635___ 636} 637$code.=<<___; # last iteration can be optimized 638 paddd %xmm1,%xmm2 639 pcmpeqd %xmm5,%xmm1 640 movdqa %xmm0,`16*($i+0)+112`(%r10) 641 642 paddd %xmm2,%xmm3 643 .byte 0x67 644 pcmpeqd %xmm5,%xmm2 645 movdqa %xmm1,`16*($i+1)+112`(%r10) 646 647 pcmpeqd %xmm5,%xmm3 648 movdqa %xmm2,`16*($i+2)+112`(%r10) 649 pand `16*($i+0)-128`($bp),%xmm0 # while it's still in register 650 651 pand `16*($i+1)-128`($bp),%xmm1 652 pand `16*($i+2)-128`($bp),%xmm2 653 movdqa %xmm3,`16*($i+3)+112`(%r10) 654 pand `16*($i+3)-128`($bp),%xmm3 655 por %xmm2,%xmm0 656 por %xmm3,%xmm1 657___ 658for($i=0;$i<$STRIDE/16-4;$i+=4) { 659$code.=<<___; 660 movdqa `16*($i+0)-128`($bp),%xmm4 661 movdqa `16*($i+1)-128`($bp),%xmm5 662 movdqa `16*($i+2)-128`($bp),%xmm2 663 pand `16*($i+0)+112`(%r10),%xmm4 664 movdqa `16*($i+3)-128`($bp),%xmm3 665 pand `16*($i+1)+112`(%r10),%xmm5 666 por %xmm4,%xmm0 667 pand `16*($i+2)+112`(%r10),%xmm2 668 por %xmm5,%xmm1 669 pand `16*($i+3)+112`(%r10),%xmm3 670 por %xmm2,%xmm0 671 por %xmm3,%xmm1 672___ 673} 674$code.=<<___; 675 por %xmm1,%xmm0 676 pshufd \$0x4e,%xmm0,%xmm1 677 por %xmm1,%xmm0 678 lea $STRIDE($bp),$bp 679 movq %xmm0,$m0 # m0=bp[0] 680 681 mov %r13,16+8(%rsp) # save end of b[num] 682 mov $rp, 56+8(%rsp) # save $rp 683 684 mov ($n0),$n0 # pull n0[0] value 685 mov ($ap),%rax 686 lea ($ap,$num),$ap # end of a[num] 687 neg $num 688 689 mov $n0,$m1 690 mulq $m0 # ap[0]*bp[0] 691 mov %rax,$A[0] 692 mov ($np),%rax 693 694 imulq $A[0],$m1 # "tp[0]"*n0 695 lea 64+8(%rsp),$tp 696 mov %rdx,$A[1] 697 698 mulq $m1 # np[0]*m1 699 add %rax,$A[0] # discarded 700 mov 8($ap,$num),%rax 701 adc \$0,%rdx 702 mov %rdx,$N[1] 703 704 mulq $m0 705 add %rax,$A[1] 706 mov 8*1($np),%rax 707 adc \$0,%rdx 708 mov %rdx,$A[0] 709 710 mulq $m1 711 add %rax,$N[1] 712 mov 16($ap,$num),%rax 713 adc \$0,%rdx 714 add $A[1],$N[1] 715 lea 4*8($num),$j # j=4 716 lea 8*4($np),$np 717 adc \$0,%rdx 718 mov $N[1],($tp) 719 mov %rdx,$N[0] 720 jmp .L1st4x 721 722.align 32 723.L1st4x: 724 mulq $m0 # ap[j]*bp[0] 725 add %rax,$A[0] 726 mov -8*2($np),%rax 727 lea 32($tp),$tp 728 adc \$0,%rdx 729 mov %rdx,$A[1] 730 731 mulq $m1 # np[j]*m1 732 add %rax,$N[0] 733 mov -8($ap,$j),%rax 734 adc \$0,%rdx 735 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0] 736 adc \$0,%rdx 737 mov $N[0],-24($tp) # tp[j-1] 738 mov %rdx,$N[1] 739 740 mulq $m0 # ap[j]*bp[0] 741 add %rax,$A[1] 742 mov -8*1($np),%rax 743 adc \$0,%rdx 744 mov %rdx,$A[0] 745 746 mulq $m1 # np[j]*m1 747 add %rax,$N[1] 748 mov ($ap,$j),%rax 749 adc \$0,%rdx 750 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0] 751 adc \$0,%rdx 752 mov $N[1],-16($tp) # tp[j-1] 753 mov %rdx,$N[0] 754 755 mulq $m0 # ap[j]*bp[0] 756 add %rax,$A[0] 757 mov 8*0($np),%rax 758 adc \$0,%rdx 759 mov %rdx,$A[1] 760 761 mulq $m1 # np[j]*m1 762 add %rax,$N[0] 763 mov 8($ap,$j),%rax 764 adc \$0,%rdx 765 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0] 766 adc \$0,%rdx 767 mov $N[0],-8($tp) # tp[j-1] 768 mov %rdx,$N[1] 769 770 mulq $m0 # ap[j]*bp[0] 771 add %rax,$A[1] 772 mov 8*1($np),%rax 773 adc \$0,%rdx 774 mov %rdx,$A[0] 775 776 mulq $m1 # np[j]*m1 777 add %rax,$N[1] 778 mov 16($ap,$j),%rax 779 adc \$0,%rdx 780 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0] 781 lea 8*4($np),$np 782 adc \$0,%rdx 783 mov $N[1],($tp) # tp[j-1] 784 mov %rdx,$N[0] 785 786 add \$32,$j # j+=4 787 jnz .L1st4x 788 789 mulq $m0 # ap[j]*bp[0] 790 add %rax,$A[0] 791 mov -8*2($np),%rax 792 lea 32($tp),$tp 793 adc \$0,%rdx 794 mov %rdx,$A[1] 795 796 mulq $m1 # np[j]*m1 797 add %rax,$N[0] 798 mov -8($ap),%rax 799 adc \$0,%rdx 800 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0] 801 adc \$0,%rdx 802 mov $N[0],-24($tp) # tp[j-1] 803 mov %rdx,$N[1] 804 805 mulq $m0 # ap[j]*bp[0] 806 add %rax,$A[1] 807 mov -8*1($np),%rax 808 adc \$0,%rdx 809 mov %rdx,$A[0] 810 811 mulq $m1 # np[j]*m1 812 add %rax,$N[1] 813 mov ($ap,$num),%rax # ap[0] 814 adc \$0,%rdx 815 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0] 816 adc \$0,%rdx 817 mov $N[1],-16($tp) # tp[j-1] 818 mov %rdx,$N[0] 819 820 lea ($np,$num),$np # rewind $np 821 822 xor $N[1],$N[1] 823 add $A[0],$N[0] 824 adc \$0,$N[1] 825 mov $N[0],-8($tp) 826 827 jmp .Louter4x 828 829.align 32 830.Louter4x: 831 lea 16+128($tp),%rdx # where 256-byte mask is (+size optimization) 832 pxor %xmm4,%xmm4 833 pxor %xmm5,%xmm5 834___ 835for($i=0;$i<$STRIDE/16;$i+=4) { 836$code.=<<___; 837 movdqa `16*($i+0)-128`($bp),%xmm0 838 movdqa `16*($i+1)-128`($bp),%xmm1 839 movdqa `16*($i+2)-128`($bp),%xmm2 840 movdqa `16*($i+3)-128`($bp),%xmm3 841 pand `16*($i+0)-128`(%rdx),%xmm0 842 pand `16*($i+1)-128`(%rdx),%xmm1 843 por %xmm0,%xmm4 844 pand `16*($i+2)-128`(%rdx),%xmm2 845 por %xmm1,%xmm5 846 pand `16*($i+3)-128`(%rdx),%xmm3 847 por %xmm2,%xmm4 848 por %xmm3,%xmm5 849___ 850} 851$code.=<<___; 852 por %xmm5,%xmm4 853 pshufd \$0x4e,%xmm4,%xmm0 854 por %xmm4,%xmm0 855 lea $STRIDE($bp),$bp 856 movq %xmm0,$m0 # m0=bp[i] 857 858 mov ($tp,$num),$A[0] 859 mov $n0,$m1 860 mulq $m0 # ap[0]*bp[i] 861 add %rax,$A[0] # ap[0]*bp[i]+tp[0] 862 mov ($np),%rax 863 adc \$0,%rdx 864 865 imulq $A[0],$m1 # tp[0]*n0 866 mov %rdx,$A[1] 867 mov $N[1],($tp) # store upmost overflow bit 868 869 lea ($tp,$num),$tp # rewind $tp 870 871 mulq $m1 # np[0]*m1 872 add %rax,$A[0] # "$N[0]", discarded 873 mov 8($ap,$num),%rax 874 adc \$0,%rdx 875 mov %rdx,$N[1] 876 877 mulq $m0 # ap[j]*bp[i] 878 add %rax,$A[1] 879 mov 8*1($np),%rax 880 adc \$0,%rdx 881 add 8($tp),$A[1] # +tp[1] 882 adc \$0,%rdx 883 mov %rdx,$A[0] 884 885 mulq $m1 # np[j]*m1 886 add %rax,$N[1] 887 mov 16($ap,$num),%rax 888 adc \$0,%rdx 889 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j] 890 lea 4*8($num),$j # j=4 891 lea 8*4($np),$np 892 adc \$0,%rdx 893 mov %rdx,$N[0] 894 jmp .Linner4x 895 896.align 32 897.Linner4x: 898 mulq $m0 # ap[j]*bp[i] 899 add %rax,$A[0] 900 mov -8*2($np),%rax 901 adc \$0,%rdx 902 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j] 903 lea 32($tp),$tp 904 adc \$0,%rdx 905 mov %rdx,$A[1] 906 907 mulq $m1 # np[j]*m1 908 add %rax,$N[0] 909 mov -8($ap,$j),%rax 910 adc \$0,%rdx 911 add $A[0],$N[0] 912 adc \$0,%rdx 913 mov $N[1],-32($tp) # tp[j-1] 914 mov %rdx,$N[1] 915 916 mulq $m0 # ap[j]*bp[i] 917 add %rax,$A[1] 918 mov -8*1($np),%rax 919 adc \$0,%rdx 920 add -8($tp),$A[1] 921 adc \$0,%rdx 922 mov %rdx,$A[0] 923 924 mulq $m1 # np[j]*m1 925 add %rax,$N[1] 926 mov ($ap,$j),%rax 927 adc \$0,%rdx 928 add $A[1],$N[1] 929 adc \$0,%rdx 930 mov $N[0],-24($tp) # tp[j-1] 931 mov %rdx,$N[0] 932 933 mulq $m0 # ap[j]*bp[i] 934 add %rax,$A[0] 935 mov 8*0($np),%rax 936 adc \$0,%rdx 937 add ($tp),$A[0] # ap[j]*bp[i]+tp[j] 938 adc \$0,%rdx 939 mov %rdx,$A[1] 940 941 mulq $m1 # np[j]*m1 942 add %rax,$N[0] 943 mov 8($ap,$j),%rax 944 adc \$0,%rdx 945 add $A[0],$N[0] 946 adc \$0,%rdx 947 mov $N[1],-16($tp) # tp[j-1] 948 mov %rdx,$N[1] 949 950 mulq $m0 # ap[j]*bp[i] 951 add %rax,$A[1] 952 mov 8*1($np),%rax 953 adc \$0,%rdx 954 add 8($tp),$A[1] 955 adc \$0,%rdx 956 mov %rdx,$A[0] 957 958 mulq $m1 # np[j]*m1 959 add %rax,$N[1] 960 mov 16($ap,$j),%rax 961 adc \$0,%rdx 962 add $A[1],$N[1] 963 lea 8*4($np),$np 964 adc \$0,%rdx 965 mov $N[0],-8($tp) # tp[j-1] 966 mov %rdx,$N[0] 967 968 add \$32,$j # j+=4 969 jnz .Linner4x 970 971 mulq $m0 # ap[j]*bp[i] 972 add %rax,$A[0] 973 mov -8*2($np),%rax 974 adc \$0,%rdx 975 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j] 976 lea 32($tp),$tp 977 adc \$0,%rdx 978 mov %rdx,$A[1] 979 980 mulq $m1 # np[j]*m1 981 add %rax,$N[0] 982 mov -8($ap),%rax 983 adc \$0,%rdx 984 add $A[0],$N[0] 985 adc \$0,%rdx 986 mov $N[1],-32($tp) # tp[j-1] 987 mov %rdx,$N[1] 988 989 mulq $m0 # ap[j]*bp[i] 990 add %rax,$A[1] 991 mov $m1,%rax 992 mov -8*1($np),$m1 993 adc \$0,%rdx 994 add -8($tp),$A[1] 995 adc \$0,%rdx 996 mov %rdx,$A[0] 997 998 mulq $m1 # np[j]*m1 999 add %rax,$N[1] 1000 mov ($ap,$num),%rax # ap[0] 1001 adc \$0,%rdx 1002 add $A[1],$N[1] 1003 adc \$0,%rdx 1004 mov $N[0],-24($tp) # tp[j-1] 1005 mov %rdx,$N[0] 1006 1007 mov $N[1],-16($tp) # tp[j-1] 1008 lea ($np,$num),$np # rewind $np 1009 1010 xor $N[1],$N[1] 1011 add $A[0],$N[0] 1012 adc \$0,$N[1] 1013 add ($tp),$N[0] # pull upmost overflow bit 1014 adc \$0,$N[1] # upmost overflow bit 1015 mov $N[0],-8($tp) 1016 1017 cmp 16+8(%rsp),$bp 1018 jb .Louter4x 1019___ 1020if (1) { 1021$code.=<<___; 1022 xor %rax,%rax 1023 sub $N[0],$m1 # compare top-most words 1024 adc $j,$j # $j is zero 1025 or $j,$N[1] 1026 sub $N[1],%rax # %rax=-$N[1] 1027 lea ($tp,$num),%rbx # tptr in .sqr4x_sub 1028 mov ($np),%r12 1029 lea ($np),%rbp # nptr in .sqr4x_sub 1030 mov %r9,%rcx 1031 sar \$3+2,%rcx 1032 mov 56+8(%rsp),%rdi # rptr in .sqr4x_sub 1033 dec %r12 # so that after 'not' we get -n[0] 1034 xor %r10,%r10 1035 mov 8*1(%rbp),%r13 1036 mov 8*2(%rbp),%r14 1037 mov 8*3(%rbp),%r15 1038 jmp .Lsqr4x_sub_entry 1039___ 1040} else { 1041my @ri=("%rax",$bp,$m0,$m1); 1042my $rp="%rdx"; 1043$code.=<<___ 1044 xor \$1,$N[1] 1045 lea ($tp,$num),$tp # rewind $tp 1046 sar \$5,$num # cf=0 1047 lea ($np,$N[1],8),$np 1048 mov 56+8(%rsp),$rp # restore $rp 1049 jmp .Lsub4x 1050 1051.align 32 1052.Lsub4x: 1053 .byte 0x66 1054 mov 8*0($tp),@ri[0] 1055 mov 8*1($tp),@ri[1] 1056 .byte 0x66 1057 sbb 16*0($np),@ri[0] 1058 mov 8*2($tp),@ri[2] 1059 sbb 16*1($np),@ri[1] 1060 mov 3*8($tp),@ri[3] 1061 lea 4*8($tp),$tp 1062 sbb 16*2($np),@ri[2] 1063 mov @ri[0],8*0($rp) 1064 sbb 16*3($np),@ri[3] 1065 lea 16*4($np),$np 1066 mov @ri[1],8*1($rp) 1067 mov @ri[2],8*2($rp) 1068 mov @ri[3],8*3($rp) 1069 lea 8*4($rp),$rp 1070 1071 inc $num 1072 jnz .Lsub4x 1073 1074 ret 1075___ 1076} 1077$code.=<<___; 1078.cfi_endproc 1079.size mul4x_internal,.-mul4x_internal 1080___ 1081}}} 1082{{{ 1083###################################################################### 1084# void bn_power5( 1085my $rptr="%rdi"; # BN_ULONG *rptr, 1086my $aptr="%rsi"; # const BN_ULONG *aptr, 1087my $bptr="%rdx"; # const void *table, 1088my $nptr="%rcx"; # const BN_ULONG *nptr, 1089my $n0 ="%r8"; # const BN_ULONG *n0); 1090my $num ="%r9"; # int num, has to be divisible by 8 1091 # int pwr 1092 1093my ($i,$j,$tptr)=("%rbp","%rcx",$rptr); 1094my @A0=("%r10","%r11"); 1095my @A1=("%r12","%r13"); 1096my ($a0,$a1,$ai)=("%r14","%r15","%rbx"); 1097 1098$code.=<<___; 1099.globl bn_power5 1100.type bn_power5,\@function,6 1101.align 32 1102bn_power5: 1103.cfi_startproc 1104 mov %rsp,%rax 1105.cfi_def_cfa_register %rax 1106___ 1107$code.=<<___ if ($addx); 1108 mov OPENSSL_ia32cap_P+8(%rip),%r11d 1109 and \$0x80108,%r11d 1110 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1 1111 je .Lpowerx5_enter 1112___ 1113$code.=<<___; 1114 push %rbx 1115.cfi_push %rbx 1116 push %rbp 1117.cfi_push %rbp 1118 push %r12 1119.cfi_push %r12 1120 push %r13 1121.cfi_push %r13 1122 push %r14 1123.cfi_push %r14 1124 push %r15 1125.cfi_push %r15 1126.Lpower5_prologue: 1127 1128 shl \$3,${num}d # convert $num to bytes 1129 lea ($num,$num,2),%r10d # 3*$num 1130 neg $num 1131 mov ($n0),$n0 # *n0 1132 1133 ############################################################## 1134 # Ensure that stack frame doesn't alias with $rptr+3*$num 1135 # modulo 4096, which covers ret[num], am[num] and n[num] 1136 # (see bn_exp.c). This is done to allow memory disambiguation 1137 # logic do its magic. [Extra 256 bytes is for power mask 1138 # calculated from 7th argument, the index.] 1139 # 1140 lea -320(%rsp,$num,2),%r11 1141 mov %rsp,%rbp 1142 sub $rptr,%r11 1143 and \$4095,%r11 1144 cmp %r11,%r10 1145 jb .Lpwr_sp_alt 1146 sub %r11,%rbp # align with $aptr 1147 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256) 1148 jmp .Lpwr_sp_done 1149 1150.align 32 1151.Lpwr_sp_alt: 1152 lea 4096-320(,$num,2),%r10 1153 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256) 1154 sub %r10,%r11 1155 mov \$0,%r10 1156 cmovc %r10,%r11 1157 sub %r11,%rbp 1158.Lpwr_sp_done: 1159 and \$-64,%rbp 1160 mov %rsp,%r11 1161 sub %rbp,%r11 1162 and \$-4096,%r11 1163 lea (%rbp,%r11),%rsp 1164 mov (%rsp),%r10 1165 cmp %rbp,%rsp 1166 ja .Lpwr_page_walk 1167 jmp .Lpwr_page_walk_done 1168 1169.Lpwr_page_walk: 1170 lea -4096(%rsp),%rsp 1171 mov (%rsp),%r10 1172 cmp %rbp,%rsp 1173 ja .Lpwr_page_walk 1174.Lpwr_page_walk_done: 1175 1176 mov $num,%r10 1177 neg $num 1178 1179 ############################################################## 1180 # Stack layout 1181 # 1182 # +0 saved $num, used in reduction section 1183 # +8 &t[2*$num], used in reduction section 1184 # +32 saved *n0 1185 # +40 saved %rsp 1186 # +48 t[2*$num] 1187 # 1188 mov $n0, 32(%rsp) 1189 mov %rax, 40(%rsp) # save original %rsp 1190.cfi_cfa_expression %rsp+40,deref,+8 1191.Lpower5_body: 1192 movq $rptr,%xmm1 # save $rptr, used in sqr8x 1193 movq $nptr,%xmm2 # save $nptr 1194 movq %r10, %xmm3 # -$num, used in sqr8x 1195 movq $bptr,%xmm4 1196 1197 call __bn_sqr8x_internal 1198 call __bn_post4x_internal 1199 call __bn_sqr8x_internal 1200 call __bn_post4x_internal 1201 call __bn_sqr8x_internal 1202 call __bn_post4x_internal 1203 call __bn_sqr8x_internal 1204 call __bn_post4x_internal 1205 call __bn_sqr8x_internal 1206 call __bn_post4x_internal 1207 1208 movq %xmm2,$nptr 1209 movq %xmm4,$bptr 1210 mov $aptr,$rptr 1211 mov 40(%rsp),%rax 1212 lea 32(%rsp),$n0 1213 1214 call mul4x_internal 1215 1216 mov 40(%rsp),%rsi # restore %rsp 1217.cfi_def_cfa %rsi,8 1218 mov \$1,%rax 1219 mov -48(%rsi),%r15 1220.cfi_restore %r15 1221 mov -40(%rsi),%r14 1222.cfi_restore %r14 1223 mov -32(%rsi),%r13 1224.cfi_restore %r13 1225 mov -24(%rsi),%r12 1226.cfi_restore %r12 1227 mov -16(%rsi),%rbp 1228.cfi_restore %rbp 1229 mov -8(%rsi),%rbx 1230.cfi_restore %rbx 1231 lea (%rsi),%rsp 1232.cfi_def_cfa_register %rsp 1233.Lpower5_epilogue: 1234 ret 1235.cfi_endproc 1236.size bn_power5,.-bn_power5 1237 1238.globl bn_sqr8x_internal 1239.hidden bn_sqr8x_internal 1240.type bn_sqr8x_internal,\@abi-omnipotent 1241.align 32 1242bn_sqr8x_internal: 1243__bn_sqr8x_internal: 1244.cfi_startproc 1245 ############################################################## 1246 # Squaring part: 1247 # 1248 # a) multiply-n-add everything but a[i]*a[i]; 1249 # b) shift result of a) by 1 to the left and accumulate 1250 # a[i]*a[i] products; 1251 # 1252 ############################################################## 1253 # a[1]a[0] 1254 # a[2]a[0] 1255 # a[3]a[0] 1256 # a[2]a[1] 1257 # a[4]a[0] 1258 # a[3]a[1] 1259 # a[5]a[0] 1260 # a[4]a[1] 1261 # a[3]a[2] 1262 # a[6]a[0] 1263 # a[5]a[1] 1264 # a[4]a[2] 1265 # a[7]a[0] 1266 # a[6]a[1] 1267 # a[5]a[2] 1268 # a[4]a[3] 1269 # a[7]a[1] 1270 # a[6]a[2] 1271 # a[5]a[3] 1272 # a[7]a[2] 1273 # a[6]a[3] 1274 # a[5]a[4] 1275 # a[7]a[3] 1276 # a[6]a[4] 1277 # a[7]a[4] 1278 # a[6]a[5] 1279 # a[7]a[5] 1280 # a[7]a[6] 1281 # a[1]a[0] 1282 # a[2]a[0] 1283 # a[3]a[0] 1284 # a[4]a[0] 1285 # a[5]a[0] 1286 # a[6]a[0] 1287 # a[7]a[0] 1288 # a[2]a[1] 1289 # a[3]a[1] 1290 # a[4]a[1] 1291 # a[5]a[1] 1292 # a[6]a[1] 1293 # a[7]a[1] 1294 # a[3]a[2] 1295 # a[4]a[2] 1296 # a[5]a[2] 1297 # a[6]a[2] 1298 # a[7]a[2] 1299 # a[4]a[3] 1300 # a[5]a[3] 1301 # a[6]a[3] 1302 # a[7]a[3] 1303 # a[5]a[4] 1304 # a[6]a[4] 1305 # a[7]a[4] 1306 # a[6]a[5] 1307 # a[7]a[5] 1308 # a[7]a[6] 1309 # a[0]a[0] 1310 # a[1]a[1] 1311 # a[2]a[2] 1312 # a[3]a[3] 1313 # a[4]a[4] 1314 # a[5]a[5] 1315 # a[6]a[6] 1316 # a[7]a[7] 1317 1318 lea 32(%r10),$i # $i=-($num-32) 1319 lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2] 1320 1321 mov $num,$j # $j=$num 1322 1323 # comments apply to $num==8 case 1324 mov -32($aptr,$i),$a0 # a[0] 1325 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num] 1326 mov -24($aptr,$i),%rax # a[1] 1327 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"] 1328 mov -16($aptr,$i),$ai # a[2] 1329 mov %rax,$a1 1330 1331 mul $a0 # a[1]*a[0] 1332 mov %rax,$A0[0] # a[1]*a[0] 1333 mov $ai,%rax # a[2] 1334 mov %rdx,$A0[1] 1335 mov $A0[0],-24($tptr,$i) # t[1] 1336 1337 mul $a0 # a[2]*a[0] 1338 add %rax,$A0[1] 1339 mov $ai,%rax 1340 adc \$0,%rdx 1341 mov $A0[1],-16($tptr,$i) # t[2] 1342 mov %rdx,$A0[0] 1343 1344 1345 mov -8($aptr,$i),$ai # a[3] 1346 mul $a1 # a[2]*a[1] 1347 mov %rax,$A1[0] # a[2]*a[1]+t[3] 1348 mov $ai,%rax 1349 mov %rdx,$A1[1] 1350 1351 lea ($i),$j 1352 mul $a0 # a[3]*a[0] 1353 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3] 1354 mov $ai,%rax 1355 mov %rdx,$A0[1] 1356 adc \$0,$A0[1] 1357 add $A1[0],$A0[0] 1358 adc \$0,$A0[1] 1359 mov $A0[0],-8($tptr,$j) # t[3] 1360 jmp .Lsqr4x_1st 1361 1362.align 32 1363.Lsqr4x_1st: 1364 mov ($aptr,$j),$ai # a[4] 1365 mul $a1 # a[3]*a[1] 1366 add %rax,$A1[1] # a[3]*a[1]+t[4] 1367 mov $ai,%rax 1368 mov %rdx,$A1[0] 1369 adc \$0,$A1[0] 1370 1371 mul $a0 # a[4]*a[0] 1372 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4] 1373 mov $ai,%rax # a[3] 1374 mov 8($aptr,$j),$ai # a[5] 1375 mov %rdx,$A0[0] 1376 adc \$0,$A0[0] 1377 add $A1[1],$A0[1] 1378 adc \$0,$A0[0] 1379 1380 1381 mul $a1 # a[4]*a[3] 1382 add %rax,$A1[0] # a[4]*a[3]+t[5] 1383 mov $ai,%rax 1384 mov $A0[1],($tptr,$j) # t[4] 1385 mov %rdx,$A1[1] 1386 adc \$0,$A1[1] 1387 1388 mul $a0 # a[5]*a[2] 1389 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5] 1390 mov $ai,%rax 1391 mov 16($aptr,$j),$ai # a[6] 1392 mov %rdx,$A0[1] 1393 adc \$0,$A0[1] 1394 add $A1[0],$A0[0] 1395 adc \$0,$A0[1] 1396 1397 mul $a1 # a[5]*a[3] 1398 add %rax,$A1[1] # a[5]*a[3]+t[6] 1399 mov $ai,%rax 1400 mov $A0[0],8($tptr,$j) # t[5] 1401 mov %rdx,$A1[0] 1402 adc \$0,$A1[0] 1403 1404 mul $a0 # a[6]*a[2] 1405 add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6] 1406 mov $ai,%rax # a[3] 1407 mov 24($aptr,$j),$ai # a[7] 1408 mov %rdx,$A0[0] 1409 adc \$0,$A0[0] 1410 add $A1[1],$A0[1] 1411 adc \$0,$A0[0] 1412 1413 1414 mul $a1 # a[6]*a[5] 1415 add %rax,$A1[0] # a[6]*a[5]+t[7] 1416 mov $ai,%rax 1417 mov $A0[1],16($tptr,$j) # t[6] 1418 mov %rdx,$A1[1] 1419 adc \$0,$A1[1] 1420 lea 32($j),$j 1421 1422 mul $a0 # a[7]*a[4] 1423 add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6] 1424 mov $ai,%rax 1425 mov %rdx,$A0[1] 1426 adc \$0,$A0[1] 1427 add $A1[0],$A0[0] 1428 adc \$0,$A0[1] 1429 mov $A0[0],-8($tptr,$j) # t[7] 1430 1431 cmp \$0,$j 1432 jne .Lsqr4x_1st 1433 1434 mul $a1 # a[7]*a[5] 1435 add %rax,$A1[1] 1436 lea 16($i),$i 1437 adc \$0,%rdx 1438 add $A0[1],$A1[1] 1439 adc \$0,%rdx 1440 1441 mov $A1[1],($tptr) # t[8] 1442 mov %rdx,$A1[0] 1443 mov %rdx,8($tptr) # t[9] 1444 jmp .Lsqr4x_outer 1445 1446.align 32 1447.Lsqr4x_outer: # comments apply to $num==6 case 1448 mov -32($aptr,$i),$a0 # a[0] 1449 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num] 1450 mov -24($aptr,$i),%rax # a[1] 1451 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"] 1452 mov -16($aptr,$i),$ai # a[2] 1453 mov %rax,$a1 1454 1455 mul $a0 # a[1]*a[0] 1456 mov -24($tptr,$i),$A0[0] # t[1] 1457 add %rax,$A0[0] # a[1]*a[0]+t[1] 1458 mov $ai,%rax # a[2] 1459 adc \$0,%rdx 1460 mov $A0[0],-24($tptr,$i) # t[1] 1461 mov %rdx,$A0[1] 1462 1463 mul $a0 # a[2]*a[0] 1464 add %rax,$A0[1] 1465 mov $ai,%rax 1466 adc \$0,%rdx 1467 add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2] 1468 mov %rdx,$A0[0] 1469 adc \$0,$A0[0] 1470 mov $A0[1],-16($tptr,$i) # t[2] 1471 1472 xor $A1[0],$A1[0] 1473 1474 mov -8($aptr,$i),$ai # a[3] 1475 mul $a1 # a[2]*a[1] 1476 add %rax,$A1[0] # a[2]*a[1]+t[3] 1477 mov $ai,%rax 1478 adc \$0,%rdx 1479 add -8($tptr,$i),$A1[0] 1480 mov %rdx,$A1[1] 1481 adc \$0,$A1[1] 1482 1483 mul $a0 # a[3]*a[0] 1484 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3] 1485 mov $ai,%rax 1486 adc \$0,%rdx 1487 add $A1[0],$A0[0] 1488 mov %rdx,$A0[1] 1489 adc \$0,$A0[1] 1490 mov $A0[0],-8($tptr,$i) # t[3] 1491 1492 lea ($i),$j 1493 jmp .Lsqr4x_inner 1494 1495.align 32 1496.Lsqr4x_inner: 1497 mov ($aptr,$j),$ai # a[4] 1498 mul $a1 # a[3]*a[1] 1499 add %rax,$A1[1] # a[3]*a[1]+t[4] 1500 mov $ai,%rax 1501 mov %rdx,$A1[0] 1502 adc \$0,$A1[0] 1503 add ($tptr,$j),$A1[1] 1504 adc \$0,$A1[0] 1505 1506 .byte 0x67 1507 mul $a0 # a[4]*a[0] 1508 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4] 1509 mov $ai,%rax # a[3] 1510 mov 8($aptr,$j),$ai # a[5] 1511 mov %rdx,$A0[0] 1512 adc \$0,$A0[0] 1513 add $A1[1],$A0[1] 1514 adc \$0,$A0[0] 1515 1516 mul $a1 # a[4]*a[3] 1517 add %rax,$A1[0] # a[4]*a[3]+t[5] 1518 mov $A0[1],($tptr,$j) # t[4] 1519 mov $ai,%rax 1520 mov %rdx,$A1[1] 1521 adc \$0,$A1[1] 1522 add 8($tptr,$j),$A1[0] 1523 lea 16($j),$j # j++ 1524 adc \$0,$A1[1] 1525 1526 mul $a0 # a[5]*a[2] 1527 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5] 1528 mov $ai,%rax 1529 adc \$0,%rdx 1530 add $A1[0],$A0[0] 1531 mov %rdx,$A0[1] 1532 adc \$0,$A0[1] 1533 mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below 1534 1535 cmp \$0,$j 1536 jne .Lsqr4x_inner 1537 1538 .byte 0x67 1539 mul $a1 # a[5]*a[3] 1540 add %rax,$A1[1] 1541 adc \$0,%rdx 1542 add $A0[1],$A1[1] 1543 adc \$0,%rdx 1544 1545 mov $A1[1],($tptr) # t[6], "preloaded t[2]" below 1546 mov %rdx,$A1[0] 1547 mov %rdx,8($tptr) # t[7], "preloaded t[3]" below 1548 1549 add \$16,$i 1550 jnz .Lsqr4x_outer 1551 1552 # comments apply to $num==4 case 1553 mov -32($aptr),$a0 # a[0] 1554 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num] 1555 mov -24($aptr),%rax # a[1] 1556 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"] 1557 mov -16($aptr),$ai # a[2] 1558 mov %rax,$a1 1559 1560 mul $a0 # a[1]*a[0] 1561 add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1] 1562 mov $ai,%rax # a[2] 1563 mov %rdx,$A0[1] 1564 adc \$0,$A0[1] 1565 1566 mul $a0 # a[2]*a[0] 1567 add %rax,$A0[1] 1568 mov $ai,%rax 1569 mov $A0[0],-24($tptr) # t[1] 1570 mov %rdx,$A0[0] 1571 adc \$0,$A0[0] 1572 add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2] 1573 mov -8($aptr),$ai # a[3] 1574 adc \$0,$A0[0] 1575 1576 mul $a1 # a[2]*a[1] 1577 add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3] 1578 mov $ai,%rax 1579 mov $A0[1],-16($tptr) # t[2] 1580 mov %rdx,$A1[1] 1581 adc \$0,$A1[1] 1582 1583 mul $a0 # a[3]*a[0] 1584 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3] 1585 mov $ai,%rax 1586 mov %rdx,$A0[1] 1587 adc \$0,$A0[1] 1588 add $A1[0],$A0[0] 1589 adc \$0,$A0[1] 1590 mov $A0[0],-8($tptr) # t[3] 1591 1592 mul $a1 # a[3]*a[1] 1593 add %rax,$A1[1] 1594 mov -16($aptr),%rax # a[2] 1595 adc \$0,%rdx 1596 add $A0[1],$A1[1] 1597 adc \$0,%rdx 1598 1599 mov $A1[1],($tptr) # t[4] 1600 mov %rdx,$A1[0] 1601 mov %rdx,8($tptr) # t[5] 1602 1603 mul $ai # a[2]*a[3] 1604___ 1605{ 1606my ($shift,$carry)=($a0,$a1); 1607my @S=(@A1,$ai,$n0); 1608$code.=<<___; 1609 add \$16,$i 1610 xor $shift,$shift 1611 sub $num,$i # $i=16-$num 1612 xor $carry,$carry 1613 1614 add $A1[0],%rax # t[5] 1615 adc \$0,%rdx 1616 mov %rax,8($tptr) # t[5] 1617 mov %rdx,16($tptr) # t[6] 1618 mov $carry,24($tptr) # t[7] 1619 1620 mov -16($aptr,$i),%rax # a[0] 1621 lea 48+8(%rsp),$tptr 1622 xor $A0[0],$A0[0] # t[0] 1623 mov 8($tptr),$A0[1] # t[1] 1624 1625 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift 1626 shr \$63,$A0[0] 1627 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 | 1628 shr \$63,$A0[1] 1629 or $A0[0],$S[1] # | t[2*i]>>63 1630 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch 1631 mov $A0[1],$shift # shift=t[2*i+1]>>63 1632 mul %rax # a[i]*a[i] 1633 neg $carry # mov $carry,cf 1634 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch 1635 adc %rax,$S[0] 1636 mov -8($aptr,$i),%rax # a[i+1] # prefetch 1637 mov $S[0],($tptr) 1638 adc %rdx,$S[1] 1639 1640 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift 1641 mov $S[1],8($tptr) 1642 sbb $carry,$carry # mov cf,$carry 1643 shr \$63,$A0[0] 1644 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 | 1645 shr \$63,$A0[1] 1646 or $A0[0],$S[3] # | t[2*i]>>63 1647 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch 1648 mov $A0[1],$shift # shift=t[2*i+1]>>63 1649 mul %rax # a[i]*a[i] 1650 neg $carry # mov $carry,cf 1651 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch 1652 adc %rax,$S[2] 1653 mov 0($aptr,$i),%rax # a[i+1] # prefetch 1654 mov $S[2],16($tptr) 1655 adc %rdx,$S[3] 1656 lea 16($i),$i 1657 mov $S[3],24($tptr) 1658 sbb $carry,$carry # mov cf,$carry 1659 lea 64($tptr),$tptr 1660 jmp .Lsqr4x_shift_n_add 1661 1662.align 32 1663.Lsqr4x_shift_n_add: 1664 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift 1665 shr \$63,$A0[0] 1666 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 | 1667 shr \$63,$A0[1] 1668 or $A0[0],$S[1] # | t[2*i]>>63 1669 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch 1670 mov $A0[1],$shift # shift=t[2*i+1]>>63 1671 mul %rax # a[i]*a[i] 1672 neg $carry # mov $carry,cf 1673 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch 1674 adc %rax,$S[0] 1675 mov -8($aptr,$i),%rax # a[i+1] # prefetch 1676 mov $S[0],-32($tptr) 1677 adc %rdx,$S[1] 1678 1679 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift 1680 mov $S[1],-24($tptr) 1681 sbb $carry,$carry # mov cf,$carry 1682 shr \$63,$A0[0] 1683 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 | 1684 shr \$63,$A0[1] 1685 or $A0[0],$S[3] # | t[2*i]>>63 1686 mov 0($tptr),$A0[0] # t[2*i+2] # prefetch 1687 mov $A0[1],$shift # shift=t[2*i+1]>>63 1688 mul %rax # a[i]*a[i] 1689 neg $carry # mov $carry,cf 1690 mov 8($tptr),$A0[1] # t[2*i+2+1] # prefetch 1691 adc %rax,$S[2] 1692 mov 0($aptr,$i),%rax # a[i+1] # prefetch 1693 mov $S[2],-16($tptr) 1694 adc %rdx,$S[3] 1695 1696 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift 1697 mov $S[3],-8($tptr) 1698 sbb $carry,$carry # mov cf,$carry 1699 shr \$63,$A0[0] 1700 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 | 1701 shr \$63,$A0[1] 1702 or $A0[0],$S[1] # | t[2*i]>>63 1703 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch 1704 mov $A0[1],$shift # shift=t[2*i+1]>>63 1705 mul %rax # a[i]*a[i] 1706 neg $carry # mov $carry,cf 1707 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch 1708 adc %rax,$S[0] 1709 mov 8($aptr,$i),%rax # a[i+1] # prefetch 1710 mov $S[0],0($tptr) 1711 adc %rdx,$S[1] 1712 1713 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift 1714 mov $S[1],8($tptr) 1715 sbb $carry,$carry # mov cf,$carry 1716 shr \$63,$A0[0] 1717 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 | 1718 shr \$63,$A0[1] 1719 or $A0[0],$S[3] # | t[2*i]>>63 1720 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch 1721 mov $A0[1],$shift # shift=t[2*i+1]>>63 1722 mul %rax # a[i]*a[i] 1723 neg $carry # mov $carry,cf 1724 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch 1725 adc %rax,$S[2] 1726 mov 16($aptr,$i),%rax # a[i+1] # prefetch 1727 mov $S[2],16($tptr) 1728 adc %rdx,$S[3] 1729 mov $S[3],24($tptr) 1730 sbb $carry,$carry # mov cf,$carry 1731 lea 64($tptr),$tptr 1732 add \$32,$i 1733 jnz .Lsqr4x_shift_n_add 1734 1735 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift 1736 .byte 0x67 1737 shr \$63,$A0[0] 1738 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 | 1739 shr \$63,$A0[1] 1740 or $A0[0],$S[1] # | t[2*i]>>63 1741 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch 1742 mov $A0[1],$shift # shift=t[2*i+1]>>63 1743 mul %rax # a[i]*a[i] 1744 neg $carry # mov $carry,cf 1745 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch 1746 adc %rax,$S[0] 1747 mov -8($aptr),%rax # a[i+1] # prefetch 1748 mov $S[0],-32($tptr) 1749 adc %rdx,$S[1] 1750 1751 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift 1752 mov $S[1],-24($tptr) 1753 sbb $carry,$carry # mov cf,$carry 1754 shr \$63,$A0[0] 1755 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 | 1756 shr \$63,$A0[1] 1757 or $A0[0],$S[3] # | t[2*i]>>63 1758 mul %rax # a[i]*a[i] 1759 neg $carry # mov $carry,cf 1760 adc %rax,$S[2] 1761 adc %rdx,$S[3] 1762 mov $S[2],-16($tptr) 1763 mov $S[3],-8($tptr) 1764___ 1765} 1766###################################################################### 1767# Montgomery reduction part, "word-by-word" algorithm. 1768# 1769# This new path is inspired by multiple submissions from Intel, by 1770# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford, 1771# Vinodh Gopal... 1772{ 1773my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx"); 1774 1775$code.=<<___; 1776 movq %xmm2,$nptr 1777__bn_sqr8x_reduction: 1778 xor %rax,%rax 1779 lea ($nptr,$num),%rcx # end of n[] 1780 lea 48+8(%rsp,$num,2),%rdx # end of t[] buffer 1781 mov %rcx,0+8(%rsp) 1782 lea 48+8(%rsp,$num),$tptr # end of initial t[] window 1783 mov %rdx,8+8(%rsp) 1784 neg $num 1785 jmp .L8x_reduction_loop 1786 1787.align 32 1788.L8x_reduction_loop: 1789 lea ($tptr,$num),$tptr # start of current t[] window 1790 .byte 0x66 1791 mov 8*0($tptr),$m0 1792 mov 8*1($tptr),%r9 1793 mov 8*2($tptr),%r10 1794 mov 8*3($tptr),%r11 1795 mov 8*4($tptr),%r12 1796 mov 8*5($tptr),%r13 1797 mov 8*6($tptr),%r14 1798 mov 8*7($tptr),%r15 1799 mov %rax,(%rdx) # store top-most carry bit 1800 lea 8*8($tptr),$tptr 1801 1802 .byte 0x67 1803 mov $m0,%r8 1804 imulq 32+8(%rsp),$m0 # n0*a[0] 1805 mov 8*0($nptr),%rax # n[0] 1806 mov \$8,%ecx 1807 jmp .L8x_reduce 1808 1809.align 32 1810.L8x_reduce: 1811 mulq $m0 1812 mov 8*1($nptr),%rax # n[1] 1813 neg %r8 1814 mov %rdx,%r8 1815 adc \$0,%r8 1816 1817 mulq $m0 1818 add %rax,%r9 1819 mov 8*2($nptr),%rax 1820 adc \$0,%rdx 1821 add %r9,%r8 1822 mov $m0,48-8+8(%rsp,%rcx,8) # put aside n0*a[i] 1823 mov %rdx,%r9 1824 adc \$0,%r9 1825 1826 mulq $m0 1827 add %rax,%r10 1828 mov 8*3($nptr),%rax 1829 adc \$0,%rdx 1830 add %r10,%r9 1831 mov 32+8(%rsp),$carry # pull n0, borrow $carry 1832 mov %rdx,%r10 1833 adc \$0,%r10 1834 1835 mulq $m0 1836 add %rax,%r11 1837 mov 8*4($nptr),%rax 1838 adc \$0,%rdx 1839 imulq %r8,$carry # modulo-scheduled 1840 add %r11,%r10 1841 mov %rdx,%r11 1842 adc \$0,%r11 1843 1844 mulq $m0 1845 add %rax,%r12 1846 mov 8*5($nptr),%rax 1847 adc \$0,%rdx 1848 add %r12,%r11 1849 mov %rdx,%r12 1850 adc \$0,%r12 1851 1852 mulq $m0 1853 add %rax,%r13 1854 mov 8*6($nptr),%rax 1855 adc \$0,%rdx 1856 add %r13,%r12 1857 mov %rdx,%r13 1858 adc \$0,%r13 1859 1860 mulq $m0 1861 add %rax,%r14 1862 mov 8*7($nptr),%rax 1863 adc \$0,%rdx 1864 add %r14,%r13 1865 mov %rdx,%r14 1866 adc \$0,%r14 1867 1868 mulq $m0 1869 mov $carry,$m0 # n0*a[i] 1870 add %rax,%r15 1871 mov 8*0($nptr),%rax # n[0] 1872 adc \$0,%rdx 1873 add %r15,%r14 1874 mov %rdx,%r15 1875 adc \$0,%r15 1876 1877 dec %ecx 1878 jnz .L8x_reduce 1879 1880 lea 8*8($nptr),$nptr 1881 xor %rax,%rax 1882 mov 8+8(%rsp),%rdx # pull end of t[] 1883 cmp 0+8(%rsp),$nptr # end of n[]? 1884 jae .L8x_no_tail 1885 1886 .byte 0x66 1887 add 8*0($tptr),%r8 1888 adc 8*1($tptr),%r9 1889 adc 8*2($tptr),%r10 1890 adc 8*3($tptr),%r11 1891 adc 8*4($tptr),%r12 1892 adc 8*5($tptr),%r13 1893 adc 8*6($tptr),%r14 1894 adc 8*7($tptr),%r15 1895 sbb $carry,$carry # top carry 1896 1897 mov 48+56+8(%rsp),$m0 # pull n0*a[0] 1898 mov \$8,%ecx 1899 mov 8*0($nptr),%rax 1900 jmp .L8x_tail 1901 1902.align 32 1903.L8x_tail: 1904 mulq $m0 1905 add %rax,%r8 1906 mov 8*1($nptr),%rax 1907 mov %r8,($tptr) # save result 1908 mov %rdx,%r8 1909 adc \$0,%r8 1910 1911 mulq $m0 1912 add %rax,%r9 1913 mov 8*2($nptr),%rax 1914 adc \$0,%rdx 1915 add %r9,%r8 1916 lea 8($tptr),$tptr # $tptr++ 1917 mov %rdx,%r9 1918 adc \$0,%r9 1919 1920 mulq $m0 1921 add %rax,%r10 1922 mov 8*3($nptr),%rax 1923 adc \$0,%rdx 1924 add %r10,%r9 1925 mov %rdx,%r10 1926 adc \$0,%r10 1927 1928 mulq $m0 1929 add %rax,%r11 1930 mov 8*4($nptr),%rax 1931 adc \$0,%rdx 1932 add %r11,%r10 1933 mov %rdx,%r11 1934 adc \$0,%r11 1935 1936 mulq $m0 1937 add %rax,%r12 1938 mov 8*5($nptr),%rax 1939 adc \$0,%rdx 1940 add %r12,%r11 1941 mov %rdx,%r12 1942 adc \$0,%r12 1943 1944 mulq $m0 1945 add %rax,%r13 1946 mov 8*6($nptr),%rax 1947 adc \$0,%rdx 1948 add %r13,%r12 1949 mov %rdx,%r13 1950 adc \$0,%r13 1951 1952 mulq $m0 1953 add %rax,%r14 1954 mov 8*7($nptr),%rax 1955 adc \$0,%rdx 1956 add %r14,%r13 1957 mov %rdx,%r14 1958 adc \$0,%r14 1959 1960 mulq $m0 1961 mov 48-16+8(%rsp,%rcx,8),$m0# pull n0*a[i] 1962 add %rax,%r15 1963 adc \$0,%rdx 1964 add %r15,%r14 1965 mov 8*0($nptr),%rax # pull n[0] 1966 mov %rdx,%r15 1967 adc \$0,%r15 1968 1969 dec %ecx 1970 jnz .L8x_tail 1971 1972 lea 8*8($nptr),$nptr 1973 mov 8+8(%rsp),%rdx # pull end of t[] 1974 cmp 0+8(%rsp),$nptr # end of n[]? 1975 jae .L8x_tail_done # break out of loop 1976 1977 mov 48+56+8(%rsp),$m0 # pull n0*a[0] 1978 neg $carry 1979 mov 8*0($nptr),%rax # pull n[0] 1980 adc 8*0($tptr),%r8 1981 adc 8*1($tptr),%r9 1982 adc 8*2($tptr),%r10 1983 adc 8*3($tptr),%r11 1984 adc 8*4($tptr),%r12 1985 adc 8*5($tptr),%r13 1986 adc 8*6($tptr),%r14 1987 adc 8*7($tptr),%r15 1988 sbb $carry,$carry # top carry 1989 1990 mov \$8,%ecx 1991 jmp .L8x_tail 1992 1993.align 32 1994.L8x_tail_done: 1995 xor %rax,%rax 1996 add (%rdx),%r8 # can this overflow? 1997 adc \$0,%r9 1998 adc \$0,%r10 1999 adc \$0,%r11 2000 adc \$0,%r12 2001 adc \$0,%r13 2002 adc \$0,%r14 2003 adc \$0,%r15 2004 adc \$0,%rax 2005 2006 neg $carry 2007.L8x_no_tail: 2008 adc 8*0($tptr),%r8 2009 adc 8*1($tptr),%r9 2010 adc 8*2($tptr),%r10 2011 adc 8*3($tptr),%r11 2012 adc 8*4($tptr),%r12 2013 adc 8*5($tptr),%r13 2014 adc 8*6($tptr),%r14 2015 adc 8*7($tptr),%r15 2016 adc \$0,%rax # top-most carry 2017 mov -8($nptr),%rcx # np[num-1] 2018 xor $carry,$carry 2019 2020 movq %xmm2,$nptr # restore $nptr 2021 2022 mov %r8,8*0($tptr) # store top 512 bits 2023 mov %r9,8*1($tptr) 2024 movq %xmm3,$num # $num is %r9, can't be moved upwards 2025 mov %r10,8*2($tptr) 2026 mov %r11,8*3($tptr) 2027 mov %r12,8*4($tptr) 2028 mov %r13,8*5($tptr) 2029 mov %r14,8*6($tptr) 2030 mov %r15,8*7($tptr) 2031 lea 8*8($tptr),$tptr 2032 2033 cmp %rdx,$tptr # end of t[]? 2034 jb .L8x_reduction_loop 2035 ret 2036.cfi_endproc 2037.size bn_sqr8x_internal,.-bn_sqr8x_internal 2038___ 2039} 2040############################################################## 2041# Post-condition, 4x unrolled 2042# 2043{ 2044my ($tptr,$nptr)=("%rbx","%rbp"); 2045$code.=<<___; 2046.type __bn_post4x_internal,\@abi-omnipotent 2047.align 32 2048__bn_post4x_internal: 2049.cfi_startproc 2050 mov 8*0($nptr),%r12 2051 lea (%rdi,$num),$tptr # %rdi was $tptr above 2052 mov $num,%rcx 2053 movq %xmm1,$rptr # restore $rptr 2054 neg %rax 2055 movq %xmm1,$aptr # prepare for back-to-back call 2056 sar \$3+2,%rcx 2057 dec %r12 # so that after 'not' we get -n[0] 2058 xor %r10,%r10 2059 mov 8*1($nptr),%r13 2060 mov 8*2($nptr),%r14 2061 mov 8*3($nptr),%r15 2062 jmp .Lsqr4x_sub_entry 2063 2064.align 16 2065.Lsqr4x_sub: 2066 mov 8*0($nptr),%r12 2067 mov 8*1($nptr),%r13 2068 mov 8*2($nptr),%r14 2069 mov 8*3($nptr),%r15 2070.Lsqr4x_sub_entry: 2071 lea 8*4($nptr),$nptr 2072 not %r12 2073 not %r13 2074 not %r14 2075 not %r15 2076 and %rax,%r12 2077 and %rax,%r13 2078 and %rax,%r14 2079 and %rax,%r15 2080 2081 neg %r10 # mov %r10,%cf 2082 adc 8*0($tptr),%r12 2083 adc 8*1($tptr),%r13 2084 adc 8*2($tptr),%r14 2085 adc 8*3($tptr),%r15 2086 mov %r12,8*0($rptr) 2087 lea 8*4($tptr),$tptr 2088 mov %r13,8*1($rptr) 2089 sbb %r10,%r10 # mov %cf,%r10 2090 mov %r14,8*2($rptr) 2091 mov %r15,8*3($rptr) 2092 lea 8*4($rptr),$rptr 2093 2094 inc %rcx # pass %cf 2095 jnz .Lsqr4x_sub 2096 2097 mov $num,%r10 # prepare for back-to-back call 2098 neg $num # restore $num 2099 ret 2100.cfi_endproc 2101.size __bn_post4x_internal,.-__bn_post4x_internal 2102___ 2103} 2104}}} 2105 2106if ($addx) {{{ 2107my $bp="%rdx"; # restore original value 2108 2109$code.=<<___; 2110.type bn_mulx4x_mont_gather5,\@function,6 2111.align 32 2112bn_mulx4x_mont_gather5: 2113.cfi_startproc 2114 mov %rsp,%rax 2115.cfi_def_cfa_register %rax 2116.Lmulx4x_enter: 2117 push %rbx 2118.cfi_push %rbx 2119 push %rbp 2120.cfi_push %rbp 2121 push %r12 2122.cfi_push %r12 2123 push %r13 2124.cfi_push %r13 2125 push %r14 2126.cfi_push %r14 2127 push %r15 2128.cfi_push %r15 2129.Lmulx4x_prologue: 2130 2131 shl \$3,${num}d # convert $num to bytes 2132 lea ($num,$num,2),%r10 # 3*$num in bytes 2133 neg $num # -$num 2134 mov ($n0),$n0 # *n0 2135 2136 ############################################################## 2137 # Ensure that stack frame doesn't alias with $rptr+3*$num 2138 # modulo 4096, which covers ret[num], am[num] and n[num] 2139 # (see bn_exp.c). This is done to allow memory disambiguation 2140 # logic do its magic. [Extra [num] is allocated in order 2141 # to align with bn_power5's frame, which is cleansed after 2142 # completing exponentiation. Extra 256 bytes is for power mask 2143 # calculated from 7th argument, the index.] 2144 # 2145 lea -320(%rsp,$num,2),%r11 2146 mov %rsp,%rbp 2147 sub $rp,%r11 2148 and \$4095,%r11 2149 cmp %r11,%r10 2150 jb .Lmulx4xsp_alt 2151 sub %r11,%rbp # align with $aptr 2152 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256) 2153 jmp .Lmulx4xsp_done 2154 2155.Lmulx4xsp_alt: 2156 lea 4096-320(,$num,2),%r10 2157 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256) 2158 sub %r10,%r11 2159 mov \$0,%r10 2160 cmovc %r10,%r11 2161 sub %r11,%rbp 2162.Lmulx4xsp_done: 2163 and \$-64,%rbp # ensure alignment 2164 mov %rsp,%r11 2165 sub %rbp,%r11 2166 and \$-4096,%r11 2167 lea (%rbp,%r11),%rsp 2168 mov (%rsp),%r10 2169 cmp %rbp,%rsp 2170 ja .Lmulx4x_page_walk 2171 jmp .Lmulx4x_page_walk_done 2172 2173.Lmulx4x_page_walk: 2174 lea -4096(%rsp),%rsp 2175 mov (%rsp),%r10 2176 cmp %rbp,%rsp 2177 ja .Lmulx4x_page_walk 2178.Lmulx4x_page_walk_done: 2179 2180 ############################################################## 2181 # Stack layout 2182 # +0 -num 2183 # +8 off-loaded &b[i] 2184 # +16 end of b[num] 2185 # +24 inner counter 2186 # +32 saved n0 2187 # +40 saved %rsp 2188 # +48 2189 # +56 saved rp 2190 # +64 tmp[num+1] 2191 # 2192 mov $n0, 32(%rsp) # save *n0 2193 mov %rax,40(%rsp) # save original %rsp 2194.cfi_cfa_expression %rsp+40,deref,+8 2195.Lmulx4x_body: 2196 call mulx4x_internal 2197 2198 mov 40(%rsp),%rsi # restore %rsp 2199.cfi_def_cfa %rsi,8 2200 mov \$1,%rax 2201 2202 mov -48(%rsi),%r15 2203.cfi_restore %r15 2204 mov -40(%rsi),%r14 2205.cfi_restore %r14 2206 mov -32(%rsi),%r13 2207.cfi_restore %r13 2208 mov -24(%rsi),%r12 2209.cfi_restore %r12 2210 mov -16(%rsi),%rbp 2211.cfi_restore %rbp 2212 mov -8(%rsi),%rbx 2213.cfi_restore %rbx 2214 lea (%rsi),%rsp 2215.cfi_def_cfa_register %rsp 2216.Lmulx4x_epilogue: 2217 ret 2218.cfi_endproc 2219.size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5 2220 2221.type mulx4x_internal,\@abi-omnipotent 2222.align 32 2223mulx4x_internal: 2224.cfi_startproc 2225 mov $num,8(%rsp) # save -$num (it was in bytes) 2226 mov $num,%r10 2227 neg $num # restore $num 2228 shl \$5,$num 2229 neg %r10 # restore $num 2230 lea 128($bp,$num),%r13 # end of powers table (+size optimization) 2231 shr \$5+5,$num 2232 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument 2233 sub \$1,$num 2234 lea .Linc(%rip),%rax 2235 mov %r13,16+8(%rsp) # end of b[num] 2236 mov $num,24+8(%rsp) # inner counter 2237 mov $rp, 56+8(%rsp) # save $rp 2238___ 2239my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)= 2240 ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax"); 2241my $rptr=$bptr; 2242my $STRIDE=2**5*8; # 5 is "window size" 2243my $N=$STRIDE/4; # should match cache line size 2244$code.=<<___; 2245 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000 2246 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002 2247 lea 88-112(%rsp,%r10),%r10 # place the mask after tp[num+1] (+ICache optimization) 2248 lea 128($bp),$bptr # size optimization 2249 2250 pshufd \$0,%xmm5,%xmm5 # broadcast index 2251 movdqa %xmm1,%xmm4 2252 .byte 0x67 2253 movdqa %xmm1,%xmm2 2254___ 2255######################################################################## 2256# calculate mask by comparing 0..31 to index and save result to stack 2257# 2258$code.=<<___; 2259 .byte 0x67 2260 paddd %xmm0,%xmm1 2261 pcmpeqd %xmm5,%xmm0 # compare to 1,0 2262 movdqa %xmm4,%xmm3 2263___ 2264for($i=0;$i<$STRIDE/16-4;$i+=4) { 2265$code.=<<___; 2266 paddd %xmm1,%xmm2 2267 pcmpeqd %xmm5,%xmm1 # compare to 3,2 2268 movdqa %xmm0,`16*($i+0)+112`(%r10) 2269 movdqa %xmm4,%xmm0 2270 2271 paddd %xmm2,%xmm3 2272 pcmpeqd %xmm5,%xmm2 # compare to 5,4 2273 movdqa %xmm1,`16*($i+1)+112`(%r10) 2274 movdqa %xmm4,%xmm1 2275 2276 paddd %xmm3,%xmm0 2277 pcmpeqd %xmm5,%xmm3 # compare to 7,6 2278 movdqa %xmm2,`16*($i+2)+112`(%r10) 2279 movdqa %xmm4,%xmm2 2280 2281 paddd %xmm0,%xmm1 2282 pcmpeqd %xmm5,%xmm0 2283 movdqa %xmm3,`16*($i+3)+112`(%r10) 2284 movdqa %xmm4,%xmm3 2285___ 2286} 2287$code.=<<___; # last iteration can be optimized 2288 .byte 0x67 2289 paddd %xmm1,%xmm2 2290 pcmpeqd %xmm5,%xmm1 2291 movdqa %xmm0,`16*($i+0)+112`(%r10) 2292 2293 paddd %xmm2,%xmm3 2294 pcmpeqd %xmm5,%xmm2 2295 movdqa %xmm1,`16*($i+1)+112`(%r10) 2296 2297 pcmpeqd %xmm5,%xmm3 2298 movdqa %xmm2,`16*($i+2)+112`(%r10) 2299 2300 pand `16*($i+0)-128`($bptr),%xmm0 # while it's still in register 2301 pand `16*($i+1)-128`($bptr),%xmm1 2302 pand `16*($i+2)-128`($bptr),%xmm2 2303 movdqa %xmm3,`16*($i+3)+112`(%r10) 2304 pand `16*($i+3)-128`($bptr),%xmm3 2305 por %xmm2,%xmm0 2306 por %xmm3,%xmm1 2307___ 2308for($i=0;$i<$STRIDE/16-4;$i+=4) { 2309$code.=<<___; 2310 movdqa `16*($i+0)-128`($bptr),%xmm4 2311 movdqa `16*($i+1)-128`($bptr),%xmm5 2312 movdqa `16*($i+2)-128`($bptr),%xmm2 2313 pand `16*($i+0)+112`(%r10),%xmm4 2314 movdqa `16*($i+3)-128`($bptr),%xmm3 2315 pand `16*($i+1)+112`(%r10),%xmm5 2316 por %xmm4,%xmm0 2317 pand `16*($i+2)+112`(%r10),%xmm2 2318 por %xmm5,%xmm1 2319 pand `16*($i+3)+112`(%r10),%xmm3 2320 por %xmm2,%xmm0 2321 por %xmm3,%xmm1 2322___ 2323} 2324$code.=<<___; 2325 pxor %xmm1,%xmm0 2326 pshufd \$0x4e,%xmm0,%xmm1 2327 por %xmm1,%xmm0 2328 lea $STRIDE($bptr),$bptr 2329 movq %xmm0,%rdx # bp[0] 2330 lea 64+8*4+8(%rsp),$tptr 2331 2332 mov %rdx,$bi 2333 mulx 0*8($aptr),$mi,%rax # a[0]*b[0] 2334 mulx 1*8($aptr),%r11,%r12 # a[1]*b[0] 2335 add %rax,%r11 2336 mulx 2*8($aptr),%rax,%r13 # ... 2337 adc %rax,%r12 2338 adc \$0,%r13 2339 mulx 3*8($aptr),%rax,%r14 2340 2341 mov $mi,%r15 2342 imulq 32+8(%rsp),$mi # "t[0]"*n0 2343 xor $zero,$zero # cf=0, of=0 2344 mov $mi,%rdx 2345 2346 mov $bptr,8+8(%rsp) # off-load &b[i] 2347 2348 lea 4*8($aptr),$aptr 2349 adcx %rax,%r13 2350 adcx $zero,%r14 # cf=0 2351 2352 mulx 0*8($nptr),%rax,%r10 2353 adcx %rax,%r15 # discarded 2354 adox %r11,%r10 2355 mulx 1*8($nptr),%rax,%r11 2356 adcx %rax,%r10 2357 adox %r12,%r11 2358 mulx 2*8($nptr),%rax,%r12 2359 mov 24+8(%rsp),$bptr # counter value 2360 mov %r10,-8*4($tptr) 2361 adcx %rax,%r11 2362 adox %r13,%r12 2363 mulx 3*8($nptr),%rax,%r15 2364 mov $bi,%rdx 2365 mov %r11,-8*3($tptr) 2366 adcx %rax,%r12 2367 adox $zero,%r15 # of=0 2368 lea 4*8($nptr),$nptr 2369 mov %r12,-8*2($tptr) 2370 jmp .Lmulx4x_1st 2371 2372.align 32 2373.Lmulx4x_1st: 2374 adcx $zero,%r15 # cf=0, modulo-scheduled 2375 mulx 0*8($aptr),%r10,%rax # a[4]*b[0] 2376 adcx %r14,%r10 2377 mulx 1*8($aptr),%r11,%r14 # a[5]*b[0] 2378 adcx %rax,%r11 2379 mulx 2*8($aptr),%r12,%rax # ... 2380 adcx %r14,%r12 2381 mulx 3*8($aptr),%r13,%r14 2382 .byte 0x67,0x67 2383 mov $mi,%rdx 2384 adcx %rax,%r13 2385 adcx $zero,%r14 # cf=0 2386 lea 4*8($aptr),$aptr 2387 lea 4*8($tptr),$tptr 2388 2389 adox %r15,%r10 2390 mulx 0*8($nptr),%rax,%r15 2391 adcx %rax,%r10 2392 adox %r15,%r11 2393 mulx 1*8($nptr),%rax,%r15 2394 adcx %rax,%r11 2395 adox %r15,%r12 2396 mulx 2*8($nptr),%rax,%r15 2397 mov %r10,-5*8($tptr) 2398 adcx %rax,%r12 2399 mov %r11,-4*8($tptr) 2400 adox %r15,%r13 2401 mulx 3*8($nptr),%rax,%r15 2402 mov $bi,%rdx 2403 mov %r12,-3*8($tptr) 2404 adcx %rax,%r13 2405 adox $zero,%r15 2406 lea 4*8($nptr),$nptr 2407 mov %r13,-2*8($tptr) 2408 2409 dec $bptr # of=0, pass cf 2410 jnz .Lmulx4x_1st 2411 2412 mov 8(%rsp),$num # load -num 2413 adc $zero,%r15 # modulo-scheduled 2414 lea ($aptr,$num),$aptr # rewind $aptr 2415 add %r15,%r14 2416 mov 8+8(%rsp),$bptr # re-load &b[i] 2417 adc $zero,$zero # top-most carry 2418 mov %r14,-1*8($tptr) 2419 jmp .Lmulx4x_outer 2420 2421.align 32 2422.Lmulx4x_outer: 2423 lea 16-256($tptr),%r10 # where 256-byte mask is (+density control) 2424 pxor %xmm4,%xmm4 2425 .byte 0x67,0x67 2426 pxor %xmm5,%xmm5 2427___ 2428for($i=0;$i<$STRIDE/16;$i+=4) { 2429$code.=<<___; 2430 movdqa `16*($i+0)-128`($bptr),%xmm0 2431 movdqa `16*($i+1)-128`($bptr),%xmm1 2432 movdqa `16*($i+2)-128`($bptr),%xmm2 2433 pand `16*($i+0)+256`(%r10),%xmm0 2434 movdqa `16*($i+3)-128`($bptr),%xmm3 2435 pand `16*($i+1)+256`(%r10),%xmm1 2436 por %xmm0,%xmm4 2437 pand `16*($i+2)+256`(%r10),%xmm2 2438 por %xmm1,%xmm5 2439 pand `16*($i+3)+256`(%r10),%xmm3 2440 por %xmm2,%xmm4 2441 por %xmm3,%xmm5 2442___ 2443} 2444$code.=<<___; 2445 por %xmm5,%xmm4 2446 pshufd \$0x4e,%xmm4,%xmm0 2447 por %xmm4,%xmm0 2448 lea $STRIDE($bptr),$bptr 2449 movq %xmm0,%rdx # m0=bp[i] 2450 2451 mov $zero,($tptr) # save top-most carry 2452 lea 4*8($tptr,$num),$tptr # rewind $tptr 2453 mulx 0*8($aptr),$mi,%r11 # a[0]*b[i] 2454 xor $zero,$zero # cf=0, of=0 2455 mov %rdx,$bi 2456 mulx 1*8($aptr),%r14,%r12 # a[1]*b[i] 2457 adox -4*8($tptr),$mi # +t[0] 2458 adcx %r14,%r11 2459 mulx 2*8($aptr),%r15,%r13 # ... 2460 adox -3*8($tptr),%r11 2461 adcx %r15,%r12 2462 mulx 3*8($aptr),%rdx,%r14 2463 adox -2*8($tptr),%r12 2464 adcx %rdx,%r13 2465 lea ($nptr,$num),$nptr # rewind $nptr 2466 lea 4*8($aptr),$aptr 2467 adox -1*8($tptr),%r13 2468 adcx $zero,%r14 2469 adox $zero,%r14 2470 2471 mov $mi,%r15 2472 imulq 32+8(%rsp),$mi # "t[0]"*n0 2473 2474 mov $mi,%rdx 2475 xor $zero,$zero # cf=0, of=0 2476 mov $bptr,8+8(%rsp) # off-load &b[i] 2477 2478 mulx 0*8($nptr),%rax,%r10 2479 adcx %rax,%r15 # discarded 2480 adox %r11,%r10 2481 mulx 1*8($nptr),%rax,%r11 2482 adcx %rax,%r10 2483 adox %r12,%r11 2484 mulx 2*8($nptr),%rax,%r12 2485 adcx %rax,%r11 2486 adox %r13,%r12 2487 mulx 3*8($nptr),%rax,%r15 2488 mov $bi,%rdx 2489 mov 24+8(%rsp),$bptr # counter value 2490 mov %r10,-8*4($tptr) 2491 adcx %rax,%r12 2492 mov %r11,-8*3($tptr) 2493 adox $zero,%r15 # of=0 2494 mov %r12,-8*2($tptr) 2495 lea 4*8($nptr),$nptr 2496 jmp .Lmulx4x_inner 2497 2498.align 32 2499.Lmulx4x_inner: 2500 mulx 0*8($aptr),%r10,%rax # a[4]*b[i] 2501 adcx $zero,%r15 # cf=0, modulo-scheduled 2502 adox %r14,%r10 2503 mulx 1*8($aptr),%r11,%r14 # a[5]*b[i] 2504 adcx 0*8($tptr),%r10 2505 adox %rax,%r11 2506 mulx 2*8($aptr),%r12,%rax # ... 2507 adcx 1*8($tptr),%r11 2508 adox %r14,%r12 2509 mulx 3*8($aptr),%r13,%r14 2510 mov $mi,%rdx 2511 adcx 2*8($tptr),%r12 2512 adox %rax,%r13 2513 adcx 3*8($tptr),%r13 2514 adox $zero,%r14 # of=0 2515 lea 4*8($aptr),$aptr 2516 lea 4*8($tptr),$tptr 2517 adcx $zero,%r14 # cf=0 2518 2519 adox %r15,%r10 2520 mulx 0*8($nptr),%rax,%r15 2521 adcx %rax,%r10 2522 adox %r15,%r11 2523 mulx 1*8($nptr),%rax,%r15 2524 adcx %rax,%r11 2525 adox %r15,%r12 2526 mulx 2*8($nptr),%rax,%r15 2527 mov %r10,-5*8($tptr) 2528 adcx %rax,%r12 2529 adox %r15,%r13 2530 mov %r11,-4*8($tptr) 2531 mulx 3*8($nptr),%rax,%r15 2532 mov $bi,%rdx 2533 lea 4*8($nptr),$nptr 2534 mov %r12,-3*8($tptr) 2535 adcx %rax,%r13 2536 adox $zero,%r15 2537 mov %r13,-2*8($tptr) 2538 2539 dec $bptr # of=0, pass cf 2540 jnz .Lmulx4x_inner 2541 2542 mov 0+8(%rsp),$num # load -num 2543 adc $zero,%r15 # modulo-scheduled 2544 sub 0*8($tptr),$bptr # pull top-most carry to %cf 2545 mov 8+8(%rsp),$bptr # re-load &b[i] 2546 mov 16+8(%rsp),%r10 2547 adc %r15,%r14 2548 lea ($aptr,$num),$aptr # rewind $aptr 2549 adc $zero,$zero # top-most carry 2550 mov %r14,-1*8($tptr) 2551 2552 cmp %r10,$bptr 2553 jb .Lmulx4x_outer 2554 2555 mov -8($nptr),%r10 2556 mov $zero,%r8 2557 mov ($nptr,$num),%r12 2558 lea ($nptr,$num),%rbp # rewind $nptr 2559 mov $num,%rcx 2560 lea ($tptr,$num),%rdi # rewind $tptr 2561 xor %eax,%eax 2562 xor %r15,%r15 2563 sub %r14,%r10 # compare top-most words 2564 adc %r15,%r15 2565 or %r15,%r8 2566 sar \$3+2,%rcx 2567 sub %r8,%rax # %rax=-%r8 2568 mov 56+8(%rsp),%rdx # restore rp 2569 dec %r12 # so that after 'not' we get -n[0] 2570 mov 8*1(%rbp),%r13 2571 xor %r8,%r8 2572 mov 8*2(%rbp),%r14 2573 mov 8*3(%rbp),%r15 2574 jmp .Lsqrx4x_sub_entry # common post-condition 2575.cfi_endproc 2576.size mulx4x_internal,.-mulx4x_internal 2577___ 2578}{ 2579###################################################################### 2580# void bn_power5( 2581my $rptr="%rdi"; # BN_ULONG *rptr, 2582my $aptr="%rsi"; # const BN_ULONG *aptr, 2583my $bptr="%rdx"; # const void *table, 2584my $nptr="%rcx"; # const BN_ULONG *nptr, 2585my $n0 ="%r8"; # const BN_ULONG *n0); 2586my $num ="%r9"; # int num, has to be divisible by 8 2587 # int pwr); 2588 2589my ($i,$j,$tptr)=("%rbp","%rcx",$rptr); 2590my @A0=("%r10","%r11"); 2591my @A1=("%r12","%r13"); 2592my ($a0,$a1,$ai)=("%r14","%r15","%rbx"); 2593 2594$code.=<<___; 2595.type bn_powerx5,\@function,6 2596.align 32 2597bn_powerx5: 2598.cfi_startproc 2599 mov %rsp,%rax 2600.cfi_def_cfa_register %rax 2601.Lpowerx5_enter: 2602 push %rbx 2603.cfi_push %rbx 2604 push %rbp 2605.cfi_push %rbp 2606 push %r12 2607.cfi_push %r12 2608 push %r13 2609.cfi_push %r13 2610 push %r14 2611.cfi_push %r14 2612 push %r15 2613.cfi_push %r15 2614.Lpowerx5_prologue: 2615 2616 shl \$3,${num}d # convert $num to bytes 2617 lea ($num,$num,2),%r10 # 3*$num in bytes 2618 neg $num 2619 mov ($n0),$n0 # *n0 2620 2621 ############################################################## 2622 # Ensure that stack frame doesn't alias with $rptr+3*$num 2623 # modulo 4096, which covers ret[num], am[num] and n[num] 2624 # (see bn_exp.c). This is done to allow memory disambiguation 2625 # logic do its magic. [Extra 256 bytes is for power mask 2626 # calculated from 7th argument, the index.] 2627 # 2628 lea -320(%rsp,$num,2),%r11 2629 mov %rsp,%rbp 2630 sub $rptr,%r11 2631 and \$4095,%r11 2632 cmp %r11,%r10 2633 jb .Lpwrx_sp_alt 2634 sub %r11,%rbp # align with $aptr 2635 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256) 2636 jmp .Lpwrx_sp_done 2637 2638.align 32 2639.Lpwrx_sp_alt: 2640 lea 4096-320(,$num,2),%r10 2641 lea -320(%rbp,$num,2),%rbp # alloca(frame+2*$num*8+256) 2642 sub %r10,%r11 2643 mov \$0,%r10 2644 cmovc %r10,%r11 2645 sub %r11,%rbp 2646.Lpwrx_sp_done: 2647 and \$-64,%rbp 2648 mov %rsp,%r11 2649 sub %rbp,%r11 2650 and \$-4096,%r11 2651 lea (%rbp,%r11),%rsp 2652 mov (%rsp),%r10 2653 cmp %rbp,%rsp 2654 ja .Lpwrx_page_walk 2655 jmp .Lpwrx_page_walk_done 2656 2657.Lpwrx_page_walk: 2658 lea -4096(%rsp),%rsp 2659 mov (%rsp),%r10 2660 cmp %rbp,%rsp 2661 ja .Lpwrx_page_walk 2662.Lpwrx_page_walk_done: 2663 2664 mov $num,%r10 2665 neg $num 2666 2667 ############################################################## 2668 # Stack layout 2669 # 2670 # +0 saved $num, used in reduction section 2671 # +8 &t[2*$num], used in reduction section 2672 # +16 intermediate carry bit 2673 # +24 top-most carry bit, used in reduction section 2674 # +32 saved *n0 2675 # +40 saved %rsp 2676 # +48 t[2*$num] 2677 # 2678 pxor %xmm0,%xmm0 2679 movq $rptr,%xmm1 # save $rptr 2680 movq $nptr,%xmm2 # save $nptr 2681 movq %r10, %xmm3 # -$num 2682 movq $bptr,%xmm4 2683 mov $n0, 32(%rsp) 2684 mov %rax, 40(%rsp) # save original %rsp 2685.cfi_cfa_expression %rsp+40,deref,+8 2686.Lpowerx5_body: 2687 2688 call __bn_sqrx8x_internal 2689 call __bn_postx4x_internal 2690 call __bn_sqrx8x_internal 2691 call __bn_postx4x_internal 2692 call __bn_sqrx8x_internal 2693 call __bn_postx4x_internal 2694 call __bn_sqrx8x_internal 2695 call __bn_postx4x_internal 2696 call __bn_sqrx8x_internal 2697 call __bn_postx4x_internal 2698 2699 mov %r10,$num # -num 2700 mov $aptr,$rptr 2701 movq %xmm2,$nptr 2702 movq %xmm4,$bptr 2703 mov 40(%rsp),%rax 2704 2705 call mulx4x_internal 2706 2707 mov 40(%rsp),%rsi # restore %rsp 2708.cfi_def_cfa %rsi,8 2709 mov \$1,%rax 2710 2711 mov -48(%rsi),%r15 2712.cfi_restore %r15 2713 mov -40(%rsi),%r14 2714.cfi_restore %r14 2715 mov -32(%rsi),%r13 2716.cfi_restore %r13 2717 mov -24(%rsi),%r12 2718.cfi_restore %r12 2719 mov -16(%rsi),%rbp 2720.cfi_restore %rbp 2721 mov -8(%rsi),%rbx 2722.cfi_restore %rbx 2723 lea (%rsi),%rsp 2724.cfi_def_cfa_register %rsp 2725.Lpowerx5_epilogue: 2726 ret 2727.cfi_endproc 2728.size bn_powerx5,.-bn_powerx5 2729 2730.globl bn_sqrx8x_internal 2731.hidden bn_sqrx8x_internal 2732.type bn_sqrx8x_internal,\@abi-omnipotent 2733.align 32 2734bn_sqrx8x_internal: 2735__bn_sqrx8x_internal: 2736.cfi_startproc 2737 ################################################################## 2738 # Squaring part: 2739 # 2740 # a) multiply-n-add everything but a[i]*a[i]; 2741 # b) shift result of a) by 1 to the left and accumulate 2742 # a[i]*a[i] products; 2743 # 2744 ################################################################## 2745 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0] 2746 # a[1]a[0] 2747 # a[2]a[0] 2748 # a[3]a[0] 2749 # a[2]a[1] 2750 # a[3]a[1] 2751 # a[3]a[2] 2752 # 2753 # a[4]a[0] 2754 # a[5]a[0] 2755 # a[6]a[0] 2756 # a[7]a[0] 2757 # a[4]a[1] 2758 # a[5]a[1] 2759 # a[6]a[1] 2760 # a[7]a[1] 2761 # a[4]a[2] 2762 # a[5]a[2] 2763 # a[6]a[2] 2764 # a[7]a[2] 2765 # a[4]a[3] 2766 # a[5]a[3] 2767 # a[6]a[3] 2768 # a[7]a[3] 2769 # 2770 # a[5]a[4] 2771 # a[6]a[4] 2772 # a[7]a[4] 2773 # a[6]a[5] 2774 # a[7]a[5] 2775 # a[7]a[6] 2776 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0] 2777___ 2778{ 2779my ($zero,$carry)=("%rbp","%rcx"); 2780my $aaptr=$zero; 2781$code.=<<___; 2782 lea 48+8(%rsp),$tptr 2783 lea ($aptr,$num),$aaptr 2784 mov $num,0+8(%rsp) # save $num 2785 mov $aaptr,8+8(%rsp) # save end of $aptr 2786 jmp .Lsqr8x_zero_start 2787 2788.align 32 2789.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00 2790.Lsqrx8x_zero: 2791 .byte 0x3e 2792 movdqa %xmm0,0*8($tptr) 2793 movdqa %xmm0,2*8($tptr) 2794 movdqa %xmm0,4*8($tptr) 2795 movdqa %xmm0,6*8($tptr) 2796.Lsqr8x_zero_start: # aligned at 32 2797 movdqa %xmm0,8*8($tptr) 2798 movdqa %xmm0,10*8($tptr) 2799 movdqa %xmm0,12*8($tptr) 2800 movdqa %xmm0,14*8($tptr) 2801 lea 16*8($tptr),$tptr 2802 sub \$64,$num 2803 jnz .Lsqrx8x_zero 2804 2805 mov 0*8($aptr),%rdx # a[0], modulo-scheduled 2806 #xor %r9,%r9 # t[1], ex-$num, zero already 2807 xor %r10,%r10 2808 xor %r11,%r11 2809 xor %r12,%r12 2810 xor %r13,%r13 2811 xor %r14,%r14 2812 xor %r15,%r15 2813 lea 48+8(%rsp),$tptr 2814 xor $zero,$zero # cf=0, cf=0 2815 jmp .Lsqrx8x_outer_loop 2816 2817.align 32 2818.Lsqrx8x_outer_loop: 2819 mulx 1*8($aptr),%r8,%rax # a[1]*a[0] 2820 adcx %r9,%r8 # a[1]*a[0]+=t[1] 2821 adox %rax,%r10 2822 mulx 2*8($aptr),%r9,%rax # a[2]*a[0] 2823 adcx %r10,%r9 2824 adox %rax,%r11 2825 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 3*8($aptr),%r10,%rax # ... 2826 adcx %r11,%r10 2827 adox %rax,%r12 2828 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 # mulx 4*8($aptr),%r11,%rax 2829 adcx %r12,%r11 2830 adox %rax,%r13 2831 mulx 5*8($aptr),%r12,%rax 2832 adcx %r13,%r12 2833 adox %rax,%r14 2834 mulx 6*8($aptr),%r13,%rax 2835 adcx %r14,%r13 2836 adox %r15,%rax 2837 mulx 7*8($aptr),%r14,%r15 2838 mov 1*8($aptr),%rdx # a[1] 2839 adcx %rax,%r14 2840 adox $zero,%r15 2841 adc 8*8($tptr),%r15 2842 mov %r8,1*8($tptr) # t[1] 2843 mov %r9,2*8($tptr) # t[2] 2844 sbb $carry,$carry # mov %cf,$carry 2845 xor $zero,$zero # cf=0, of=0 2846 2847 2848 mulx 2*8($aptr),%r8,%rbx # a[2]*a[1] 2849 mulx 3*8($aptr),%r9,%rax # a[3]*a[1] 2850 adcx %r10,%r8 2851 adox %rbx,%r9 2852 mulx 4*8($aptr),%r10,%rbx # ... 2853 adcx %r11,%r9 2854 adox %rax,%r10 2855 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 # mulx 5*8($aptr),%r11,%rax 2856 adcx %r12,%r10 2857 adox %rbx,%r11 2858 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r12,%rbx 2859 adcx %r13,%r11 2860 adox %r14,%r12 2861 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r13,%r14 2862 mov 2*8($aptr),%rdx # a[2] 2863 adcx %rax,%r12 2864 adox %rbx,%r13 2865 adcx %r15,%r13 2866 adox $zero,%r14 # of=0 2867 adcx $zero,%r14 # cf=0 2868 2869 mov %r8,3*8($tptr) # t[3] 2870 mov %r9,4*8($tptr) # t[4] 2871 2872 mulx 3*8($aptr),%r8,%rbx # a[3]*a[2] 2873 mulx 4*8($aptr),%r9,%rax # a[4]*a[2] 2874 adcx %r10,%r8 2875 adox %rbx,%r9 2876 mulx 5*8($aptr),%r10,%rbx # ... 2877 adcx %r11,%r9 2878 adox %rax,%r10 2879 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r11,%rax 2880 adcx %r12,%r10 2881 adox %r13,%r11 2882 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r12,%r13 2883 .byte 0x3e 2884 mov 3*8($aptr),%rdx # a[3] 2885 adcx %rbx,%r11 2886 adox %rax,%r12 2887 adcx %r14,%r12 2888 mov %r8,5*8($tptr) # t[5] 2889 mov %r9,6*8($tptr) # t[6] 2890 mulx 4*8($aptr),%r8,%rax # a[4]*a[3] 2891 adox $zero,%r13 # of=0 2892 adcx $zero,%r13 # cf=0 2893 2894 mulx 5*8($aptr),%r9,%rbx # a[5]*a[3] 2895 adcx %r10,%r8 2896 adox %rax,%r9 2897 mulx 6*8($aptr),%r10,%rax # ... 2898 adcx %r11,%r9 2899 adox %r12,%r10 2900 mulx 7*8($aptr),%r11,%r12 2901 mov 4*8($aptr),%rdx # a[4] 2902 mov 5*8($aptr),%r14 # a[5] 2903 adcx %rbx,%r10 2904 adox %rax,%r11 2905 mov 6*8($aptr),%r15 # a[6] 2906 adcx %r13,%r11 2907 adox $zero,%r12 # of=0 2908 adcx $zero,%r12 # cf=0 2909 2910 mov %r8,7*8($tptr) # t[7] 2911 mov %r9,8*8($tptr) # t[8] 2912 2913 mulx %r14,%r9,%rax # a[5]*a[4] 2914 mov 7*8($aptr),%r8 # a[7] 2915 adcx %r10,%r9 2916 mulx %r15,%r10,%rbx # a[6]*a[4] 2917 adox %rax,%r10 2918 adcx %r11,%r10 2919 mulx %r8,%r11,%rax # a[7]*a[4] 2920 mov %r14,%rdx # a[5] 2921 adox %rbx,%r11 2922 adcx %r12,%r11 2923 #adox $zero,%rax # of=0 2924 adcx $zero,%rax # cf=0 2925 2926 mulx %r15,%r14,%rbx # a[6]*a[5] 2927 mulx %r8,%r12,%r13 # a[7]*a[5] 2928 mov %r15,%rdx # a[6] 2929 lea 8*8($aptr),$aptr 2930 adcx %r14,%r11 2931 adox %rbx,%r12 2932 adcx %rax,%r12 2933 adox $zero,%r13 2934 2935 .byte 0x67,0x67 2936 mulx %r8,%r8,%r14 # a[7]*a[6] 2937 adcx %r8,%r13 2938 adcx $zero,%r14 2939 2940 cmp 8+8(%rsp),$aptr 2941 je .Lsqrx8x_outer_break 2942 2943 neg $carry # mov $carry,%cf 2944 mov \$-8,%rcx 2945 mov $zero,%r15 2946 mov 8*8($tptr),%r8 2947 adcx 9*8($tptr),%r9 # +=t[9] 2948 adcx 10*8($tptr),%r10 # ... 2949 adcx 11*8($tptr),%r11 2950 adc 12*8($tptr),%r12 2951 adc 13*8($tptr),%r13 2952 adc 14*8($tptr),%r14 2953 adc 15*8($tptr),%r15 2954 lea ($aptr),$aaptr 2955 lea 2*64($tptr),$tptr 2956 sbb %rax,%rax # mov %cf,$carry 2957 2958 mov -64($aptr),%rdx # a[0] 2959 mov %rax,16+8(%rsp) # offload $carry 2960 mov $tptr,24+8(%rsp) 2961 2962 #lea 8*8($tptr),$tptr # see 2*8*8($tptr) above 2963 xor %eax,%eax # cf=0, of=0 2964 jmp .Lsqrx8x_loop 2965 2966.align 32 2967.Lsqrx8x_loop: 2968 mov %r8,%rbx 2969 mulx 0*8($aaptr),%rax,%r8 # a[8]*a[i] 2970 adcx %rax,%rbx # +=t[8] 2971 adox %r9,%r8 2972 2973 mulx 1*8($aaptr),%rax,%r9 # ... 2974 adcx %rax,%r8 2975 adox %r10,%r9 2976 2977 mulx 2*8($aaptr),%rax,%r10 2978 adcx %rax,%r9 2979 adox %r11,%r10 2980 2981 mulx 3*8($aaptr),%rax,%r11 2982 adcx %rax,%r10 2983 adox %r12,%r11 2984 2985 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 4*8($aaptr),%rax,%r12 2986 adcx %rax,%r11 2987 adox %r13,%r12 2988 2989 mulx 5*8($aaptr),%rax,%r13 2990 adcx %rax,%r12 2991 adox %r14,%r13 2992 2993 mulx 6*8($aaptr),%rax,%r14 2994 mov %rbx,($tptr,%rcx,8) # store t[8+i] 2995 mov \$0,%ebx 2996 adcx %rax,%r13 2997 adox %r15,%r14 2998 2999 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 # mulx 7*8($aaptr),%rax,%r15 3000 mov 8($aptr,%rcx,8),%rdx # a[i] 3001 adcx %rax,%r14 3002 adox %rbx,%r15 # %rbx is 0, of=0 3003 adcx %rbx,%r15 # cf=0 3004 3005 .byte 0x67 3006 inc %rcx # of=0 3007 jnz .Lsqrx8x_loop 3008 3009 lea 8*8($aaptr),$aaptr 3010 mov \$-8,%rcx 3011 cmp 8+8(%rsp),$aaptr # done? 3012 je .Lsqrx8x_break 3013 3014 sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf 3015 .byte 0x66 3016 mov -64($aptr),%rdx 3017 adcx 0*8($tptr),%r8 3018 adcx 1*8($tptr),%r9 3019 adc 2*8($tptr),%r10 3020 adc 3*8($tptr),%r11 3021 adc 4*8($tptr),%r12 3022 adc 5*8($tptr),%r13 3023 adc 6*8($tptr),%r14 3024 adc 7*8($tptr),%r15 3025 lea 8*8($tptr),$tptr 3026 .byte 0x67 3027 sbb %rax,%rax # mov %cf,%rax 3028 xor %ebx,%ebx # cf=0, of=0 3029 mov %rax,16+8(%rsp) # offload carry 3030 jmp .Lsqrx8x_loop 3031 3032.align 32 3033.Lsqrx8x_break: 3034 xor $zero,$zero 3035 sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf 3036 adcx $zero,%r8 3037 mov 24+8(%rsp),$carry # initial $tptr, borrow $carry 3038 adcx $zero,%r9 3039 mov 0*8($aptr),%rdx # a[8], modulo-scheduled 3040 adc \$0,%r10 3041 mov %r8,0*8($tptr) 3042 adc \$0,%r11 3043 adc \$0,%r12 3044 adc \$0,%r13 3045 adc \$0,%r14 3046 adc \$0,%r15 3047 cmp $carry,$tptr # cf=0, of=0 3048 je .Lsqrx8x_outer_loop 3049 3050 mov %r9,1*8($tptr) 3051 mov 1*8($carry),%r9 3052 mov %r10,2*8($tptr) 3053 mov 2*8($carry),%r10 3054 mov %r11,3*8($tptr) 3055 mov 3*8($carry),%r11 3056 mov %r12,4*8($tptr) 3057 mov 4*8($carry),%r12 3058 mov %r13,5*8($tptr) 3059 mov 5*8($carry),%r13 3060 mov %r14,6*8($tptr) 3061 mov 6*8($carry),%r14 3062 mov %r15,7*8($tptr) 3063 mov 7*8($carry),%r15 3064 mov $carry,$tptr 3065 jmp .Lsqrx8x_outer_loop 3066 3067.align 32 3068.Lsqrx8x_outer_break: 3069 mov %r9,9*8($tptr) # t[9] 3070 movq %xmm3,%rcx # -$num 3071 mov %r10,10*8($tptr) # ... 3072 mov %r11,11*8($tptr) 3073 mov %r12,12*8($tptr) 3074 mov %r13,13*8($tptr) 3075 mov %r14,14*8($tptr) 3076___ 3077}{ 3078my $i="%rcx"; 3079$code.=<<___; 3080 lea 48+8(%rsp),$tptr 3081 mov ($aptr,$i),%rdx # a[0] 3082 3083 mov 8($tptr),$A0[1] # t[1] 3084 xor $A0[0],$A0[0] # t[0], of=0, cf=0 3085 mov 0+8(%rsp),$num # restore $num 3086 adox $A0[1],$A0[1] 3087 mov 16($tptr),$A1[0] # t[2] # prefetch 3088 mov 24($tptr),$A1[1] # t[3] # prefetch 3089 #jmp .Lsqrx4x_shift_n_add # happens to be aligned 3090 3091.align 32 3092.Lsqrx4x_shift_n_add: 3093 mulx %rdx,%rax,%rbx 3094 adox $A1[0],$A1[0] 3095 adcx $A0[0],%rax 3096 .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 # mov 8($aptr,$i),%rdx # a[i+1] # prefetch 3097 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 # mov 32($tptr),$A0[0] # t[2*i+4] # prefetch 3098 adox $A1[1],$A1[1] 3099 adcx $A0[1],%rbx 3100 mov 40($tptr),$A0[1] # t[2*i+4+1] # prefetch 3101 mov %rax,0($tptr) 3102 mov %rbx,8($tptr) 3103 3104 mulx %rdx,%rax,%rbx 3105 adox $A0[0],$A0[0] 3106 adcx $A1[0],%rax 3107 mov 16($aptr,$i),%rdx # a[i+2] # prefetch 3108 mov 48($tptr),$A1[0] # t[2*i+6] # prefetch 3109 adox $A0[1],$A0[1] 3110 adcx $A1[1],%rbx 3111 mov 56($tptr),$A1[1] # t[2*i+6+1] # prefetch 3112 mov %rax,16($tptr) 3113 mov %rbx,24($tptr) 3114 3115 mulx %rdx,%rax,%rbx 3116 adox $A1[0],$A1[0] 3117 adcx $A0[0],%rax 3118 mov 24($aptr,$i),%rdx # a[i+3] # prefetch 3119 lea 32($i),$i 3120 mov 64($tptr),$A0[0] # t[2*i+8] # prefetch 3121 adox $A1[1],$A1[1] 3122 adcx $A0[1],%rbx 3123 mov 72($tptr),$A0[1] # t[2*i+8+1] # prefetch 3124 mov %rax,32($tptr) 3125 mov %rbx,40($tptr) 3126 3127 mulx %rdx,%rax,%rbx 3128 adox $A0[0],$A0[0] 3129 adcx $A1[0],%rax 3130 jrcxz .Lsqrx4x_shift_n_add_break 3131 .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 # mov 0($aptr,$i),%rdx # a[i+4] # prefetch 3132 adox $A0[1],$A0[1] 3133 adcx $A1[1],%rbx 3134 mov 80($tptr),$A1[0] # t[2*i+10] # prefetch 3135 mov 88($tptr),$A1[1] # t[2*i+10+1] # prefetch 3136 mov %rax,48($tptr) 3137 mov %rbx,56($tptr) 3138 lea 64($tptr),$tptr 3139 nop 3140 jmp .Lsqrx4x_shift_n_add 3141 3142.align 32 3143.Lsqrx4x_shift_n_add_break: 3144 adcx $A1[1],%rbx 3145 mov %rax,48($tptr) 3146 mov %rbx,56($tptr) 3147 lea 64($tptr),$tptr # end of t[] buffer 3148___ 3149} 3150###################################################################### 3151# Montgomery reduction part, "word-by-word" algorithm. 3152# 3153# This new path is inspired by multiple submissions from Intel, by 3154# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford, 3155# Vinodh Gopal... 3156{ 3157my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx"); 3158 3159$code.=<<___; 3160 movq %xmm2,$nptr 3161__bn_sqrx8x_reduction: 3162 xor %eax,%eax # initial top-most carry bit 3163 mov 32+8(%rsp),%rbx # n0 3164 mov 48+8(%rsp),%rdx # "%r8", 8*0($tptr) 3165 lea -8*8($nptr,$num),%rcx # end of n[] 3166 #lea 48+8(%rsp,$num,2),$tptr # end of t[] buffer 3167 mov %rcx, 0+8(%rsp) # save end of n[] 3168 mov $tptr,8+8(%rsp) # save end of t[] 3169 3170 lea 48+8(%rsp),$tptr # initial t[] window 3171 jmp .Lsqrx8x_reduction_loop 3172 3173.align 32 3174.Lsqrx8x_reduction_loop: 3175 mov 8*1($tptr),%r9 3176 mov 8*2($tptr),%r10 3177 mov 8*3($tptr),%r11 3178 mov 8*4($tptr),%r12 3179 mov %rdx,%r8 3180 imulq %rbx,%rdx # n0*a[i] 3181 mov 8*5($tptr),%r13 3182 mov 8*6($tptr),%r14 3183 mov 8*7($tptr),%r15 3184 mov %rax,24+8(%rsp) # store top-most carry bit 3185 3186 lea 8*8($tptr),$tptr 3187 xor $carry,$carry # cf=0,of=0 3188 mov \$-8,%rcx 3189 jmp .Lsqrx8x_reduce 3190 3191.align 32 3192.Lsqrx8x_reduce: 3193 mov %r8, %rbx 3194 mulx 8*0($nptr),%rax,%r8 # n[0] 3195 adcx %rbx,%rax # discarded 3196 adox %r9,%r8 3197 3198 mulx 8*1($nptr),%rbx,%r9 # n[1] 3199 adcx %rbx,%r8 3200 adox %r10,%r9 3201 3202 mulx 8*2($nptr),%rbx,%r10 3203 adcx %rbx,%r9 3204 adox %r11,%r10 3205 3206 mulx 8*3($nptr),%rbx,%r11 3207 adcx %rbx,%r10 3208 adox %r12,%r11 3209 3210 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rbx,%r12 3211 mov %rdx,%rax 3212 mov %r8,%rdx 3213 adcx %rbx,%r11 3214 adox %r13,%r12 3215 3216 mulx 32+8(%rsp),%rbx,%rdx # %rdx discarded 3217 mov %rax,%rdx 3218 mov %rax,64+48+8(%rsp,%rcx,8) # put aside n0*a[i] 3219 3220 mulx 8*5($nptr),%rax,%r13 3221 adcx %rax,%r12 3222 adox %r14,%r13 3223 3224 mulx 8*6($nptr),%rax,%r14 3225 adcx %rax,%r13 3226 adox %r15,%r14 3227 3228 mulx 8*7($nptr),%rax,%r15 3229 mov %rbx,%rdx 3230 adcx %rax,%r14 3231 adox $carry,%r15 # $carry is 0 3232 adcx $carry,%r15 # cf=0 3233 3234 .byte 0x67,0x67,0x67 3235 inc %rcx # of=0 3236 jnz .Lsqrx8x_reduce 3237 3238 mov $carry,%rax # xor %rax,%rax 3239 cmp 0+8(%rsp),$nptr # end of n[]? 3240 jae .Lsqrx8x_no_tail 3241 3242 mov 48+8(%rsp),%rdx # pull n0*a[0] 3243 add 8*0($tptr),%r8 3244 lea 8*8($nptr),$nptr 3245 mov \$-8,%rcx 3246 adcx 8*1($tptr),%r9 3247 adcx 8*2($tptr),%r10 3248 adc 8*3($tptr),%r11 3249 adc 8*4($tptr),%r12 3250 adc 8*5($tptr),%r13 3251 adc 8*6($tptr),%r14 3252 adc 8*7($tptr),%r15 3253 lea 8*8($tptr),$tptr 3254 sbb %rax,%rax # top carry 3255 3256 xor $carry,$carry # of=0, cf=0 3257 mov %rax,16+8(%rsp) 3258 jmp .Lsqrx8x_tail 3259 3260.align 32 3261.Lsqrx8x_tail: 3262 mov %r8,%rbx 3263 mulx 8*0($nptr),%rax,%r8 3264 adcx %rax,%rbx 3265 adox %r9,%r8 3266 3267 mulx 8*1($nptr),%rax,%r9 3268 adcx %rax,%r8 3269 adox %r10,%r9 3270 3271 mulx 8*2($nptr),%rax,%r10 3272 adcx %rax,%r9 3273 adox %r11,%r10 3274 3275 mulx 8*3($nptr),%rax,%r11 3276 adcx %rax,%r10 3277 adox %r12,%r11 3278 3279 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rax,%r12 3280 adcx %rax,%r11 3281 adox %r13,%r12 3282 3283 mulx 8*5($nptr),%rax,%r13 3284 adcx %rax,%r12 3285 adox %r14,%r13 3286 3287 mulx 8*6($nptr),%rax,%r14 3288 adcx %rax,%r13 3289 adox %r15,%r14 3290 3291 mulx 8*7($nptr),%rax,%r15 3292 mov 72+48+8(%rsp,%rcx,8),%rdx # pull n0*a[i] 3293 adcx %rax,%r14 3294 adox $carry,%r15 3295 mov %rbx,($tptr,%rcx,8) # save result 3296 mov %r8,%rbx 3297 adcx $carry,%r15 # cf=0 3298 3299 inc %rcx # of=0 3300 jnz .Lsqrx8x_tail 3301 3302 cmp 0+8(%rsp),$nptr # end of n[]? 3303 jae .Lsqrx8x_tail_done # break out of loop 3304 3305 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf 3306 mov 48+8(%rsp),%rdx # pull n0*a[0] 3307 lea 8*8($nptr),$nptr 3308 adc 8*0($tptr),%r8 3309 adc 8*1($tptr),%r9 3310 adc 8*2($tptr),%r10 3311 adc 8*3($tptr),%r11 3312 adc 8*4($tptr),%r12 3313 adc 8*5($tptr),%r13 3314 adc 8*6($tptr),%r14 3315 adc 8*7($tptr),%r15 3316 lea 8*8($tptr),$tptr 3317 sbb %rax,%rax 3318 sub \$8,%rcx # mov \$-8,%rcx 3319 3320 xor $carry,$carry # of=0, cf=0 3321 mov %rax,16+8(%rsp) 3322 jmp .Lsqrx8x_tail 3323 3324.align 32 3325.Lsqrx8x_tail_done: 3326 xor %rax,%rax 3327 add 24+8(%rsp),%r8 # can this overflow? 3328 adc \$0,%r9 3329 adc \$0,%r10 3330 adc \$0,%r11 3331 adc \$0,%r12 3332 adc \$0,%r13 3333 adc \$0,%r14 3334 adc \$0,%r15 3335 adc \$0,%rax 3336 3337 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf 3338.Lsqrx8x_no_tail: # %cf is 0 if jumped here 3339 adc 8*0($tptr),%r8 3340 movq %xmm3,%rcx 3341 adc 8*1($tptr),%r9 3342 mov 8*7($nptr),$carry 3343 movq %xmm2,$nptr # restore $nptr 3344 adc 8*2($tptr),%r10 3345 adc 8*3($tptr),%r11 3346 adc 8*4($tptr),%r12 3347 adc 8*5($tptr),%r13 3348 adc 8*6($tptr),%r14 3349 adc 8*7($tptr),%r15 3350 adc \$0,%rax # top-most carry 3351 3352 mov 32+8(%rsp),%rbx # n0 3353 mov 8*8($tptr,%rcx),%rdx # modulo-scheduled "%r8" 3354 3355 mov %r8,8*0($tptr) # store top 512 bits 3356 lea 8*8($tptr),%r8 # borrow %r8 3357 mov %r9,8*1($tptr) 3358 mov %r10,8*2($tptr) 3359 mov %r11,8*3($tptr) 3360 mov %r12,8*4($tptr) 3361 mov %r13,8*5($tptr) 3362 mov %r14,8*6($tptr) 3363 mov %r15,8*7($tptr) 3364 3365 lea 8*8($tptr,%rcx),$tptr # start of current t[] window 3366 cmp 8+8(%rsp),%r8 # end of t[]? 3367 jb .Lsqrx8x_reduction_loop 3368 ret 3369.cfi_endproc 3370.size bn_sqrx8x_internal,.-bn_sqrx8x_internal 3371___ 3372} 3373############################################################## 3374# Post-condition, 4x unrolled 3375# 3376{ 3377my ($rptr,$nptr)=("%rdx","%rbp"); 3378$code.=<<___; 3379.align 32 3380__bn_postx4x_internal: 3381.cfi_startproc 3382 mov 8*0($nptr),%r12 3383 mov %rcx,%r10 # -$num 3384 mov %rcx,%r9 # -$num 3385 neg %rax 3386 sar \$3+2,%rcx 3387 #lea 48+8(%rsp,%r9),$tptr 3388 movq %xmm1,$rptr # restore $rptr 3389 movq %xmm1,$aptr # prepare for back-to-back call 3390 dec %r12 # so that after 'not' we get -n[0] 3391 mov 8*1($nptr),%r13 3392 xor %r8,%r8 3393 mov 8*2($nptr),%r14 3394 mov 8*3($nptr),%r15 3395 jmp .Lsqrx4x_sub_entry 3396 3397.align 16 3398.Lsqrx4x_sub: 3399 mov 8*0($nptr),%r12 3400 mov 8*1($nptr),%r13 3401 mov 8*2($nptr),%r14 3402 mov 8*3($nptr),%r15 3403.Lsqrx4x_sub_entry: 3404 andn %rax,%r12,%r12 3405 lea 8*4($nptr),$nptr 3406 andn %rax,%r13,%r13 3407 andn %rax,%r14,%r14 3408 andn %rax,%r15,%r15 3409 3410 neg %r8 # mov %r8,%cf 3411 adc 8*0($tptr),%r12 3412 adc 8*1($tptr),%r13 3413 adc 8*2($tptr),%r14 3414 adc 8*3($tptr),%r15 3415 mov %r12,8*0($rptr) 3416 lea 8*4($tptr),$tptr 3417 mov %r13,8*1($rptr) 3418 sbb %r8,%r8 # mov %cf,%r8 3419 mov %r14,8*2($rptr) 3420 mov %r15,8*3($rptr) 3421 lea 8*4($rptr),$rptr 3422 3423 inc %rcx 3424 jnz .Lsqrx4x_sub 3425 3426 neg %r9 # restore $num 3427 3428 ret 3429.cfi_endproc 3430.size __bn_postx4x_internal,.-__bn_postx4x_internal 3431___ 3432} 3433}}} 3434{ 3435my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") : # Win64 order 3436 ("%rdi","%esi","%rdx","%ecx"); # Unix order 3437my $out=$inp; 3438my $STRIDE=2**5*8; 3439my $N=$STRIDE/4; 3440 3441$code.=<<___; 3442.globl bn_get_bits5 3443.type bn_get_bits5,\@abi-omnipotent 3444.align 16 3445bn_get_bits5: 3446.cfi_startproc 3447 lea 0($inp),%r10 3448 lea 1($inp),%r11 3449 mov $num,%ecx 3450 shr \$4,$num 3451 and \$15,%ecx 3452 lea -8(%ecx),%eax 3453 cmp \$11,%ecx 3454 cmova %r11,%r10 3455 cmova %eax,%ecx 3456 movzw (%r10,$num,2),%eax 3457 shrl %cl,%eax 3458 and \$31,%eax 3459 ret 3460.cfi_endproc 3461.size bn_get_bits5,.-bn_get_bits5 3462 3463.globl bn_scatter5 3464.type bn_scatter5,\@abi-omnipotent 3465.align 16 3466bn_scatter5: 3467.cfi_startproc 3468 cmp \$0, $num 3469 jz .Lscatter_epilogue 3470 lea ($tbl,$idx,8),$tbl 3471.Lscatter: 3472 mov ($inp),%rax 3473 lea 8($inp),$inp 3474 mov %rax,($tbl) 3475 lea 32*8($tbl),$tbl 3476 sub \$1,$num 3477 jnz .Lscatter 3478.Lscatter_epilogue: 3479 ret 3480.cfi_endproc 3481.size bn_scatter5,.-bn_scatter5 3482 3483.globl bn_gather5 3484.type bn_gather5,\@abi-omnipotent 3485.align 32 3486bn_gather5: 3487.LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases 3488.cfi_startproc 3489 # I can't trust assembler to use specific encoding:-( 3490 .byte 0x4c,0x8d,0x14,0x24 #lea (%rsp),%r10 3491 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 #sub $0x108,%rsp 3492 lea .Linc(%rip),%rax 3493 and \$-16,%rsp # shouldn't be formally required 3494 3495 movd $idx,%xmm5 3496 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000 3497 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002 3498 lea 128($tbl),%r11 # size optimization 3499 lea 128(%rsp),%rax # size optimization 3500 3501 pshufd \$0,%xmm5,%xmm5 # broadcast $idx 3502 movdqa %xmm1,%xmm4 3503 movdqa %xmm1,%xmm2 3504___ 3505######################################################################## 3506# calculate mask by comparing 0..31 to $idx and save result to stack 3507# 3508for($i=0;$i<$STRIDE/16;$i+=4) { 3509$code.=<<___; 3510 paddd %xmm0,%xmm1 3511 pcmpeqd %xmm5,%xmm0 # compare to 1,0 3512___ 3513$code.=<<___ if ($i); 3514 movdqa %xmm3,`16*($i-1)-128`(%rax) 3515___ 3516$code.=<<___; 3517 movdqa %xmm4,%xmm3 3518 3519 paddd %xmm1,%xmm2 3520 pcmpeqd %xmm5,%xmm1 # compare to 3,2 3521 movdqa %xmm0,`16*($i+0)-128`(%rax) 3522 movdqa %xmm4,%xmm0 3523 3524 paddd %xmm2,%xmm3 3525 pcmpeqd %xmm5,%xmm2 # compare to 5,4 3526 movdqa %xmm1,`16*($i+1)-128`(%rax) 3527 movdqa %xmm4,%xmm1 3528 3529 paddd %xmm3,%xmm0 3530 pcmpeqd %xmm5,%xmm3 # compare to 7,6 3531 movdqa %xmm2,`16*($i+2)-128`(%rax) 3532 movdqa %xmm4,%xmm2 3533___ 3534} 3535$code.=<<___; 3536 movdqa %xmm3,`16*($i-1)-128`(%rax) 3537 jmp .Lgather 3538 3539.align 32 3540.Lgather: 3541 pxor %xmm4,%xmm4 3542 pxor %xmm5,%xmm5 3543___ 3544for($i=0;$i<$STRIDE/16;$i+=4) { 3545$code.=<<___; 3546 movdqa `16*($i+0)-128`(%r11),%xmm0 3547 movdqa `16*($i+1)-128`(%r11),%xmm1 3548 movdqa `16*($i+2)-128`(%r11),%xmm2 3549 pand `16*($i+0)-128`(%rax),%xmm0 3550 movdqa `16*($i+3)-128`(%r11),%xmm3 3551 pand `16*($i+1)-128`(%rax),%xmm1 3552 por %xmm0,%xmm4 3553 pand `16*($i+2)-128`(%rax),%xmm2 3554 por %xmm1,%xmm5 3555 pand `16*($i+3)-128`(%rax),%xmm3 3556 por %xmm2,%xmm4 3557 por %xmm3,%xmm5 3558___ 3559} 3560$code.=<<___; 3561 por %xmm5,%xmm4 3562 lea $STRIDE(%r11),%r11 3563 pshufd \$0x4e,%xmm4,%xmm0 3564 por %xmm4,%xmm0 3565 movq %xmm0,($out) # m0=bp[0] 3566 lea 8($out),$out 3567 sub \$1,$num 3568 jnz .Lgather 3569 3570 lea (%r10),%rsp 3571 ret 3572.LSEH_end_bn_gather5: 3573.cfi_endproc 3574.size bn_gather5,.-bn_gather5 3575___ 3576} 3577$code.=<<___; 3578.align 64 3579.Linc: 3580 .long 0,0, 1,1 3581 .long 2,2, 2,2 3582.asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>" 3583___ 3584 3585# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, 3586# CONTEXT *context,DISPATCHER_CONTEXT *disp) 3587if ($win64) { 3588$rec="%rcx"; 3589$frame="%rdx"; 3590$context="%r8"; 3591$disp="%r9"; 3592 3593$code.=<<___; 3594.extern __imp_RtlVirtualUnwind 3595.type mul_handler,\@abi-omnipotent 3596.align 16 3597mul_handler: 3598 push %rsi 3599 push %rdi 3600 push %rbx 3601 push %rbp 3602 push %r12 3603 push %r13 3604 push %r14 3605 push %r15 3606 pushfq 3607 sub \$64,%rsp 3608 3609 mov 120($context),%rax # pull context->Rax 3610 mov 248($context),%rbx # pull context->Rip 3611 3612 mov 8($disp),%rsi # disp->ImageBase 3613 mov 56($disp),%r11 # disp->HandlerData 3614 3615 mov 0(%r11),%r10d # HandlerData[0] 3616 lea (%rsi,%r10),%r10 # end of prologue label 3617 cmp %r10,%rbx # context->Rip<end of prologue label 3618 jb .Lcommon_seh_tail 3619 3620 mov 4(%r11),%r10d # HandlerData[1] 3621 lea (%rsi,%r10),%r10 # beginning of body label 3622 cmp %r10,%rbx # context->Rip<body label 3623 jb .Lcommon_pop_regs 3624 3625 mov 152($context),%rax # pull context->Rsp 3626 3627 mov 8(%r11),%r10d # HandlerData[2] 3628 lea (%rsi,%r10),%r10 # epilogue label 3629 cmp %r10,%rbx # context->Rip>=epilogue label 3630 jae .Lcommon_seh_tail 3631 3632 lea .Lmul_epilogue(%rip),%r10 3633 cmp %r10,%rbx 3634 ja .Lbody_40 3635 3636 mov 192($context),%r10 # pull $num 3637 mov 8(%rax,%r10,8),%rax # pull saved stack pointer 3638 3639 jmp .Lcommon_pop_regs 3640 3641.Lbody_40: 3642 mov 40(%rax),%rax # pull saved stack pointer 3643.Lcommon_pop_regs: 3644 mov -8(%rax),%rbx 3645 mov -16(%rax),%rbp 3646 mov -24(%rax),%r12 3647 mov -32(%rax),%r13 3648 mov -40(%rax),%r14 3649 mov -48(%rax),%r15 3650 mov %rbx,144($context) # restore context->Rbx 3651 mov %rbp,160($context) # restore context->Rbp 3652 mov %r12,216($context) # restore context->R12 3653 mov %r13,224($context) # restore context->R13 3654 mov %r14,232($context) # restore context->R14 3655 mov %r15,240($context) # restore context->R15 3656 3657.Lcommon_seh_tail: 3658 mov 8(%rax),%rdi 3659 mov 16(%rax),%rsi 3660 mov %rax,152($context) # restore context->Rsp 3661 mov %rsi,168($context) # restore context->Rsi 3662 mov %rdi,176($context) # restore context->Rdi 3663 3664 mov 40($disp),%rdi # disp->ContextRecord 3665 mov $context,%rsi # context 3666 mov \$154,%ecx # sizeof(CONTEXT) 3667 .long 0xa548f3fc # cld; rep movsq 3668 3669 mov $disp,%rsi 3670 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER 3671 mov 8(%rsi),%rdx # arg2, disp->ImageBase 3672 mov 0(%rsi),%r8 # arg3, disp->ControlPc 3673 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry 3674 mov 40(%rsi),%r10 # disp->ContextRecord 3675 lea 56(%rsi),%r11 # &disp->HandlerData 3676 lea 24(%rsi),%r12 # &disp->EstablisherFrame 3677 mov %r10,32(%rsp) # arg5 3678 mov %r11,40(%rsp) # arg6 3679 mov %r12,48(%rsp) # arg7 3680 mov %rcx,56(%rsp) # arg8, (NULL) 3681 call *__imp_RtlVirtualUnwind(%rip) 3682 3683 mov \$1,%eax # ExceptionContinueSearch 3684 add \$64,%rsp 3685 popfq 3686 pop %r15 3687 pop %r14 3688 pop %r13 3689 pop %r12 3690 pop %rbp 3691 pop %rbx 3692 pop %rdi 3693 pop %rsi 3694 ret 3695.size mul_handler,.-mul_handler 3696 3697.section .pdata 3698.align 4 3699 .rva .LSEH_begin_bn_mul_mont_gather5 3700 .rva .LSEH_end_bn_mul_mont_gather5 3701 .rva .LSEH_info_bn_mul_mont_gather5 3702 3703 .rva .LSEH_begin_bn_mul4x_mont_gather5 3704 .rva .LSEH_end_bn_mul4x_mont_gather5 3705 .rva .LSEH_info_bn_mul4x_mont_gather5 3706 3707 .rva .LSEH_begin_bn_power5 3708 .rva .LSEH_end_bn_power5 3709 .rva .LSEH_info_bn_power5 3710___ 3711$code.=<<___ if ($addx); 3712 .rva .LSEH_begin_bn_mulx4x_mont_gather5 3713 .rva .LSEH_end_bn_mulx4x_mont_gather5 3714 .rva .LSEH_info_bn_mulx4x_mont_gather5 3715 3716 .rva .LSEH_begin_bn_powerx5 3717 .rva .LSEH_end_bn_powerx5 3718 .rva .LSEH_info_bn_powerx5 3719___ 3720$code.=<<___; 3721 .rva .LSEH_begin_bn_gather5 3722 .rva .LSEH_end_bn_gather5 3723 .rva .LSEH_info_bn_gather5 3724 3725.section .xdata 3726.align 8 3727.LSEH_info_bn_mul_mont_gather5: 3728 .byte 9,0,0,0 3729 .rva mul_handler 3730 .rva .Lmul_body,.Lmul_body,.Lmul_epilogue # HandlerData[] 3731.align 8 3732.LSEH_info_bn_mul4x_mont_gather5: 3733 .byte 9,0,0,0 3734 .rva mul_handler 3735 .rva .Lmul4x_prologue,.Lmul4x_body,.Lmul4x_epilogue # HandlerData[] 3736.align 8 3737.LSEH_info_bn_power5: 3738 .byte 9,0,0,0 3739 .rva mul_handler 3740 .rva .Lpower5_prologue,.Lpower5_body,.Lpower5_epilogue # HandlerData[] 3741___ 3742$code.=<<___ if ($addx); 3743.align 8 3744.LSEH_info_bn_mulx4x_mont_gather5: 3745 .byte 9,0,0,0 3746 .rva mul_handler 3747 .rva .Lmulx4x_prologue,.Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[] 3748.align 8 3749.LSEH_info_bn_powerx5: 3750 .byte 9,0,0,0 3751 .rva mul_handler 3752 .rva .Lpowerx5_prologue,.Lpowerx5_body,.Lpowerx5_epilogue # HandlerData[] 3753___ 3754$code.=<<___; 3755.align 8 3756.LSEH_info_bn_gather5: 3757 .byte 0x01,0x0b,0x03,0x0a 3758 .byte 0x0b,0x01,0x21,0x00 # sub rsp,0x108 3759 .byte 0x04,0xa3,0x00,0x00 # lea r10,(rsp) 3760.align 8 3761___ 3762} 3763 3764$code =~ s/\`([^\`]*)\`/eval($1)/gem; 3765 3766print $code; 3767close STDOUT or die "error closing STDOUT: $!"; 3768