Lines matching "16" and "17"

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
5 # Copyright 2023- IBM Corp. All rights reserved
11 # 1. a += b; d ^= a; d <<<= 16;
16 # row1 = row1 + row2; row4 = row1 xor row4; rotate each word of row4 left by 16
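The comments above give only the first of the four steps of a ChaCha quarter round. For reference, a minimal C sketch of the full quarter round per RFC 8439; rotl32 and quarter_round are illustrative helpers, not symbols from this file:

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t v, unsigned int n)
    {
        return (v << n) | (v >> (32 - n));
    }

    /* One ChaCha quarter round; the first line is the "a += b;
     * d ^= a; d <<<= 16" step that the vector code applies to
     * whole rows at a time. */
    static inline void quarter_round(uint32_t *a, uint32_t *b,
                                     uint32_t *c, uint32_t *d)
    {
        *a += *b; *d ^= *a; *d = rotl32(*d, 16);
        *c += *d; *b ^= *c; *b = rotl32(*b, 12);
        *a += *b; *d ^= *a; *d = rotl32(*d, 8);
        *c += *d; *b ^= *c; *b = rotl32(*b, 7);
    }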
40 #include <asm/asm-offsets.h>
41 #include <asm/asm-compat.h>
52 li 16, \OFFSET
53 stvx \VRS, 16, \FRAME
57 li 16, \OFFSET
58 stxvx \VSX, 16, \FRAME
66 li 16, \OFFSET
67 lvx \VRS, 16, \FRAME
71 li 16, \OFFSET
72 lxvx \VSX, 16, \FRAME
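stvx/lvx and stxvx/lxvx only support register+register addressing, which is why each of these macros first materializes the offset with li 16, \OFFSET. A scalar C model of one save/restore pair (save_slot and restore_slot are illustrative names):

    #include <string.h>

    /* stvx VRS, r16, frame: copy one 16-byte vector register to
     * frame + offset; lvx is the inverse. */
    static void save_slot(unsigned char *frame, unsigned long offset,
                          const unsigned char vreg[16])
    {
        memcpy(frame + offset, vreg, 16);
    }

    static void restore_slot(unsigned char vreg[16], unsigned long offset,
                             const unsigned char *frame)
    {
        memcpy(vreg, frame + offset, 16);
    }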
77 std 0, 16(1) # save LR (in r0) to the linkage area
78 stdu 1,-752(1) # allocate a 752-byte stack frame
82 SAVE_GPR 16, 128, 1
83 SAVE_GPR 17, 136, 1
101 SAVE_VRS 21, 16, 9
115 SAVE_VSX 16, 224, 9
116 SAVE_VSX 17, 240, 9
136 RESTORE_VRS 21, 16, 9
150 RESTORE_VSX 16, 224, 9
151 RESTORE_VSX 17, 240, 9
169 RESTORE_GPR 16, 128, 1
170 RESTORE_GPR 17, 136, 1
187 ld 0, 16(1) # reload the saved LR
199 vadduwm 16, 16, 20
200 vadduwm 17, 17, 21
208 vpermxor 28, 28, 16, 25
209 vpermxor 29, 29, 17, 25
245 vadduwm 16, 16, 20
246 vadduwm 17, 17, 21
256 vpermxor 28, 28, 16, 25
257 vpermxor 29, 29, 17, 25
296 vadduwm 16, 16, 21
297 vadduwm 17, 17, 22
305 vpermxor 31, 31, 16, 25
306 vpermxor 28, 28, 17, 25
344 vadduwm 16, 16, 21
345 vadduwm 17, 17, 22
355 vpermxor 31, 31, 16, 25
356 vpermxor 28, 28, 17, 25
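A rotation by 16 (like the rotation by 8 elsewhere in the file) moves whole bytes, so the xor and the rotate of the quarter round can be fused into a single vpermxor with a byte-permuting mask (vr25 above). A scalar sketch of the fused operation, assuming that reading of the mask:

    #include <stdint.h>

    /* d = rotl32(d ^ a, 16): a left rotate by 16 just swaps the two
     * halfwords, i.e. it is a pure byte permutation, which is what
     * lets one vpermxor replace a separate xor and rotate. */
    static inline uint32_t xor_rotl16(uint32_t d, uint32_t a)
    {
        uint32_t x = d ^ a;
        return (x << 16) | (x >> 16);
    }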
491 vadduwm \S+0, \S+0, 16-\S
492 vadduwm \S+4, \S+4, 17-\S
493 vadduwm \S+8, \S+8, 18-\S
494 vadduwm \S+12, \S+12, 19-\S
496 vadduwm \S+1, \S+1, 16-\S
497 vadduwm \S+5, \S+5, 17-\S
498 vadduwm \S+9, \S+9, 18-\S
499 vadduwm \S+13, \S+13, 19-\S
501 vadduwm \S+2, \S+2, 16-\S
502 vadduwm \S+6, \S+6, 17-\S
503 vadduwm \S+10, \S+10, 18-\S
504 vadduwm \S+14, \S+14, 19-\S
506 vadduwm \S+3, \S+3, 16-\S
507 vadduwm \S+7, \S+7, 17-\S
508 vadduwm \S+11, \S+11, 18-\S
509 vadduwm \S+15, \S+15, 19-\S
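These vadduwm groups appear to be ChaCha's final feed-forward, adding the saved input state back into the permuted working state; the 16-\S operand arithmetic selects the register holding the corresponding saved word for each lane group. In scalar C the whole step is just (add_state is an illustrative name):

    #include <stdint.h>

    /* Feed-forward after the 20 rounds: x[i] += input[i]. */
    static void add_state(uint32_t x[16], const uint32_t input[16])
    {
        for (int i = 0; i < 16; i++)
            x[i] += input[i];
    }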
517 add 16, 14, 4
519 lxvw4x 1, 17, 9
552 stxvw4x \S+32, 0, 16
553 stxvw4x \S+36, 17, 16
554 stxvw4x \S+40, 18, 16
555 stxvw4x \S+44, 19, 16
557 stxvw4x \S+33, 20, 16
558 stxvw4x \S+37, 21, 16
559 stxvw4x \S+41, 22, 16
560 stxvw4x \S+45, 23, 16
562 stxvw4x \S+34, 24, 16
563 stxvw4x \S+38, 25, 16
564 stxvw4x \S+42, 26, 16
565 stxvw4x \S+46, 27, 16
567 stxvw4x \S+35, 28, 16
568 stxvw4x \S+39, 29, 16
569 stxvw4x \S+43, 30, 16
570 stxvw4x \S+47, 31, 16
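Write_256 issues sixteen indexed 16-byte stores, 256 bytes in total, with r16 as the base and r17-r31 as pre-loaded byte offsets; the \S+32, \S+36, ... ordering interleaves the four lanes back into consecutive 64-byte blocks. A rough scalar model of the addressing only (write_256 is an illustrative name; the lane reordering is omitted):

    #include <string.h>

    /* Sixteen 16-byte stores at offsets 0, 16, ..., 240. */
    static void write_256(unsigned char *dst,
                          const unsigned char vec[16][16])
    {
        for (int i = 0; i < 16; i++)
            memcpy(dst + 16 * i, vec[i], 16);
    }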
585 # r17 - r31 hold the byte offsets 16, 32, ..., 240, mainly for the Write_256 macro.
586 li 17, 16
606 lxvw4x 49, 17, 3 # vr17, key 1
625 lxvw4x 32+22, 17, 11
632 xxlor 16, 48, 48 # save vr16 (key words) in vs16
633 xxlor 17, 49, 49 # save vr17 (key words) in vs17
656 xxspltw 32+0, 16, 0
657 xxspltw 32+1, 16, 1
658 xxspltw 32+2, 16, 2
659 xxspltw 32+3, 16, 3
661 xxspltw 32+4, 17, 0
662 xxspltw 32+5, 17, 1
663 xxspltw 32+6, 17, 2
664 xxspltw 32+7, 17, 3
675 xxspltw 32+16, 16, 0
676 xxspltw 32+17, 16, 1
677 xxspltw 32+18, 16, 2
678 xxspltw 32+19, 16, 3
680 xxspltw 32+20, 17, 0
681 xxspltw 32+21, 17, 1
682 xxspltw 32+22, 17, 2
683 xxspltw 32+23, 17, 3
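xxspltw broadcasts one 32-bit word of a VSX source into all four lanes of the target; here the key words saved in vs16/vs17 are splatted into both 4-block register groups so that four blocks per group run the rounds in lockstep. A scalar model of the splat itself (splat_state is an illustrative name; the per-lane counter fix-up is omitted):

    #include <stdint.h>

    /* One 4-lane vector per state word; each lane is a parallel block. */
    static void splat_state(uint32_t lanes[16][4], const uint32_t state[16])
    {
        for (int i = 0; i < 16; i++)
            for (int j = 0; j < 4; j++)
                lanes[i][j] = state[i];
    }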
713 xxlor 48, 16, 16 # restore vr16 (key words) from vs16
714 xxlor 49, 17, 17 # restore vr17 (key words) from vs17
724 addi 15, 15, -256 # len -= 256
730 TP_4x 16+0, 16+1, 16+2, 16+3
731 TP_4x 16+4, 16+5, 16+6, 16+7
732 TP_4x 16+8, 16+9, 16+10, 16+11
733 TP_4x 16+12, 16+13, 16+14, 16+15
735 xxlor 32, 16, 16
736 xxlor 33, 17, 17
739 Add_state 16
740 Write_256 16
742 addi 15, 15, -256 # len -= 256
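Taken together, these lines give the shape of the main loop: run the vector rounds, transpose the lane-major registers back to block order (TP_4x), restore the splatted key words, feed the state forward, and store 256 bytes, with each addi 15, 15, -256 retiring one 256-byte chunk. In outline (process_256 is an illustrative stand-in for that whole sequence, not a symbol from this file):

    #include <stddef.h>
    #include <stdint.h>

    static void process_256(uint8_t *dst)
    {
        (void)dst; /* rounds + TP_4x + Add_state + Write_256 */
    }

    /* Loop bookkeeping: 256 output bytes per store/decrement step. */
    static void chacha_outline(uint8_t *dst, size_t len)
    {
        while (len >= 256) {
            process_256(dst);
            dst += 256;
            len -= 256; /* matches "addi 15, 15, -256" */
        }
    }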
763 lxvw4x 49, 17, 3 # vr17, key 1
772 lxvw4x 32+22, 17, 11
778 vspltw 0, 16, 0
779 vspltw 1, 16, 1
780 vspltw 2, 16, 2
781 vspltw 3, 16, 3
783 vspltw 4, 17, 0
784 vspltw 5, 17, 1
785 vspltw 6, 17, 2
786 vspltw 7, 17, 3
812 addi 15, 15, -256 # len -= 256