Lines matching the freetext query "+full:0 +full:- +full:23" (tokens 0, -, and 23). Each line below is prefixed with its line number in the searched assembly source; only matching lines are shown, so the listing is not contiguous. The instruction pattern indicates a ppc64le ChaCha20 implementation.

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
5 # Copyright 2023- IBM Corp. All rights reserved
40 #include <asm/asm-offsets.h>
41 #include <asm/asm-compat.h>
76 mflr 0
77 std 0, 16(1)
78 stdu 1,-752(1)
89 SAVE_GPR 23, 184, 1
100 SAVE_VRS 20, 0, 9
103 SAVE_VRS 23, 48, 9
122 SAVE_VSX 23, 336, 9
135 RESTORE_VRS 20, 0, 9
138 RESTORE_VRS 23, 48, 9
157 RESTORE_VSX 23, 336, 9
176 RESTORE_GPR 23, 184, 1
187 ld 0, 16(1)
188 mtlr 0
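
The prologue and epilogue lines above are the standard ppc64le frame setup: the link register is stashed at 16(r1), "stdu 1, -752(1)" opens a 752-byte frame, and the SAVE_*/RESTORE_* macro expansions spill the nonvolatile registers into it (only expansions containing a matched token appear here). A rough C model of the slots these particular lines pin down; the offsets live in the comments, not in the struct layout itself:

    /* Sketch of the 752-byte frame, inferred only from the matched
     * lines; the unmatched SAVE_* expansions fill in the gaps. */
    struct frame_sketch {
            unsigned long backchain;   /* 0(r1), per the ELFv2 ABI      */
            unsigned long cr_save;     /* 8(r1)                         */
            unsigned long lr_save;     /* 16(r1): std/ld 0, 16(1)       */
            /* ... */
            unsigned long gpr23;       /* 184(r1): SAVE_GPR 23, 184, 1  */
            /* vector save area addressed through r9:
             * v20 at r9+0 and v23 at r9+48 imply a 16-byte stride;
             * a VSX save area follows (SAVE_VSX 23, 336, 9). */
    };
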
193 xxlor 0, 32+25, 32+25
195 vadduwm 0, 0, 4
202 vadduwm 19, 19, 23
204 vpermxor 12, 12, 0, 25
212 xxlor 32+25, 0, 0
228 vxor 23, 23, 27
230 xxlor 0, 32+25, 32+25
239 vrlw 23, 23, 25
240 xxlor 32+25, 0, 0
241 vadduwm 0, 0, 4
248 vadduwm 19, 19, 23
250 xxlor 0, 32+25, 32+25
252 vpermxor 12, 12, 0, 25
260 xxlor 32+25, 0, 0
269 xxlor 0, 32+28, 32+28
270 xxlor 32+28, 23, 23
278 vxor 23, 23, 27
286 vrlw 23, 23, 28
287 xxlor 32+28, 0, 0
290 xxlor 0, 32+25, 32+25
292 vadduwm 0, 0, 5
298 vadduwm 18, 18, 23
301 vpermxor 15, 15, 0, 25
310 xxlor 32+25, 0, 0
325 vxor 23, 23, 24
328 xxlor 0, 32+25, 32+25
336 vrlw 23, 23, 25
338 xxlor 32+25, 0, 0
340 vadduwm 0, 0, 5
346 vadduwm 18, 18, 23
349 xxlor 0, 32+25, 32+25
351 vpermxor 15, 15, 0, 25
359 xxlor 32+25, 0, 0
370 xxlor 0, 32+28, 32+28
371 xxlor 32+28, 23, 23
378 vxor 23, 23, 24
386 vrlw 23, 23, 28
388 xxlor 32+28, 0, 0
393 vadduwm 0, 0, 4
397 vpermxor 12, 12, 0, 20
413 vadduwm 0, 0, 4
417 vpermxor 12, 12, 0, 22
429 vrlw 4, 4, 23
430 vrlw 5, 5, 23
431 vrlw 6, 6, 23
432 vrlw 7, 7, 23
435 vadduwm 0, 0, 5
439 vpermxor 15, 15, 0, 20
455 vadduwm 0, 0, 5
459 vpermxor 15, 15, 0, 22
471 vrlw 5, 5, 23
472 vrlw 6, 6, 23
473 vrlw 7, 7, 23
474 vrlw 4, 4, 23
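
Taken together, these instructions are the steps of the ChaCha quarter-round, vectorized so that each instruction advances several blocks at once: vadduwm is the 32-bit lane add, vxor plus vrlw is xor-then-rotate (v23 is splatted with 7 elsewhere in the listing for the final rotate), and vpermxor fuses the xor with the byte-aligned rotates by 16 and 8. The xxlor pairs earlier in the listing appear to be spills, parking v25/v28 in vs0 so those registers can double as permute selectors and rotate counts in between. A scalar reference for one quarter-round:

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t x, int n)
    {
            return (x << n) | (x >> (32 - n));
    }

    /* One ChaCha quarter-round; each vector line in the listing does
     * one of these steps across four 32-bit lanes at a time. */
    static void quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
            *a += *b; *d = rotl32(*d ^ *a, 16); /* vadduwm + vpermxor   */
            *c += *d; *b = rotl32(*b ^ *c, 12); /* vadduwm + vxor, vrlw */
            *a += *b; *d = rotl32(*d ^ *a, 8);  /* vadduwm + vpermxor   */
            *c += *d; *b = rotl32(*b ^ *c, 7);  /* vadduwm + vxor, vrlw */
    }
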
483 xxpermdi 32+\a0, 10, 11, 0 # a0, a1, a2, a3
485 xxpermdi 32+\a2, 12, 13, 0 # c0, c1, c2, c3
491 vadduwm \S+0, \S+0, 16-\S
492 vadduwm \S+4, \S+4, 17-\S
493 vadduwm \S+8, \S+8, 18-\S
494 vadduwm \S+12, \S+12, 19-\S
496 vadduwm \S+1, \S+1, 16-\S
497 vadduwm \S+5, \S+5, 17-\S
498 vadduwm \S+9, \S+9, 18-\S
499 vadduwm \S+13, \S+13, 19-\S
501 vadduwm \S+2, \S+2, 16-\S
502 vadduwm \S+6, \S+6, 17-\S
503 vadduwm \S+10, \S+10, 18-\S
504 vadduwm \S+14, \S+14, 19-\S
506 vadduwm \S+3, \S+3, 16-\S
507 vadduwm \S+7, \S+7, 17-\S
508 vadduwm \S+11, \S+11, 18-\S
509 vadduwm \S+15, \S+15, 19-\S
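
These vadduwm groups are the ChaCha feed-forward: after the rounds, the original input state is added back into each block's working state. By the look of the 16-\S through 19-\S operands, the saved input words live in vr16-vr19, with \S selecting the register group for each block. In scalar form:

    #include <stdint.h>

    /* Feed-forward: add the original input state back into the
     * post-rounds working state, word by word. */
    static void add_state(uint32_t x[16], const uint32_t input[16])
    {
            for (int i = 0; i < 16; i++)
                    x[i] += input[i];
    }
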
518 lxvw4x 0, 0, 9
525 lxvw4x 7, 23, 9
535 xxlxor \S+32, \S+32, 0
552 stxvw4x \S+32, 0, 16
560 stxvw4x \S+45, 23, 16
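
Write_256 is the output step: lxvw4x pulls in 256 bytes of source, xxlxor combines them with the keystream vectors, and stxvw4x writes the result out. From the matched lines, r9 and r16 appear to be the input and output pointers, with r17-r31 preloaded as 16-byte offsets (e.g. "li 23, 112" further down). The whole expansion reduces to:

    /* What one Write_256 expansion amounts to; scalar for clarity.
     * Treating the keystream as bytes assumes a little-endian host,
     * matching the ppc64le target. */
    static void write_256(unsigned char *out, const unsigned char *in,
                          const unsigned char keystream[256])
    {
            for (int i = 0; i < 256; i++)
                    out[i] = in[i] ^ keystream[i];
    }
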
580 cmpdi 6, 0
585 # r17 - r31 mainly for Write_256 macro.
592 li 23, 112
603 li 14, 0 # offset to inp and outp
605 lxvw4x 48, 0, 3 # vr16, constants
610 # create (0, 1, 2, 3) counters
611 vspltisw 0, 0
615 vmrghw 4, 0, 1
617 vsldoi 30, 4, 5, 8 # vr30 = 4-lane counter (0, 1, 2, 3)
620 vspltisw 23, 7
624 lxvw4x 32+20, 0, 11
643 vadduwm 31, 30, 25 # counter = (0, 1, 2, 3) + (4, 4, 4, 4)
650 xxlor 23, 32+23, 32+23
656 xxspltw 32+0, 16, 0
661 xxspltw 32+4, 17, 0
665 xxspltw 32+8, 18, 0
669 xxspltw 32+12, 19, 0
675 xxspltw 32+16, 16, 0
680 xxspltw 32+20, 17, 0
683 xxspltw 32+23, 17, 3
684 xxspltw 32+24, 18, 0
688 xxspltw 32+28, 19, 0
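
This is the state broadcast for the multi-block path: xxspltw replicates each of the 16 input-state words across all four 32-bit lanes of a vector register, one register set per group of four blocks, after which the per-lane offsets built earlier ((0, 1, 2, 3) in v30) are added to the counter word. A model of the layout:

    #include <stdint.h>

    typedef struct { uint32_t lane[4]; } v4u32;  /* stand-in for one VR */

    /* Broadcast the state across four lanes (what xxspltw does), then
     * give each lane its own block counter.  Word 12 is the ChaCha
     * counter word. */
    static void load_state_x4(v4u32 x[16], const uint32_t state[16])
    {
            for (int i = 0; i < 16; i++)
                    for (int l = 0; l < 4; l++)
                            x[i].lane[l] = state[i];
            for (int l = 0; l < 4; l++)
                    x[12].lane[l] += (uint32_t)l;    /* + (0, 1, 2, 3) */
    }
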
700 xxlor 0, 32+30, 32+30
703 xxlor 32+30, 0, 0
704 TP_4x 0, 1, 2, 3
709 xxlor 0, 48, 48
717 Add_state 0
718 xxlor 48, 0, 0
722 Write_256 0
724 addi 15, 15, -256 # len -= 256
730 TP_4x 16+0, 16+1, 16+2, 16+3
742 addi 15, 15, -256 # len -= 256
752 cmpdi 15, 0
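
The loop bookkeeping above implies the driver structure: each pass of the 8-block body produces 512 bytes in two 256-byte halves (one TP_4x/Add_state/Write_256 sequence per half, with TP_4x transposing the lane-sliced registers back into whole blocks), and the code that follows is a 4-block tail for shorter remainders. An illustrative outline; the names and the exact remainder handling are assumptions, not the kernel's:

    #include <stdint.h>

    static void chacha_p10_outline(unsigned char *out, const unsigned char *in,
                                   long len, uint32_t state[16])
    {
            while (len >= 512) {             /* 8-block body */
                    /* rounds over blocks 0-7, then per half: */
                    /* TP_4x + Add_state + Write_256 (blocks 0-3) */
                    len -= 256; in += 256; out += 256;
                    /* TP_4x + Add_state + Write_256 (blocks 4-7) */
                    len -= 256; in += 256; out += 256;
                    state[12] += 8;          /* advance block counter */
            }
            while (len >= 256) {             /* 4-block tail */
                    /* rounds + TP_4x + Add_state + Write_256 */
                    len -= 256; in += 256; out += 256;
                    state[12] += 4;
            }
    }
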
762 lxvw4x 48, 0, 3 # vr16, constants
768 vspltisw 23, 7
771 lxvw4x 32+20, 0, 11
778 vspltw 0, 16, 0
783 vspltw 4, 17, 0
787 vspltw 8, 18, 0
791 vspltw 12, 19, 0
804 TP_4x 0, 1, 2, 3
809 Add_state 0
810 Write_256 0
812 addi 15, 15, -256 # len -= 256
818 cmpdi 15, 0
831 li 3, 0
838 .long 0x22330011, 0x66774455, 0xaabb8899, 0xeeffccdd
839 .long 0x11223300, 0x55667744, 0x99aabb88, 0xddeeffcc
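
These two words (continued with lane-local indices across the rest of each table) are the vpermxor selectors behind the fused xor-rotates. Every selector byte has equal high and low nibbles, so vpermxor's per-byte a[hi] ^ b[lo] rule degenerates to a byte shuffle of (d ^ a): the first table swaps the halfwords of each 32-bit word (rotate left by 16) and the second moves every byte up one position (rotate left by 8). A quick check of that reading, modeling one word in the big-endian byte order vpermxor uses for element numbering:

    #include <stdint.h>
    #include <assert.h>

    /* Apply the per-word byte shuffle a selector word encodes; byte 0
     * is the most significant byte.  The & 3 reduces the 16-byte table
     * index to a position within this one word. */
    static uint32_t shuffle(uint32_t x, uint32_t sel)
    {
            uint32_t out = 0;
            for (int i = 0; i < 4; i++) {
                    unsigned idx = (sel >> (24 - 8 * i)) & 3;
                    out |= ((x >> (24 - 8 * idx)) & 0xff) << (24 - 8 * i);
            }
            return out;
    }

    static uint32_t rotl(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }

    int main(void)
    {
            uint32_t x = 0x12345678;
            assert(shuffle(x, 0x22330011) == rotl(x, 16)); /* first table  */
            assert(shuffle(x, 0x11223300) == rotl(x, 8));  /* second table */
            return 0;
    }
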