Lines Matching +full:10 +full:- +full:14

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
5 # Copyright 2023- IBM Corp. All rights reserved
40 #include <asm/asm-offsets.h>
41 #include <asm/asm-compat.h>
78 stdu 1,-752(1)
80 SAVE_GPR 14, 112, 1
113 SAVE_VSX 14, 192, 9
148 RESTORE_VSX 14, 192, 9
167 RESTORE_GPR 14, 112, 1
206 vpermxor 14, 14, 2, 25
215 vadduwm 10, 10, 14
223 vxor 6, 6, 10
254 vpermxor 14, 14, 2, 25
263 vadduwm 10, 10, 14
273 vxor 6, 6, 10
304 vpermxor 14, 14, 3, 25
311 vadduwm 10, 10, 15
314 vadduwm 9, 9, 14
319 vxor 5, 5, 10
354 vpermxor 14, 14, 3, 25
361 vadduwm 10, 10, 15
364 vadduwm 9, 9, 14
372 vxor 5, 5, 10
399 vpermxor 14, 14, 2, 20
403 vadduwm 10, 10, 14
407 vxor 6, 6, 10
419 vpermxor 14, 14, 2, 22
423 vadduwm 10, 10, 14
427 vxor 6, 6, 10
442 vpermxor 14, 14, 3, 20
443 vadduwm 10, 10, 15
446 vadduwm 9, 9, 14
447 vxor 5, 5, 10
462 vpermxor 14, 14, 3, 22
463 vadduwm 10, 10, 15
466 vadduwm 9, 9, 14
467 vxor 5, 5, 10
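For reference, the vadduwm / vxor / vpermxor sequences above are the vectorized ChaCha quarter round, run lane-parallel across blocks: vadduwm is the 32-bit add, vxor the XOR, and vpermxor appears to fold the XOR and the byte-aligned rotations (by 16 and 8 bits) into one operation using a precomputed permute control. A minimal scalar C sketch of the quarter round (names are illustrative, not from the kernel source):

#include <stdint.h>

#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

/* One ChaCha quarter round on four 32-bit state words. */
static inline void chacha_qr(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	*a += *b; *d ^= *a; *d = ROTL32(*d, 16);
	*c += *d; *b ^= *c; *b = ROTL32(*b, 12);
	*a += *b; *d ^= *a; *d = ROTL32(*d, 8);
	*c += *d; *b ^= *c; *b = ROTL32(*b, 7);
}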
479 xxmrghw 10, 32+\a0, 32+\a1 # a0, a1, b0, b1
483 xxpermdi 32+\a0, 10, 11, 0 # a0, a1, a2, a3
484 xxpermdi 32+\a1, 10, 11, 3 # b0, b1, b2, b3
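The xxmrghw / xxpermdi pair here (part of the TP_4x macro) regroups four lane-interleaved vectors back into per-block word order (a0..a3, b0..b3, ...), i.e. a 4x4 transpose of 32-bit words. A scalar sketch of the same reshuffle (illustrative only):

#include <stdint.h>

/* Transpose a 4x4 matrix of 32-bit words in place. */
static void transpose_4x4(uint32_t m[4][4])
{
	for (int i = 0; i < 4; i++)
		for (int j = i + 1; j < 4; j++) {
			uint32_t t = m[i][j];
			m[i][j] = m[j][i];
			m[j][i] = t;
		}
}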
491 vadduwm \S+0, \S+0, 16-\S
492 vadduwm \S+4, \S+4, 17-\S
493 vadduwm \S+8, \S+8, 18-\S
494 vadduwm \S+12, \S+12, 19-\S
496 vadduwm \S+1, \S+1, 16-\S
497 vadduwm \S+5, \S+5, 17-\S
498 vadduwm \S+9, \S+9, 18-\S
499 vadduwm \S+13, \S+13, 19-\S
501 vadduwm \S+2, \S+2, 16-\S
502 vadduwm \S+6, \S+6, 17-\S
503 vadduwm \S+10, \S+10, 18-\S
504 vadduwm \S+14, \S+14, 19-\S
506 vadduwm \S+3, \S+3, 16-\S
507 vadduwm \S+7, \S+7, 17-\S
508 vadduwm \S+11, \S+11, 18-\S
509 vadduwm \S+15, \S+15, 19-\S
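This vadduwm block (inside a macro parameterized by \S, which selects the register group) adds the saved input state back into the 16 working words, the final mixing step of the ChaCha block function. In scalar terms, roughly:

#include <stdint.h>

/* Final ChaCha block step: fold the original input state back in
 * (x[] is the working state after the rounds, state[] the saved input). */
static void chacha_add_state(uint32_t x[16], const uint32_t state[16])
{
	for (int i = 0; i < 16; i++)
		x[i] += state[i];
}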
516 add 9, 14, 5
517 add 16, 14, 4
528 lxvw4x 10, 26, 9
532 lxvw4x 14, 30, 9
545 xxlxor \S+42, \S+42, 10
549 xxlxor \S+43, \S+43, 14
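The lxvw4x loads pull in the source data and the xxlxor operations XOR it with the generated keystream. A byte-level C sketch of that step (helper name hypothetical, not a kernel API):

#include <stdint.h>
#include <stddef.h>

/* XOR one 64-byte keystream block into the output buffer. */
static void chacha_xor_block(uint8_t *dst, const uint8_t *src,
			     const uint32_t keystream[16])
{
	const uint8_t *ks = (const uint8_t *)keystream;

	for (size_t i = 0; i < 64; i++)
		dst[i] = src[i] ^ ks[i];
}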
585 # r17 - r31 mainly for Write_256 macro.
603 li 14, 0 # offset to inp and outp
667 xxspltw 32+10, 18, 2
671 xxspltw 32+14, 19, 2
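The xxspltw (and later vspltw) instructions broadcast individual state words across all lanes, so each vector holds word i of several blocks at once and only the block-counter lane differs. A scalar sketch of that setup, assuming the standard ChaCha layout with the counter in word 12:

#include <stdint.h>

/* Lane-parallel state setup: x[i][j] is word i of block j. */
static void chacha_splat_state(uint32_t x[16][4], const uint32_t state[16])
{
	for (int i = 0; i < 16; i++)
		for (int j = 0; j < 4; j++)
			x[i][j] = state[i];
	for (int j = 0; j < 4; j++)
		x[12][j] += j;	/* per-block counter increment (assumed word 12) */
}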
706 TP_4x 8, 9, 10, 11
707 TP_4x 12, 13, 14, 15
723 addi 14, 14, 256 # offset +=256
724 addi 15, 15, -256 # len -=256
732 TP_4x 16+8, 16+9, 16+10, 16+11
733 TP_4x 16+12, 16+13, 16+14, 16+15
741 addi 14, 14, 256 # offset +=256
742 addi 15, 15, -256 # len -=256
789 vspltw 10, 18, 2
794 vspltw 14, 19, 2
806 TP_4x 8, 9, 10, 11
807 TP_4x 12, 13, 14, 15
811 addi 14, 14, 256 # offset += 256
812 addi 15, 15, -256 # len -= 256
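The addi pairs on r14/r15 are the loop bookkeeping: advance the input/output offset and shrink the remaining length by 256 bytes (four 64-byte blocks) per pass, with the 8-block path doing this twice per iteration. The driver loop amounts to roughly the following (helper name hypothetical):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical 4-block helper: generates 256 bytes of keystream from
 * state[] and XORs them into dst from src. */
void chacha_p10_4block_xor(uint32_t state[16], uint8_t *dst, const uint8_t *src);

static void chacha_p10_doblocks(uint32_t state[16], uint8_t *dst,
				const uint8_t *src, size_t len)
{
	size_t offset = 0;			/* r14 in the assembly */

	while (len >= 256) {			/* r15 tracks remaining length */
		chacha_p10_4block_xor(state, dst + offset, src + offset);
		offset += 256;
		len -= 256;
	}
}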