Lines Matching +full:xor +full:- +full:v2

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
5 # Copyright 2023- IBM Corp. All rights reserved
16 # row1 = (row1 + row2), row4 = row1 xor row4, row4 rotate each word by 16
17 # row3 = (row3 + row4), row2 = row3 xor row2, row2 rotate each word by 12
18 # row1 = (row1 + row2), row4 = row1 xor row4, row4 rotate each word by 8
19 # row3 = (row3 + row4), row2 = row3 xor row2, row2 rotate each word by 7
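
The four comment lines above (file lines 16-19) describe one ChaCha quarter round. A minimal scalar C sketch of that operation, with a, b, c, d standing for row1..row4 and rotl32 as a hypothetical helper not present in the listing:

#include <stdint.h>

/* Hypothetical helper: rotate a 32-bit word left by n bits. */
static inline uint32_t rotl32(uint32_t v, int n)
{
        return (v << n) | (v >> (32 - n));
}

/* One ChaCha quarter round: add, xor, rotate by 16/12/8/7. */
static inline void chacha_qr(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
        *a += *b; *d ^= *a; *d = rotl32(*d, 16); /* row1 += row2; row4 ^= row1; rot 16 */
        *c += *d; *b ^= *c; *b = rotl32(*b, 12); /* row3 += row4; row2 ^= row3; rot 12 */
        *a += *b; *d ^= *a; *d = rotl32(*d, 8);  /* row1 += row2; row4 ^= row1; rot 8  */
        *c += *d; *b ^= *c; *b = rotl32(*b, 7);  /* row3 += row4; row2 ^= row3; rot 7  */
}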
35 # Column round (v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
36 # Diagonal round (v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
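
The two comment lines above give the column and diagonal groupings of the 16-word state, four quarter rounds each. A sketch of one double round in scalar C, reusing chacha_qr from the sketch above:

/* One double round: four column quarter rounds, then four diagonal ones. */
static void chacha_double_round(uint32_t x[16])
{
        /* column round */
        chacha_qr(&x[0], &x[4], &x[8],  &x[12]);
        chacha_qr(&x[1], &x[5], &x[9],  &x[13]);
        chacha_qr(&x[2], &x[6], &x[10], &x[14]);
        chacha_qr(&x[3], &x[7], &x[11], &x[15]);
        /* diagonal round */
        chacha_qr(&x[0], &x[5], &x[10], &x[15]);
        chacha_qr(&x[1], &x[6], &x[11], &x[12]);
        chacha_qr(&x[2], &x[7], &x[8],  &x[13]);
        chacha_qr(&x[3], &x[4], &x[9],  &x[14]);
}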
40 #include <asm/asm-offsets.h>
41 #include <asm/asm-compat.h>
78 stdu 1,-752(1)	# allocate a 752-byte stack frame
192 # QR(v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
289 # QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
392 # QR(v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
434 # QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
491 vadduwm \S+0, \S+0, 16-\S
492 vadduwm \S+4, \S+4, 17-\S
493 vadduwm \S+8, \S+8, 18-\S
494 vadduwm \S+12, \S+12, 19-\S
496 vadduwm \S+1, \S+1, 16-\S
497 vadduwm \S+5, \S+5, 17-\S
498 vadduwm \S+9, \S+9, 18-\S
499 vadduwm \S+13, \S+13, 19-\S
501 vadduwm \S+2, \S+2, 16-\S
502 vadduwm \S+6, \S+6, 17-\S
503 vadduwm \S+10, \S+10, 18-\S
504 vadduwm \S+14, \S+14, 19-\S
506 vadduwm \S+3, \S+3, 16-\S
507 vadduwm \S+7, \S+7, 17-\S
508 vadduwm \S+11, \S+11, 18-\S
509 vadduwm \S+15, \S+15, 19-\S
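
The vadduwm block above is the ChaCha feed-forward: after the rounds, the saved input state (held one column per register in 16-\S through 19-\S) is added back into the working vectors, four lanes at a time. In scalar form this is roughly the standard per-word addition, where x[] is the working state and saved[] the input state:

/* Feed-forward: add the original input state back word-for-word. */
for (int i = 0; i < 16; i++)
        x[i] += saved[i];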
585 # r17 - r31 are used mainly by the Write_256 macro.
724 addi 15, 15, -256	# len -= 256
742 addi 15, 15, -256	# len -= 256
812 addi 15, 15, -256	# len -= 256
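
Each of these addi instructions decrements the remaining byte count in r15 by 256, i.e. four 64-byte ChaCha blocks consumed per pass of that path. A hedged sketch of the surrounding loop structure, with chacha_4block_xor as a hypothetical stand-in for the vectorized block routine:

while (len >= 256) {
        chacha_4block_xor(state, dst, src); /* hypothetical 4-block XOR helper */
        src += 256;
        dst += 256;
        len -= 256;                         /* matches addi 15, 15, -256 */
}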