Lines Matching +full:- +full:4

1 //===----------------------------------------------------------------------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
11 #define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
14 #define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
32 # +-----------------------+
34 # +-----------------------+
36 # +-----------------------+ <-- SP
40 movl 4(%esp), %eax
48 movl %ebx, 4(%edx)
51 movl 4(%eax), %ebx
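Line 40 is the standard i386 cdecl argument fetch: at entry, 0(%esp) holds the return address, so the routine's single argument, the pointer to the saved-register array, sits at 4(%esp). A minimal sketch of the pattern (the slot assignment for %ebx is taken from line 51):

    movl 4(%esp), %eax   # eax = first (only) argument: the register-context pointer
    movl 4(%eax), %ebx   # reload ebx from slot 1 of that array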
194 addi 4, 3, PPC64_OFFS_FP
198 // For little-endian targets, we need a swap since lxvd2x will load the register
201 // this can be changed to simply `lxv n, (16 * n)(4)`.
203 lxvd2x n, 0, 4 ;\
205 addi 4, 4, 16
208 lxvd2x n, 0, 4 ;\
209 addi 4, 4, 16
217 PPC64_LVS(4)
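Line 204 of the little-endian variant contains neither search token, so it is not listed; presumably it is the doubleword swap the comment on line 198 describes. A sketch of the likely macro pair, assuming the usual __LITTLE_ENDIAN__ guard and an xxswapd fix-up:

    #if defined(__LITTLE_ENDIAN__)   // guard name is an assumption
    // xxswapd undoes lxvd2x's doubleword ordering on LE targets
    #define PPC64_LVS(n)    \
      lxvd2x  n, 0, 4      ;\
      xxswapd n, n         ;\
      addi    4, 4, 16
    #else
    #define PPC64_LVS(n)    \
      lxvd2x  n, 0, 4      ;\
      addi    4, 4, 16
    #endif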
248 addi 4, 3, PPC64_OFFS_FP + n * 16 ;\
249 lxvd2x n, 0, 4 ;\
253 addi 4, 3, PPC64_OFFS_FP + n * 16 ;\
254 lxvd2x n, 0, 4
266 andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n)) ;\
272 andi. 0, 5, (1 PPC_LEFT_SHIFT(63-n)) ;\
328 PPC64_LF(4)
361 std 0, 0(4) ;\
363 std 0, 8(4) ;\
364 lvx n, 0, 4
374 andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n)) ;\
380 andi. 0, 5, (1 PPC_LEFT_SHIFT(31-n)) ;\
392 subi 4, 1, 16
393 // r4 is now a 16-byte aligned pointer into the red zone
394 // the _vectorScalarRegisters may not be 16-byte aligned
401 PPC64_CLV_UNALIGNEDl(4)
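Lines 360 and 362 are absent from the match set; presumably they are the `ld` instructions that pull each vector half out of the (possibly unaligned) context before the 16-byte-aligned lvx. A sketch, with PPC64_OFFS_V standing in for whatever offset constant the file actually uses:

    ld   0, (PPC64_OFFS_V + n * 16)(3)     ;\
    std  0, 0(4)                           ;\
    ld   0, (PPC64_OFFS_V + n * 16 + 8)(3) ;\
    std  0, 8(4)                           ;\
    lvx  n, 0, 4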
449 PPC64_LR(4)
508 lfd 4, 192(3)
542 stw 0, 0(4) SEPARATOR \
543 lwz 0, 424+_index*16+4(3) SEPARATOR \
544 stw 0, 4(4) SEPARATOR \
546 stw 0, 8(4) SEPARATOR \
548 stw 0, 12(4) SEPARATOR \
549 lvx _index, 0, 4
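Only the matching lines of this macro appear above; lines 541, 545, and 547 are presumably the lwz fetches at offsets +0, +8, and +12, making the body a word-by-word copy into the aligned buffer at r4. A reconstruction (the real name is LOAD_VECTOR_UNALIGNEDl per line 586; the _SKETCH suffix marks this as a guess):

    #define LOAD_VECTOR_SKETCH(_index)          \
      lwz 0, 424+_index*16(3)     SEPARATOR     \
      stw 0, 0(4)                 SEPARATOR     \
      lwz 0, 424+_index*16+4(3)   SEPARATOR     \
      stw 0, 4(4)                 SEPARATOR     \
      lwz 0, 424+_index*16+8(3)   SEPARATOR     \
      stw 0, 8(4)                 SEPARATOR     \
      lwz 0, 424+_index*16+12(3)  SEPARATOR     \
      stw 0, 12(4)                SEPARATOR     \
      lvx _index, 0, 4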
559 andis. 0, 5, (1 PPC_LEFT_SHIFT(15-_index)) SEPARATOR \
565 andi. 0, 5, (1 PPC_LEFT_SHIFT(31-_index)) SEPARATOR \
577 subi 4, 1, 16
578 rlwinm 4, 4, 0, 0, 27 // mask off low 4 bits (16-byte align r4)
579 // r4 is now a 16-byte aligned pointer into the red zone
580 // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
586 LOAD_VECTOR_UNALIGNEDl(4)
625 lwz 4, 24(3) // do r4 now
682 // could clobber the de-allocated portion of the stack after sp has been restored
717 @ r8-r11: ldm into r1-r4, then mov to r8-r11
719 ldm r0!, {r1-r4}
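The mov instructions promised by the comment on line 717 contain neither search token, so they are not listed. The presumable pattern, since Thumb-1 ldm cannot write the high registers directly:

    ldm r0!, {r1-r4}   @ stage the values destined for r8-r11 in low registers
    mov r8, r1
    mov r9, r2
    mov r10, r3
    mov r11, r4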
725 @ r12 does not need loading, it is the intra-procedure-call scratch register
730 ldm r0, {r0-r7}
734 @ 32bit thumb-2 restrictions for ldm:
737 ldm lr, {r0-r12}
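The restriction being worked around: a 32-bit Thumb-2 ldm may not include sp (r13) in its list, and the saved pc is not restored this way here, so both presumably get separate single loads. A sketch, assuming a flat r0-r15 word array (offsets 52 and 60 follow from that assumption):

    ldm lr, {r0-r12}    @ everything ldm may legally restore
    ldr sp, [lr, #52]   @ r13 restored with a plain load
    ldr lr, [lr, #60]   @ saved pc into lr; a final bx lr resumes there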
757 .fpu vfpv3-d16
767 vldmia r0, {d0-d15}
778 .fpu vfpv3-d16
781 vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
795 vldmia r0, {d16-d31}
840 ldc2 p1, cr8, [r0], #4 @ wldrw wCGR0, [r0], #4
841 ldc2 p1, cr9, [r0], #4 @ wldrw wCGR1, [r0], #4
842 ldc2 p1, cr10, [r0], #4 @ wldrw wCGR2, [r0], #4
843 ldc2 p1, cr11, [r0], #4 @ wldrw wCGR3, [r0], #4
860 l.lwz r1, 4(r3)
941 r1 = memw(r0+#4)
950 // thread state pointer is in a0 ($4)
959 ldc1 $f0, (4 * 36 + 8 * 0)($4)
960 ldc1 $f2, (4 * 36 + 8 * 2)($4)
961 ldc1 $f4, (4 * 36 + 8 * 4)($4)
962 ldc1 $f6, (4 * 36 + 8 * 6)($4)
963 ldc1 $f8, (4 * 36 + 8 * 8)($4)
964 ldc1 $f10, (4 * 36 + 8 * 10)($4)
965 ldc1 $f12, (4 * 36 + 8 * 12)($4)
966 ldc1 $f14, (4 * 36 + 8 * 14)($4)
967 ldc1 $f16, (4 * 36 + 8 * 16)($4)
968 ldc1 $f18, (4 * 36 + 8 * 18)($4)
969 ldc1 $f20, (4 * 36 + 8 * 20)($4)
970 ldc1 $f22, (4 * 36 + 8 * 22)($4)
971 ldc1 $f24, (4 * 36 + 8 * 24)($4)
972 ldc1 $f26, (4 * 36 + 8 * 26)($4)
973 ldc1 $f28, (4 * 36 + 8 * 28)($4)
974 ldc1 $f30, (4 * 36 + 8 * 30)($4)
976 ldc1 $f0, (4 * 36 + 8 * 0)($4)
977 ldc1 $f1, (4 * 36 + 8 * 1)($4)
978 ldc1 $f2, (4 * 36 + 8 * 2)($4)
979 ldc1 $f3, (4 * 36 + 8 * 3)($4)
980 ldc1 $f4, (4 * 36 + 8 * 4)($4)
981 ldc1 $f5, (4 * 36 + 8 * 5)($4)
982 ldc1 $f6, (4 * 36 + 8 * 6)($4)
983 ldc1 $f7, (4 * 36 + 8 * 7)($4)
984 ldc1 $f8, (4 * 36 + 8 * 8)($4)
985 ldc1 $f9, (4 * 36 + 8 * 9)($4)
986 ldc1 $f10, (4 * 36 + 8 * 10)($4)
987 ldc1 $f11, (4 * 36 + 8 * 11)($4)
988 ldc1 $f12, (4 * 36 + 8 * 12)($4)
989 ldc1 $f13, (4 * 36 + 8 * 13)($4)
990 ldc1 $f14, (4 * 36 + 8 * 14)($4)
991 ldc1 $f15, (4 * 36 + 8 * 15)($4)
992 ldc1 $f16, (4 * 36 + 8 * 16)($4)
993 ldc1 $f17, (4 * 36 + 8 * 17)($4)
994 ldc1 $f18, (4 * 36 + 8 * 18)($4)
995 ldc1 $f19, (4 * 36 + 8 * 19)($4)
996 ldc1 $f20, (4 * 36 + 8 * 20)($4)
997 ldc1 $f21, (4 * 36 + 8 * 21)($4)
998 ldc1 $f22, (4 * 36 + 8 * 22)($4)
999 ldc1 $f23, (4 * 36 + 8 * 23)($4)
1000 ldc1 $f24, (4 * 36 + 8 * 24)($4)
1001 ldc1 $f25, (4 * 36 + 8 * 25)($4)
1002 ldc1 $f26, (4 * 36 + 8 * 26)($4)
1003 ldc1 $f27, (4 * 36 + 8 * 27)($4)
1004 ldc1 $f28, (4 * 36 + 8 * 28)($4)
1005 ldc1 $f29, (4 * 36 + 8 * 29)($4)
1006 ldc1 $f30, (4 * 36 + 8 * 30)($4)
1007 ldc1 $f31, (4 * 36 + 8 * 31)($4)
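The operand arithmetic in these ldc1 lines: the FP save area evidently begins after 36 word-sized slots, i.e. at byte offset 4 * 36 = 144, with each double at an 8-byte stride. Spelled out for one case:

    ldc1 $f30, (4 * 36 + 8 * 30)($4)   # 144 + 240 = 384, i.e. ldc1 $f30, 384($4)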
1012 lw $8, (4 * 33)($4)
1014 lw $8, (4 * 34)($4)
1018 lw $1, (4 * 1)($4)
1019 lw $2, (4 * 2)($4)
1020 lw $3, (4 * 3)($4)
1022 lw $5, (4 * 5)($4)
1023 lw $6, (4 * 6)($4)
1024 lw $7, (4 * 7)($4)
1025 lw $8, (4 * 8)($4)
1026 lw $9, (4 * 9)($4)
1027 lw $10, (4 * 10)($4)
1028 lw $11, (4 * 11)($4)
1029 lw $12, (4 * 12)($4)
1030 lw $13, (4 * 13)($4)
1031 lw $14, (4 * 14)($4)
1032 lw $15, (4 * 15)($4)
1033 lw $16, (4 * 16)($4)
1034 lw $17, (4 * 17)($4)
1035 lw $18, (4 * 18)($4)
1036 lw $19, (4 * 19)($4)
1037 lw $20, (4 * 20)($4)
1038 lw $21, (4 * 21)($4)
1039 lw $22, (4 * 22)($4)
1040 lw $23, (4 * 23)($4)
1041 lw $24, (4 * 24)($4)
1042 lw $25, (4 * 25)($4)
1043 lw $26, (4 * 26)($4)
1044 lw $27, (4 * 27)($4)
1045 lw $28, (4 * 28)($4)
1046 lw $29, (4 * 29)($4)
1047 lw $30, (4 * 30)($4)
1049 lw $31, (4 * 32)($4)
1052 lw $4, (4 * 4)($4)
1061 // thread state pointer is in a0 ($4)
1070 ldc1 $f\i, (280+8*\i)($4)
1075 ld $8, (8 * 33)($4)
1077 ld $8, (8 * 34)($4)
1081 ld $1, (8 * 1)($4)
1082 ld $2, (8 * 2)($4)
1083 ld $3, (8 * 3)($4)
1086 ld $\i, (8 * \i)($4)
1089 ld $31, (8 * 32)($4)
1092 ld $4, (8 * 4)($4)
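The $\i load on line 1086 only makes sense inside a GAS .irp loop; the .irp/.endr lines themselves do not match the search. The idiom, with an illustrative register list:

    .irp i, 5,6,7,8,9,10      # assembler repeats the body once per listed value
      ld $\i, (8 * \i)($4)    # expands to ld $5, 40($4); ld $6, 48($4); ...
    .endr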
1182 .irp i,2,3,4,5,6,7,8,9
1211 // Restore GPRs - skipping %r0 and %r1
1243 ld.d $a0, $a0, (8 * 4) // restore $a0 last
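A closing note on ordering that lines 1052, 1092, and 1243 all share: the register doubling as the context pointer ($4 on MIPS, $a0 on LoongArch) must be reloaded last, since every earlier load still needs it as the base address. Schematically:

    # ...every other register already reloaded from ($a0) above...
    ld.d $a0, $a0, (8 * 4)   # only now is it safe to clobber the base itself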