Lines Matching +full:8 +full:- +full:bit

48 /* Word-align the destination buffer */
64 ands ip, r1, #0x03 /* Is src also word-aligned? */
67 /* Quad-align the destination buffer */
70 stmfd sp!, {r4-r9} /* Free up some registers */
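
The prologue above copies leading bytes until the destination is word-aligned, tests the source's low two bits ("ands ip, r1, #0x03") to decide whether the fast word-copy loop can be used or a shift-combining path is needed, and saves r4-r9 for the bulk loop. A rough C sketch of that dispatch, with plain loops standing in for the optimized assembly paths (the helper name is illustrative, not from the .S file):

#include <stddef.h>
#include <stdint.h>

/* Sketch of the prologue logic: copy bytes until dst is word-aligned, then
 * branch on the source's remaining misalignment (the "ands ip, r1, #0x03"
 * test).  The simple loops below stand in for the optimized paths. */
static void *prologue_sketch(void *dst, const void *src, size_t len)
{
    unsigned char *d = dst;
    const unsigned char *s = src;

    /* "Word-align the destination buffer" */
    while (((uintptr_t)d & 0x03) != 0 && len != 0) {
        *d++ = *s++;
        len--;
    }

    if (((uintptr_t)s & 0x03) == 0) {
        /* "Is src also word-aligned?" -- yes: fast word path. */
        while (len >= 4) {
            *(uint32_t *)(void *)d = *(const uint32_t *)(const void *)s;
            d += 4; s += 4; len -= 4;
        }
    }
    /* Misaligned source or short tail: finish a byte at a time. */
    while (len-- != 0)
        *d++ = *s++;
    return dst;
}
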
80 ldr r4, [r1], #0x04 /* LD:00-03 */
81 ldr r5, [r1], #0x04 /* LD:04-07 */
83 ldr r6, [r1], #0x04 /* LD:08-0b */
84 ldr r7, [r1], #0x04 /* LD:0c-0f */
85 ldr r8, [r1], #0x04 /* LD:10-13 */
86 ldr r9, [r1], #0x04 /* LD:14-17 */
87 strd r4, [r3], #0x08 /* ST:00-07 */
88 ldr r4, [r1], #0x04 /* LD:18-1b */
89 ldr r5, [r1], #0x04 /* LD:1c-1f */
90 strd r6, [r3], #0x08 /* ST:08-0f */
91 ldr r6, [r1], #0x04 /* LD:20-23 */
92 ldr r7, [r1], #0x04 /* LD:24-27 */
94 strd r8, [r3], #0x08 /* ST:10-17 */
95 ldr r8, [r1], #0x04 /* LD:28-2b */
96 ldr r9, [r1], #0x04 /* LD:2c-2f */
97 strd r4, [r3], #0x08 /* ST:18-1f */
98 ldr r4, [r1], #0x04 /* LD:30-33 */
99 ldr r5, [r1], #0x04 /* LD:34-37 */
100 strd r6, [r3], #0x08 /* ST:20-27 */
101 ldr r6, [r1], #0x04 /* LD:38-3b */
102 ldr r7, [r1], #0x04 /* LD:3c-3f */
103 strd r8, [r3], #0x08 /* ST:28-2f */
104 ldr r8, [r1], #0x04 /* LD:40-43 */
105 ldr r9, [r1], #0x04 /* LD:44-47 */
107 strd r4, [r3], #0x08 /* ST:30-37 */
108 ldr r4, [r1], #0x04 /* LD:48-4b */
109 ldr r5, [r1], #0x04 /* LD:4c-4f */
110 strd r6, [r3], #0x08 /* ST:38-3f */
111 ldr r6, [r1], #0x04 /* LD:50-53 */
112 ldr r7, [r1], #0x04 /* LD:54-57 */
113 strd r8, [r3], #0x08 /* ST:40-47 */
114 ldr r8, [r1], #0x04 /* LD:58-5b */
115 ldr r9, [r1], #0x04 /* LD:5c-5f */
116 strd r4, [r3], #0x08 /* ST:48-4f */
117 ldr r4, [r1], #0x04 /* LD:60-63 */
118 ldr r5, [r1], #0x04 /* LD:64-67 */
120 strd r6, [r3], #0x08 /* ST:50-57 */
121 ldr r6, [r1], #0x04 /* LD:68-6b */
122 ldr r7, [r1], #0x04 /* LD:6c-6f */
123 strd r8, [r3], #0x08 /* ST:58-5f */
124 ldr r8, [r1], #0x04 /* LD:70-73 */
125 ldr r9, [r1], #0x04 /* LD:74-77 */
126 strd r4, [r3], #0x08 /* ST:60-67 */
127 ldr r4, [r1], #0x04 /* LD:78-7b */
128 ldr r5, [r1], #0x04 /* LD:7c-7f */
129 strd r6, [r3], #0x08 /* ST:68-6f */
130 strd r8, [r3], #0x08 /* ST:70-77 */
132 strd r4, [r3], #0x08 /* ST:78-7f */
137 ldmfdeq sp!, {r4-r9}
162 ldmfdeq sp!, {r4-r9}
182 /* At least 8 bytes remaining */
188 /* Less than 8 bytes remaining */
189 ldmfd sp!, {r4-r9}
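
When both pointers are word-aligned, the loop at file lines 80-132 moves 0x80 bytes per iteration; the LD/ST offsets in the comments show that loads run a few words ahead of the paired strd stores, so each store's data is already in registers when it issues. Once fewer than 0x80 bytes remain, the code falls through to an "at least 8 bytes remaining" loop and finally a byte tail before restoring r4-r9. A plain C equivalent of that aligned path, without the instruction-level scheduling:

#include <stddef.h>
#include <stdint.h>

/* C sketch of the aligned bulk path: 0x80-byte blocks (the LD:00-7f /
 * ST:00-7f loop above), then 8-byte chunks ("At least 8 bytes remaining"),
 * then a byte tail ("Less than 8 bytes remaining").  Assumes both pointers
 * are already 32-bit aligned. */
static void aligned_copy_sketch(uint32_t *d, const uint32_t *s, size_t len)
{
    while (len >= 0x80) {                /* 32 words = 0x80 bytes per pass */
        for (int i = 0; i < 32; i++)
            d[i] = s[i];
        d += 32; s += 32; len -= 0x80;
    }
    while (len >= 8) {                   /* two words per pass, as strd would */
        d[0] = s[0];
        d[1] = s[1];
        d += 2; s += 2; len -= 8;
    }
    {
        unsigned char *db = (unsigned char *)d;
        const unsigned char *sb = (const unsigned char *)s;
        while (len-- != 0)
            *db++ = *sb++;
    }
}
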
211 stmfd sp!, {r4-r7}
220 mov r4, ip, lsr #8
227 mov r5, r5, lsr #8
229 mov r6, r6, lsr #8
231 mov r7, r7, lsr #8
242 ldmfdeq sp!, {r4-r7}
249 mov r4, ip, lsr #8
281 ldmfdeq sp!, {r4-r7}
304 orr r4, r4, r5, lsl #8
306 orr r5, r5, r6, lsl #8
308 orr r6, r6, r7, lsl #8
310 orr r7, r7, ip, lsl #8
320 ldmfdeq sp!, {r4-r7}
330 orr r4, r4, ip, lsl #8
336 ldmfd sp!, {r4-r7}
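
File lines 211-336 handle a source that is still misaligned after the destination has been word-aligned: each destination word is assembled from two consecutive aligned source loads, shifted by the byte offset and OR-ed together (the lsr #8 / lsl #8 operations visible here are one half of each combine; the matching 24-bit shifts presumably fall on lines the search did not pick up). A little-endian C sketch of that reconstruction for a source offset of 1, 2 or 3 bytes:

#include <stddef.h>
#include <stdint.h>

/* Rebuild word-sized chunks from a byte-misaligned source, little-endian.
 * 'off' is src & 3 (1, 2 or 3); 's' points at the aligned word containing
 * the first wanted byte.  This mirrors the shift-and-or combining in the
 * misaligned-source loops; it is a sketch, not the routine itself. */
static void src_unaligned_sketch(uint32_t *d, const uint32_t *s,
                                 size_t nwords, unsigned off)
{
    uint32_t cur = *s++;
    while (nwords--) {
        uint32_t next = *s++;
        /* e.g. off == 1: keep the top 3 bytes of 'cur' and pull in the low
         * byte of 'next' -> (cur >> 8) | (next << 24).                   */
        *d++ = (cur >> (8 * off)) | (next << (32 - 8 * off));
        cur = next;
    }
}
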
396 * 0000: dst is 32-bit aligned, src is 32-bit aligned
404 * 0001: dst is 32-bit aligned, src is 8-bit aligned
406 ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
408 mov r3, r3, lsr #8 /* r3 = .210 */
415 * 0010: dst is 32-bit aligned, src is 16-bit aligned
425 * 0011: dst is 32-bit aligned, src is 8-bit aligned
427 ldr r3, [r1, #-3] /* BE:r3 = xxx0 LE:r3 = 0xxx */
430 orr r3, r3, r2, lsl #8 /* r3 = 3210 */
436 * 0100: dst is 8-bit aligned, src is 32-bit aligned
440 mov r3, r2, lsr #8
448 * 0101: dst is 8-bit aligned, src is 8-bit aligned
460 * 0110: dst is 8-bit aligned, src is 16-bit aligned
465 mov r2, r2, lsr #8 /* r2 = ...1 */
466 orr r2, r2, r3, lsl #8 /* r2 = .321 */
467 mov r3, r3, lsr #8 /* r3 = ...3 */
474 * 0111: dst is 8-bit aligned, src is 8-bit aligned
486 * 1000: dst is 16-bit aligned, src is 32-bit aligned
496 * 1001: dst is 16-bit aligned, src is 8-bit aligned
498 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
500 mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
503 orr r2, r2, r3, lsl #8 /* r2 = xx32 */
509 * 1010: dst is 16-bit aligned, src is 16-bit aligned
519 * 1011: dst is 16-bit aligned, src is 8-bit aligned
522 ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */
523 mov r1, r3, lsr #8 /* BE:r1 = .123 LE:r1 = .x32 */
525 mov r3, r3, lsl #8 /* r3 = 321. */
532 * 1100: dst is 8-bit aligned, src is 32-bit aligned
536 mov r3, r2, lsr #8
544 * 1101: dst is 8-bit aligned, src is 8-bit aligned
556 * 1110: dst is 8-bit aligned, src is 16-bit aligned
561 mov r2, r2, lsr #8 /* r2 = ...1 */
562 orr r2, r2, r3, lsl #8 /* r2 = .321 */
564 mov r3, r3, lsr #8 /* r3 = ...3 */
570 * 1111: dst is 8-bit aligned, src is 8-bit aligned
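
From file line 396 onward the routine special-cases very short copies. The 0000-1111 labels pack the destination's low two address bits into the upper half of a 4-bit selector and the source's into the lower half, and every combination gets its own hand-written load/store sequence; the dispatch itself is not visible among these matches. A C rendering of the idea for the 4-byte case, with only a few of the sixteen combinations spelled out (little-endian assumed for the split stores, memcpy standing in for the rest):

#include <stdint.h>
#include <string.h>

/* Sketch of the 4-bit alignment selector used by the short-copy special
 * cases: selector = (dst & 3) << 2 | (src & 3).  Illustrative only. */
static void copy4_sketch(unsigned char *dst, const unsigned char *src)
{
    switch ((((uintptr_t)dst & 3) << 2) | ((uintptr_t)src & 3)) {
    case 0x0:   /* 0000: dst is 32-bit aligned, src is 32-bit aligned */
        *(uint32_t *)(void *)dst = *(const uint32_t *)(const void *)src;
        break;
    case 0x2:   /* 0010: dst is 32-bit aligned, src is 16-bit aligned */
        ((uint16_t *)(void *)dst)[0] = ((const uint16_t *)(const void *)src)[0];
        ((uint16_t *)(void *)dst)[1] = ((const uint16_t *)(const void *)src)[1];
        break;
    case 0x8: { /* 1000: dst is 16-bit aligned, src is 32-bit aligned */
        uint32_t w = *(const uint32_t *)(const void *)src;   /* LE lanes 3210 */
        ((uint16_t *)(void *)dst)[0] = (uint16_t)w;           /* bytes 0-1    */
        ((uint16_t *)(void *)dst)[1] = (uint16_t)(w >> 16);   /* bytes 2-3    */
        break;
    }
    default:    /* the other thirteen combinations */
        memcpy(dst, src, 4);
        break;
    }
}
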
596 * 0000: dst is 32-bit aligned, src is 32-bit aligned
606 * 0001: dst is 32-bit aligned, src is 8-bit aligned
608 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
610 mov r2, r2, lsr #8 /* r2 = .210 */
612 mov r3, r3, lsr #8 /* BE:r3 = .345 LE:r3 = .x54 */
619 * 0010: dst is 32-bit aligned, src is 16-bit aligned
631 * 0011: dst is 32-bit aligned, src is 8-bit aligned
633 ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */
637 orr r2, r2, r3, lsl #8 /* r2 = 3210 */
638 mov r1, r1, lsl #8 /* r1 = xx5. */
646 * 0100: dst is 8-bit aligned, src is 32-bit aligned
650 mov r1, r3, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */
654 orr r3, r3, r2, lsl #8 /* r3 = .543 */
655 mov r2, r2, lsr #8 /* r2 = ...5 */
662 * 0101: dst is 8-bit aligned, src is 8-bit aligned
676 * 0110: dst is 8-bit aligned, src is 16-bit aligned
683 mov r3, r1, lsr #8 /* r3 = .543 */
685 mov r3, r2, lsr #8 /* r3 = ...1 */
686 orr r3, r3, r1, lsl #8 /* r3 = 4321 */
692 * 0111: dst is 8-bit aligned, src is 8-bit aligned
706 * 1000: dst is 16-bit aligned, src is 32-bit aligned
718 * 1001: dst is 16-bit aligned, src is 8-bit aligned
720 ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
722 mov r1, r3, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
723 mov r2, r2, lsl #8 /* r2 = 543. */
731 * 1010: dst is 16-bit aligned, src is 16-bit aligned
741 * 1011: dst is 16-bit aligned, src is 8-bit aligned
746 orr r3, r3, r2, lsl #8 /* r3 = 3210 */
748 orr r1, r1, r2, lsr #8 /* r1 = 5432 */
755 * 1100: dst is 8-bit aligned, src is 32-bit aligned
760 mov r2, r2, lsr #8 /* r2 = .321 */
762 mov r1, r1, lsr #8 /* r1 = ...5 */
769 * 1101: dst is 8-bit aligned, src is 8-bit aligned
783 * 1110: dst is 8-bit aligned, src is 16-bit aligned
788 mov r2, r2, lsr #8 /* r2 = ...1 */
789 orr r2, r2, r1, lsl #8 /* r2 = 4321 */
797 * 1111: dst is 8-bit aligned, src is 8-bit aligned
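
The comments in these cases use a compact byte-lane notation: on little-endian, "r2 = 210x" means the top three bytes of r2 hold source bytes 2, 1, 0 and the low byte is don't-care, which is exactly what an aligned ldr at [src - 1] returns when src sits at byte offset 1. That load touches a byte outside the range being copied; the assembly relies on it staying inside one aligned word (and therefore one page), which strict C cannot express, so the demonstration below pads the buffer instead:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Demonstrate the "LE:r2 = 210x" lane notation: load the aligned word that
 * encloses a source at byte offset 1 and show which lanes hold which source
 * bytes.  The buffer is padded so the word load is legal C. */
int main(void)
{
    _Alignas(4) unsigned char buf[8] =
        { 0xEE, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
    const unsigned char *src = buf + 1;   /* source bytes 0,1,2 = 0x00,0x11,0x22 */

    uint32_t w;
    memcpy(&w, src - 1, 4);               /* like "ldr r2, [r1, #-1]" */
    /* Little-endian: w == 0x221100EE, i.e. "210x" (x is the 0xEE filler). */
    printf("w      = %08x  lanes 3..0 = %02x %02x %02x %02x\n",
           (unsigned)w, (unsigned)(w >> 24), (unsigned)((w >> 16) & 0xff),
           (unsigned)((w >> 8) & 0xff), (unsigned)(w & 0xff));
    printf("w >> 8 = %08x  (\".210\", as after \"mov r2, r2, lsr #8\")\n",
           (unsigned)(w >> 8));
    return 0;
}
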
810 * Special case for 8 byte copies
823 * 0000: dst is 32-bit aligned, src is 32-bit aligned
833 * 0001: dst is 32-bit aligned, src is 8-bit aligned
835 ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
838 mov r3, r3, lsr #8 /* r3 = .210 */
841 orr r2, r1, r2, lsr #8 /* r2 = 7654 */
848 * 0010: dst is 32-bit aligned, src is 16-bit aligned
862 * 0011: dst is 32-bit aligned, src is 8-bit aligned
867 orr r3, r3, r2, lsl #8 /* r3 = 3210 */
869 orr r2, r2, r1, lsl #8 /* r2 = 7654 */
876 * 0100: dst is 8-bit aligned, src is 32-bit aligned
883 mov r1, r3, lsr #8 /* r1 = .321 */
885 orr r3, r3, r2, lsl #8 /* r3 = 6543 */
892 * 0101: dst is 8-bit aligned, src is 8-bit aligned
906 * 0110: dst is 8-bit aligned, src is 16-bit aligned
912 mov ip, r1, lsr #8 /* ip = ...7 */
914 mov ip, r2, lsr #8 /* ip = ...1 */
915 orr ip, ip, r3, lsl #8 /* ip = 4321 */
916 mov r3, r3, lsr #8 /* r3 = .543 */
924 * 0111: dst is 8-bit aligned, src is 8-bit aligned
940 * 1000: dst is 16-bit aligned, src is 32-bit aligned
954 * 1001: dst is 16-bit aligned, src is 8-bit aligned
956 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
959 mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
962 orr r1, r1, r3, lsl #8 /* r1 = 5432 */
964 orr r3, r3, ip, lsl #8 /* r3 = ..76 */
971 * 1010: dst is 16-bit aligned, src is 16-bit aligned
983 * 1011: dst is 16-bit aligned, src is 8-bit aligned
988 mov r1, r3, lsr #8 /* BE:r1 = .567 LE:r1 = .x76 */
991 orr r3, r3, r2, lsr #8 /* r3 = 5432 */
992 orr r2, ip, r2, lsl #8 /* r2 = 3210 */
999 * 1100: dst is 8-bit aligned, src is 32-bit aligned
1003 mov r1, r3, lsr #8 /* BE:r1 = .456 LE:r1 = .765 */
1008 mov r2, r2, lsr #8 /* r2 = .321 */
1015 * 1101: dst is 8-bit aligned, src is 8-bit aligned
1031 * 1110: dst is 8-bit aligned, src is 16-bit aligned
1037 mov ip, r2, lsr #8 /* ip = ...1 */
1038 orr ip, ip, r3, lsl #8 /* ip = 4321 */
1039 mov r2, r1, lsr #8 /* r2 = ...7 */
1041 mov r1, r1, lsl #8 /* r1 = .76. */
1049 * 1111: dst is 8-bit aligned, src is 8-bit aligned
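
On the store side the selector's upper bits matter for the same reason: the destination's alignment limits how wide each store can be, so a byte-aligned dst presumably starts with strb, a 16-bit-aligned dst with strh, and only a 32-bit-aligned dst can use str/strd throughout. A small C sketch of that choice for an 8-byte copy to a 16-bit-aligned destination (the 10xx rows above); memcpy is used for the individual stores so the compiler emits correctly aligned accesses, and the strh/str/strh structure is a guess at what case 1000 does:

#include <stdint.h>
#include <string.h>

/* Sketch: store 8 source bytes to a 16-bit-aligned destination using the
 * widest stores that alignment permits.  Little-endian lane packing, as in
 * the listing's comments. */
static void store8_dst_halfword_aligned(unsigned char *dst,
                                        const unsigned char b[8])
{
    uint16_t lead  = (uint16_t)(b[0] | (b[1] << 8));                  /* bytes 0-1 */
    uint32_t mid   = (uint32_t)b[2] | ((uint32_t)b[3] << 8) |
                     ((uint32_t)b[4] << 16) | ((uint32_t)b[5] << 24); /* bytes 2-5 */
    uint16_t trail = (uint16_t)(b[6] | (b[7] << 8));                  /* bytes 6-7 */

    memcpy(dst, &lead, 2);          /* strh at dst     (16-bit aligned) */
    memcpy(dst + 2, &mid, 4);       /* str  at dst + 2 (32-bit aligned) */
    memcpy(dst + 6, &trail, 2);     /* strh at dst + 6                  */
}
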
1076 * 0000: dst is 32-bit aligned, src is 32-bit aligned
1088 * 0001: dst is 32-bit aligned, src is 8-bit aligned
1093 ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */
1095 orr r2, r2, ip, lsr #8 /* r2 = BA98 */
1098 orr r2, r2, r3, lsr #8 /* r2 = 7654 */
1099 mov r1, r1, lsr #8 /* r1 = .210 */
1107 * 0010: dst is 32-bit aligned, src is 16-bit aligned
1125 * 0011: dst is 32-bit aligned, src is 8-bit aligned
1131 orr r2, r2, r3, lsl #8 /* r2 = 3210 */
1134 orr r3, r3, ip, lsl #8 /* r3 = 7654 */
1135 mov r1, r1, lsl #8 /* r1 = BA9. */
1143 * 0100: dst is 8-bit aligned (byte 1), src is 32-bit aligned
1148 mov r1, r2, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */
1152 orr r2, r1, r3, lsl #8 /* r2 = 6543 */
1154 orr r1, r1, ip, lsl #8 /* r1 = A987 */
1163 * 0101: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 1)
1179 * 0110: dst is 8-bit aligned (byte 1), src is 16-bit aligned
1186 mov r2, r2, lsr #8 /* r2 = ...1 */
1187 orr r2, r2, r3, lsl #8 /* r2 = 4321 */
1189 mov r2, r3, lsr #8 /* r2 = .543 */
1191 mov r2, ip, lsr #8 /* r2 = .987 */
1193 mov r1, r1, lsr #8 /* r1 = ...B */
1201 * 0111: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 3)
1221 * 1000: dst is 16-bit aligned, src is 32-bit aligned
1239 * 1001: dst is 16-bit aligned, src is 8-bit aligned (byte 1)
1241 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
1243 mov ip, r2, lsr #8 /* BE:ip = .x01 LE:ip = .210 */
1248 orr r2, r2, r3, lsl #8 /* r2 = 5432 */
1250 orr r3, r3, ip, lsl #8 /* r3 = 9876 */
1251 mov r1, r1, lsl #8 /* r1 = ..B. */
1260 * 1010: dst is 16-bit aligned, src is 16-bit aligned
1274 * 1011: dst is 16-bit aligned, src is 8-bit aligned (byte 3)
1278 mov ip, r2, lsr #8 /* BE:ip = .9AB LE:ip = .xBA */
1283 orr r2, r2, r3, lsr #8 /* r2 = 9876 */
1285 orr r3, r3, ip, lsr #8 /* r3 = 5432 */
1286 orr r1, r1, ip, lsl #8 /* r1 = 3210 */
1294 * 1100: dst is 8-bit aligned (byte 3), src is 32-bit aligned
1300 mov r3, r2, lsr #8 /* r3 = .321 */
1303 mov r3, ip, lsr #8 /* r3 = .765 */
1306 mov r1, r1, lsr #8 /* r1 = .BA9 */
1314 * 1101: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 1)
1319 ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */
1327 mov r1, r1, lsr #8 /* r1 = .210 */
1335 * 1110: dst is 8-bit aligned (byte 3), src is 16-bit aligned
1342 mov r2, r2, lsr #8 /* r2 = ...1 */
1343 orr r2, r2, r3, lsl #8 /* r2 = 4321 */
1345 orr r3, r3, ip, lsl #8 /* r3 = 8765 */
1347 orr ip, ip, r1, lsl #8 /* ip = .BA9 */
1348 mov r1, r1, lsr #8 /* r1 = ...B */
1357 * 1111: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 3)
1373 .section .note.GNU-stack,"",%progbits
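
With sixteen alignment combinations per size class, a regression in just one path is easy to miss, so a sweep over every (dst & 3, src & 3, length) triple is the natural way to exercise a routine like this. A minimal harness sketch; copy_under_test is a placeholder to be pointed at the assembly symbol when linking against the .S file:

#include <stdio.h>
#include <string.h>

/* Placeholder for the routine under test; retarget this at the real symbol
 * when linking against the assembly file. */
static void *(*copy_under_test)(void *, const void *, size_t) = memcpy;

int main(void)
{
    _Alignas(8) unsigned char src[256], dst[256], ref[256];

    for (size_t i = 0; i < sizeof src; i++)
        src[i] = (unsigned char)(i * 7 + 3);

    /* Sweep every source/destination misalignment and a range of lengths,
     * covering the 4/6/8/12-byte special cases and the big-loop sizes. */
    for (unsigned so = 0; so < 4; so++) {
        for (unsigned dof = 0; dof < 4; dof++) {
            for (size_t len = 0; len <= 200; len++) {
                memset(dst, 0xA5, sizeof dst);
                memset(ref, 0xA5, sizeof ref);
                copy_under_test(dst + dof, src + so, len);
                memcpy(ref + dof, src + so, len);
                if (memcmp(dst, ref, sizeof dst) != 0) {
                    printf("FAIL: dst&3=%u src&3=%u len=%zu\n", dof, so, len);
                    return 1;
                }
            }
        }
    }
    printf("all alignment/length combinations OK\n");
    return 0;
}
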