/linux/arch/arm/lib/

io-writesw-armv3.S:
    25: orr r3, r3, r3, lsl #16
    43: mov ip, r3, lsl #16
    48: orr ip, ip, ip, lsl #16
    51: mov ip, r4, lsl #16
    56: orr ip, ip, ip, lsl #16
    59: mov ip, r5, lsl #16
    64: orr ip, ip, ip, lsl #16
    67: mov ip, r6, lsl #16
    72: orr ip, ip, ip, lsl #16
    86: mov ip, r3, lsl #16
    [all …]

bitops.h:
    14: add r1, r1, r0, lsl #2 @ Get word offset
    20: mov r3, r2, lsl r3
    39: add r1, r1, r0, lsl #2 @ Get word offset
    40: mov r3, r2, lsl r3 @ create mask
    77: mov r3, r3, lsl r2
    79: ldr r2, [r1, r0, lsl #2]
    81: str r2, [r1, r0, lsl #2]
    104: ldr r2, [r1, r0, lsl #2]!
    106: tst r2, r0, lsl r3
    107: \instr r2, r2, r0, lsl r3
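
The bitops.h hits show the two recurring roles of lsl in the kernel's bit
operations: a constant lsl #2 scales a word index into a byte offset, and a
register-specified lsl builds a single-bit mask. A minimal, non-atomic sketch
of that pattern (hypothetical set_bit_sketch routine, GNU as syntax; the real
kernel macros wrap this in ldrex/strex for atomicity):

        .text
        .global set_bit_sketch
    set_bit_sketch:                     @ r0 = bit number, r1 = word array base
        and     r3, r0, #31             @ bit position within its 32-bit word
        mov     r2, #1
        mov     r2, r2, lsl r3          @ build the mask: 1 << (bit % 32)
        mov     r0, r0, lsr #5          @ word index: bit / 32
        ldr     ip, [r1, r0, lsl #2]    @ lsl #2 turns the index into a byte offset
        orr     ip, ip, r2              @ set the bit
        str     ip, [r1, r0, lsl #2]
        bx      lr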

io-readsw-armv3.S:
    36: orr ip, ip, ip, lsl #8
    45: orr r3, r3, r4, lsl #16
    50: orr r4, r4, r5, lsl #16
    55: orr r5, r5, r6, lsl #16
    60: orr r6, r6, lr, lsl #16
    76: orr r3, r3, r4, lsl #16
    81: orr r4, r4, r5, lsl #16
    91: orr r3, r3, r4, lsl #16

io-readsw-armv4.S:
    12: orr \rd, \hw1, \hw2, lsl #16
    14: orr \rd, \hw2, \hw1, lsl #16
    18: .Linsw_align: movs ip, r1, lsl #31
    68: .Lno_insw_4: movs r2, r2, lsl #31
    85: #define pull_hbyte1 lsl #24
    89: #define push_hbyte0 lsl #24
    106: _BE_ONLY_( mov ip, ip, lsl #24 )
    111: orr ip, ip, r3, lsl #8
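
Both readsw implementations rely on the same packing idiom: two 16-bit reads
are merged into one 32-bit word with a single orr ..., lsl #16 (the \hw1/\hw2
macro in the armv4 version flips the operand order for big-endian). A minimal
little-endian sketch (hypothetical pack_halfwords, GNU as):

        .text
        .global pack_halfwords
    pack_halfwords:                     @ r0 = low halfword, r1 = high halfword
        orr     r0, r0, r1, lsl #16     @ result = low | (high << 16)
        bx      lr

The io-writesw entries use the inverse shuffling, mov/orr with lsl #16, to
position halfwords before they are stored.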

div64.S:
    69: mov ip, ip, lsl yl
    70: mov yl, r4, lsl yl
    78: movcc yl, yl, lsl #1
    79: movcc ip, ip, lsl #1
    104: 4: movs xl, xl, lsl #1
    127: mov xl, xl, lsl xh
    132: 7: movs xl, xl, lsl #1
    180: ARM( orr yl, yl, xh, lsl ip )
    181: THUMB( lsl xh, xh, ip )
    183: mov xh, xl, lsl ip

muldi3.S:
    32: bic xl, xl, ip, lsl #16
    33: bic yl, yl, yh, lsl #16
    38: adds xl, xl, yh, lsl #16
    40: adds xl, xl, ip, lsl #16
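
muldi3.S builds a 64-bit product from 16-bit pieces: lsr #16 extracts the
high halves, bic ..., lsl #16 clears them to leave the low halves, and the
cross products are folded back in with an adds/adc pair over lsl #16 and
lsr #16 so carries propagate into the high word. A sketch of the same scheme
(hypothetical umull_sketch; the mul operand order keeps Rd != Rm for
pre-ARMv6 cores):

        .text
        .global umull_sketch
    umull_sketch:                       @ r0 = a, r1 = b; returns r1:r0 = a * b
        mov     r2, r0, lsr #16         @ ahi
        mov     r3, r1, lsr #16         @ bhi
        bic     r0, r0, r2, lsl #16     @ alo
        bic     r1, r1, r3, lsl #16     @ blo
        mul     ip, r2, r3              @ high partial product: ahi * bhi
        mul     r3, r0, r3              @ cross product 1:      alo * bhi
        mul     r0, r1, r0              @ low partial product:  alo * blo
        mul     r1, r2, r1              @ cross product 2:      ahi * blo
        adds    r0, r0, r3, lsl #16     @ fold in cross product 1 ...
        adc     ip, ip, r3, lsr #16     @ ... with carry into the high word
        adds    r0, r0, r1, lsl #16     @ fold in cross product 2
        adc     r1, ip, r1, lsr #16     @ final high word in r1
        bx      lr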

ashldi3.S:
    45: movmi ah, ah, lsl r2
    46: movpl ah, al, lsl r3
    50: mov al, al, lsl r2
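
ashldi3.S is the compiler's 64-bit left-shift helper, and the three hits are
exactly its three cases: the high word for shifts below 32, the high word for
shifts of 32 or more, and the low word. A sketch of the same technique
(hypothetical ashldi3_sketch; assumes the little-endian layout with the low
word in r0, and relies on ARM register-specified shifts of 32 or more
yielding zero):

        .text
        .global ashldi3_sketch
    ashldi3_sketch:                     @ r1:r0 = value, r2 = shift amount (0-63)
        subs    r3, r2, #32             @ r3 = shift - 32, N flag selects the case
        rsb     ip, r2, #32             @ ip = 32 - shift
        movmi   r1, r1, lsl r2          @ shift < 32: high <<= shift ...
        orrmi   r1, r1, r0, lsr ip      @ ... pulling bits up from the low word
        movpl   r1, r0, lsl r3          @ shift >= 32: high = low << (shift - 32)
        mov     r0, r0, lsl r2          @ low <<= shift (0 once shift >= 32)
        bx      lr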

lib1funcs.S:
    48: mov \divisor, \divisor, lsl \result
    49: mov \curbit, \curbit, lsl \result
    59: moveq \divisor, \divisor, lsl #3
    69: movlo \divisor, \divisor, lsl #4
    70: movlo \curbit, \curbit, lsl #4
    77: movlo \divisor, \divisor, lsl #1
    78: movlo \curbit, \curbit, lsl #1
    144: mov \divisor, \divisor, lsl \order
    156: movlo \divisor, \divisor, lsl #4
    164: movlo \divisor, \divisor, lsl #1
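
The lib1funcs.S hits are the divisor-alignment steps of the software divide:
the divisor and a quotient bit are doubled in lockstep until the divisor
covers the dividend, then walked back down while subtracting. A condensed
sketch of that shift-and-subtract scheme (hypothetical udiv32_sketch; the
kernel version unrolls the loops and uses clz where the architecture has it):

        .text
        .global udiv32_sketch
    udiv32_sketch:                      @ r0 = dividend, r1 = divisor (nonzero)
        mov     r2, #1                  @ current quotient bit
        mov     r3, #0                  @ quotient
    1:  cmp     r1, #0x80000000         @ stop before the divisor overflows ...
        cmpcc   r1, r0                  @ ... or once it reaches the dividend
        movcc   r1, r1, lsl #1          @ double divisor and quotient bit
        movcc   r2, r2, lsl #1          @ in lockstep
        bcc     1b
    2:  cmp     r0, r1
        subcs   r0, r0, r1              @ divisor fits: subtract it ...
        orrcs   r3, r3, r2              @ ... and record the quotient bit
        movs    r2, r2, lsr #1          @ step back down; Z ends the loop
        mov     r1, r1, lsr #1
        bne     2b
        mov     r0, r3                  @ return the quotient
        bx      lr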

io-writesw-armv4.S:
    22: .Loutsw_align: movs ip, r1, lsl #31
    55: .Lno_outsw_4: movs r2, r2, lsl #31
    67: #define pull_hbyte0 lsl #8
    71: #define push_hbyte1 lsl #8

csumpartial.S:
    58: orr td0, td0, td3, lsl #8
    60: orr td0, td3, td0, lsl #8
    91: orrne td0, td0, ip, lsl #8
    93: orrne td0, ip, td0, lsl #8

/linux/drivers/scsi/arm/

acornscsi-io.S:
    31: orr r3, r3, r4, lsl #16
    33: orr r4, r4, r6, lsl #16
    36: orr r5, r5, r6, lsl #16
    38: orr r6, r6, ip, lsl #16
    47: orr r3, r3, r4, lsl #16
    49: orr r4, r4, r6, lsl #16
    58: orr r3, r3, r4, lsl #16
    82: mov r3, r4, lsl #16
    85: orr r4, r4, r4, lsl #16
    86: mov r5, r6, lsl #16
    [all …]

/linux/arch/arm64/lib/

strncmp.S:
    54: #define LS_FW lsl
    58: #define LS_BK lsl
    99: lsl data1, data1, pos
    101: lsl data2, data2, pos
    114: lsl limit, tmp1, #3 /* Bits -> bytes. */
    149: lsl data1, data1, pos
    150: lsl data2, data2, pos
    167: neg tmp3, count, lsl #3 /* 64 - bits(bytes beyond align). */
    233: lsl offset, src2, #3
    301: cmp pos, limit, lsl #3

tishift.S:
    16: lsl x1, x1, x2
    18: lsl x2, x0, x2
    26: lsl x1, x0, x1
    39: lsl x3, x1, x3
    61: lsl x3, x1, x3

strcmp.S:
    44: # define LS_FW lsl
    97: lsl data1, data1, shift
    98: lsl data2, data2, shift
    114: neg shift, src2, lsl 3 /* Bits to alignment -64. */
    135: neg shift, src2, lsl 3
    165: lsl tmp, has_nul, shift

strlen.S:
    127: tst tmp2, zeroones, lsl 7
    133: tst tmp2, zeroones, lsl 7
    195: lsl tmp1, srcin, 3
    202: lsl tmp1, tmp4, tmp1 /* Shift (tmp1 & 63). */
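
strlen.S scans a doubleword at a time: with zeroones = 0x0101...01, the
expression (x - zeroones) & ~x sets a byte's top bit exactly when that byte
is zero, and "zeroones, lsl 7" produces the 0x8080...80 mask used to test
those bits. The same trick transcribed to a 32-bit word, to stay in one
assembly dialect with the examples above (hypothetical has_zero_byte):

        .text
        .global has_zero_byte
    has_zero_byte:                      @ r0 = four packed bytes
        ldr     r1, =0x01010101         @ the "zeroones" constant
        sub     r2, r0, r1              @ x - 0x01010101
        bic     r2, r2, r0              @ ... & ~x
        tst     r2, r1, lsl #7          @ ... & 0x80808080
        movne   r0, #1                  @ nonzero iff some byte was 0x00
        moveq   r0, #0
        bx      lr
        .ltorg                          @ emit the literal pool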

/linux/arch/arm/boot/compressed/

ll_char_wr.S:
    40: mov r1, r1, lsl #3
    56: add r0, r0, r5, lsl #3 @ Move to bottom of character
    65: ldr r7, [lr, r7, lsl #2]
    70: ldr r7, [lr, r7, lsl #2]
    84: ldr ip, [lr, ip, lsl #2]
    87: ldr ip, [lr, ip, lsl #2] @ avoid r4
    94: ldr ip, [lr, ip, lsl #2]
    97: ldr ip, [lr, ip, lsl #2] @ avoid r4

/linux/arch/arm/mm/

abort-lv4t.S:
    68: and r9, r8, r7, lsl #1
    70: and r9, r8, r7, lsl #2
    72: and r9, r8, r7, lsl #3
    80: subne r7, r7, r6, lsl #2 @ Undo increment
    81: addeq r7, r7, r6, lsl #2 @ Undo decrement
    95: ldreq r6, [r2, r9, lsl #2] @ { load Rm value }
    110: movs r6, r8, lsl #20 @ Get offset
    127: ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
    136: mov r6, r6, lsl r9 @ 0: LSL #!0
    216: addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
    [all …]

tlb-v7.S:
    46: orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
    47: mov r1, r1, lsl #PAGE_SHIFT
    75: mov r0, r0, lsl #PAGE_SHIFT
    76: mov r1, r1, lsl #PAGE_SHIFT

tlb-v6.S:
    44: orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
    45: mov r1, r1, lsl #PAGE_SHIFT
    75: mov r0, r0, lsl #PAGE_SHIFT
    76: mov r1, r1, lsl #PAGE_SHIFT

/linux/arch/arm/kernel/

phys2virt.S:
    42: teq r3, r0, lsl #21 @ must be 2 MiB aligned
    64: @ lsl <reg>, #21
    68: @ lsl <reg>, #21
    72: @ lsl <reg>, #21
    144: @ phys-to-virt: sub <VA>, <PA>, #offset<31:24>, lsl #24
    145: @ sub <VA>, <PA>, #offset<23:16>, lsl #16
    147: @ virt-to-phys (non-LPAE): add <PA>, <VA>, #offset<31:24>, lsl #24
    148: @ add <PA>, <VA>, #offset<23:16>, lsl #16
    151: @ adds <PAlo>, <VA>, <reg>, lsl #20
    195: orreq ip, ip, r6, lsl #4 @ MOVW -> mask in offset bits 31-24
    [all …]

/linux/arch/m68k/math-emu/

fp_movem.S:
    157: 3: lsl.b #1,%d1
    164: lsl.w #1,%d2
    165: lsl.l #7,%d2
    166: lsl.l #8,%d2
    176: 4: lsl.b #1,%d1
    307: lsl.l #5,%d1
    312: 3: lsl.b #1,%d1
    320: lsl.l #5,%d1
    331: 3: lsl.b #1,%d1
    341: 4: lsl.b #1,%d1

fp_util.S:
    131: lsl.l #8,%d0 | shift mantissa
    168: lsl.l #8,%d0 | shift high mantissa
    169: lsl.l #3,%d0
    182: lsl.l #8,%d0
    183: lsl.l #3,%d0

/linux/arch/arm/include/asm/

assembler.h:
    35: #define lspush lsl
    36: #define get_byte_0 lsl #0
    40: #define put_byte_0 lsl #0
    41: #define put_byte_1 lsl #8
    42: #define put_byte_2 lsl #16
    43: #define put_byte_3 lsl #24
    45: #define lspull lsl
    50: #define get_byte_3 lsl #0
    51: #define put_byte_0 lsl #24
    52: #define put_byte_1 lsl #16
    [all …]

/linux/arch/arm/mach-tegra/

sleep.h:
    54: movne \rd, \rd, lsl #3
    63: movne \rd, \rd, lsl #3
    98: moveq \tmp1, \tmp1, lsl #2
    100: moveq \tmp2, \tmp2, lsl \tmp1

/linux/arch/arm/crypto/

sha1-armv4-large.S:
    71: add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
    87: orr r9,r9,r10,lsl#8
    89: orr r9,r9,r11,lsl#16
    91: orr r9,r9,r12,lsl#24
    112: orr r9,r9,r10,lsl#8
    114: orr r9,r9,r11,lsl#16
    116: orr r9,r9,r12,lsl#24
    137: orr r9,r9,r10,lsl#8
    139: orr r9,r9,r11,lsl#16
    141: orr r9,r9,r12,lsl#24
    [all …]