/*
 * strcmp for ARMv6-M (optimized for performance, not size)
 *
 * Copyright (c) 2014-2020, Arm Limited.
 * SPDX-License-Identifier: MIT
 */

#if __ARM_ARCH == 6 && __ARM_ARCH_6M__ >= 1

	.thumb_func
	.syntax unified
	.arch	armv6-m

	/* Compare the leading bytes of each word, already extracted into
	   r0 (first string) and r1 (second string).  Branch to \label with
	   r0 holding their difference if they differ, or if the first
	   string's leading bytes include the NUL terminator (r4 carries
	   the NUL flags from the main loop; the shift selects the flags
	   for the bytes under test).  */
	.macro DoSub n, label
	subs	r0, r0, r1
#ifdef __ARM_BIG_ENDIAN
	lsrs	r1, r4, \n
#else
	lsls	r1, r4, \n
#endif
	orrs	r1, r0
	bne	\label
	.endm

	/* Big-endian helper: extract the leading bytes of each word down
	   to lane \n and compare them with DoSub.  */
	.macro Byte_Test n, label
	lsrs	r0, r2, \n
	lsrs	r1, r3, \n
	DoSub	\n, \label
	.endm

ENTRY_ALIGN (__strcmp_armv6m, 4)
	/* If either pointer has its low two bits set, the strings are not
	   word aligned: fall back to the byte-at-a-time loop at 6:.  */
	mov	r2, r0
	push	{r4, r5, r6, lr}
	orrs	r2, r1
	lsls	r2, r2, #30
	bne	6f
	ldr	r5, =0x01010101
	lsls	r6, r5, #7		/* r6 = 0x80808080.  */
1:
	/* Main loop, one word per string per iteration.
	   r4 = (r2 - 0x01010101) & ~r2 & 0x80808080 is non-zero iff the
	   word from the first string contains a NUL byte.  */
	ldmia	r0!, {r2}
	ldmia	r1!, {r3}
	subs	r4, r2, r5
	bics	r4, r2
	ands	r4, r6
	beq	3f

	/* The word holds a NUL terminator: compare the bytes in string
	   order until the first difference or the terminator.  */
#ifdef __ARM_BIG_ENDIAN
	Byte_Test	#24, 4f
	Byte_Test	#16, 4f
	Byte_Test	#8, 4f
	b	7f
3:
	/* No NUL byte: loop while the words are equal; on a mismatch the
	   word compare gives the string-order result directly.  */
	cmp	r2, r3
	beq	1b
	cmp	r2, r3
#else
	uxtb	r0, r2
	uxtb	r1, r3
	DoSub	#24, 2f
	uxth	r0, r2
	uxth	r1, r3
	DoSub	#16, 2f
	lsls	r0, r2, #8
	lsls	r1, r3, #8
	lsrs	r0, r0, #8
	lsrs	r1, r1, #8
	DoSub	#8, 2f
	lsrs	r0, r2, #24
	lsrs	r1, r3, #24
	subs	r0, r0, r1
2:
	pop	{r4, r5, r6, pc}
3:
	/* No NUL byte: loop while the words are equal; on a mismatch,
	   byte-reverse both words so that an unsigned compare yields the
	   string-order result.  */
	cmp	r2, r3
	beq	1b
	rev	r0, r2
	rev	r1, r3
	cmp	r0, r1
#endif

	bls	5f
	movs	r0, #1			/* First string is greater.  */
4:
	pop	{r4, r5, r6, pc}
5:
	movs	r0, #0
	mvns	r0, r0			/* Return -1.  */
	pop	{r4, r5, r6, pc}
6:
	/* Unaligned strings: byte-at-a-time loop, unrolled twice.  */
	ldrb	r2, [r0, #0]
	ldrb	r3, [r1, #0]
	adds	r0, #1
	adds	r1, #1
	cmp	r2, #0
	beq	7f
	cmp	r2, r3
	bne	7f
	ldrb	r2, [r0, #0]
	ldrb	r3, [r1, #0]
	adds	r0, #1
	adds	r1, #1
	cmp	r2, #0
	beq	7f
	cmp	r2, r3
	beq	6b
7:
	subs	r0, r2, r3
	pop	{r4, r5, r6, pc}

END (__strcmp_armv6m)

#endif /* __ARM_ARCH == 6 && __ARM_ARCH_6M__ >= 1 */
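
/* For reference, the word-at-a-time NUL detection in the main loop is
   the classic "find a zero byte" idiom.  Below is a minimal C sketch of
   the same test, kept inside this comment so the file stays assemblable;
   the helper name has_nul_byte is ours for illustration and is not part
   of this routine:

	#include <stdint.h>

	// Non-zero iff some byte of w is 0x00.  This is what the
	// subs/bics/ands sequence at label 1: computes into r4,
	// with r5 = 0x01010101 and r6 = 0x80808080.
	static inline int has_nul_byte (uint32_t w)
	{
	  // Subtracting 0x01 from each byte sets a byte's top bit if
	  // the byte was zero (or the subtraction borrowed); ANDing
	  // with ~w rejects bytes whose top bit was already set, and
	  // 0x80808080 keeps only the per-byte flag bits.
	  return ((w - 0x01010101u) & ~w & 0x80808080u) != 0;
	}

   Running the test only on the first string's word is enough: if the
   second string ends earlier, its NUL byte differs from the first
   string's byte in the same lane and the byte compare catches it.  */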