/*
 * memcmp - compare memory
 *
 * Copyright (c) 2018-2021, Arm Limited.
 * SPDX-License-Identifier: MIT
 */

#include "../asmdefs.h"

#if __ARM_FEATURE_SVE
/* Assumptions:
 *
 * ARMv8-a, AArch64
 * SVE Available.
 */

ENTRY (__memcmp_aarch64_sve)
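	/* PTR_ARG and SIZE_ARG come from asmdefs.h; for ILP32 builds they
	   zero-extend the 32-bit pointer and size arguments, and otherwise
	   expand to nothing.  */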
	PTR_ARG (0)
	PTR_ARG (1)
	SIZE_ARG (2)
	mov	x3, 0			/* initialize off */

0:	whilelo	p0.b, x3, x2		/* while off < max */
	b.none	9f
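	/* WHILELO activates one lane of p0 for each byte offset off+N that
	   is below max and sets the flags; B.NONE exits when no lane is
	   active, i.e. off >= max.  */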

	ld1b	z0.b, p0/z, [x0, x3]	/* read vectors bounded by max.  */
	ld1b	z1.b, p0/z, [x1, x3]
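	/* The governing predicate keeps a final partial read from touching
	   memory past max; inactive elements are zeroed by /z.  */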

	/* Increment for a whole vector, even if we've only read a partial.
	   This is significantly cheaper than INCP, and since OFF is not
	   used after the loop it is ok to increment OFF past MAX.  */
	incb	x3
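	/* INCB adds the vector length in bytes to x3 regardless of how
	   many lanes were active.  */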

	cmpne	p1.b, p0/z, z0.b, z1.b	/* while no inequalities */
	b.none	0b

	/* Found inequality.  */
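	/* BRKB clears p1 from the first mismatching lane onwards, so LASTA
	   (the element after the last active one) picks out the first
	   differing byte of each vector; the bytes are zero-extended, so
	   the subtraction yields the signed memcmp result.  */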
1:	brkb	p1.b, p0/z, p1.b	/* find first such */
	lasta	w0, p1, z0.b		/* extract each byte */
	lasta	w1, p1, z1.b
	sub	x0, x0, x1		/* return comparison */
	ret

	/* Found end-of-count.  */
9:	mov	x0, 0			/* return equality */
	ret

END (__memcmp_aarch64_sve)

#endif