/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/lib/ev67-strchr.S
 * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
 *
 * Return the address of a given character within a null-terminated
 * string, or null if it is not found.
 *
 * Much of the information about 21264 scheduling/coding comes from:
 *	Compiler Writer's Guide for the Alpha 21264
 *	abbreviated as 'CWG' in other comments here
 *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
 * Scheduling notation:
 *	E	- either cluster
 *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
 *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
 * Try not to change the actual algorithm if possible for consistency.
 */
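/*
 * For reference, a rough C sketch of what this routine computes
 * (illustrative only; the assembly below works a quadword at a time
 * and is scheduled for the 21264 as described above):
 *
 *	char *strchr(const char *s, int c)
 *	{
 *		for (;; s++) {
 *			if (*s == (char)c)
 *				return (char *)s;
 *			if (*s == '\0')
 *				return NULL;
 *		}
 *	}
 */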
#include <linux/export.h>
#include <asm/regdef.h>

	.set noreorder
	.set noat

	.align 4
	.globl strchr
	.ent strchr
strchr:
	.frame sp, 0, ra
	.prologue 0

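	/*
	 * First quad: the insbl/inswl/or sequence replicates the search
	 * character into every byte of a1 so that a single xor + cmpbge
	 * can test all eight bytes at once, while t4 is turned into a
	 * "garbage" map for the bytes that precede the (possibly
	 * unaligned) start of the string so they cannot fake a match.
	 */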
	ldq_u   t0, 0(a0)	# L : load first quadword Latency=3
	and	a1, 0xff, t3	# E : 00000000000000ch
	insbl	a1, 1, t5	# U : 000000000000ch00
	insbl	a1, 7, a2	# U : ch00000000000000

	insbl	t3, 6, a3	# U : 00ch000000000000
	or	t5, t3, a1	# E : 000000000000chch
	andnot  a0, 7, v0	# E : align our loop pointer
	lda	t4, -1		# E : build garbage mask

	mskqh	t4, a0, t4	# U : only want relevant part of first quad
	or	a2, a3, a2	# E : chch000000000000
	inswl	a1, 2, t5	# E : 00000000chch0000
	inswl	a1, 4, a3	# E : 0000chch00000000

	or	a1, a2, a1	# E : chch00000000chch
	or	a3, t5, t5	# E : 0000chchchch0000
	cmpbge  zero, t0, t2	# E : bits set iff byte == zero
	cmpbge	zero, t4, t4	# E : bits set iff byte is garbage

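	/*
	 * cmpbge zero, x, y sets bit i of y iff byte i of x is zero
	 * (0 >= byte, unsigned, only holds for a zero byte).  Applied
	 * to t0 it flags NUL bytes; applied to t0 ^ a1 below it flags
	 * bytes equal to the search character.
	 */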
	/* This quad is _very_ serialized.  Lots of stalling happens */
	or	t5, a1, a1	# E : chchchchchchchch
	xor	t0, a1, t1	# E : make bytes == c zero
	cmpbge  zero, t1, t3	# E : bits set iff byte == c
	or	t2, t3, t0	# E : bits set iff char match or zero match

	andnot	t0, t4, t0	# E : clear garbage bits
	cttz	t0, a2		# U0 : speculative (in case we get a match)
	nop			# E :
	bne	t0, $found	# U :

	/*
	 * Yuk.  This loop is going to stall like crazy waiting for the
	 * data to be loaded.  Not much can be done about it unless it's
	 * unrolled multiple times - is that safe to do in kernel space?
	 * Or would exception handling recovery code do the trick here?
	 */
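	/*
	 * Main loop: fetch one aligned quadword per iteration and fall
	 * through as soon as it contains either a NUL byte or the
	 * search character; $found decides which of the two came first.
	 */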
$loop:	ldq	t0, 8(v0)	# L : Latency=3
	addq	v0, 8, v0	# E :
	xor	t0, a1, t1	# E :
	cmpbge	zero, t0, t2	# E : bits set iff byte == 0

	cmpbge	zero, t1, t3	# E : bits set iff byte == c
	or	t2, t3, t0	# E :
	cttz	t3, a2		# U0 : speculative (in case we get a match)
	beq	t0, $loop	# U :

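	/*
	 * Something was seen.  Isolate the lowest set bit of the
	 * combined match map; if that bit is also set in t3 the first
	 * interesting byte was the character, so return its address
	 * (v0 plus the byte index from cttz).  Otherwise the NUL came
	 * first and cmoveq substitutes NULL ($31 always reads as zero).
	 */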
$found:	negq    t0, t1		# E : clear all but least set bit
	and     t0, t1, t0	# E :
	and	t0, t3, t1	# E : bit set iff byte was the char
	addq	v0, a2, v0	# E : Add in the bit number from above

	cmoveq	t1, $31, v0	# E : Two mapping slots, latency = 2
	nop
	nop
	ret			# L0 :

	.end strchr
	EXPORT_SYMBOL(strchr)