/*
 * Copyright (c) 1993,94 Winning Strategies, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Winning Strategies, Inc.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

/*
 * strncmp(s1, s2, n)
 *	return an integer greater than, equal to, or less than 0,
 *	according as the first n characters of the string s1 are greater
 *	than, equal to, or less than those of the string s2.
 *
 * %eax - pointer to s1
 * %ecx - pointer to s2
 * %edx - length
 *
 * Written by:
 *	J.T. Conklin (jtc@wimsey.com), Winning Strategies, Inc.
 */
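
/*
 * For readers unfamiliar with the assembly below, here is a hedged C-level
 * sketch of the comparison this routine performs (illustrative only, not
 * part of the build; the local names c1 and c2 are ours): bytes are
 * compared as unsigned chars, and the loop stops at the first difference,
 * at a NUL in s1, or after n bytes.
 *
 *	int
 *	strncmp(const char *s1, const char *s2, size_t n)
 *	{
 *		for (; n > 0; s1++, s2++, n--) {
 *			unsigned char c1 = *s1, c2 = *s2;
 *			if (c1 == '\0' || c1 != c2)
 *				return (c1 - c2);
 *		}
 *		return (0);
 *	}
 */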

/*
 * I've unrolled the loop eight times: large enough to make a
 * significant difference, and small enough not to totally trash the
 * cache.
 *
 * TODO: change all the jz's back to je for consistency.
 */

ENTRY(strncmp)
	pushl	%ebx			/* save callee-saved scratch register */
	movl	8(%esp),%eax		/* s1 */
	movl	12(%esp),%ecx		/* s2 */
	movl	16(%esp),%edx		/* n */
	testl	%edx,%edx		/* sets ZF if n == 0 */
	jmp	L2			/* Jump into the loop! */

	.align 2,0x90
L1:	incl	%eax			/* advance s1 */
	incl	%ecx			/* advance s2 */
	decl	%edx			/* one fewer byte left to compare */
L2:	jz	L4			/* strings are equal */
	movb	(%eax),%bl		/* load *s1 */
	testb	%bl,%bl
	jz	L3			/* end of s1 */
	cmpb	%bl,(%ecx)
	jne	L3			/* *s1 != *s2 */

/*
 * XXX it might be best to move the next 4 instructions to the end of the
 * unrolled part of the loop.  The unrolled part would then be
 *	movb n(%eax),%bl; testb %bl,%bl; je L3; cmpb %bl,n(%ecx); jne L3
 * or maybe better
 *	movb n(%eax),%bl; cmpb %bl,n(%ecx); jne L3; testb %bl,%bl; je return_0
 * for n = 0, 1, ..., 7.  The end of the loop would be
 *	L1: addl $8,%eax; addl $8,%ecx; subl $8,%edx; cmpl $8,%edx; jae Lx
 * where residual counts of 0 to 7 are handled at Lx.  However, this would
 * be slower for short strings.  Cache effects are probably not so
 * important because we are only handling a byte at a time.
 */
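
/*
 * A hedged C-level sketch of the restructuring suggested above (the
 * function name and locals are illustrative only; this is not committed
 * code): compare eight bytes per iteration at fixed offsets, then finish
 * the 0..7 residual bytes one at a time.
 *
 *	int
 *	strncmp_sketch(const char *s1, const char *s2, size_t n)
 *	{
 *		while (n >= 8) {
 *			for (int i = 0; i < 8; i++) {
 *				unsigned char c1 = s1[i], c2 = s2[i];
 *				if (c1 != c2)
 *					return (c1 - c2);
 *				if (c1 == '\0')
 *					return (0);
 *			}
 *			s1 += 8; s2 += 8; n -= 8;
 *		}
 *		for (; n > 0; s1++, s2++, n--) {
 *			unsigned char c1 = *s1, c2 = *s2;
 *			if (c1 != c2)
 *				return (c1 - c2);
 *			if (c1 == '\0')
 *				return (0);
 *		}
 *		return (0);
 *	}
 */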
	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	jne	L3

	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	jne	L3

	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	jne	L3

	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	jne	L3

	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	jne	L3

	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	jne	L3

	incl	%eax
	incl	%ecx
	decl	%edx
	jz	L4
	movb	(%eax),%bl
	testb	%bl,%bl
	jz	L3
	cmpb	%bl,(%ecx)
	je	L1

	.align 2,0x90
L3:	movzbl	(%eax),%eax		/* unsigned comparison */
	movzbl	(%ecx),%ecx
	subl	%ecx,%eax		/* return *s1 - *s2 */
	popl	%ebx
	ret
	.align 2,0x90
L4:	xorl	%eax,%eax		/* return 0; first n bytes are equal */
	popl	%ebx
	ret
END(strncmp)

	.section .note.GNU-stack,"",%progbits