/*
 * Copyright (C) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
#include "aeabi_vfp.h"

.fpu	vfp
.syntax	unified

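/*
 * Single-precision (float) run-time helpers, as specified by the ARM
 * RTABI, for targets that pass float arguments and results in core
 * registers.  The LOAD_SREGS/LOAD_SREG/UNLOAD_SREG/UNLOAD_DREG macros
 * (from aeabi_vfp.h) move values between the core and VFP register
 * banks around each operation.
 *
 * For the comparison helpers below, "vmrs APSR_nzcv, fpscr" copies the
 * VFP comparison result into the CPSR flags; the mapping is:
 *   equal:         N=0 Z=1 C=1 V=0
 *   less than:     N=1 Z=0 C=0 V=0
 *   greater than:  N=0 Z=0 C=1 V=0
 *   unordered:     N=0 Z=0 C=1 V=1
 */
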
/* void __aeabi_cfcmpeq(float, float) */
AEABI_ENTRY(cfcmpeq)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cfcmpeq)

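/*
 * Three-way compare that leaves its result in the flags.  The "e"
 * variant (vcmpe) raises Invalid Operation for any NaN operand, which
 * is acceptable for __aeabi_cfcmple; __aeabi_cfcmpeq above uses the
 * non-signaling vcmp so that quiet NaNs compare without trapping.
 */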
/* void __aeabi_cfcmple(float, float) */
AEABI_ENTRY(cfcmple)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmpe.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cfcmple)

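/*
 * Reversed compare: per the RTABI, __aeabi_cfrcmple(a, b) must set the
 * flags as if for cfcmple(b, a), hence the swapped vcmpe operands.
 */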
/* void __aeabi_cfrcmple(float, float) */
AEABI_ENTRY(cfrcmple)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmpe.f32	s1, s0
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cfrcmple)

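/*
 * The fcmp* helpers return 1 in r0 when the relation holds and 0 when
 * it does not.  Each picks a condition code that is false for an
 * unordered result, so comparisons involving a NaN return 0 (except
 * __aeabi_fcmpun, which exists to detect that case).  __aeabi_fcmpeq
 * is roughly "return a == b;" in C.
 */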
/* int __aeabi_fcmpeq(float, float) */
AEABI_ENTRY(fcmpeq)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	ite      ne
	movne    r0, #0
	moveq    r0, #1
	RET
AEABI_END(fcmpeq)

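/*
 * CC (C clear) is true only for an ordered less-than; an unordered
 * result sets C, so NaN operands return 0.
 */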
/* int __aeabi_fcmplt(float, float) */
AEABI_ENTRY(fcmplt)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	ite      cs
	movcs    r0, #0
	movcc    r0, #1
	RET
AEABI_END(fcmplt)

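/*
 * LS (C clear or Z set) matches less-than and equal only; greater-than
 * and unordered both leave C=1 with Z=0 and fall to the HI case.
 */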
/* int __aeabi_fcmple(float, float) */
AEABI_ENTRY(fcmple)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	ite      hi
	movhi    r0, #0
	movls    r0, #1
	RET
AEABI_END(fcmple)

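/*
 * GE (N == V) holds for equal and greater-than; an unordered result
 * sets V, which makes GE false, so NaN operands return 0.
 */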
/* int __aeabi_fcmpge(float, float) */
AEABI_ENTRY(fcmpge)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	ite      lt
	movlt    r0, #0
	movge    r0, #1
	RET
AEABI_END(fcmpge)

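/*
 * GT (Z clear and N == V) holds only for a strict greater-than; both
 * equal and unordered results fail the test.
 */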
/* int __aeabi_fcmpgt(float, float) */
AEABI_ENTRY(fcmpgt)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	ite      le
	movle    r0, #0
	movgt    r0, #1
	RET
AEABI_END(fcmpgt)

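/*
 * VS (V set) is true only for an unordered result, so this returns 1
 * exactly when at least one operand is a NaN, as isunordered() would.
 */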
/* int __aeabi_fcmpun(float, float) */
AEABI_ENTRY(fcmpun)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32 s0, s1
	vmrs     APSR_nzcv, fpscr
	ite      vc
	movvc    r0, #0
	movvs    r0, #1
	RET
AEABI_END(fcmpun)

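/*
 * Float to signed int, truncating toward zero as C cast semantics
 * require.  The trailing "z" in the pre-UAL ftosizs mnemonic selects
 * round-toward-zero in the instruction encoding itself, independent of
 * the FPSCR rounding mode; see the workaround note below.
 */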
/* int __aeabi_f2iz(float) */
AEABI_ENTRY(f2iz)
	LOAD_SREG(s0, r0)
#if 0
	/*
	 * This should be the correct instruction, but binutils incorrectly
	 * encodes it as the version that uses the FPSCR to determine the
	 * rounding mode.  When binutils is fixed we can use this again.
	 */
	vcvt.s32.f32 s0, s0
#else
	ftosizs      s0, s0
#endif
	vmov         r0, s0
	RET
AEABI_END(f2iz)

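/*
 * Widening float-to-double conversion; every float value is exactly
 * representable as a double, so this never rounds.  The double result
 * is returned in the r0/r1 register pair via UNLOAD_DREG.
 */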
/* double __aeabi_f2d(float) */
AEABI_ENTRY(f2d)
	LOAD_SREG(s0, r0)
	vcvt.f64.f32 d0, s0
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(f2d)

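/*
 * Signed int to float: the integer argument arrives in r0 and is moved
 * to s0 for the conversion; the result returns in r0 via UNLOAD_SREG.
 */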
/* float __aeabi_i2f(int) */
AEABI_ENTRY(i2f)
	vmov         s0, r0
	vcvt.f32.s32 s0, s0
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(i2f)

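/*
 * The remaining helpers wrap the basic VFP arithmetic instructions:
 * operands arrive in r0/r1, the result is returned in r0, and rounding
 * follows the current FPSCR mode (round-to-nearest-even by default).
 */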
/* float __aeabi_fadd(float, float) */
AEABI_ENTRY(fadd)
	LOAD_SREGS(s0, s1, r0, r1)
	vadd.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fadd)

/* float __aeabi_fdiv(float, float) */
AEABI_ENTRY(fdiv)
	LOAD_SREGS(s0, s1, r0, r1)
	vdiv.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fdiv)

/* float __aeabi_fmul(float, float) */
AEABI_ENTRY(fmul)
	LOAD_SREGS(s0, s1, r0, r1)
	vmul.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fmul)

/* float __aeabi_fsub(float, float) */
AEABI_ENTRY(fsub)
	LOAD_SREGS(s0, s1, r0, r1)
	vsub.f32 s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fsub)

	.section .note.GNU-stack,"",%progbits