/*
 * Copyright (C) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
__FBSDID("$FreeBSD$");

#include "aeabi_vfp.h"

.fpu	vfp
.syntax	unified

/* void __aeabi_cfcmpeq(float, float) */
AEABI_ENTRY(cfcmpeq)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cfcmpeq)

/* void __aeabi_cfcmple(float, float) */
AEABI_ENTRY(cfcmple)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmpe.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cfcmple)

/* void __aeabi_cfrcmple(float, float) */
AEABI_ENTRY(cfrcmple)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmpe.f32	s1, s0
	vmrs		APSR_nzcv, fpscr
	RET
AEABI_END(cfrcmple)

/* int __aeabi_fcmpeq(float, float) */
AEABI_ENTRY(fcmpeq)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movne		r0, #0
	moveq		r0, #1
	RET
AEABI_END(fcmpeq)

/* int __aeabi_fcmplt(float, float) */
AEABI_ENTRY(fcmplt)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movcs		r0, #0
	movmi		r0, #1	/* mi, not lt: lt is also true for unordered */
	RET
AEABI_END(fcmplt)

/* int __aeabi_fcmple(float, float) */
AEABI_ENTRY(fcmple)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movhi		r0, #0
	movls		r0, #1
	RET
AEABI_END(fcmple)

/* int __aeabi_fcmpge(float, float) */
AEABI_ENTRY(fcmpge)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movlt		r0, #0
	movge		r0, #1
	RET
AEABI_END(fcmpge)

/* int __aeabi_fcmpgt(float, float) */
AEABI_ENTRY(fcmpgt)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movle		r0, #0
	movgt		r0, #1
	RET
AEABI_END(fcmpgt)

/* int __aeabi_fcmpun(float, float) */
AEABI_ENTRY(fcmpun)
	LOAD_SREGS(s0, s1, r0, r1)
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movvc		r0, #0
	movvs		r0, #1
	RET
AEABI_END(fcmpun)
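
/*
 * Note on the comparison helpers: the __aeabi_cf* routines above hand
 * their result back in the APSR condition flags rather than in r0 (the
 * "le" variants use vcmpe, which additionally raises Invalid Operation
 * when an operand is a NaN), while the __aeabi_fcmp* routines return
 * 0 or 1 in r0.  Informally, the fcmp* entries match the C comparison
 * operators; a sketch of the intended semantics (illustrative only,
 * not part of this file):
 *
 *	int __aeabi_fcmpeq(float a, float b) { return a == b; }
 *	int __aeabi_fcmplt(float a, float b) { return a <  b; }
 *	int __aeabi_fcmpun(float a, float b) { return a != a || b != b; }
 *
 * Every predicate except fcmpun therefore yields 0 when either operand
 * is NaN.
 */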

/* int __aeabi_f2iz(float) */
AEABI_ENTRY(f2iz)
	LOAD_SREG(s0, r0)
#if 0
	/*
	 * This should be the correct instruction, but binutils incorrectly
	 * encodes it as the version that uses FPSCR to determine the
	 * rounding.  When binutils is fixed we can use this again.
	 */
	vcvt.s32.f32	s0, s0
#else
	ftosizs		s0, s0	/* pre-UAL spelling; always rounds toward zero */
#endif
	vmov		r0, s0
	RET
AEABI_END(f2iz)

/* double __aeabi_f2d(float) */
AEABI_ENTRY(f2d)
	LOAD_SREG(s0, r0)
	vcvt.f64.f32	d0, s0
	UNLOAD_DREG(r0, r1, d0)
	RET
AEABI_END(f2d)

/* float __aeabi_i2f(int) */
AEABI_ENTRY(i2f)
	vmov		s0, r0
	vcvt.f32.s32	s0, s0
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(i2f)

/* float __aeabi_fadd(float, float) */
AEABI_ENTRY(fadd)
	LOAD_SREGS(s0, s1, r0, r1)
	vadd.f32	s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fadd)

/* float __aeabi_fdiv(float, float) */
AEABI_ENTRY(fdiv)
	LOAD_SREGS(s0, s1, r0, r1)
	vdiv.f32	s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fdiv)

/* float __aeabi_fmul(float, float) */
AEABI_ENTRY(fmul)
	LOAD_SREGS(s0, s1, r0, r1)
	vmul.f32	s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fmul)

/* float __aeabi_fsub(float, float) */
AEABI_ENTRY(fsub)
	LOAD_SREGS(s0, s1, r0, r1)
	vsub.f32	s0, s0, s1
	UNLOAD_SREG(r0, s0)
	RET
AEABI_END(fsub)
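
/*
 * For reference, the conversion and arithmetic entry points in this
 * file correspond to the following C operations (a sketch of the RTABI
 * semantics, illustrative only):
 *
 *	int    __aeabi_f2iz(float x)          { return (int)x; }
 *	double __aeabi_f2d(float x)           { return (double)x; }
 *	float  __aeabi_i2f(int x)             { return (float)x; }
 *	float  __aeabi_fadd(float a, float b) { return a + b; }
 *	float  __aeabi_fdiv(float a, float b) { return a / b; }
 *	float  __aeabi_fmul(float a, float b) { return a * b; }
 *	float  __aeabi_fsub(float a, float b) { return a - b; }
 *
 * Under the softfp calling convention the float arguments and results
 * travel in core registers; the LOAD_ and UNLOAD_ macros from
 * aeabi_vfp.h abstract the difference from the hard-float ABI, where
 * the values are already in VFP registers.
 */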