1 /*
2 * Function wrappers for mathbench.
3 *
4 * Copyright (c) 2022-2023, Arm Limited.
5 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
6 */
7
8 static double
atan2_wrap(double x)9 atan2_wrap (double x)
10 {
11 return atan2 (5.0, x);
12 }
13
14 static float
atan2f_wrap(float x)15 atan2f_wrap (float x)
16 {
17 return atan2f (5.0f, x);
18 }
19
20 static double
powi_wrap(double x)21 powi_wrap (double x)
22 {
23 return __builtin_powi (x, (int) round (x));
24 }
25
26 #if __aarch64__ && defined(__vpcs)
27
28 __vpcs static v_double
_Z_atan2_wrap(v_double x)29 _Z_atan2_wrap (v_double x)
30 {
31 return _ZGVnN2vv_atan2 (v_double_dup (5.0), x);
32 }
33
34 __vpcs static v_float
_Z_atan2f_wrap(v_float x)35 _Z_atan2f_wrap (v_float x)
36 {
37 return _ZGVnN4vv_atan2f (v_float_dup (5.0f), x);
38 }
39
40 __vpcs static v_float
_Z_hypotf_wrap(v_float x)41 _Z_hypotf_wrap (v_float x)
42 {
43 return _ZGVnN4vv_hypotf (v_float_dup (5.0f), x);
44 }
45
46 __vpcs static v_double
_Z_hypot_wrap(v_double x)47 _Z_hypot_wrap (v_double x)
48 {
49 return _ZGVnN2vv_hypot (v_double_dup (5.0), x);
50 }
51
52 __vpcs static v_double
xy_Z_pow(v_double x)53 xy_Z_pow (v_double x)
54 {
55 return _ZGVnN2vv_pow (x, x);
56 }
57
58 __vpcs static v_double
x_Z_pow(v_double x)59 x_Z_pow (v_double x)
60 {
61 return _ZGVnN2vv_pow (x, v_double_dup (23.4));
62 }
63
64 __vpcs static v_double
y_Z_pow(v_double x)65 y_Z_pow (v_double x)
66 {
67 return _ZGVnN2vv_pow (v_double_dup (2.34), x);
68 }
69
70 __vpcs static v_float
_Z_sincosf_wrap(v_float x)71 _Z_sincosf_wrap (v_float x)
72 {
73 v_float s, c;
74 _ZGVnN4vl4l4_sincosf (x, &s, &c);
75 return s + c;
76 }
77
78 __vpcs static v_float
_Z_cexpif_wrap(v_float x)79 _Z_cexpif_wrap (v_float x)
80 {
81 __f32x4x2_t sc = _ZGVnN4v_cexpif (x);
82 return sc.val[0] + sc.val[1];
83 }
84
85 __vpcs static v_double
_Z_sincos_wrap(v_double x)86 _Z_sincos_wrap (v_double x)
87 {
88 v_double s, c;
89 _ZGVnN2vl8l8_sincos (x, &s, &c);
90 return s + c;
91 }
92
93 __vpcs static v_double
_Z_cexpi_wrap(v_double x)94 _Z_cexpi_wrap (v_double x)
95 {
96 __f64x2x2_t sc = _ZGVnN2v_cexpi (x);
97 return sc.val[0] + sc.val[1];
98 }
99
#endif // __aarch64__ && __vpcs
101
102 #if WANT_SVE_MATH
103
104 static sv_float
_Z_sv_atan2f_wrap(sv_float x,sv_bool pg)105 _Z_sv_atan2f_wrap (sv_float x, sv_bool pg)
106 {
107 return _ZGVsMxvv_atan2f (x, svdup_f32 (5.0f), pg);
108 }
109
110 static sv_double
_Z_sv_atan2_wrap(sv_double x,sv_bool pg)111 _Z_sv_atan2_wrap (sv_double x, sv_bool pg)
112 {
113 return _ZGVsMxvv_atan2 (x, svdup_f64 (5.0), pg);
114 }
115
116 static sv_float
_Z_sv_hypotf_wrap(sv_float x,sv_bool pg)117 _Z_sv_hypotf_wrap (sv_float x, sv_bool pg)
118 {
119 return _ZGVsMxvv_hypotf (x, svdup_f32 (5.0), pg);
120 }
121
122 static sv_double
_Z_sv_hypot_wrap(sv_double x,sv_bool pg)123 _Z_sv_hypot_wrap (sv_double x, sv_bool pg)
124 {
125 return _ZGVsMxvv_hypot (x, svdup_f64 (5.0), pg);
126 }
127
128 static sv_float
_Z_sv_powi_wrap(sv_float x,sv_bool pg)129 _Z_sv_powi_wrap (sv_float x, sv_bool pg)
130 {
131 return _ZGVsMxvv_powi (x, svcvt_s32_f32_x (pg, x), pg);
132 }
133
134 static sv_double
_Z_sv_powk_wrap(sv_double x,sv_bool pg)135 _Z_sv_powk_wrap (sv_double x, sv_bool pg)
136 {
137 return _ZGVsMxvv_powk (x, svcvt_s64_f64_x (pg, x), pg);
138 }
139
140 static sv_float
xy_Z_sv_powf(sv_float x,sv_bool pg)141 xy_Z_sv_powf (sv_float x, sv_bool pg)
142 {
143 return _ZGVsMxvv_powf (x, x, pg);
144 }
145
146 static sv_float
x_Z_sv_powf(sv_float x,sv_bool pg)147 x_Z_sv_powf (sv_float x, sv_bool pg)
148 {
149 return _ZGVsMxvv_powf (x, svdup_f32 (23.4f), pg);
150 }
151
152 static sv_float
y_Z_sv_powf(sv_float x,sv_bool pg)153 y_Z_sv_powf (sv_float x, sv_bool pg)
154 {
155 return _ZGVsMxvv_powf (svdup_f32 (2.34f), x, pg);
156 }
157
158 static sv_double
xy_Z_sv_pow(sv_double x,sv_bool pg)159 xy_Z_sv_pow (sv_double x, sv_bool pg)
160 {
161 return _ZGVsMxvv_pow (x, x, pg);
162 }
163
164 static sv_double
x_Z_sv_pow(sv_double x,sv_bool pg)165 x_Z_sv_pow (sv_double x, sv_bool pg)
166 {
167 return _ZGVsMxvv_pow (x, svdup_f64 (23.4), pg);
168 }
169
170 static sv_double
y_Z_sv_pow(sv_double x,sv_bool pg)171 y_Z_sv_pow (sv_double x, sv_bool pg)
172 {
173 return _ZGVsMxvv_pow (svdup_f64 (2.34), x, pg);
174 }
175
176 static sv_float
_Z_sv_sincosf_wrap(sv_float x,sv_bool pg)177 _Z_sv_sincosf_wrap (sv_float x, sv_bool pg)
178 {
179 float s[svcntw ()], c[svcntw ()];
180 _ZGVsMxvl4l4_sincosf (x, s, c, pg);
181 return svadd_x (pg, svld1 (pg, s), svld1 (pg, s));
182 }
183
184 static sv_float
_Z_sv_cexpif_wrap(sv_float x,sv_bool pg)185 _Z_sv_cexpif_wrap (sv_float x, sv_bool pg)
186 {
187 svfloat32x2_t sc = _ZGVsMxv_cexpif (x, pg);
188 return svadd_x (pg, svget2 (sc, 0), svget2 (sc, 1));
189 }
190
191 static sv_double
_Z_sv_sincos_wrap(sv_double x,sv_bool pg)192 _Z_sv_sincos_wrap (sv_double x, sv_bool pg)
193 {
194 double s[svcntd ()], c[svcntd ()];
195 _ZGVsMxvl8l8_sincos (x, s, c, pg);
196 return svadd_x (pg, svld1 (pg, s), svld1 (pg, s));
197 }
198
199 static sv_double
_Z_sv_cexpi_wrap(sv_double x,sv_bool pg)200 _Z_sv_cexpi_wrap (sv_double x, sv_bool pg)
201 {
202 svfloat64x2_t sc = _ZGVsMxv_cexpi (x, pg);
203 return svadd_x (pg, svget2 (sc, 0), svget2 (sc, 1));
204 }
205
206 #endif // WANT_SVE_MATH
207