/*
 * Wrapper functions for SVE ACLE.
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#ifndef SV_MATH_H
#define SV_MATH_H

#ifndef WANT_VMATH
/* Enable the build of vector math code.  */
# define WANT_VMATH 1
#endif

#if WANT_VMATH

# include <arm_sve.h>
# include <stdbool.h>

# include "math_config.h"

/* Double precision.  */
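/* Broadcast a scalar across every lane of an SVE vector of the matching
   type.  The single-precision variants below follow the same pattern.  */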
static inline svint64_t
sv_s64 (int64_t x)
{
  return svdup_s64 (x);
}

static inline svuint64_t
sv_u64 (uint64_t x)
{
  return svdup_u64 (x);
}

static inline svfloat64_t
sv_f64 (double x)
{
  return svdup_f64 (x);
}

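/* Scalar fallback for double-precision routines: apply the scalar function f
   to every lane of x selected by cmp and merge the results into y; lanes not
   selected keep their value from y.  A single-element predicate starts at the
   first active lane (svpfirst) and walks through cmp one lane at a time
   (svpnext_b64), extracting each element with svclastb.  */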
static inline svfloat64_t
sv_call_f64 (double (*f) (double), svfloat64_t x, svfloat64_t y, svbool_t cmp)
{
  svbool_t p = svpfirst (cmp, svpfalse ());
  while (svptest_any (cmp, p))
    {
      double elem = svclastb (p, 0, x);
      elem = (*f) (elem);
      svfloat64_t y2 = sv_f64 (elem);
      y = svsel (p, y2, y);
      p = svpnext_b64 (cmp, p);
    }
  return y;
}

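/* Two-argument variant of sv_call_f64: f is applied to the corresponding
   elements of x1 and x2 in each lane selected by cmp.  */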
static inline svfloat64_t
sv_call2_f64 (double (*f) (double, double), svfloat64_t x1, svfloat64_t x2,
	      svfloat64_t y, svbool_t cmp)
{
  svbool_t p = svpfirst (cmp, svpfalse ());
  while (svptest_any (cmp, p))
    {
      double elem1 = svclastb (p, 0, x1);
      double elem2 = svclastb (p, 0, x2);
      double ret = (*f) (elem1, elem2);
      svfloat64_t y2 = sv_f64 (ret);
      y = svsel (p, y2, y);
      p = svpnext_b64 (cmp, p);
    }
  return y;
}

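/* Unsigned modulo by a scalar: x % y computed as x - (x / y) * y, using a
   predicated divide and multiply-subtract (svmls).  */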
static inline svuint64_t
sv_mod_n_u64_x (svbool_t pg, svuint64_t x, uint64_t y)
{
  svuint64_t q = svdiv_x (pg, x, y);
  return svmls_x (pg, x, q, y);
}

/* Single precision.  */
static inline svint32_t
sv_s32 (int32_t x)
{
  return svdup_s32 (x);
}

static inline svuint32_t
sv_u32 (uint32_t x)
{
  return svdup_u32 (x);
}

static inline svfloat32_t
sv_f32 (float x)
{
  return svdup_f32 (x);
}

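/* Single-precision counterpart of sv_call_f64.  */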
static inline svfloat32_t
sv_call_f32 (float (*f) (float), svfloat32_t x, svfloat32_t y, svbool_t cmp)
{
  svbool_t p = svpfirst (cmp, svpfalse ());
  while (svptest_any (cmp, p))
    {
      float elem = svclastb (p, 0, x);
      elem = (*f) (elem);
      svfloat32_t y2 = sv_f32 (elem);
      y = svsel (p, y2, y);
      p = svpnext_b32 (cmp, p);
    }
  return y;
}

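/* Single-precision counterpart of sv_call2_f64.  */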
static inline svfloat32_t
sv_call2_f32 (float (*f) (float, float), svfloat32_t x1, svfloat32_t x2,
	      svfloat32_t y, svbool_t cmp)
{
  svbool_t p = svpfirst (cmp, svpfalse ());
  while (svptest_any (cmp, p))
    {
      float elem1 = svclastb (p, 0, x1);
      float elem2 = svclastb (p, 0, x2);
      float ret = (*f) (elem1, elem2);
      svfloat32_t y2 = sv_f32 (ret);
      y = svsel (p, y2, y);
      p = svpnext_b32 (cmp, p);
    }
  return y;
}
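
/* Illustrative usage (a sketch, not part of this interface): a vector
   routine typically computes its approximation for all lanes, then reroutes
   lanes flagged as special cases through the scalar fallback.  The routine
   and predicate names here are hypothetical.

     static svfloat64_t
     hypothetical_exp_special_case (svfloat64_t x, svfloat64_t y,
				    svbool_t special)
     {
       // Recompute only the flagged lanes with scalar exp; all other
       // lanes keep the vector result already in y.
       return sv_call_f64 (exp, x, y, special);
     }
*/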
#endif /* WANT_VMATH */

#endif /* SV_MATH_H */