/*
 * Single-precision SVE powi(x, n) function.
 *
 * Copyright (c) 2020-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "sv_math.h"

/* Optimized single-precision vector powi (float base, integer power).
   powi is developed for environments in which accuracy is of much less
   importance than performance, hence we provide no estimate for worst-case
   error.  */
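/* The exported name follows the AArch64 vector function ABI mangling: 's'
   selects SVE, 'M' a masked variant, 'x' a scalable (unspecified) vector
   length, and 'vv' two vector arguments, applied to the scalar name powi.  */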
svfloat32_t
_ZGVsMxvv_powi (svfloat32_t as, svint32_t ns, svbool_t p)
{
  /* Compute powi by successive squaring, right to left.  */
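  /* For example, |n| = 13 = 0b1101 gives acc = x^1 * x^4 * x^8: each set bit
     of |n|, consumed from the least significant end upwards, contributes the
     current running square of the base.  */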
  svfloat32_t acc = sv_f32 (1.f);
  svbool_t want_recip = svcmplt (p, ns, 0);
  svuint32_t ns_abs = svreinterpret_u32 (svabs_x (p, ns));

  /* We use a max to avoid needing to check whether any lane != 0 on each
     iteration.  */
  uint32_t max_n = svmaxv (p, ns_abs);

  svfloat32_t c = as;
  /* Successively square c, and use merging predication (_m) to decide, per
     lane, whether to perform the multiplication or keep the result of the
     previous iteration.  */
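  /* The loop runs until the largest |n| across the active lanes has been
     consumed; lanes with a smaller |n| are unaffected by the surplus
     iterations, since px is false for them once their remaining bits are all
     zero.  */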
  while (true)
    {
      svbool_t px = svcmpeq (p, svand_x (p, ns_abs, 1), 1);
      acc = svmul_m (px, acc, c);
      max_n >>= 1;
      if (max_n == 0)
	break;

      ns_abs = svlsr_x (p, ns_abs, 1);
      c = svmul_x (p, c, c);
    }

  /* Negative powers are handled by computing the abs(n) version and then
     taking the reciprocal.  */
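  /* svptest_any skips the division entirely when no lane has a negative
     power; for the lanes selected by want_recip, svdivr (reversed divide)
     computes 1.0f / acc, while inactive lanes keep acc unchanged.  */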
  if (svptest_any (want_recip, want_recip))
    acc = svdivr_m (want_recip, acc, 1.0f);

  return acc;
}
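
/* Illustrative usage sketch (disabled): drives the SVE variant over arrays of
   bases and exponents, using a while-lt predicate to handle the tail.  The
   helper name apply_powif and the standalone prototype below are hypothetical
   and not part of this file's interface; an SVE-capable toolchain is
   assumed.  */
#if 0
#include <arm_sve.h>

svfloat32_t _ZGVsMxvv_powi (svfloat32_t, svint32_t, svbool_t);

static void
apply_powif (float *out, const float *x, const int32_t *n, int count)
{
  for (int i = 0; i < count; i += (int) svcntw ())
    {
      /* Active lanes are those with index i + lane < count.  */
      svbool_t pg = svwhilelt_b32_s32 (i, count);
      svfloat32_t base = svld1 (pg, x + i);
      svint32_t power = svld1 (pg, n + i);
      svst1 (pg, out + i, _ZGVsMxvv_powi (base, power, pg));
    }
}
#endif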