/*===---- __clang_hip_math.h - Device-side HIP math support ----------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __CLANG_HIP_MATH_H__
#define __CLANG_HIP_MATH_H__

#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__)
#error "This file is for HIP and OpenMP AMDGCN device compilation only."
#endif

#if !defined(__HIPCC_RTC__)
#if defined(__cplusplus)
#include <algorithm>
#endif
#include <limits.h>
#include <stdint.h>
#ifdef __OPENMP_AMDGCN__
#include <omp.h>
#endif
#endif // !defined(__HIPCC_RTC__)

#pragma push_macro("__DEVICE__")

#ifdef __OPENMP_AMDGCN__
#define __DEVICE__ static inline __attribute__((always_inline, nothrow))
#else
#define __DEVICE__ static __device__ inline __attribute__((always_inline))
#endif

// A few functions return bool type starting only in C++11.
#pragma push_macro("__RETURN_TYPE")
#ifdef __OPENMP_AMDGCN__
#define __RETURN_TYPE int
#else
#if defined(__cplusplus)
#define __RETURN_TYPE bool
#else
#define __RETURN_TYPE int
#endif
#endif // __OPENMP_AMDGCN__

#if defined(__cplusplus) && __cplusplus < 201103L
// emulate static_assert on type sizes
template<bool>
struct __compare_result{};
template<>
struct __compare_result<true> {
  static const __device__ bool valid;
};

__DEVICE__
void __suppress_unused_warning(bool b){};
template <unsigned int S, unsigned int T>
__DEVICE__ void __static_assert_equal_size() {
  __suppress_unused_warning(__compare_result<S == T>::valid);
}

#define __static_assert_type_size_equal(A, B) \
  __static_assert_equal_size<A,B>()

#else
#define __static_assert_type_size_equal(A,B) \
  static_assert((A) == (B), "")

#endif

__DEVICE__
uint64_t __make_mantissa_base8(const char *__tagp __attribute__((nonnull))) {
  uint64_t __r = 0;
  while (*__tagp != '\0') {
    char __tmp = *__tagp;

    if (__tmp >= '0' && __tmp <= '7')
      __r = (__r * 8u) + __tmp - '0';
    else
      return 0;

    ++__tagp;
  }

  return __r;
}

__DEVICE__
uint64_t __make_mantissa_base10(const char *__tagp __attribute__((nonnull))) {
  uint64_t __r = 0;
  while (*__tagp != '\0') {
    char __tmp = *__tagp;

    if (__tmp >= '0' && __tmp <= '9')
      __r = (__r * 10u) + __tmp - '0';
    else
      return 0;

    ++__tagp;
  }

  return __r;
}

__DEVICE__
uint64_t __make_mantissa_base16(const char *__tagp __attribute__((nonnull))) {
  uint64_t __r = 0;
  while (*__tagp != '\0') {
    char __tmp = *__tagp;

    if (__tmp >= '0' && __tmp <= '9')
      __r = (__r * 16u) + __tmp - '0';
    else if (__tmp >= 'a' && __tmp <= 'f')
      __r = (__r * 16u) + __tmp - 'a' + 10;
    else if (__tmp >= 'A' && __tmp <= 'F')
      __r = (__r * 16u) + __tmp - 'A' + 10;
    else
      return 0;

    ++__tagp;
  }

  return __r;
}

__DEVICE__
uint64_t __make_mantissa(const char *__tagp __attribute__((nonnull))) {
  if (*__tagp == '0') {
    ++__tagp;

    if (*__tagp == 'x' || *__tagp == 'X')
      return __make_mantissa_base16(__tagp);
    else
      return __make_mantissa_base8(__tagp);
  }

  return __make_mantissa_base10(__tagp);
}
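// Note (illustrative, derived from the helpers above): __make_mantissa turns
// the nan()/nanf() tag string into a payload, choosing the base from the
// prefix, e.g.
//   __make_mantissa("0x1f") == 31   // "0x"/"0X" prefix -> hexadecimal
//   __make_mantissa("017")  == 15   // leading "0"      -> octal
//   __make_mantissa("23")   == 23   // otherwise        -> decimal
//   __make_mantissa("12a")  == 0    // any invalid digit yields 0
// nanf()/nan() below store this value in the mantissa bits of a quiet NaN.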
// BEGIN FLOAT
#if defined(__cplusplus)
__DEVICE__
int abs(int __x) {
  int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
  return (__x ^ __sgn) - __sgn;
}
__DEVICE__
long labs(long __x) {
  long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
  return (__x ^ __sgn) - __sgn;
}
__DEVICE__
long long llabs(long long __x) {
  long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
  return (__x ^ __sgn) - __sgn;
}
#endif

__DEVICE__
float acosf(float __x) { return __ocml_acos_f32(__x); }

__DEVICE__
float acoshf(float __x) { return __ocml_acosh_f32(__x); }

__DEVICE__
float asinf(float __x) { return __ocml_asin_f32(__x); }

__DEVICE__
float asinhf(float __x) { return __ocml_asinh_f32(__x); }

__DEVICE__
float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }

__DEVICE__
float atanf(float __x) { return __ocml_atan_f32(__x); }

__DEVICE__
float atanhf(float __x) { return __ocml_atanh_f32(__x); }

__DEVICE__
float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }

__DEVICE__
float ceilf(float __x) { return __builtin_ceilf(__x); }

__DEVICE__
float copysignf(float __x, float __y) { return __builtin_copysignf(__x, __y); }

__DEVICE__
float cosf(float __x) { return __ocml_cos_f32(__x); }

__DEVICE__
float coshf(float __x) { return __ocml_cosh_f32(__x); }

__DEVICE__
float cospif(float __x) { return __ocml_cospi_f32(__x); }

__DEVICE__
float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }

__DEVICE__
float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }

__DEVICE__
float erfcf(float __x) { return __ocml_erfc_f32(__x); }

__DEVICE__
float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }

__DEVICE__
float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }

__DEVICE__
float erff(float __x) { return __ocml_erf_f32(__x); }

__DEVICE__
float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }

__DEVICE__
float exp10f(float __x) { return __ocml_exp10_f32(__x); }

__DEVICE__
float exp2f(float __x) { return __builtin_exp2f(__x); }

__DEVICE__
float expf(float __x) { return __builtin_expf(__x); }

__DEVICE__
float expm1f(float __x) { return __ocml_expm1_f32(__x); }

__DEVICE__
float fabsf(float __x) { return __builtin_fabsf(__x); }

__DEVICE__
float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }

__DEVICE__
float fdividef(float __x, float __y) { return __x / __y; }

__DEVICE__
float floorf(float __x) { return __builtin_floorf(__x); }

__DEVICE__
float fmaf(float __x, float __y, float __z) {
  return __builtin_fmaf(__x, __y, __z);
}

__DEVICE__
float fmaxf(float __x, float __y) { return __builtin_fmaxf(__x, __y); }

__DEVICE__
float fminf(float __x, float __y) { return __builtin_fminf(__x, __y); }

__DEVICE__
float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }

__DEVICE__
float frexpf(float __x, int *__nptr) {
  return __builtin_frexpf(__x, __nptr);
}

__DEVICE__
float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }

__DEVICE__
int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }

__DEVICE__
__RETURN_TYPE __finitef(float __x) { return __builtin_isfinite(__x); }

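// Note (illustrative): __finitef above and __isinff/__isnanf below return
// __RETURN_TYPE (bool under C++, int otherwise). The C-only isfinite/isinf/
// isnan/signbit _Generic macros near the end of this header dispatch to these
// helpers.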
__DEVICE__
__RETURN_TYPE __isinff(float __x) { return __builtin_isinf(__x); }

__DEVICE__
__RETURN_TYPE __isnanf(float __x) { return __builtin_isnan(__x); }

__DEVICE__
float j0f(float __x) { return __ocml_j0_f32(__x); }

__DEVICE__
float j1f(float __x) { return __ocml_j1_f32(__x); }

__DEVICE__
float jnf(int __n, float __x) { // TODO: we could use Ahmes multiplication
                                // and the Miller & Brown algorithm
  // for linear recurrences to get O(log n) steps, but it's unclear if
  // it'd be beneficial in this case.
  if (__n == 0)
    return j0f(__x);
  if (__n == 1)
    return j1f(__x);

  float __x0 = j0f(__x);
  float __x1 = j1f(__x);
  for (int __i = 1; __i < __n; ++__i) {
    float __x2 = (2 * __i) / __x * __x1 - __x0;
    __x0 = __x1;
    __x1 = __x2;
  }

  return __x1;
}

__DEVICE__
float ldexpf(float __x, int __e) { return __builtin_amdgcn_ldexpf(__x, __e); }

__DEVICE__
float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }

__DEVICE__
long long int llrintf(float __x) { return __builtin_rintf(__x); }

__DEVICE__
long long int llroundf(float __x) { return __builtin_roundf(__x); }

__DEVICE__
float log10f(float __x) { return __builtin_log10f(__x); }

__DEVICE__
float log1pf(float __x) { return __ocml_log1p_f32(__x); }

__DEVICE__
float log2f(float __x) { return __builtin_log2f(__x); }

__DEVICE__
float logbf(float __x) { return __ocml_logb_f32(__x); }

__DEVICE__
float logf(float __x) { return __builtin_logf(__x); }

__DEVICE__
long int lrintf(float __x) { return __builtin_rintf(__x); }

__DEVICE__
long int lroundf(float __x) { return __builtin_roundf(__x); }

__DEVICE__
float modff(float __x, float *__iptr) {
  float __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  float __r =
      __ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
  *__iptr = __tmp;
  return __r;
}

__DEVICE__
float nanf(const char *__tagp __attribute__((nonnull))) {
  union {
    float val;
    struct ieee_float {
      unsigned int mantissa : 22;
      unsigned int quiet : 1;
      unsigned int exponent : 8;
      unsigned int sign : 1;
    } bits;
  } __tmp;
  __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));

  __tmp.bits.sign = 0u;
  __tmp.bits.exponent = ~0u;
  __tmp.bits.quiet = 1u;
  __tmp.bits.mantissa = __make_mantissa(__tagp);

  return __tmp.val;
}

__DEVICE__
float nearbyintf(float __x) { return __builtin_nearbyintf(__x); }

__DEVICE__
float nextafterf(float __x, float __y) {
  return __ocml_nextafter_f32(__x, __y);
}

__DEVICE__
float norm3df(float __x, float __y, float __z) {
  return __ocml_len3_f32(__x, __y, __z);
}

__DEVICE__
float norm4df(float __x, float __y, float __z, float __w) {
  return __ocml_len4_f32(__x, __y, __z, __w);
}

__DEVICE__
float normcdff(float __x) { return __ocml_ncdf_f32(__x); }

__DEVICE__
float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }

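// Note (illustrative): normf/rnormf below are the TODO placeholder loops; they
// accumulate the sum of squares of __dim elements and return its square root
// (respectively reciprocal square root), so e.g. normf(3, __a) computes the
// same quantity as norm3df(__a[0], __a[1], __a[2]).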
__DEVICE__
float normf(int __dim,
            const float *__a) { // TODO: placeholder until OCML adds support.
  float __r = 0;
  while (__dim--) {
    __r += __a[0] * __a[0];
    ++__a;
  }

  return __ocml_sqrt_f32(__r);
}

__DEVICE__
float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }

__DEVICE__
float powif(float __x, int __y) { return __ocml_pown_f32(__x, __y); }

__DEVICE__
float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }

__DEVICE__
float remainderf(float __x, float __y) {
  return __ocml_remainder_f32(__x, __y);
}

__DEVICE__
float remquof(float __x, float __y, int *__quo) {
  int __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  float __r = __ocml_remquo_f32(
      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
  *__quo = __tmp;

  return __r;
}

__DEVICE__
float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); }

__DEVICE__
float rintf(float __x) { return __builtin_rintf(__x); }

__DEVICE__
float rnorm3df(float __x, float __y, float __z) {
  return __ocml_rlen3_f32(__x, __y, __z);
}

__DEVICE__
float rnorm4df(float __x, float __y, float __z, float __w) {
  return __ocml_rlen4_f32(__x, __y, __z, __w);
}

__DEVICE__
float rnormf(int __dim,
             const float *__a) { // TODO: placeholder until OCML adds support.
  float __r = 0;
  while (__dim--) {
    __r += __a[0] * __a[0];
    ++__a;
  }

  return __ocml_rsqrt_f32(__r);
}

__DEVICE__
float roundf(float __x) { return __builtin_roundf(__x); }

__DEVICE__
float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }

__DEVICE__
float scalblnf(float __x, long int __n) {
  return (__n < INT_MAX) ? __builtin_amdgcn_ldexpf(__x, __n)
                         : __ocml_scalb_f32(__x, __n);
}

__DEVICE__
float scalbnf(float __x, int __n) { return __builtin_amdgcn_ldexpf(__x, __n); }

__DEVICE__
__RETURN_TYPE __signbitf(float __x) { return __builtin_signbitf(__x); }

__DEVICE__
void sincosf(float __x, float *__sinptr, float *__cosptr) {
  float __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  *__sinptr =
      __ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
  *__cosptr = __tmp;
}

__DEVICE__
void sincospif(float __x, float *__sinptr, float *__cosptr) {
  float __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  *__sinptr = __ocml_sincospi_f32(
      __x, (__attribute__((address_space(5))) float *)&__tmp);
  *__cosptr = __tmp;
}

__DEVICE__
float sinf(float __x) { return __ocml_sin_f32(__x); }

__DEVICE__
float sinhf(float __x) { return __ocml_sinh_f32(__x); }

__DEVICE__
float sinpif(float __x) { return __ocml_sinpi_f32(__x); }

__DEVICE__
float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }

__DEVICE__
float tanf(float __x) { return __ocml_tan_f32(__x); }

__DEVICE__
float tanhf(float __x) { return __ocml_tanh_f32(__x); }

__DEVICE__
float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }

__DEVICE__
float truncf(float __x) { return __builtin_truncf(__x); }

__DEVICE__
float y0f(float __x) { return __ocml_y0_f32(__x); }

__DEVICE__
float y1f(float __x) { return __ocml_y1_f32(__x); }

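// Note (illustrative): ynf below, like jnf above (and jn/yn in the double
// section), evaluates higher orders with the standard forward recurrence
//   C(n+1, x) = (2 * n / x) * C(n, x) - C(n - 1, x)
// starting from the order-0 and order-1 results, i.e. O(n) steps.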
__DEVICE__
float ynf(int __n, float __x) { // TODO: we could use Ahmes multiplication
                                // and the Miller & Brown algorithm
  // for linear recurrences to get O(log n) steps, but it's unclear if
  // it'd be beneficial in this case. Placeholder until OCML adds
  // support.
  if (__n == 0)
    return y0f(__x);
  if (__n == 1)
    return y1f(__x);

  float __x0 = y0f(__x);
  float __x1 = y1f(__x);
  for (int __i = 1; __i < __n; ++__i) {
    float __x2 = (2 * __i) / __x * __x1 - __x0;
    __x0 = __x1;
    __x1 = __x2;
  }

  return __x1;
}

// BEGIN INTRINSICS

__DEVICE__
float __cosf(float __x) { return __ocml_native_cos_f32(__x); }

__DEVICE__
float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }

__DEVICE__
float __expf(float __x) { return __ocml_native_exp_f32(__x); }

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fadd_rd(float __x, float __y) { return __ocml_add_rtn_f32(__x, __y); }
__DEVICE__
float __fadd_rn(float __x, float __y) { return __ocml_add_rte_f32(__x, __y); }
__DEVICE__
float __fadd_ru(float __x, float __y) { return __ocml_add_rtp_f32(__x, __y); }
__DEVICE__
float __fadd_rz(float __x, float __y) { return __ocml_add_rtz_f32(__x, __y); }
#else
__DEVICE__
float __fadd_rn(float __x, float __y) { return __x + __y; }
#endif

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fdiv_rd(float __x, float __y) { return __ocml_div_rtn_f32(__x, __y); }
__DEVICE__
float __fdiv_rn(float __x, float __y) { return __ocml_div_rte_f32(__x, __y); }
__DEVICE__
float __fdiv_ru(float __x, float __y) { return __ocml_div_rtp_f32(__x, __y); }
__DEVICE__
float __fdiv_rz(float __x, float __y) { return __ocml_div_rtz_f32(__x, __y); }
#else
__DEVICE__
float __fdiv_rn(float __x, float __y) { return __x / __y; }
#endif

__DEVICE__
float __fdividef(float __x, float __y) { return __x / __y; }

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fmaf_rd(float __x, float __y, float __z) {
  return __ocml_fma_rtn_f32(__x, __y, __z);
}
__DEVICE__
float __fmaf_rn(float __x, float __y, float __z) {
  return __ocml_fma_rte_f32(__x, __y, __z);
}
__DEVICE__
float __fmaf_ru(float __x, float __y, float __z) {
  return __ocml_fma_rtp_f32(__x, __y, __z);
}
__DEVICE__
float __fmaf_rz(float __x, float __y, float __z) {
  return __ocml_fma_rtz_f32(__x, __y, __z);
}
#else
__DEVICE__
float __fmaf_rn(float __x, float __y, float __z) {
  return __builtin_fmaf(__x, __y, __z);
}
#endif

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fmul_rd(float __x, float __y) { return __ocml_mul_rtn_f32(__x, __y); }
__DEVICE__
float __fmul_rn(float __x, float __y) { return __ocml_mul_rte_f32(__x, __y); }
__DEVICE__
float __fmul_ru(float __x, float __y) { return __ocml_mul_rtp_f32(__x, __y); }
__DEVICE__
float __fmul_rz(float __x, float __y) { return __ocml_mul_rtz_f32(__x, __y); }
#else
__DEVICE__
float __fmul_rn(float __x, float __y) { return __x * __y; }
#endif

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __frcp_rd(float __x) { return __ocml_div_rtn_f32(1.0f, __x); }
__DEVICE__
float __frcp_rn(float __x) { return __ocml_div_rte_f32(1.0f, __x); }
__DEVICE__
float __frcp_ru(float __x) { return __ocml_div_rtp_f32(1.0f, __x); }
__DEVICE__
float __frcp_rz(float __x) { return __ocml_div_rtz_f32(1.0f, __x); }
#else
__DEVICE__
float __frcp_rn(float __x) { return 1.0f / __x; }
#endif

__DEVICE__
float __frsqrt_rn(float __x) { return __builtin_amdgcn_rsqf(__x); }
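// Note (illustrative): the _rd/_rn/_ru/_rz suffixes on the intrinsics in this
// block select the rounding mode (toward -inf, to nearest-even, toward +inf,
// toward zero) and map to the OCML _rtn/_rte/_rtp/_rtz entry points. Only the
// _rn variants are available unless OCML_BASIC_ROUNDED_OPERATIONS is defined.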

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
__DEVICE__
float __fsqrt_rn(float __x) { return __ocml_sqrt_rte_f32(__x); }
__DEVICE__
float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
__DEVICE__
float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
#else
__DEVICE__
float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
#endif

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
float __fsub_rd(float __x, float __y) { return __ocml_sub_rtn_f32(__x, __y); }
__DEVICE__
float __fsub_rn(float __x, float __y) { return __ocml_sub_rte_f32(__x, __y); }
__DEVICE__
float __fsub_ru(float __x, float __y) { return __ocml_sub_rtp_f32(__x, __y); }
__DEVICE__
float __fsub_rz(float __x, float __y) { return __ocml_sub_rtz_f32(__x, __y); }
#else
__DEVICE__
float __fsub_rn(float __x, float __y) { return __x - __y; }
#endif

__DEVICE__
float __log10f(float __x) { return __ocml_native_log10_f32(__x); }

__DEVICE__
float __log2f(float __x) { return __ocml_native_log2_f32(__x); }

__DEVICE__
float __logf(float __x) { return __ocml_native_log_f32(__x); }

__DEVICE__
float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }

__DEVICE__
float __saturatef(float __x) { return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x); }

__DEVICE__
void __sincosf(float __x, float *__sinptr, float *__cosptr) {
  *__sinptr = __ocml_native_sin_f32(__x);
  *__cosptr = __ocml_native_cos_f32(__x);
}

__DEVICE__
float __sinf(float __x) { return __ocml_native_sin_f32(__x); }

__DEVICE__
float __tanf(float __x) { return __ocml_tan_f32(__x); }
// END INTRINSICS
// END FLOAT

// BEGIN DOUBLE
__DEVICE__
double acos(double __x) { return __ocml_acos_f64(__x); }

__DEVICE__
double acosh(double __x) { return __ocml_acosh_f64(__x); }

__DEVICE__
double asin(double __x) { return __ocml_asin_f64(__x); }

__DEVICE__
double asinh(double __x) { return __ocml_asinh_f64(__x); }

__DEVICE__
double atan(double __x) { return __ocml_atan_f64(__x); }

__DEVICE__
double atan2(double __x, double __y) { return __ocml_atan2_f64(__x, __y); }

__DEVICE__
double atanh(double __x) { return __ocml_atanh_f64(__x); }

__DEVICE__
double cbrt(double __x) { return __ocml_cbrt_f64(__x); }

__DEVICE__
double ceil(double __x) { return __builtin_ceil(__x); }

__DEVICE__
double copysign(double __x, double __y) {
  return __builtin_copysign(__x, __y);
}

__DEVICE__
double cos(double __x) { return __ocml_cos_f64(__x); }

__DEVICE__
double cosh(double __x) { return __ocml_cosh_f64(__x); }

__DEVICE__
double cospi(double __x) { return __ocml_cospi_f64(__x); }

__DEVICE__
double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }

__DEVICE__
double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }

__DEVICE__
double erf(double __x) { return __ocml_erf_f64(__x); }

__DEVICE__
double erfc(double __x) { return __ocml_erfc_f64(__x); }

__DEVICE__
double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }

__DEVICE__
double erfcx(double __x) { return __ocml_erfcx_f64(__x); }

__DEVICE__
double erfinv(double __x) { return __ocml_erfinv_f64(__x); }

__DEVICE__
double exp(double __x) { return __ocml_exp_f64(__x); }

__DEVICE__
double exp10(double __x) { return __ocml_exp10_f64(__x); }

__DEVICE__
double exp2(double __x) { return __ocml_exp2_f64(__x); }

__DEVICE__
double expm1(double __x) { return __ocml_expm1_f64(__x); }

__DEVICE__
double fabs(double __x) { return __builtin_fabs(__x); }

__DEVICE__
double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }

__DEVICE__
double floor(double __x) { return __builtin_floor(__x); }

__DEVICE__
double fma(double __x, double __y, double __z) {
  return __builtin_fma(__x, __y, __z);
}

__DEVICE__
double fmax(double __x, double __y) { return __builtin_fmax(__x, __y); }

__DEVICE__
double fmin(double __x, double __y) { return __builtin_fmin(__x, __y); }

__DEVICE__
double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }

__DEVICE__
double frexp(double __x, int *__nptr) {
  return __builtin_frexp(__x, __nptr);
}

__DEVICE__
double hypot(double __x, double __y) { return __ocml_hypot_f64(__x, __y); }

__DEVICE__
int ilogb(double __x) { return __ocml_ilogb_f64(__x); }

__DEVICE__
__RETURN_TYPE __finite(double __x) { return __builtin_isfinite(__x); }

__DEVICE__
__RETURN_TYPE __isinf(double __x) { return __builtin_isinf(__x); }

__DEVICE__
__RETURN_TYPE __isnan(double __x) { return __builtin_isnan(__x); }

__DEVICE__
double j0(double __x) { return __ocml_j0_f64(__x); }

__DEVICE__
double j1(double __x) { return __ocml_j1_f64(__x); }

__DEVICE__
double jn(int __n, double __x) { // TODO: we could use Ahmes multiplication
                                 // and the Miller & Brown algorithm
  // for linear recurrences to get O(log n) steps, but it's unclear if
  // it'd be beneficial in this case. Placeholder until OCML adds
  // support.
  if (__n == 0)
    return j0(__x);
  if (__n == 1)
    return j1(__x);

  double __x0 = j0(__x);
  double __x1 = j1(__x);
  for (int __i = 1; __i < __n; ++__i) {
    double __x2 = (2 * __i) / __x * __x1 - __x0;
    __x0 = __x1;
    __x1 = __x2;
  }
  return __x1;
}

__DEVICE__
double ldexp(double __x, int __e) { return __builtin_amdgcn_ldexp(__x, __e); }

__DEVICE__
double lgamma(double __x) { return __ocml_lgamma_f64(__x); }

__DEVICE__
long long int llrint(double __x) { return __builtin_rint(__x); }

__DEVICE__
long long int llround(double __x) { return __builtin_round(__x); }

__DEVICE__
double log(double __x) { return __ocml_log_f64(__x); }

__DEVICE__
double log10(double __x) { return __ocml_log10_f64(__x); }

__DEVICE__
double log1p(double __x) { return __ocml_log1p_f64(__x); }

__DEVICE__
double log2(double __x) { return __ocml_log2_f64(__x); }

__DEVICE__
double logb(double __x) { return __ocml_logb_f64(__x); }

__DEVICE__
long int lrint(double __x) { return __builtin_rint(__x); }

__DEVICE__
long int lround(double __x) { return __builtin_round(__x); }

__DEVICE__
double modf(double __x, double *__iptr) {
  double __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  double __r =
      __ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);
  *__iptr = __tmp;

  return __r;
}

__DEVICE__
double nan(const char *__tagp) {
#if !_WIN32
  union {
    double val;
    struct ieee_double {
      uint64_t mantissa : 51;
      uint32_t quiet : 1;
      uint32_t exponent : 11;
      uint32_t sign : 1;
    } bits;
  } __tmp;
  __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));

  __tmp.bits.sign = 0u;
  __tmp.bits.exponent = ~0u;
  __tmp.bits.quiet = 1u;
  __tmp.bits.mantissa = __make_mantissa(__tagp);

  return __tmp.val;
#else
  __static_assert_type_size_equal(sizeof(uint64_t), sizeof(double));
  uint64_t __val = __make_mantissa(__tagp);
  // Use a 64-bit constant so the shift by 51 does not overflow a 32-bit int.
  __val |= 0xFFFull << 51;
  return *reinterpret_cast<double *>(&__val);
#endif
}

__DEVICE__
double nearbyint(double __x) { return __builtin_nearbyint(__x); }

__DEVICE__
double nextafter(double __x, double __y) {
  return __ocml_nextafter_f64(__x, __y);
}

__DEVICE__
double norm(int __dim,
            const double *__a) { // TODO: placeholder until OCML adds support.
  double __r = 0;
  while (__dim--) {
    __r += __a[0] * __a[0];
    ++__a;
  }

  return __ocml_sqrt_f64(__r);
}

__DEVICE__
double norm3d(double __x, double __y, double __z) {
  return __ocml_len3_f64(__x, __y, __z);
}

__DEVICE__
double norm4d(double __x, double __y, double __z, double __w) {
  return __ocml_len4_f64(__x, __y, __z, __w);
}

__DEVICE__
double normcdf(double __x) { return __ocml_ncdf_f64(__x); }

__DEVICE__
double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }

__DEVICE__
double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }

__DEVICE__
double powi(double __x, int __y) { return __ocml_pown_f64(__x, __y); }

__DEVICE__
double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }

__DEVICE__
double remainder(double __x, double __y) {
  return __ocml_remainder_f64(__x, __y);
}

__DEVICE__
double remquo(double __x, double __y, int *__quo) {
  int __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  double __r = __ocml_remquo_f64(
      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
  *__quo = __tmp;

  return __r;
}

__DEVICE__
double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); }

__DEVICE__
double rint(double __x) { return __builtin_rint(__x); }

__DEVICE__
double rnorm(int __dim,
             const double *__a) { // TODO: placeholder until OCML adds support.
  double __r = 0;
  while (__dim--) {
    __r += __a[0] * __a[0];
    ++__a;
  }

  return __ocml_rsqrt_f64(__r);
}

__DEVICE__
double rnorm3d(double __x, double __y, double __z) {
  return __ocml_rlen3_f64(__x, __y, __z);
}

__DEVICE__
double rnorm4d(double __x, double __y, double __z, double __w) {
  return __ocml_rlen4_f64(__x, __y, __z, __w);
}

__DEVICE__
double round(double __x) { return __builtin_round(__x); }

__DEVICE__
double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }

__DEVICE__
double scalbln(double __x, long int __n) {
  return (__n < INT_MAX) ? __builtin_amdgcn_ldexp(__x, __n)
                         : __ocml_scalb_f64(__x, __n);
}
__DEVICE__
double scalbn(double __x, int __n) { return __builtin_amdgcn_ldexp(__x, __n); }

__DEVICE__
__RETURN_TYPE __signbit(double __x) { return __builtin_signbit(__x); }

__DEVICE__
double sin(double __x) { return __ocml_sin_f64(__x); }

__DEVICE__
void sincos(double __x, double *__sinptr, double *__cosptr) {
  double __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  *__sinptr = __ocml_sincos_f64(
      __x, (__attribute__((address_space(5))) double *)&__tmp);
  *__cosptr = __tmp;
}

__DEVICE__
void sincospi(double __x, double *__sinptr, double *__cosptr) {
  double __tmp;
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
  *__sinptr = __ocml_sincospi_f64(
      __x, (__attribute__((address_space(5))) double *)&__tmp);
  *__cosptr = __tmp;
}

__DEVICE__
double sinh(double __x) { return __ocml_sinh_f64(__x); }

__DEVICE__
double sinpi(double __x) { return __ocml_sinpi_f64(__x); }

__DEVICE__
double sqrt(double __x) { return __ocml_sqrt_f64(__x); }

__DEVICE__
double tan(double __x) { return __ocml_tan_f64(__x); }

__DEVICE__
double tanh(double __x) { return __ocml_tanh_f64(__x); }

__DEVICE__
double tgamma(double __x) { return __ocml_tgamma_f64(__x); }

__DEVICE__
double trunc(double __x) { return __builtin_trunc(__x); }

__DEVICE__
double y0(double __x) { return __ocml_y0_f64(__x); }

__DEVICE__
double y1(double __x) { return __ocml_y1_f64(__x); }

__DEVICE__
double yn(int __n, double __x) { // TODO: we could use Ahmes multiplication
                                 // and the Miller & Brown algorithm
  // for linear recurrences to get O(log n) steps, but it's unclear if
  // it'd be beneficial in this case. Placeholder until OCML adds
  // support.
  if (__n == 0)
    return y0(__x);
  if (__n == 1)
    return y1(__x);

  double __x0 = y0(__x);
  double __x1 = y1(__x);
  for (int __i = 1; __i < __n; ++__i) {
    double __x2 = (2 * __i) / __x * __x1 - __x0;
    __x0 = __x1;
    __x1 = __x2;
  }

  return __x1;
}

// BEGIN INTRINSICS
#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __dadd_rd(double __x, double __y) {
  return __ocml_add_rtn_f64(__x, __y);
}
__DEVICE__
double __dadd_rn(double __x, double __y) {
  return __ocml_add_rte_f64(__x, __y);
}
__DEVICE__
double __dadd_ru(double __x, double __y) {
  return __ocml_add_rtp_f64(__x, __y);
}
__DEVICE__
double __dadd_rz(double __x, double __y) {
  return __ocml_add_rtz_f64(__x, __y);
}
#else
__DEVICE__
double __dadd_rn(double __x, double __y) { return __x + __y; }
#endif

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __ddiv_rd(double __x, double __y) {
  return __ocml_div_rtn_f64(__x, __y);
}
__DEVICE__
double __ddiv_rn(double __x, double __y) {
  return __ocml_div_rte_f64(__x, __y);
}
__DEVICE__
double __ddiv_ru(double __x, double __y) {
  return __ocml_div_rtp_f64(__x, __y);
}
__DEVICE__
double __ddiv_rz(double __x, double __y) {
  return __ocml_div_rtz_f64(__x, __y);
}
#else
__DEVICE__
double __ddiv_rn(double __x, double __y) { return __x / __y; }
#endif

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __dmul_rd(double __x, double __y) {
  return __ocml_mul_rtn_f64(__x, __y);
}
__DEVICE__
double __dmul_rn(double __x, double __y) {
  return __ocml_mul_rte_f64(__x, __y);
}
__DEVICE__
double __dmul_ru(double __x, double __y) {
  return __ocml_mul_rtp_f64(__x, __y);
}
__DEVICE__
double __dmul_rz(double __x, double __y) {
  return __ocml_mul_rtz_f64(__x, __y);
}
#else
__DEVICE__
double __dmul_rn(double __x, double __y) { return __x * __y; }
#endif

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __drcp_rd(double __x) { return __ocml_div_rtn_f64(1.0, __x); }
__DEVICE__
double __drcp_rn(double __x) { return __ocml_div_rte_f64(1.0, __x); }
__DEVICE__
double __drcp_ru(double __x) { return __ocml_div_rtp_f64(1.0, __x); }
__DEVICE__
double __drcp_rz(double __x) { return __ocml_div_rtz_f64(1.0, __x); }
#else
__DEVICE__
double __drcp_rn(double __x) { return 1.0 / __x; }
#endif

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
__DEVICE__
double __dsqrt_rn(double __x) { return __ocml_sqrt_rte_f64(__x); }
__DEVICE__
double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
__DEVICE__
double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
#else
__DEVICE__
double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }
#endif

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __dsub_rd(double __x, double __y) {
  return __ocml_sub_rtn_f64(__x, __y);
}
__DEVICE__
double __dsub_rn(double __x, double __y) {
  return __ocml_sub_rte_f64(__x, __y);
}
__DEVICE__
double __dsub_ru(double __x, double __y) {
  return __ocml_sub_rtp_f64(__x, __y);
}
__DEVICE__
double __dsub_rz(double __x, double __y) {
  return __ocml_sub_rtz_f64(__x, __y);
}
#else
__DEVICE__
double __dsub_rn(double __x, double __y) { return __x - __y; }
#endif

#if defined OCML_BASIC_ROUNDED_OPERATIONS
__DEVICE__
double __fma_rd(double __x, double __y, double __z) {
  return __ocml_fma_rtn_f64(__x, __y, __z);
}
__DEVICE__
double __fma_rn(double __x, double __y, double __z) {
  return __ocml_fma_rte_f64(__x, __y, __z);
}
__DEVICE__
double __fma_ru(double __x, double __y, double __z) {
  return __ocml_fma_rtp_f64(__x, __y, __z);
}
__DEVICE__
double __fma_rz(double __x, double __y, double __z) {
  return __ocml_fma_rtz_f64(__x, __y, __z);
}
#else
__DEVICE__
double __fma_rn(double __x, double __y, double __z) {
  return __builtin_fma(__x, __y, __z);
}
#endif
// END INTRINSICS
// END DOUBLE

// C only macros
#if !defined(__cplusplus) && __STDC_VERSION__ >= 201112L
#define isfinite(__x) _Generic((__x), float : __finitef, double : __finite)(__x)
#define isinf(__x) _Generic((__x), float : __isinff, double : __isinf)(__x)
#define isnan(__x) _Generic((__x), float : __isnanf, double : __isnan)(__x)
#define signbit(__x) \
  _Generic((__x), float : __signbitf, double : __signbit)(__x)
#endif // !defined(__cplusplus) && __STDC_VERSION__ >= 201112L

#if defined(__cplusplus)
template <class T> __DEVICE__ T min(T __arg1, T __arg2) {
  return (__arg1 < __arg2) ? __arg1 : __arg2;
}

template <class T> __DEVICE__ T max(T __arg1, T __arg2) {
  return (__arg1 > __arg2) ? __arg1 : __arg2;
}

__DEVICE__ int min(int __arg1, int __arg2) {
  return (__arg1 < __arg2) ? __arg1 : __arg2;
}
__DEVICE__ int max(int __arg1, int __arg2) {
  return (__arg1 > __arg2) ? __arg1 : __arg2;
}

__DEVICE__
float max(float __x, float __y) { return __builtin_fmaxf(__x, __y); }

__DEVICE__
double max(double __x, double __y) { return __builtin_fmax(__x, __y); }

__DEVICE__
float min(float __x, float __y) { return __builtin_fminf(__x, __y); }

__DEVICE__
double min(double __x, double __y) { return __builtin_fmin(__x, __y); }

#if !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
__host__ inline static int min(int __arg1, int __arg2) {
  return std::min(__arg1, __arg2);
}

__host__ inline static int max(int __arg1, int __arg2) {
  return std::max(__arg1, __arg2);
}
#endif // !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
#endif

#pragma pop_macro("__DEVICE__")
#pragma pop_macro("__RETURN_TYPE")

#endif // __CLANG_HIP_MATH_H__