/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __ARM_ACLE_H
#define __ARM_ACLE_H

#ifndef __ARM_ACLE
#error "ACLE intrinsics support not enabled."
#endif

#include <stdint.h>

#if defined(__cplusplus)
extern "C" {
#endif

/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
/* 8.3 Memory barriers */
#if !__has_builtin(__dmb)
#define __dmb(i) __builtin_arm_dmb(i)
#endif
#if !__has_builtin(__dsb)
#define __dsb(i) __builtin_arm_dsb(i)
#endif
#if !__has_builtin(__isb)
#define __isb(i) __builtin_arm_isb(i)
#endif

/* 8.4 Hints */

#if !__has_builtin(__wfi)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
  __builtin_arm_wfi();
}
#endif

#if !__has_builtin(__wfe)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
  __builtin_arm_wfe();
}
#endif

#if !__has_builtin(__sev)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
  __builtin_arm_sev();
}
#endif

#if !__has_builtin(__sevl)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
  __builtin_arm_sevl();
}
#endif

#if !__has_builtin(__yield)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
  __builtin_arm_yield();
}
#endif

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __dbg(t) __builtin_arm_dbg(t)
#endif

/* 8.5 Swap */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__swp(uint32_t __x, volatile uint32_t *__p) {
  uint32_t __v;
  do
    __v = __builtin_arm_ldrex(__p);
  while (__builtin_arm_strex(__x, __p));
  return __v;
}

/* 8.6 Memory prefetch intrinsics */
/* 8.6.1 Data prefetch */
#define __pld(addr) __pldx(0, 0, 0, addr)

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __pldx(access_kind, cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, access_kind, 1)
#else
#define __pldx(access_kind, cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
#endif

/* 8.6.2 Instruction prefetch */
#define __pli(addr) __plix(0, 0, addr)

#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __plix(cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, 0, 0)
#else
#define __plix(cache_level, retention_policy, addr) \
  __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
#endif

/* 8.7 NOP */
#if !defined(_MSC_VER) || !defined(__aarch64__)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
  __builtin_arm_nop();
}
#endif
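
/*
 * Illustrative usage sketch (not part of ACLE): __swp, __yield and __dmb can
 * build a minimal spinlock.  lock_acquire() and lock_release() are
 * hypothetical example helpers, and 0xF selects the full-system (SY) barrier
 * variant.
 *
 *   static volatile uint32_t lock = 0;
 *
 *   static void lock_acquire(void) {
 *     while (__swp(1, &lock) != 0)   // atomically swap in 1; old 0 means owned
 *       __yield();                   // spin politely while contended
 *     __dmb(0xF);                    // order the critical section after the lock
 *   }
 *
 *   static void lock_release(void) {
 *     __dmb(0xF);                    // drain critical-section accesses first
 *     lock = 0;
 *   }
 */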

/* 9 DATA-PROCESSING INTRINSICS */
/* 9.2 Miscellaneous data-processing intrinsics */
/* ROR */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__ror(uint32_t __x, uint32_t __y) {
  __y %= 32;
  if (__y == 0)
    return __x;
  return (__x >> __y) | (__x << (32 - __y));
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rorll(uint64_t __x, uint32_t __y) {
  __y %= 64;
  if (__y == 0)
    return __x;
  return (__x >> __y) | (__x << (64 - __y));
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rorl(unsigned long __x, uint32_t __y) {
#if __SIZEOF_LONG__ == 4
  return __ror(__x, __y);
#else
  return __rorll(__x, __y);
#endif
}

/* CLZ */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clz(uint32_t __t) {
  return __builtin_arm_clz(__t);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_arm_clz(__t);
#else
  return __builtin_arm_clz64(__t);
#endif
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzll(uint64_t __t) {
  return __builtin_arm_clz64(__t);
}

/* CLS */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__cls(uint32_t __t) {
  return __builtin_arm_cls(__t);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_arm_cls(__t);
#else
  return __builtin_arm_cls64(__t);
#endif
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsll(uint64_t __t) {
  return __builtin_arm_cls64(__t);
}

/* REV */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rev(uint32_t __t) {
  return __builtin_bswap32(__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__revl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __builtin_bswap32(__t);
#else
  return __builtin_bswap64(__t);
#endif
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__revll(uint64_t __t) {
  return __builtin_bswap64(__t);
}

/* REV16 */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rev16(uint32_t __t) {
  return __ror(__rev(__t), 16);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rev16ll(uint64_t __t) {
  return (((uint64_t)__rev16(__t >> 32)) << 32) | (uint64_t)__rev16((uint32_t)__t);
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rev16l(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __rev16(__t);
#else
  return __rev16ll(__t);
#endif
}

/* REVSH */
static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
__revsh(int16_t __t) {
  return (int16_t)__builtin_bswap16((uint16_t)__t);
}

/* RBIT */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__rbit(uint32_t __t) {
  return __builtin_arm_rbit(__t);
}

static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rbitll(uint64_t __t) {
#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
  return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
         __builtin_arm_rbit(__t >> 32);
#else
  return __builtin_arm_rbit64(__t);
#endif
}

static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
__rbitl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
  return __rbit(__t);
#else
  return __rbitll(__t);
#endif
}
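
/*
 * Illustrative usage sketch (not part of ACLE): two common uses of the
 * intrinsics above.  next_pow2() and to_big_endian() are hypothetical example
 * helpers; ACLE defines __clz(0) as 32, and __rev is useful for byte-order
 * conversion on a little-endian core.
 *
 *   // Round x up to the next power of two (valid for 1 <= x <= 1u << 31).
 *   static uint32_t next_pow2(uint32_t x) {
 *     return (x <= 1) ? 1u : (1u << (32 - __clz(x - 1)));
 *   }
 *
 *   static uint32_t to_big_endian(uint32_t x) {
 *     return __rev(x);   // byte-swap: 0x11223344 -> 0x44332211
 *   }
 */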

/*
 * 9.3 16-bit multiplications
 */
#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smulbb(int32_t __a, int32_t __b) {
  return __builtin_arm_smulbb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smulbt(int32_t __a, int32_t __b) {
  return __builtin_arm_smulbt(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smultb(int32_t __a, int32_t __b) {
  return __builtin_arm_smultb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smultt(int32_t __a, int32_t __b) {
  return __builtin_arm_smultt(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smulwb(int32_t __a, int32_t __b) {
  return __builtin_arm_smulwb(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smulwt(int32_t __a, int32_t __b) {
  return __builtin_arm_smulwt(__a, __b);
}
#endif

/*
 * 9.4 Saturating intrinsics
 *
 * FIXME: Change guards to their corresponding __ARM_FEATURE flag when the Q
 * flag intrinsics are implemented and the flag is enabled.
 */
/* 9.4.1 Width-specified saturation intrinsics */
#if defined(__ARM_FEATURE_SAT) && __ARM_FEATURE_SAT
#define __ssat(x, y) __builtin_arm_ssat(x, y)
#define __usat(x, y) __builtin_arm_usat(x, y)
#endif

/* 9.4.2 Saturating addition and subtraction intrinsics */
#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qadd(int32_t __t, int32_t __v) {
  return __builtin_arm_qadd(__t, __v);
}

static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qsub(int32_t __t, int32_t __v) {
  return __builtin_arm_qsub(__t, __v);
}

static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qdbl(int32_t __t) {
  return __builtin_arm_qadd(__t, __t);
}
#endif

/* 9.4.3 Accumulating multiplications */
#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlabb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlabb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlabt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlabt(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlatb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlatb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlatt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlatt(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlawb(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlawb(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlawt(int32_t __a, int32_t __b, int32_t __c) {
  return __builtin_arm_smlawt(__a, __b, __c);
}
#endif
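
/*
 * Illustrative usage sketch (not part of ACLE): with __ARM_FEATURE_DSP,
 * __qadd clamps to [INT32_MIN, INT32_MAX] instead of wrapping (setting the Q
 * flag on saturation), which suits Q31 fixed-point accumulation.  mix_q31()
 * is a hypothetical example helper.
 *
 *   static void mix_q31(const int32_t *a, const int32_t *b, int32_t *out,
 *                       int n) {
 *     for (int i = 0; i < n; ++i)
 *       out[i] = __qadd(a[i], b[i]);   // saturates rather than overflowing
 *   }
 */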

/* 9.5.4 Parallel 16-bit saturation */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
#define __usat16(x, y) __builtin_arm_usat16(x, y)
#endif

/* 9.5.5 Packing and unpacking */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
typedef int32_t int8x4_t;
typedef int32_t int16x2_t;
typedef uint32_t uint8x4_t;
typedef uint32_t uint16x2_t;

static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sxtab16(int16x2_t __a, int8x4_t __b) {
  return __builtin_arm_sxtab16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sxtb16(int8x4_t __a) {
  return __builtin_arm_sxtb16(__a);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__uxtab16(int16x2_t __a, int8x4_t __b) {
  return __builtin_arm_uxtab16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__uxtb16(int8x4_t __a) {
  return __builtin_arm_uxtb16(__a);
}
#endif

/* 9.5.6 Parallel selection */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__sel(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_sel(__a, __b);
}
#endif

/* 9.5.7 Parallel 8-bit addition and subtraction */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_qadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qsub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_qsub8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__sadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_sadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__shadd8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_shadd8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__shsub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_shsub8(__a, __b);
}
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__ssub8(int8x4_t __a, int8x4_t __b) {
  return __builtin_arm_ssub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uhadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uhadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uhsub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uhsub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uqadd8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uqadd8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__uqsub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_uqsub8(__a, __b);
}
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__usub8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_usub8(__a, __b);
}
#endif
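
/*
 * Illustrative usage sketch (not part of ACLE): the SIMD32 types above are
 * ordinary 32-bit integers carrying four 8-bit lanes, so four pixels can be
 * processed per operation.  average4() and add_saturating4() are hypothetical
 * example helpers.
 *
 *   static uint8x4_t average4(uint8x4_t a, uint8x4_t b) {
 *     return __uhadd8(a, b);   // per-byte (a + b) / 2, no inter-lane carries
 *   }
 *
 *   static uint8x4_t add_saturating4(uint8x4_t a, uint8x4_t b) {
 *     return __uqadd8(a, b);   // each byte clamps at 255 instead of wrapping
 *   }
 */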

/* 9.5.8 Sum of 8-bit absolute differences */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usad8(uint8x4_t __a, uint8x4_t __b) {
  return __builtin_arm_usad8(__a, __b);
}
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
  return __builtin_arm_usada8(__a, __b, __c);
}
#endif
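
/*
 * Illustrative usage sketch (not part of ACLE): __usada8 folds four absolute
 * byte differences into an accumulator per call, the core of
 * sum-of-absolute-differences (SAD) kernels in motion estimation.  sad16() is
 * a hypothetical example helper comparing two 16-byte blocks; memcpy keeps
 * the packed loads alias-safe.
 *
 *   static uint32_t sad16(const uint8_t *p, const uint8_t *q) {
 *     uint32_t acc = 0;
 *     for (int i = 0; i < 16; i += 4) {
 *       uint8x4_t va, vb;
 *       __builtin_memcpy(&va, p + i, sizeof va);   // four bytes per step
 *       __builtin_memcpy(&vb, q + i, sizeof vb);
 *       acc = __usada8(va, vb, acc);               // acc += sum of |va - vb|
 *     }
 *     return acc;
 *   }
 */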

/* 9.5.9 Parallel 16-bit addition and subtraction */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qsax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qsax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qsub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_qsub16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_sadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__sasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_sasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shadd16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shadd16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shasx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shasx(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shsax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shsax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__shsub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_shsub16(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__ssax(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_ssax(__a, __b);
}
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__ssub16(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_ssub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhsax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhsax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uhsub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uhsub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqadd16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqadd16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqasx(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqasx(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqsax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqsax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__uqsub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_uqsub16(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__usax(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_usax(__a, __b);
}
static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
__usub16(uint16x2_t __a, uint16x2_t __b) {
  return __builtin_arm_usub16(__a, __b);
}
#endif

/* 9.5.10 Parallel 16-bit multiplications */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlad(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smladx(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlald(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlaldx(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlsd(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) {
  return __builtin_arm_smlsdx(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlsld(__a, __b, __c);
}
static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
  return __builtin_arm_smlsldx(__a, __b, __c);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smuad(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smuad(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smuadx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smuadx(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smusd(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smusd(__a, __b);
}
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smusdx(int16x2_t __a, int16x2_t __b) {
  return __builtin_arm_smusdx(__a, __b);
}
#endif
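
/*
 * Illustrative usage sketch (not part of ACLE): __smlad performs two 16x16
 * multiplies and one accumulate per call, so a Q15 dot product consumes two
 * samples per step.  dot_q15() is a hypothetical example helper; n is assumed
 * even, and memcpy keeps the packed 16-bit loads alias-safe.
 *
 *   static int32_t dot_q15(const int16_t *a, const int16_t *b, int n) {
 *     int32_t acc = 0;
 *     for (int i = 0; i < n; i += 2) {
 *       int16x2_t va, vb;
 *       __builtin_memcpy(&va, a + i, sizeof va);   // pack two Q15 samples
 *       __builtin_memcpy(&vb, b + i, sizeof vb);
 *       acc = __smlad(va, vb, acc);                // acc += a0*b0 + a1*b1
 *     }
 *     return acc;
 *   }
 */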

/* 9.6 Floating-point data-processing intrinsics */
#if (defined(__ARM_FEATURE_DIRECTED_ROUNDING) && \
     (__ARM_FEATURE_DIRECTED_ROUNDING)) && \
    (defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE)
static __inline__ double __attribute__((__always_inline__, __nodebug__))
__rintn(double __a) {
  return __builtin_roundeven(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__))
__rintnf(float __a) {
  return __builtin_roundevenf(__a);
}
#endif

/* 9.7 CRC32 intrinsics */
#if (defined(__ARM_FEATURE_CRC32) && __ARM_FEATURE_CRC32) || \
    (defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE)
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32b(uint32_t __a, uint8_t __b) {
  return __builtin_arm_crc32b(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32h(uint32_t __a, uint16_t __b) {
  return __builtin_arm_crc32h(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32w(uint32_t __a, uint32_t __b) {
  return __builtin_arm_crc32w(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32d(uint32_t __a, uint64_t __b) {
  return __builtin_arm_crc32d(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cb(uint32_t __a, uint8_t __b) {
  return __builtin_arm_crc32cb(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32ch(uint32_t __a, uint16_t __b) {
  return __builtin_arm_crc32ch(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cw(uint32_t __a, uint32_t __b) {
  return __builtin_arm_crc32cw(__a, __b);
}

static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cd(uint32_t __a, uint64_t __b) {
  return __builtin_arm_crc32cd(__a, __b);
}
#endif
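
/*
 * Illustrative usage sketch (not part of ACLE): byte-at-a-time CRC-32C over a
 * buffer.  crc32c_buf() is a hypothetical example helper; the bit-inversion
 * at entry and exit follows the usual CRC-32C convention.
 *
 *   static uint32_t crc32c_buf(const uint8_t *p, unsigned long n) {
 *     uint32_t crc = 0xFFFFFFFFu;
 *     while (n--)
 *       crc = __crc32cb(crc, *p++);   // consume one byte per step
 *     return ~crc;
 *   }
 */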

/* Armv8.3-A Javascript conversion intrinsic */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("v8.3a")))
__jcvt(double __a) {
  return __builtin_arm_jcvt(__a);
}
#endif

/* Armv8.5-A FP rounding intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32zf(float __a) {
  return __builtin_arm_rint32zf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32z(double __a) {
  return __builtin_arm_rint32z(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64zf(float __a) {
  return __builtin_arm_rint64zf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64z(double __a) {
  return __builtin_arm_rint64z(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32xf(float __a) {
  return __builtin_arm_rint32xf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint32x(double __a) {
  return __builtin_arm_rint32x(__a);
}

static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64xf(float __a) {
  return __builtin_arm_rint64xf(__a);
}

static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
__rint64x(double __a) {
  return __builtin_arm_rint64x(__a);
}
#endif

/* Armv8.7-A load/store 64-byte intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
typedef struct {
  uint64_t val[8];
} data512_t;

static __inline__ data512_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_ld64b(const void *__addr) {
  data512_t __value;
  __builtin_arm_ld64b(__addr, __value.val);
  return __value;
}
static __inline__ void __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64b(void *__addr, data512_t __value) {
  __builtin_arm_st64b(__addr, __value.val);
}
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64bv(void *__addr, data512_t __value) {
  return __builtin_arm_st64bv(__addr, __value.val);
}
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64bv0(void *__addr, data512_t __value) {
  return __builtin_arm_st64bv0(__addr, __value.val);
}
#endif

/* 10.1 Special register intrinsics */
#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
#define __arm_rsr128(sysreg) __builtin_arm_rsr128(sysreg)
#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
#define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg))
#define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg))
#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
#define __arm_wsr128(sysreg, v) __builtin_arm_wsr128(sysreg, v)
#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))

/* Memory Tagging Extensions (MTE) Intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
#define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)
#define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)
#define __arm_mte_exclude_tag(__ptr, __excluded) __builtin_arm_gmi(__ptr, __excluded)
#define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr)
#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)

/* Memory Operations Intrinsics */
#define __arm_mops_memset_tag(__tagged_address, __value, __size) \
  __builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
#endif
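
/*
 * Illustrative usage sketch (not part of ACLE): a typical MTE flow tags an
 * allocation and returns the re-tagged pointer, which must then be used in
 * place of the original.  tag_region() is a hypothetical example helper;
 * size is assumed to be a multiple of the 16-byte tag granule.
 *
 *   static void *tag_region(void *p, unsigned long size) {
 *     void *tp = __arm_mte_create_random_tag(p, 0);   // choose a random tag
 *     for (unsigned long off = 0; off < size; off += 16)
 *       __arm_mte_set_tag((char *)tp + off);          // tag each granule
 *     return tp;
 *   }
 */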

/* Coprocessor Intrinsics */
#if defined(__ARM_FEATURE_COPROC)

#if (__ARM_FEATURE_COPROC & 0x1)

#if (__ARM_ARCH < 8)
#define __arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2) \
  __builtin_arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2)
#endif /* __ARM_ARCH < 8 */

#define __arm_ldc(coproc, CRd, p) __builtin_arm_ldc(coproc, CRd, p)
#define __arm_stc(coproc, CRd, p) __builtin_arm_stc(coproc, CRd, p)

#define __arm_mcr(coproc, opc1, value, CRn, CRm, opc2) \
  __builtin_arm_mcr(coproc, opc1, value, CRn, CRm, opc2)
#define __arm_mrc(coproc, opc1, CRn, CRm, opc2) \
  __builtin_arm_mrc(coproc, opc1, CRn, CRm, opc2)

#if (__ARM_ARCH != 4) && (__ARM_ARCH < 8)
#define __arm_ldcl(coproc, CRd, p) __builtin_arm_ldcl(coproc, CRd, p)
#define __arm_stcl(coproc, CRd, p) __builtin_arm_stcl(coproc, CRd, p)
#endif /* (__ARM_ARCH != 4) && (__ARM_ARCH < 8) */

#if (__ARM_ARCH_8M_MAIN__) || (__ARM_ARCH_8_1M_MAIN__)
#define __arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2) \
  __builtin_arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2)
#define __arm_ldcl(coproc, CRd, p) __builtin_arm_ldcl(coproc, CRd, p)
#define __arm_stcl(coproc, CRd, p) __builtin_arm_stcl(coproc, CRd, p)
#endif /* __ARM_ARCH_8M_MAIN__ || __ARM_ARCH_8_1M_MAIN__ */

#endif /* __ARM_FEATURE_COPROC & 0x1 */

#if (__ARM_FEATURE_COPROC & 0x2)
#define __arm_cdp2(coproc, opc1, CRd, CRn, CRm, opc2) \
  __builtin_arm_cdp2(coproc, opc1, CRd, CRn, CRm, opc2)
#define __arm_ldc2(coproc, CRd, p) __builtin_arm_ldc2(coproc, CRd, p)
#define __arm_stc2(coproc, CRd, p) __builtin_arm_stc2(coproc, CRd, p)
#define __arm_ldc2l(coproc, CRd, p) __builtin_arm_ldc2l(coproc, CRd, p)
#define __arm_stc2l(coproc, CRd, p) __builtin_arm_stc2l(coproc, CRd, p)
#define __arm_mcr2(coproc, opc1, value, CRn, CRm, opc2) \
  __builtin_arm_mcr2(coproc, opc1, value, CRn, CRm, opc2)
#define __arm_mrc2(coproc, opc1, CRn, CRm, opc2) \
  __builtin_arm_mrc2(coproc, opc1, CRn, CRm, opc2)
#endif /* __ARM_FEATURE_COPROC & 0x2 */

#if (__ARM_FEATURE_COPROC & 0x4)
#define __arm_mcrr(coproc, opc1, value, CRm) \
  __builtin_arm_mcrr(coproc, opc1, value, CRm)
#define __arm_mrrc(coproc, opc1, CRm) __builtin_arm_mrrc(coproc, opc1, CRm)
#endif /* __ARM_FEATURE_COPROC & 0x4 */

#if (__ARM_FEATURE_COPROC & 0x8)
#define __arm_mcrr2(coproc, opc1, value, CRm) \
  __builtin_arm_mcrr2(coproc, opc1, value, CRm)
#define __arm_mrrc2(coproc, opc1, CRm) __builtin_arm_mrrc2(coproc, opc1, CRm)
#endif /* __ARM_FEATURE_COPROC & 0x8 */

#endif /* __ARM_FEATURE_COPROC */

/* Transactional Memory Extension (TME) Intrinsics */
#if defined(__ARM_FEATURE_TME) && __ARM_FEATURE_TME

#define _TMFAILURE_REASON 0x00007fffu
#define _TMFAILURE_RTRY 0x00008000u
#define _TMFAILURE_CNCL 0x00010000u
#define _TMFAILURE_MEM 0x00020000u
#define _TMFAILURE_IMP 0x00040000u
#define _TMFAILURE_ERR 0x00080000u
#define _TMFAILURE_SIZE 0x00100000u
#define _TMFAILURE_NEST 0x00200000u
#define _TMFAILURE_DBG 0x00400000u
#define _TMFAILURE_INT 0x00800000u
#define _TMFAILURE_TRIVIAL 0x01000000u

#define __tstart() __builtin_arm_tstart()
#define __tcommit() __builtin_arm_tcommit()
#define __tcancel(__arg) __builtin_arm_tcancel(__arg)
#define __ttest() __builtin_arm_ttest()

#endif /* __ARM_FEATURE_TME */

/* Armv8.5-A Random number generation intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand")))
__rndr(uint64_t *__p) {
  return __builtin_arm_rndr(__p);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand")))
__rndrrs(uint64_t *__p) {
  return __builtin_arm_rndrrs(__p);
}
#endif

#if defined(__cplusplus)
}
#endif

#endif /* __ARM_ACLE_H */