/*
 * kmp_os.h -- KPTS runtime header file.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_OS_H
#define KMP_OS_H

#include "kmp_config.h"
#include <atomic>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>

#define KMP_FTN_PLAIN 1
#define KMP_FTN_APPEND 2
#define KMP_FTN_UPPER 3
/*
#define KMP_FTN_PREPEND 4
#define KMP_FTN_UAPPEND 5
*/

#define KMP_PTR_SKIP (sizeof(void *))

/* -------------------------- Compiler variations ------------------------ */

#define KMP_OFF 0
#define KMP_ON 1

#define KMP_MEM_CONS_VOLATILE 0
#define KMP_MEM_CONS_FENCE 1

#ifndef KMP_MEM_CONS_MODEL
#define KMP_MEM_CONS_MODEL KMP_MEM_CONS_VOLATILE
#endif

#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

/* ------------------------- Compiler recognition ---------------------- */
#define KMP_COMPILER_ICC 0
#define KMP_COMPILER_GCC 0
#define KMP_COMPILER_CLANG 0
#define KMP_COMPILER_MSVC 0
#define KMP_COMPILER_ICX 0

#if __INTEL_CLANG_COMPILER
#undef KMP_COMPILER_ICX
#define KMP_COMPILER_ICX 1
#elif defined(__INTEL_COMPILER)
#undef KMP_COMPILER_ICC
#define KMP_COMPILER_ICC 1
#elif defined(__clang__)
#undef KMP_COMPILER_CLANG
#define KMP_COMPILER_CLANG 1
#elif defined(__GNUC__)
#undef KMP_COMPILER_GCC
#define KMP_COMPILER_GCC 1
#elif defined(_MSC_VER)
#undef KMP_COMPILER_MSVC
#define KMP_COMPILER_MSVC 1
#else
#error Unknown compiler
#endif

#if (KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_FREEBSD)
#define KMP_AFFINITY_SUPPORTED 1
#if KMP_OS_WINDOWS && KMP_ARCH_X86_64
#define KMP_GROUP_AFFINITY 1
#else
#define KMP_GROUP_AFFINITY 0
#endif
#else
#define KMP_AFFINITY_SUPPORTED 0
#define KMP_GROUP_AFFINITY 0
#endif

#if (KMP_OS_LINUX || (KMP_OS_FREEBSD && __FreeBSD_version >= 1301000))
#define KMP_HAVE_SCHED_GETCPU 1
#else
#define KMP_HAVE_SCHED_GETCPU 0
#endif
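// Illustrative sketch (not part of this header): how a caller might use the
// KMP_HAVE_SCHED_GETCPU feature macro to query the current CPU with a safe
// fallback. Assumes <sched.h> provides sched_getcpu() where the macro is 1.
#if 0
#include <sched.h>

static int example_current_cpu(void) {
#if KMP_HAVE_SCHED_GETCPU
  return sched_getcpu(); // returns the CPU number, or -1 on error
#else
  return -1; // no portable equivalent; caller must handle the unknown case
#endif
}
#endif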
/* Check for quad-precision extension. */
#define KMP_HAVE_QUAD 0
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
/* _Quad is already defined for icc */
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#elif KMP_COMPILER_CLANG
/* Clang doesn't support a software-implemented
   128-bit extended precision type yet */
typedef long double _Quad;
#elif KMP_COMPILER_GCC
/* GCC on NetBSD lacks __multc3/__divtc3 builtins needed for quad */
#if !KMP_OS_NETBSD
typedef __float128 _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#elif KMP_COMPILER_MSVC
typedef long double _Quad;
#endif
#else
#if __LDBL_MAX_EXP__ >= 16384 && KMP_COMPILER_GCC
typedef long double _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#define KMP_USE_X87CONTROL 0
#if KMP_OS_WINDOWS
#define KMP_END_OF_LINE "\r\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#ifndef KMP_STRUCT64
typedef __int64 kmp_int64;
typedef unsigned __int64 kmp_uint64;
#define KMP_INT64_SPEC "I64d"
#define KMP_UINT64_SPEC "I64u"
#else
struct kmp_struct64 {
  kmp_int32 a, b;
};
typedef struct kmp_struct64 kmp_int64;
typedef struct kmp_struct64 kmp_uint64;
/* Not sure what to use for KMP_[U]INT64_SPEC here */
#endif
#if KMP_ARCH_X86 && KMP_MSVC_COMPAT
#undef KMP_USE_X87CONTROL
#define KMP_USE_X87CONTROL 1
#endif
#if KMP_ARCH_X86_64 || KMP_ARCH_AARCH64
#define KMP_INTPTR 1
typedef __int64 kmp_intptr_t;
typedef unsigned __int64 kmp_uintptr_t;
#define KMP_INTPTR_SPEC "I64d"
#define KMP_UINTPTR_SPEC "I64u"
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_OS_UNIX
#define KMP_END_OF_LINE "\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
typedef long long kmp_int64;
typedef unsigned long long kmp_uint64;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#define KMP_INT64_SPEC "lld"
#define KMP_UINT64_SPEC "llu"
#endif /* KMP_OS_UNIX */

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
#define KMP_SIZE_T_SPEC KMP_UINT32_SPEC
#elif KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 ||                 \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64
#define KMP_SIZE_T_SPEC KMP_UINT64_SPEC
#else
#error "Can't determine size_t printf format specifier."
#endif

#if KMP_ARCH_X86
#define KMP_SIZE_T_MAX (0xFFFFFFFF)
#else
#define KMP_SIZE_T_MAX (0xFFFFFFFFFFFFFFFF)
#endif
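// Illustrative sketch (not part of this header): the *_SPEC macros are printf
// format fragments, spliced into a string literal at compile time so the same
// call works across platforms whose 64-bit integer types differ.
#if 0
#include <stdio.h>

static void example_print_counts(kmp_int32 tid, kmp_int64 iters) {
  // expands to "%d"/"%lld" on Unix, "%d"/"%I64d" on Windows
  printf("tid=%" KMP_INT32_SPEC " iters=%" KMP_INT64_SPEC "\n", tid, iters);
}
#endif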
typedef size_t kmp_size_t;
typedef float kmp_real32;
typedef double kmp_real64;

#ifndef KMP_INTPTR
#define KMP_INTPTR 1
typedef long kmp_intptr_t;
typedef unsigned long kmp_uintptr_t;
#define KMP_INTPTR_SPEC "ld"
#define KMP_UINTPTR_SPEC "lu"
#endif

#ifdef BUILD_I8
typedef kmp_int64 kmp_int;
typedef kmp_uint64 kmp_uint;
#else
typedef kmp_int32 kmp_int;
typedef kmp_uint32 kmp_uint;
#endif /* BUILD_I8 */
#define KMP_INT_MAX ((kmp_int32)0x7FFFFFFF)
#define KMP_INT_MIN ((kmp_int32)0x80000000)

// stdarg handling
#if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64) &&                   \
    (KMP_OS_FREEBSD || KMP_OS_LINUX)
typedef va_list *kmp_va_list;
#define kmp_va_deref(ap) (*(ap))
#define kmp_va_addr_of(ap) (&(ap))
#else
typedef va_list kmp_va_list;
#define kmp_va_deref(ap) (ap)
#define kmp_va_addr_of(ap) (ap)
#endif

#ifdef __cplusplus
// macros to cast out qualifiers and to re-interpret types
#define CCAST(type, var) const_cast<type>(var)
#define RCAST(type, var) reinterpret_cast<type>(var)
//-------------------------------------------------------------------------
// template for debug prints specification ( d, u, lld, llu ), and to obtain
// signed/unsigned flavors of a type
template <typename T> struct traits_t {};
// int
template <> struct traits_t<signed int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffff;
  static const signed_t min_value = 0x80000000;
  static const int type_size = sizeof(signed_t);
};
// unsigned int
template <> struct traits_t<unsigned int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffff;
  static const unsigned_t min_value = 0x00000000;
  static const int type_size = sizeof(unsigned_t);
};
// long
template <> struct traits_t<signed long> {
  typedef signed long signed_t;
  typedef unsigned long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const int type_size = sizeof(signed_t);
};
// long long
template <> struct traits_t<signed long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffffffffffffLL;
  static const signed_t min_value = 0x8000000000000000LL;
  static const int type_size = sizeof(signed_t);
};
// unsigned long long
template <> struct traits_t<unsigned long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffffffffffffLL;
  static const unsigned_t min_value = 0x0000000000000000LL;
  static const int type_size = sizeof(unsigned_t);
};
//-------------------------------------------------------------------------
#else
#define CCAST(type, var) (type)(var)
#define RCAST(type, var) (type)(var)
#endif // __cplusplus
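// Illustrative sketch (not part of this header): traits_t lets generic code
// pick the right printf fragment for a type at compile time. The spec strings
// are runtime char pointers (defined out-of-line elsewhere in the runtime),
// so a format string is assembled rather than spliced as a literal.
#if 0
#include <stdio.h>

template <typename T> void example_debug_print(char const *name, T value) {
  char fmt[32];
  // e.g. builds "iters=%lld\n" when T is signed long long
  snprintf(fmt, sizeof(fmt), "%s=%%%s\n", name, traits_t<T>::spec);
  printf(fmt, value);
}
#endif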

#define KMP_EXPORT extern /* export declaration in guide libraries */

#if __GNUC__ >= 4 && !defined(__MINGW32__)
#define __forceinline __inline
#endif

/* Check if the OS/arch can support user-level mwait */
// All mwait code tests for UMWAIT first, so it should only fall back to ring3
// MWAIT for KNL.
#define KMP_HAVE_MWAIT                                                         \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) &&    \
   !KMP_MIC2)
#define KMP_HAVE_UMWAIT                                                        \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) &&    \
   !KMP_MIC)

#if KMP_OS_WINDOWS
#include <windows.h>

static inline int KMP_GET_PAGE_SIZE(void) {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}
#else
#define KMP_GET_PAGE_SIZE() getpagesize()
#endif

#define PAGE_ALIGNED(_addr)                                                    \
  (!((size_t)_addr & (size_t)(KMP_GET_PAGE_SIZE() - 1)))
#define ALIGN_TO_PAGE(x)                                                       \
  (void *)(((size_t)(x)) & ~((size_t)(KMP_GET_PAGE_SIZE() - 1)))

/* ---------- Support for cache alignment, padding, etc. ----------------*/

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

#define INTERNODE_CACHE_LINE 4096 /* for multi-node systems */

/* Define the default size of the cache line */
#ifndef CACHE_LINE
#define CACHE_LINE 128 /* cache line size in bytes */
#else
#if (CACHE_LINE < 64) && !defined(KMP_OS_DARWIN)
// 2006-02-13: This produces too many warnings on OS X*. Disable for now
#warning CACHE_LINE is too small.
#endif
#endif /* CACHE_LINE */

#define KMP_CACHE_PREFETCH(ADDR) /* nothing */

// Define attribute that indicates that the fall through from the previous
// case label is intentional and should not be diagnosed by a compiler
// Code from libcxx/include/__config
// Use a function like macro to imply that it must be followed by a semicolon
#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
#define KMP_FALLTHROUGH() [[fallthrough]]
// icc cannot properly tell this attribute is absent so force off
#elif KMP_COMPILER_ICC
#define KMP_FALLTHROUGH() ((void)0)
#elif __has_cpp_attribute(clang::fallthrough)
#define KMP_FALLTHROUGH() [[clang::fallthrough]]
#elif __has_attribute(fallthrough) || __GNUC__ >= 7
#define KMP_FALLTHROUGH() __attribute__((__fallthrough__))
#else
#define KMP_FALLTHROUGH() ((void)0)
#endif
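// Illustrative sketch (not part of this header): KMP_FALLTHROUGH() marks an
// intentional case fall-through so -Wimplicit-fallthrough stays quiet; the
// function-like form requires the trailing semicolon shown here.
#if 0
static int example_classify(int kind) {
  int weight = 0;
  switch (kind) {
  case 2:
    weight += 10;
    KMP_FALLTHROUGH(); // deliberate: kind 2 also gets kind-1 weighting
  case 1:
    weight += 1;
    break;
  default:
    break;
  }
  return weight;
}
#endif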

#if KMP_HAVE_ATTRIBUTE_WAITPKG
#define KMP_ATTRIBUTE_TARGET_WAITPKG __attribute__((target("waitpkg")))
#else
#define KMP_ATTRIBUTE_TARGET_WAITPKG /* Nothing */
#endif

#if KMP_HAVE_ATTRIBUTE_RTM
#define KMP_ATTRIBUTE_TARGET_RTM __attribute__((target("rtm")))
#else
#define KMP_ATTRIBUTE_TARGET_RTM /* Nothing */
#endif

// Define attribute that indicates a function does not return
#if __cplusplus >= 201103L
#define KMP_NORETURN [[noreturn]]
#elif KMP_OS_WINDOWS
#define KMP_NORETURN __declspec(noreturn)
#else
#define KMP_NORETURN __attribute__((noreturn))
#endif

#if KMP_OS_WINDOWS && KMP_MSVC_COMPAT
#define KMP_ALIGN(bytes) __declspec(align(bytes))
#define KMP_THREAD_LOCAL __declspec(thread)
#define KMP_ALIAS /* Nothing */
#else
#define KMP_ALIGN(bytes) __attribute__((aligned(bytes)))
#define KMP_THREAD_LOCAL __thread
#define KMP_ALIAS(alias_of) __attribute__((alias(alias_of)))
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE && !KMP_DYNAMIC_LIB
#define KMP_WEAK_ATTRIBUTE_EXTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_EXTERNAL /* Nothing */
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE
#define KMP_WEAK_ATTRIBUTE_INTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_INTERNAL /* Nothing */
#endif

// Define KMP_VERSION_SYMBOL and KMP_EXPAND_NAME
#ifndef KMP_STR
#define KMP_STR(x) _KMP_STR(x)
#define _KMP_STR(x) #x
#endif

#ifdef KMP_USE_VERSION_SYMBOLS
// If using versioned symbols, KMP_EXPAND_NAME prepends
// __kmp_api_ to the real API name
#define KMP_EXPAND_NAME(api_name) _KMP_EXPAND_NAME(api_name)
#define _KMP_EXPAND_NAME(api_name) __kmp_api_##api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str)                         \
  _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, default_ver)           \
  __typeof__(__kmp_api_##api_name) __kmp_api_##api_name##_##ver_num##_alias    \
      __attribute__((alias(KMP_STR(__kmp_api_##api_name))));                   \
  __asm__(                                                                     \
      ".symver " KMP_STR(__kmp_api_##api_name##_##ver_num##_alias) "," KMP_STR(\
          api_name) "@" ver_str "\n\t");                                       \
  __asm__(".symver " KMP_STR(__kmp_api_##api_name) "," KMP_STR(                \
      api_name) "@@" default_ver "\n\t")

#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str)         \
  _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str,        \
                                 default_ver)                                  \
  __typeof__(__kmp_api_##apic_name) __kmp_api_##apic_name##_##ver_num##_alias  \
      __attribute__((alias(KMP_STR(__kmp_api_##apic_name))));                  \
  __asm__(".symver " KMP_STR(__kmp_api_##apic_name) "," KMP_STR(               \
      apic_name) "@@" default_ver "\n\t");                                     \
  __asm__(                                                                     \
      ".symver " KMP_STR(__kmp_api_##apic_name##_##ver_num##_alias) "," KMP_STR(\
          api_name) "@" ver_str "\n\t")

#else // KMP_USE_VERSION_SYMBOLS
#define KMP_EXPAND_NAME(api_name) api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str) /* Nothing */
#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num,                  \
                                ver_str) /* Nothing */
#endif // KMP_USE_VERSION_SYMBOLS

/* Temporary note: if performance testing of this passes, we can remove
   all references to KMP_DO_ALIGN and replace with KMP_ALIGN. */
#define KMP_DO_ALIGN(bytes) KMP_ALIGN(bytes)
#define KMP_ALIGN_CACHE KMP_ALIGN(CACHE_LINE)
#define KMP_ALIGN_CACHE_INTERNODE KMP_ALIGN(INTERNODE_CACHE_LINE)

/* General purpose fence types for memory operations */
enum kmp_mem_fence_type {
  kmp_no_fence, /* No memory fence */
  kmp_acquire_fence, /* Acquire (read) memory fence */
  kmp_release_fence, /* Release (write) memory fence */
  kmp_full_fence /* Full (read+write) memory fence */
};
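// Illustrative sketch (not part of this header): KMP_ALIGN_CACHE places hot,
// independently-updated fields on their own cache lines so writers on
// different threads do not falsely share a line.
#if 0
typedef struct example_counters {
  KMP_ALIGN_CACHE volatile kmp_int32 produced; // written by producer thread
  KMP_ALIGN_CACHE volatile kmp_int32 consumed; // written by consumer thread
} example_counters_t;
#endif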

// Synchronization primitives

#if KMP_ASM_INTRINS && KMP_OS_WINDOWS

#if KMP_MSVC_COMPAT && !KMP_COMPILER_CLANG
#pragma intrinsic(InterlockedExchangeAdd)
#pragma intrinsic(InterlockedCompareExchange)
#pragma intrinsic(InterlockedExchange)
#if !(KMP_COMPILER_ICX && KMP_32_BIT_ARCH)
#pragma intrinsic(InterlockedExchange64)
#endif
#endif

// Using InterlockedIncrement / InterlockedDecrement causes a library loading
// ordering problem, so we use InterlockedExchangeAdd instead.
#define KMP_TEST_THEN_INC32(p) InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_ADD4_32(p) InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_DEC32(p) InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  InterlockedExchangeAdd((volatile long *)(p), (v))

#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  InterlockedCompareExchange((volatile long *)(p), (long)(sv), (long)(cv))

#define KMP_XCHG_FIXED32(p, v)                                                 \
  InterlockedExchange((volatile long *)(p), (long)(v))
#define KMP_XCHG_FIXED64(p, v)                                                 \
  InterlockedExchange64((volatile kmp_int64 *)(p), (kmp_int64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  kmp_int32 tmp = InterlockedExchange((volatile long *)p, *(long *)&v);
  return *(kmp_real32 *)&tmp;
}
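// Illustrative sketch (not part of this header): the KMP_TEST_THEN_* macros
// are fetch-and-op primitives, so they yield the value *before* the update.
// That is what makes them usable as ticket dispensers.
#if 0
static kmp_int32 example_next_ticket(volatile kmp_int32 *counter) {
  // two racing callers get distinct tickets; the counter ends up +2
  return KMP_TEST_THEN_INC32(counter); // returns the pre-increment value
}
#endif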

#define KMP_TEST_THEN_OR8(p, v) __kmp_test_then_or8((p), (v))
#define KMP_TEST_THEN_AND8(p, v) __kmp_test_then_and8((p), (v))
#define KMP_TEST_THEN_OR32(p, v) __kmp_test_then_or32((p), (v))
#define KMP_TEST_THEN_AND32(p, v) __kmp_test_then_and32((p), (v))
#define KMP_TEST_THEN_OR64(p, v) __kmp_test_then_or64((p), (v))
#define KMP_TEST_THEN_AND64(p, v) __kmp_test_then_and64((p), (v))

extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

#if KMP_ARCH_AARCH64 && KMP_COMPILER_MSVC && !KMP_COMPILER_CLANG
#define KMP_TEST_THEN_INC64(p) _InterlockedExchangeAdd64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 1LL)
#define KMP_TEST_THEN_ADD4_64(p) _InterlockedExchangeAdd64((p), 4LL)
// #define KMP_TEST_THEN_ADD4_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 4LL)
// #define KMP_TEST_THEN_DEC64(p) _InterlockedExchangeAdd64((p), -1LL)
// #define KMP_TEST_THEN_DEC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), -1LL)
// #define KMP_TEST_THEN_ADD8(p, v) _InterlockedExchangeAdd8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) _InterlockedExchangeAdd64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store_acq8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store_rel8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store_acq16((p), (cv), (sv))
/*
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store_rel16((p), (cv), (sv))
*/
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store_acq32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store_rel32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store_acq64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store_rel64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store_ptr((void *volatile *)(p), (void *)(cv), (void *)(sv))

// KMP_COMPARE_AND_STORE expects this order:       pointer, compare, exchange
// _InterlockedCompareExchange expects this order: pointer, exchange, compare
// KMP_COMPARE_AND_STORE also returns a bool indicating a successful write. A
// write is successful if the return value of _InterlockedCompareExchange is
// the same as the compare value.
inline kmp_int8 __kmp_compare_and_store_acq8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_acq(p, sv, cv) == cv;
}

inline kmp_int8 __kmp_compare_and_store_rel8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_rel(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_acq16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_acq(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_rel16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_acq((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_rel((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_acq(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_ptr(void *volatile *p, void *cv,
                                             void *sv) {
  return _InterlockedCompareExchangePointer(p, sv, cv) == cv;
}
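// Illustrative sketch (not part of this header): the wrappers above flip the
// operand order and turn the returned old value into a success flag, so a
// caller can write a conventional try-lock loop.
#if 0
static void example_spin_acquire(volatile kmp_int32 *lock) {
  // (pointer, compare, exchange): store 1 only if the lock currently holds 0
  while (!KMP_COMPARE_AND_STORE_ACQ32(lock, 0, 1)) {
    // spin; a real caller would pause/yield here
  }
}
#endif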

// The _RET versions return the value instead of a bool
/*
#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  _InterlockedCompareExchange8((p), (sv), (cv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  _InterlockedCompareExchange16((p), (sv), (cv))
*/
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  _InterlockedCompareExchange64((volatile kmp_int64 *)(p), (kmp_int64)(sv),    \
                                (kmp_int64)(cv))

/*
#define KMP_XCHG_FIXED8(p, v)                                                  \
  _InterlockedExchange8((volatile kmp_int8 *)(p), (kmp_int8)(v));
*/
// #define KMP_XCHG_FIXED16(p, v) _InterlockedExchange16((p), (v));
// #define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v)));

// inline kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v) {
//   kmp_int64 tmp = _InterlockedExchange64((volatile kmp_int64 *)p,
//                                          *(kmp_int64 *)&v);
//   return *(kmp_real64 *)&tmp;
// }

#else // !KMP_ARCH_AARCH64

// Routines that we still need to implement in assembly.
extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

// #define KMP_TEST_THEN_INC32(p) __kmp_test_then_add32((p), 1)
// #define KMP_TEST_THEN_INC_ACQ32(p) __kmp_test_then_add32((p), 1)
#define KMP_TEST_THEN_INC64(p) __kmp_test_then_add64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) __kmp_test_then_add64((p), 1LL)
// #define KMP_TEST_THEN_ADD4_32(p) __kmp_test_then_add32((p), 4)
// #define KMP_TEST_THEN_ADD4_ACQ32(p) __kmp_test_then_add32((p), 4)
#define KMP_TEST_THEN_ADD4_64(p) __kmp_test_then_add64((p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) __kmp_test_then_add64((p), 4LL)
// #define KMP_TEST_THEN_DEC32(p) __kmp_test_then_add32((p), -1)
// #define KMP_TEST_THEN_DEC_ACQ32(p) __kmp_test_then_add32((p), -1)
#define KMP_TEST_THEN_DEC64(p) __kmp_test_then_add64((p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) __kmp_test_then_add64((p), -1LL)
// #define KMP_TEST_THEN_ADD32(p, v) __kmp_test_then_add32((p), (v))
#define KMP_TEST_THEN_ADD8(p, v) __kmp_test_then_add8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) __kmp_test_then_add64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
// #define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
// #define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
// #define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));
#endif

#elif (KMP_ASM_INTRINS && KMP_OS_UNIX) || !(KMP_ARCH_X86 || KMP_ARCH_X86_64)

/* cast p to correct type so that proper intrinsic will be used */
#define KMP_TEST_THEN_INC32(p)                                                 \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_INC64(p)                                                 \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_INC64(p)                                                 \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_ADD4_32(p)                                               \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_DEC32(p)                                                 \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_ADD8(p, v)                                               \
  __sync_fetch_and_add((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), (kmp_int32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __atomic_fetch_add((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), (kmp_int64)(v))
#endif
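// Illustrative sketch (not part of this header): the OR/AND forms below are
// fetch-and-op as well, so the caller sees the flag word as it was before the
// update — handy for "was this bit already set?" checks.
#if 0
static int example_set_flag(volatile kmp_uint32 *flags, kmp_uint32 bit) {
  kmp_uint32 old = KMP_TEST_THEN_OR32(flags, bit);
  return (old & bit) != 0; // nonzero if some other caller set it first
}
#endif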

#define KMP_TEST_THEN_OR8(p, v)                                                \
  __sync_fetch_and_or((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v)                                               \
  __sync_fetch_and_and((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __sync_fetch_and_or((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __sync_fetch_and_and((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __atomic_fetch_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v),               \
                    __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __atomic_fetch_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __sync_fetch_and_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __sync_fetch_and_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#endif

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),     \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),     \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),   \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),   \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),   \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),   \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __sync_bool_compare_and_swap((void *volatile *)(p), (void *)(cv),            \
                               (void *)(sv))

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __sync_val_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),      \
                              (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),    \
                              (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),    \
                              (kmp_uint32)(sv))
#if KMP_ARCH_MIPS
static inline bool mips_sync_bool_compare_and_swap(volatile kmp_uint64 *p,
                                                   kmp_uint64 cv,
                                                   kmp_uint64 sv) {
  return __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                                   __ATOMIC_SEQ_CST);
}
// Must return the full 64-bit value observed before the swap (a bool return
// would truncate it); on failure __atomic_compare_exchange updates cv to the
// value seen in memory, so returning cv is the old value in either case.
static inline kmp_uint64 mips_sync_val_compare_and_swap(volatile kmp_uint64 *p,
                                                        kmp_uint64 cv,
                                                        kmp_uint64 sv) {
  __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                            __ATOMIC_SEQ_CST);
  return cv;
}
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p),                  \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p),                  \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  mips_sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
                                 (kmp_uint64)(sv))
#else
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),   \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),   \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),    \
                              (kmp_uint64)(sv))
#endif

#if KMP_OS_DARWIN && defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1800
#define KMP_XCHG_FIXED8(p, v)                                                  \
  __atomic_exchange_1((volatile kmp_uint8 *)(p), (kmp_uint8)(v),               \
                      __ATOMIC_SEQ_CST)
#else
#define KMP_XCHG_FIXED8(p, v)                                                  \
  __sync_lock_test_and_set((volatile kmp_uint8 *)(p), (kmp_uint8)(v))
#endif
#define KMP_XCHG_FIXED16(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint16 *)(p), (kmp_uint16)(v))
#define KMP_XCHG_FIXED32(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_XCHG_FIXED64(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  volatile kmp_uint32 *up;
  kmp_uint32 uv;
  memcpy(&up, &p, sizeof(up));
  memcpy(&uv, &v, sizeof(uv));
  kmp_int32 tmp = __sync_lock_test_and_set(up, uv);
  kmp_real32 ftmp;
  memcpy(&ftmp, &tmp, sizeof(tmp));
  return ftmp;
}

inline kmp_real64 KMP_XCHG_REAL64(volatile kmp_real64 *p, kmp_real64 v) {
  volatile kmp_uint64 *up;
  kmp_uint64 uv;
  memcpy(&up, &p, sizeof(up));
  memcpy(&uv, &v, sizeof(uv));
  kmp_int64 tmp = __sync_lock_test_and_set(up, uv);
  kmp_real64 dtmp;
  memcpy(&dtmp, &tmp, sizeof(tmp));
  return dtmp;
}

#else

extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

#define KMP_TEST_THEN_INC32(p)                                                 \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC64(p)                                                 \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_ADD4_32(p)                                               \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC32(p)                                                 \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_ADD8(p, v)                                               \
  __kmp_test_then_add8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), (kmp_int32)(v))
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), (kmp_int64)(v))

#define KMP_TEST_THEN_OR8(p, v)                                                \
  __kmp_test_then_or8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v)                                               \
  __kmp_test_then_and8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __kmp_test_then_or32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __kmp_test_then_and32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __kmp_test_then_or64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __kmp_test_then_and64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv),           \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv),           \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv),        \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv),        \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  __kmp_compare_and_store_ret32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

#endif /* KMP_ASM_INTRINS */

/* ------------- relaxed consistency memory model stuff ------------------ */

#if KMP_OS_WINDOWS
#ifdef __ABSOFT_WIN
#define KMP_MB() asm("nop")
#define KMP_IMB() asm("nop")
#else
#define KMP_MB() /* _asm{ nop } */
#define KMP_IMB() /* _asm{ nop } */
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_ARCH_PPC64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS ||     \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64
#if KMP_OS_WINDOWS
#undef KMP_MB
#define KMP_MB() std::atomic_thread_fence(std::memory_order_seq_cst)
#else /* !KMP_OS_WINDOWS */
#define KMP_MB() __sync_synchronize()
#endif
#endif

#ifndef KMP_MB
#define KMP_MB() /* nothing to do */
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
#define KMP_MFENCE_() _mm_mfence()
#define KMP_SFENCE_() _mm_sfence()
#elif KMP_COMPILER_MSVC
#define KMP_MFENCE_() MemoryBarrier()
#define KMP_SFENCE_() MemoryBarrier()
#else
#define KMP_MFENCE_() __sync_synchronize()
#define KMP_SFENCE_() __sync_synchronize()
#endif
#define KMP_MFENCE()                                                           \
  if (UNLIKELY(!__kmp_cpuinfo.initialized)) {                                  \
    __kmp_query_cpuid(&__kmp_cpuinfo);                                         \
  }                                                                            \
  if (__kmp_cpuinfo.flags.sse2) {                                              \
    KMP_MFENCE_();                                                             \
  }
#define KMP_SFENCE() KMP_SFENCE_()
#else
#define KMP_MFENCE() KMP_MB()
#define KMP_SFENCE() KMP_MB()
#endif

#ifndef KMP_IMB
#define KMP_IMB() /* nothing to do */
#endif

#ifndef KMP_ST_REL32
#define KMP_ST_REL32(A, D) (*(A) = (D))
#endif

#ifndef KMP_ST_REL64
#define KMP_ST_REL64(A, D) (*(A) = (D))
#endif

#ifndef KMP_LD_ACQ32
#define KMP_LD_ACQ32(A) (*(A))
#endif

#ifndef KMP_LD_ACQ64
#define KMP_LD_ACQ64(A) (*(A))
#endif

/* ------------------------------------------------------------------------ */
// FIXME - maybe this should be
//
// #define TCR_4(a) (*(volatile kmp_int32 *)(&a))
// #define TCW_4(a, b) (a) = (*(volatile kmp_int32 *)&(b))
//
// #define TCR_8(a) (*(volatile kmp_int64 *)(a))
// #define TCW_8(a, b) (a) = (*(volatile kmp_int64 *)(&b))
//
// I'm fairly certain this is the correct thing to do, but I'm afraid
// of performance regressions.

#define TCR_1(a) (a)
#define TCW_1(a, b) (a) = (b)
#define TCR_4(a) (a)
#define TCW_4(a, b) (a) = (b)
#define TCI_4(a) (++(a))
#define TCD_4(a) (--(a))
#define TCR_8(a) (a)
#define TCW_8(a, b) (a) = (b)
#define TCI_8(a) (++(a))
#define TCD_8(a) (--(a))
#define TCR_SYNC_4(a) (a)
#define TCW_SYNC_4(a, b) (a) = (b)
#define TCX_SYNC_4(a, b, c)                                                    \
  KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)(volatile void *)&(a),     \
                              (kmp_int32)(b), (kmp_int32)(c))
#define TCR_SYNC_8(a) (a)
#define TCW_SYNC_8(a, b) (a) = (b)
#define TCX_SYNC_8(a, b, c)                                                    \
  KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)(volatile void *)&(a),     \
                              (kmp_int64)(b), (kmp_int64)(c))

#if KMP_ARCH_X86 || KMP_ARCH_MIPS
// What about ARM?
#define TCR_PTR(a) ((void *)TCR_4(a))
#define TCW_PTR(a, b) TCW_4((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_4(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_4((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_4((a), (b), (c)))

#else /* 64 bit pointers */

#define TCR_PTR(a) ((void *)TCR_8(a))
#define TCW_PTR(a, b) TCW_8((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_8(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_8((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_8((a), (b), (c)))

#endif /* KMP_ARCH_X86 */
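// Illustrative sketch (not part of this header): the TCR_*/TCW_* wrappers are
// the runtime's consistent read/write spellings, and TCR_PTR/TCW_PTR pick the
// 4- or 8-byte flavor to match the platform's pointer width.
#if 0
static void *example_swap_root(void **slot, void *next) {
  void *prev = TCR_PTR(*slot); // consistent read of a shared pointer
  TCW_PTR(*slot, next); // consistent write; note this is not an atomic swap
  return prev;
}
#endif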

/* If these FTN_{TRUE,FALSE} values change, may need to change several places
   where they are used to check that language is Fortran, not C. */

#ifndef FTN_TRUE
#define FTN_TRUE TRUE
#endif

#ifndef FTN_FALSE
#define FTN_FALSE FALSE
#endif

typedef void (*microtask_t)(int *gtid, int *npr, ...);

#ifdef USE_VOLATILE_CAST
#define VOLATILE_CAST(x) (volatile x)
#else
#define VOLATILE_CAST(x) (x)
#endif

#define KMP_WAIT __kmp_wait_4
#define KMP_WAIT_PTR __kmp_wait_4_ptr
#define KMP_EQ __kmp_eq_4
#define KMP_NEQ __kmp_neq_4
#define KMP_LT __kmp_lt_4
#define KMP_GE __kmp_ge_4
#define KMP_LE __kmp_le_4

/* Workaround for Intel(R) 64 code gen bug when taking address of static array
 * (Intel(R) 64 Tracker #138) */
#if (KMP_ARCH_X86_64 || KMP_ARCH_PPC64) && KMP_OS_LINUX
#define STATIC_EFI2_WORKAROUND
#else
#define STATIC_EFI2_WORKAROUND static
#endif

// Support of BGET usage
#ifndef KMP_USE_BGET
#define KMP_USE_BGET 1
#endif

// Switches for OSS builds
#ifndef USE_CMPXCHG_FIX
#define USE_CMPXCHG_FIX 1
#endif

// Enable dynamic user lock
#define KMP_USE_DYNAMIC_LOCK 1

// Enable Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) if
// dynamic user lock is turned on
#if KMP_USE_DYNAMIC_LOCK
// Visual Studio can't handle the asm sections in this code
#define KMP_USE_TSX (KMP_ARCH_X86 || KMP_ARCH_X86_64) && !KMP_COMPILER_MSVC
#ifdef KMP_USE_ADAPTIVE_LOCKS
#undef KMP_USE_ADAPTIVE_LOCKS
#endif
#define KMP_USE_ADAPTIVE_LOCKS KMP_USE_TSX
#endif

// Enable tick time conversion of ticks to seconds
#if KMP_STATS_ENABLED
#define KMP_HAVE_TICK_TIME                                                     \
  (KMP_OS_LINUX && (KMP_MIC || KMP_ARCH_X86 || KMP_ARCH_X86_64))
#endif

// Warning levels
enum kmp_warnings_level {
  kmp_warnings_off = 0, /* No warnings */
  kmp_warnings_low, /* Minimal warnings (default) */
  kmp_warnings_explicit = 6, /* Explicitly set to ON - more warnings */
  kmp_warnings_verbose /* reserved */
};

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

// Safe C API
#include "kmp_safe_c_api.h"

// Macros for C++11 atomic functions
#define KMP_ATOMIC_LD(p, order) (p)->load(std::memory_order_##order)
#define KMP_ATOMIC_OP(op, p, v, order) (p)->op(v, std::memory_order_##order)

// For non-default load/store
#define KMP_ATOMIC_LD_ACQ(p) KMP_ATOMIC_LD(p, acquire)
#define KMP_ATOMIC_LD_RLX(p) KMP_ATOMIC_LD(p, relaxed)
#define KMP_ATOMIC_ST_REL(p, v) KMP_ATOMIC_OP(store, p, v, release)
#define KMP_ATOMIC_ST_RLX(p, v) KMP_ATOMIC_OP(store, p, v, relaxed)

// For non-default fetch_<op>
#define KMP_ATOMIC_ADD(p, v) KMP_ATOMIC_OP(fetch_add, p, v, acq_rel)
#define KMP_ATOMIC_SUB(p, v) KMP_ATOMIC_OP(fetch_sub, p, v, acq_rel)
#define KMP_ATOMIC_AND(p, v) KMP_ATOMIC_OP(fetch_and, p, v, acq_rel)
#define KMP_ATOMIC_OR(p, v) KMP_ATOMIC_OP(fetch_or, p, v, acq_rel)
#define KMP_ATOMIC_INC(p) KMP_ATOMIC_OP(fetch_add, p, 1, acq_rel)
#define KMP_ATOMIC_DEC(p) KMP_ATOMIC_OP(fetch_sub, p, 1, acq_rel)
#define KMP_ATOMIC_ADD_RLX(p, v) KMP_ATOMIC_OP(fetch_add, p, v, relaxed)
#define KMP_ATOMIC_INC_RLX(p) KMP_ATOMIC_OP(fetch_add, p, 1, relaxed)
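// Illustrative sketch (not part of this header): the KMP_ATOMIC_* macros are
// thin spelling helpers over std::atomic member calls with an explicit
// memory order.
#if 0
static kmp_int32 example_publish(std::atomic<kmp_int32> *ready,
                                 std::atomic<kmp_int32> *count) {
  kmp_int32 old = KMP_ATOMIC_INC(count); // fetch_add(1, acq_rel)
  KMP_ATOMIC_ST_REL(ready, 1); // store(1, release)
  return old;
}
#endif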

// Callers of the following functions cannot see the side effect on "expected".
template <typename T>
bool __kmp_atomic_compare_store(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acq_rel, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_acq(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acquire, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_rel(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_release, std::memory_order_relaxed);
}

// Symbol lookup on Linux/Windows
#if KMP_OS_WINDOWS
extern void *__kmp_lookup_symbol(const char *name);
#define KMP_DLSYM(name) __kmp_lookup_symbol(name)
#define KMP_DLSYM_NEXT(name) nullptr
#else
#define KMP_DLSYM(name) dlsym(RTLD_DEFAULT, name)
#define KMP_DLSYM_NEXT(name) dlsym(RTLD_NEXT, name)
#endif

#endif /* KMP_OS_H */