/*
 * kmp_atomic.h - ATOMIC header file
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_ATOMIC_H
#define KMP_ATOMIC_H

#include "kmp_lock.h"
#include "kmp_os.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

// C++ build port.
// Intel compiler does not support the _Complex datatype on win.
// Intel compiler supports the _Complex datatype on lin and mac.
// On the other hand, there is a stack alignment problem on lin_32 and mac_32
// if the rhs is a cmplx80 or cmplx128 typedef'ed datatype.
// The decision: use the compiler-supported _Complex type on lin and mac,
// and use the typedef'ed types on win.
// Condition for WIN64 was modified in anticipation of the 10.1 build compiler.

#if defined(__cplusplus) && (KMP_OS_WINDOWS)
// create shortcuts for c99 complex types

// Visual Studio cannot have function parameters that have the
// align __declspec attribute, so we must remove it. (Compiler Error C2719)
#if KMP_COMPILER_MSVC
#undef KMP_DO_ALIGN
#define KMP_DO_ALIGN(alignment) /* Nothing */
#endif

#if defined(_MSC_VER) && (_MSC_VER < 1600) && defined(_DEBUG)
// Workaround for the problem of the _DebugHeapTag unresolved external.
// This problem prevented us from using our static debug library for C tests
// compiled with the /MDd option (the library itself is built with /MTd).
#undef _DEBUG
#define _DEBUG_TEMPORARILY_UNSET_
#endif

#include <complex>

template <typename type_lhs, typename type_rhs>
std::complex<type_lhs> __kmp_lhs_div_rhs(const std::complex<type_lhs> &lhs,
                                         const std::complex<type_rhs> &rhs) {
  type_lhs a = lhs.real();
  type_lhs b = lhs.imag();
  type_rhs c = rhs.real();
  type_rhs d = rhs.imag();
  type_rhs den = c * c + d * d;
  type_rhs r = (a * c + b * d);
  type_rhs i = (b * c - a * d);
  std::complex<type_lhs> ret(r / den, i / den);
  return ret;
}
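// The helper above implements the textbook identity
//   (a + bi) / (c + di) = ((ac + bd) + (bc - ad)i) / (c^2 + d^2),
// keeping the intermediate products in the rhs precision so mixed-precision
// divisions (e.g. complex<float> / complex<double>) stay well-typed.
// Worked check (illustrative only, not part of this header):
//   std::complex<float> lhs(1.0f, 2.0f);
//   std::complex<double> rhs(3.0, 4.0);
//   std::complex<float> q = __kmp_lhs_div_rhs(lhs, rhs); // (0.44f, 0.08f)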
// complex8
struct __kmp_cmplx64_t : std::complex<double> {

  __kmp_cmplx64_t() : std::complex<double>() {}

  __kmp_cmplx64_t(const std::complex<double> &cd) : std::complex<double>(cd) {}

  void operator/=(const __kmp_cmplx64_t &rhs) {
    std::complex<double> lhs = *this;
    *this = __kmp_lhs_div_rhs(lhs, rhs);
  }

  __kmp_cmplx64_t operator/(const __kmp_cmplx64_t &rhs) {
    std::complex<double> lhs = *this;
    return __kmp_lhs_div_rhs(lhs, rhs);
  }
};
typedef struct __kmp_cmplx64_t kmp_cmplx64;

// complex4
struct __kmp_cmplx32_t : std::complex<float> {

  __kmp_cmplx32_t() : std::complex<float>() {}

  __kmp_cmplx32_t(const std::complex<float> &cf) : std::complex<float>(cf) {}

  __kmp_cmplx32_t operator+(const __kmp_cmplx32_t &b) {
    std::complex<float> lhs = *this;
    std::complex<float> rhs = b;
    return (lhs + rhs);
  }
  __kmp_cmplx32_t operator-(const __kmp_cmplx32_t &b) {
    std::complex<float> lhs = *this;
    std::complex<float> rhs = b;
    return (lhs - rhs);
  }
  __kmp_cmplx32_t operator*(const __kmp_cmplx32_t &b) {
    std::complex<float> lhs = *this;
    std::complex<float> rhs = b;
    return (lhs * rhs);
  }

  __kmp_cmplx32_t operator+(const kmp_cmplx64 &b) {
    kmp_cmplx64 t = kmp_cmplx64(*this) + b;
    std::complex<double> d(t);
    std::complex<float> f(d);
    __kmp_cmplx32_t r(f);
    return r;
  }
  __kmp_cmplx32_t operator-(const kmp_cmplx64 &b) {
    kmp_cmplx64 t = kmp_cmplx64(*this) - b;
    std::complex<double> d(t);
    std::complex<float> f(d);
    __kmp_cmplx32_t r(f);
    return r;
  }
  __kmp_cmplx32_t operator*(const kmp_cmplx64 &b) {
    kmp_cmplx64 t = kmp_cmplx64(*this) * b;
    std::complex<double> d(t);
    std::complex<float> f(d);
    __kmp_cmplx32_t r(f);
    return r;
  }

  void operator/=(const __kmp_cmplx32_t &rhs) {
    std::complex<float> lhs = *this;
    *this = __kmp_lhs_div_rhs(lhs, rhs);
  }

  __kmp_cmplx32_t operator/(const __kmp_cmplx32_t &rhs) {
    std::complex<float> lhs = *this;
    return __kmp_lhs_div_rhs(lhs, rhs);
  }

  void operator/=(const kmp_cmplx64 &rhs) {
    std::complex<float> lhs = *this;
    *this = __kmp_lhs_div_rhs(lhs, rhs);
  }

  __kmp_cmplx32_t operator/(const kmp_cmplx64 &rhs) {
    std::complex<float> lhs = *this;
    return __kmp_lhs_div_rhs(lhs, rhs);
  }
};
typedef struct __kmp_cmplx32_t kmp_cmplx32;

// complex10
struct KMP_DO_ALIGN(16) __kmp_cmplx80_t : std::complex<long double> {

  __kmp_cmplx80_t() : std::complex<long double>() {}

  __kmp_cmplx80_t(const std::complex<long double> &cld)
      : std::complex<long double>(cld) {}

  void operator/=(const __kmp_cmplx80_t &rhs) {
    std::complex<long double> lhs = *this;
    *this = __kmp_lhs_div_rhs(lhs, rhs);
  }

  __kmp_cmplx80_t operator/(const __kmp_cmplx80_t &rhs) {
    std::complex<long double> lhs = *this;
    return __kmp_lhs_div_rhs(lhs, rhs);
  }
};
typedef KMP_DO_ALIGN(16) struct __kmp_cmplx80_t kmp_cmplx80;

// complex16
#if KMP_HAVE_QUAD
struct __kmp_cmplx128_t : std::complex<_Quad> {

  __kmp_cmplx128_t() : std::complex<_Quad>() {}

  __kmp_cmplx128_t(const std::complex<_Quad> &cq) : std::complex<_Quad>(cq) {}

  void operator/=(const __kmp_cmplx128_t &rhs) {
    std::complex<_Quad> lhs = *this;
    *this = __kmp_lhs_div_rhs(lhs, rhs);
  }

  __kmp_cmplx128_t operator/(const __kmp_cmplx128_t &rhs) {
    std::complex<_Quad> lhs = *this;
    return __kmp_lhs_div_rhs(lhs, rhs);
  }
};
typedef struct __kmp_cmplx128_t kmp_cmplx128;
#endif /* KMP_HAVE_QUAD */

#ifdef _DEBUG_TEMPORARILY_UNSET_
#undef _DEBUG_TEMPORARILY_UNSET_
// Set it back now
#define _DEBUG 1
#endif

#else
// create shortcuts for c99 complex types
typedef float _Complex kmp_cmplx32;
typedef double _Complex kmp_cmplx64;
typedef long double _Complex kmp_cmplx80;
#if KMP_HAVE_QUAD
typedef _Quad _Complex kmp_cmplx128;
#endif
#endif

// Compiler 12.0 changed the alignment of 16- and 32-byte arguments (like _Quad
// and kmp_cmplx128) on the IA-32 architecture. The following aligned
// structures are implemented to support the old alignment in 10.1, 11.0 and
// 11.1, and to introduce the new alignment in 12.0. See CQ88405.
#if KMP_ARCH_X86 && KMP_HAVE_QUAD

// 4-byte aligned structures for backward compatibility.
#pragma pack(push, 4)

struct KMP_DO_ALIGN(4) Quad_a4_t {
  _Quad q;

  Quad_a4_t() : q() {}
  Quad_a4_t(const _Quad &cq) : q(cq) {}

  Quad_a4_t operator+(const Quad_a4_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a4_t)(lhs + rhs);
  }

  Quad_a4_t operator-(const Quad_a4_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a4_t)(lhs - rhs);
  }
  Quad_a4_t operator*(const Quad_a4_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a4_t)(lhs * rhs);
  }

  Quad_a4_t operator/(const Quad_a4_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a4_t)(lhs / rhs);
  }
};

struct KMP_DO_ALIGN(4) kmp_cmplx128_a4_t {
  kmp_cmplx128 q;

  kmp_cmplx128_a4_t() : q() {}

  kmp_cmplx128_a4_t(const kmp_cmplx128 &c128) : q(c128) {}

  kmp_cmplx128_a4_t operator+(const kmp_cmplx128_a4_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a4_t)(lhs + rhs);
  }
  kmp_cmplx128_a4_t operator-(const kmp_cmplx128_a4_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a4_t)(lhs - rhs);
  }
  kmp_cmplx128_a4_t operator*(const kmp_cmplx128_a4_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a4_t)(lhs * rhs);
  }

  kmp_cmplx128_a4_t operator/(const kmp_cmplx128_a4_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a4_t)(lhs / rhs);
  }
};

#pragma pack(pop)
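// Layout sketch (illustrative assumption, not verified here): under the
// pack(4) pragma above, a Quad_a4_t member embedded in a struct is placed
// with 4-byte alignment, matching the pre-12.0 argument layout, e.g.
//   struct Arg { char c; Quad_a4_t q; }; // offsetof(Arg, q) would be 4
// whereas the Quad_a16_t variant below keeps the natural 16-byte alignment.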
// New 16-byte aligned structures for 12.0 compiler.
struct KMP_DO_ALIGN(16) Quad_a16_t {
  _Quad q;

  Quad_a16_t() : q() {}
  Quad_a16_t(const _Quad &cq) : q(cq) {}

  Quad_a16_t operator+(const Quad_a16_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a16_t)(lhs + rhs);
  }

  Quad_a16_t operator-(const Quad_a16_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a16_t)(lhs - rhs);
  }
  Quad_a16_t operator*(const Quad_a16_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a16_t)(lhs * rhs);
  }

  Quad_a16_t operator/(const Quad_a16_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a16_t)(lhs / rhs);
  }
};

struct KMP_DO_ALIGN(16) kmp_cmplx128_a16_t {
  kmp_cmplx128 q;

  kmp_cmplx128_a16_t() : q() {}

  kmp_cmplx128_a16_t(const kmp_cmplx128 &c128) : q(c128) {}

  kmp_cmplx128_a16_t operator+(const kmp_cmplx128_a16_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a16_t)(lhs + rhs);
  }
  kmp_cmplx128_a16_t operator-(const kmp_cmplx128_a16_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a16_t)(lhs - rhs);
  }
  kmp_cmplx128_a16_t operator*(const kmp_cmplx128_a16_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a16_t)(lhs * rhs);
  }

  kmp_cmplx128_a16_t operator/(const kmp_cmplx128_a16_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a16_t)(lhs / rhs);
  }
};

#endif

#if (KMP_ARCH_X86)
#define QUAD_LEGACY Quad_a4_t
#define CPLX128_LEG kmp_cmplx128_a4_t
#else
#define QUAD_LEGACY _Quad
#define CPLX128_LEG kmp_cmplx128
#endif

#ifdef __cplusplus
extern "C" {
#endif

extern int __kmp_atomic_mode;

// Atomic locks can easily become contended, so we use queuing locks for them.
typedef kmp_queuing_lock_t kmp_atomic_lock_t;

static inline void __kmp_acquire_atomic_lock(kmp_atomic_lock_t *lck,
                                             kmp_int32 gtid) {
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_mutex_acquire) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
        ompt_mutex_atomic, 0, kmp_mutex_impl_queuing,
        (ompt_wait_id_t)(uintptr_t)lck, OMPT_GET_RETURN_ADDRESS(0));
  }
#endif

  __kmp_acquire_queuing_lock(lck, gtid);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_mutex_acquired) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
        ompt_mutex_atomic, (ompt_wait_id_t)(uintptr_t)lck,
        OMPT_GET_RETURN_ADDRESS(0));
  }
#endif
}

static inline int __kmp_test_atomic_lock(kmp_atomic_lock_t *lck,
                                         kmp_int32 gtid) {
  return __kmp_test_queuing_lock(lck, gtid);
}

static inline void __kmp_release_atomic_lock(kmp_atomic_lock_t *lck,
                                             kmp_int32 gtid) {
  __kmp_release_queuing_lock(lck, gtid);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_mutex_released) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
        ompt_mutex_atomic, (ompt_wait_id_t)(uintptr_t)lck,
        OMPT_GET_RETURN_ADDRESS(0));
  }
#endif
}

static inline void __kmp_init_atomic_lock(kmp_atomic_lock_t *lck) {
  __kmp_init_queuing_lock(lck);
}

static inline void __kmp_destroy_atomic_lock(kmp_atomic_lock_t *lck) {
  __kmp_destroy_queuing_lock(lck);
}
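// Usage sketch (illustrative only): the lock-based fallback paths in
// kmp_atomic.cpp bracket the unprotected operation with the wrappers above,
// roughly:
//   __kmp_acquire_atomic_lock(&__kmp_atomic_lock_8r, gtid); // e.g. kmp_real64
//   *lhs = *lhs OP rhs; // the actual update, not atomic by itself
//   __kmp_release_atomic_lock(&__kmp_atomic_lock_8r, gtid);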
// Global Locks
extern kmp_atomic_lock_t __kmp_atomic_lock; /* Control access to all user coded
                                               atomics in Gnu compat mode */
extern kmp_atomic_lock_t __kmp_atomic_lock_1i; /* Control access to all user
                                                  coded atomics for 1-byte fixed
                                                  data types */
extern kmp_atomic_lock_t __kmp_atomic_lock_2i; /* Control access to all user
                                                  coded atomics for 2-byte fixed
                                                  data types */
extern kmp_atomic_lock_t __kmp_atomic_lock_4i; /* Control access to all user
                                                  coded atomics for 4-byte fixed
                                                  data types */
extern kmp_atomic_lock_t __kmp_atomic_lock_4r; /* Control access to all user
                                                  coded atomics for kmp_real32
                                                  data type */
extern kmp_atomic_lock_t __kmp_atomic_lock_8i; /* Control access to all user
                                                  coded atomics for 8-byte fixed
                                                  data types */
extern kmp_atomic_lock_t __kmp_atomic_lock_8r; /* Control access to all user
                                                  coded atomics for kmp_real64
                                                  data type */
extern kmp_atomic_lock_t
    __kmp_atomic_lock_8c; /* Control access to all user coded atomics for
                             complex byte data type */
extern kmp_atomic_lock_t
    __kmp_atomic_lock_10r; /* Control access to all user coded atomics for long
                              double data type */
extern kmp_atomic_lock_t __kmp_atomic_lock_16r; /* Control access to all user
                                                   coded atomics for _Quad data
                                                   type */
extern kmp_atomic_lock_t __kmp_atomic_lock_16c; /* Control access to all user
                                                   coded atomics for double
                                                   complex data type*/
extern kmp_atomic_lock_t
    __kmp_atomic_lock_20c; /* Control access to all user coded atomics for long
                              double complex type*/
extern kmp_atomic_lock_t __kmp_atomic_lock_32c; /* Control access to all user
                                                   coded atomics for _Quad
                                                   complex data type */

// Below routines for atomic UPDATE are listed
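// Lowering sketch (illustrative, compiler-dependent): for
//   #pragma omp atomic
//   x += y; // kmp_int32 x, y
// an OpenMP compiler may emit a call such as
//   __kmpc_atomic_fixed4_add(&loc, gtid, &x, y);
// instead of inlining a compare-and-swap loop.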
// 1-byte
void __kmpc_atomic_fixed1_add(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_andb(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_div(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1u_div(ident_t *id_ref, int gtid, unsigned char *lhs,
                               unsigned char rhs);
void __kmpc_atomic_fixed1_mul(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_orb(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_shl(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_shr(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1u_shr(ident_t *id_ref, int gtid, unsigned char *lhs,
                               unsigned char rhs);
void __kmpc_atomic_fixed1_sub(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_xor(ident_t *id_ref, int gtid, char *lhs, char rhs);
// 2-byte
void __kmpc_atomic_fixed2_add(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_andb(ident_t *id_ref, int gtid, short *lhs,
                               short rhs);
void __kmpc_atomic_fixed2_div(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2u_div(ident_t *id_ref, int gtid, unsigned short *lhs,
                               unsigned short rhs);
void __kmpc_atomic_fixed2_mul(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_orb(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_shl(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_shr(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2u_shr(ident_t *id_ref, int gtid, unsigned short *lhs,
                               unsigned short rhs);
void __kmpc_atomic_fixed2_sub(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_xor(ident_t *id_ref, int gtid, short *lhs, short rhs);
// 4-byte add / sub fixed
void __kmpc_atomic_fixed4_add(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                              kmp_int32 rhs);
void __kmpc_atomic_fixed4_sub(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                              kmp_int32 rhs);
// 4-byte add / sub float
void __kmpc_atomic_float4_add(ident_t *id_ref, int gtid, kmp_real32 *lhs,
                              kmp_real32 rhs);
void __kmpc_atomic_float4_sub(ident_t *id_ref, int gtid, kmp_real32 *lhs,
                              kmp_real32 rhs);
// 8-byte add / sub fixed
void __kmpc_atomic_fixed8_add(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                              kmp_int64 rhs);
void __kmpc_atomic_fixed8_sub(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                              kmp_int64 rhs);
// 8-byte add / sub float
void __kmpc_atomic_float8_add(ident_t *id_ref, int gtid, kmp_real64 *lhs,
                              kmp_real64 rhs);
void __kmpc_atomic_float8_sub(ident_t *id_ref, int gtid, kmp_real64 *lhs,
                              kmp_real64 rhs);
// 4-byte fixed
void __kmpc_atomic_fixed4_andb(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                               kmp_int32 rhs);
void __kmpc_atomic_fixed4_div(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                              kmp_int32 rhs);
void __kmpc_atomic_fixed4u_div(ident_t *id_ref, int gtid, kmp_uint32 *lhs,
                               kmp_uint32 rhs);
void __kmpc_atomic_fixed4_mul(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                              kmp_int32 rhs);
void __kmpc_atomic_fixed4_orb(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                              kmp_int32 rhs);
void __kmpc_atomic_fixed4_shl(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                              kmp_int32 rhs);
void __kmpc_atomic_fixed4_shr(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                              kmp_int32 rhs);
void __kmpc_atomic_fixed4u_shr(ident_t *id_ref, int gtid, kmp_uint32 *lhs,
                               kmp_uint32 rhs);
void __kmpc_atomic_fixed4_xor(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                              kmp_int32 rhs);
// 8-byte fixed
void __kmpc_atomic_fixed8_andb(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                               kmp_int64 rhs);
void __kmpc_atomic_fixed8_div(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                              kmp_int64 rhs);
void __kmpc_atomic_fixed8u_div(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
                               kmp_uint64 rhs);
void __kmpc_atomic_fixed8_mul(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                              kmp_int64 rhs);
void __kmpc_atomic_fixed8_orb(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                              kmp_int64 rhs);
void __kmpc_atomic_fixed8_shl(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                              kmp_int64 rhs);
void __kmpc_atomic_fixed8_shr(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                              kmp_int64 rhs);
void __kmpc_atomic_fixed8u_shr(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
                               kmp_uint64 rhs);
void __kmpc_atomic_fixed8_xor(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                              kmp_int64 rhs);
// 4-byte float
void __kmpc_atomic_float4_div(ident_t *id_ref, int gtid, kmp_real32 *lhs,
                              kmp_real32 rhs);
void __kmpc_atomic_float4_mul(ident_t *id_ref, int gtid, kmp_real32 *lhs,
                              kmp_real32 rhs);
// 8-byte float
void __kmpc_atomic_float8_div(ident_t *id_ref, int gtid, kmp_real64 *lhs,
                              kmp_real64 rhs);
void __kmpc_atomic_float8_mul(ident_t *id_ref, int gtid, kmp_real64 *lhs,
                              kmp_real64 rhs);
// 1-, 2-, 4-, 8-byte logical (&&, ||)
void __kmpc_atomic_fixed1_andl(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_orl(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed2_andl(ident_t *id_ref, int gtid, short *lhs,
                               short rhs);
void __kmpc_atomic_fixed2_orl(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed4_andl(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                               kmp_int32 rhs);
void __kmpc_atomic_fixed4_orl(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                              kmp_int32 rhs);
void __kmpc_atomic_fixed8_andl(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                               kmp_int64 rhs);
void __kmpc_atomic_fixed8_orl(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                              kmp_int64 rhs);
// MIN / MAX
void __kmpc_atomic_fixed1_max(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_min(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed2_max(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_min(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed4_max(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                              kmp_int32 rhs);
void __kmpc_atomic_fixed4_min(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                              kmp_int32 rhs);
void __kmpc_atomic_fixed8_max(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                              kmp_int64 rhs);
void __kmpc_atomic_fixed8_min(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                              kmp_int64 rhs);
void __kmpc_atomic_float4_max(ident_t *id_ref, int gtid, kmp_real32 *lhs,
                              kmp_real32 rhs);
void __kmpc_atomic_float4_min(ident_t *id_ref, int gtid, kmp_real32 *lhs,
                              kmp_real32 rhs);
void __kmpc_atomic_float8_max(ident_t *id_ref, int gtid, kmp_real64 *lhs,
                              kmp_real64 rhs);
void __kmpc_atomic_float8_min(ident_t *id_ref, int gtid, kmp_real64 *lhs,
                              kmp_real64 rhs);
#if KMP_HAVE_QUAD
void __kmpc_atomic_float16_max(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs,
                               QUAD_LEGACY rhs);
void __kmpc_atomic_float16_min(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs,
                               QUAD_LEGACY rhs);
#if (KMP_ARCH_X86)
// Routines with 16-byte arguments aligned to 16-byte boundary; IA-32
// architecture only
void __kmpc_atomic_float16_max_a16(ident_t *id_ref, int gtid, Quad_a16_t *lhs,
                                   Quad_a16_t rhs);
void __kmpc_atomic_float16_min_a16(ident_t *id_ref, int gtid, Quad_a16_t *lhs,
                                   Quad_a16_t rhs);
#endif
#endif
// .NEQV. (same as xor)
void __kmpc_atomic_fixed1_neqv(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed2_neqv(ident_t *id_ref, int gtid, short *lhs,
                               short rhs);
void __kmpc_atomic_fixed4_neqv(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                               kmp_int32 rhs);
void __kmpc_atomic_fixed8_neqv(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                               kmp_int64 rhs);
// .EQV. (same as ~xor)
void __kmpc_atomic_fixed1_eqv(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed2_eqv(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed4_eqv(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                              kmp_int32 rhs);
void __kmpc_atomic_fixed8_eqv(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                              kmp_int64 rhs);
// long double type
void __kmpc_atomic_float10_add(ident_t *id_ref, int gtid, long double *lhs,
                               long double rhs);
void __kmpc_atomic_float10_sub(ident_t *id_ref, int gtid, long double *lhs,
                               long double rhs);
void __kmpc_atomic_float10_mul(ident_t *id_ref, int gtid, long double *lhs,
                               long double rhs);
void __kmpc_atomic_float10_div(ident_t *id_ref, int gtid, long double *lhs,
                               long double rhs);
// _Quad type
#if KMP_HAVE_QUAD
void __kmpc_atomic_float16_add(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs,
                               QUAD_LEGACY rhs);
void __kmpc_atomic_float16_sub(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs,
                               QUAD_LEGACY rhs);
void __kmpc_atomic_float16_mul(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs,
                               QUAD_LEGACY rhs);
void __kmpc_atomic_float16_div(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs,
                               QUAD_LEGACY rhs);
#if (KMP_ARCH_X86)
// Routines with 16-byte arguments aligned to 16-byte boundary
void __kmpc_atomic_float16_add_a16(ident_t *id_ref, int gtid, Quad_a16_t *lhs,
                                   Quad_a16_t rhs);
void __kmpc_atomic_float16_sub_a16(ident_t *id_ref, int gtid, Quad_a16_t *lhs,
                                   Quad_a16_t rhs);
void __kmpc_atomic_float16_mul_a16(ident_t *id_ref, int gtid, Quad_a16_t *lhs,
                                   Quad_a16_t rhs);
void __kmpc_atomic_float16_div_a16(ident_t *id_ref, int gtid, Quad_a16_t *lhs,
                                   Quad_a16_t rhs);
#endif
#endif
// routines for complex types
void __kmpc_atomic_cmplx4_add(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                              kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx4_sub(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                              kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx4_mul(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                              kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx4_div(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                              kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx8_add(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs,
                              kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx8_sub(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs,
                              kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx8_mul(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs,
                              kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx8_div(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs,
                              kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx10_add(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs,
                               kmp_cmplx80 rhs);
void __kmpc_atomic_cmplx10_sub(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs,
                               kmp_cmplx80 rhs);
void __kmpc_atomic_cmplx10_mul(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs,
                               kmp_cmplx80 rhs);
void __kmpc_atomic_cmplx10_div(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs,
                               kmp_cmplx80 rhs);
#if KMP_HAVE_QUAD
void __kmpc_atomic_cmplx16_add(ident_t *id_ref, int gtid, CPLX128_LEG *lhs,
                               CPLX128_LEG rhs);
void __kmpc_atomic_cmplx16_sub(ident_t *id_ref, int gtid, CPLX128_LEG *lhs,
                               CPLX128_LEG rhs);
void __kmpc_atomic_cmplx16_mul(ident_t *id_ref, int gtid, CPLX128_LEG *lhs,
                               CPLX128_LEG rhs);
void __kmpc_atomic_cmplx16_div(ident_t *id_ref, int gtid, CPLX128_LEG *lhs,
                               CPLX128_LEG rhs);
#if (KMP_ARCH_X86)
// Routines with 16-byte arguments aligned to 16-byte boundary
void __kmpc_atomic_cmplx16_add_a16(ident_t *id_ref, int gtid,
                                   kmp_cmplx128_a16_t *lhs,
                                   kmp_cmplx128_a16_t rhs);
void __kmpc_atomic_cmplx16_sub_a16(ident_t *id_ref, int gtid,
                                   kmp_cmplx128_a16_t *lhs,
                                   kmp_cmplx128_a16_t rhs);
void __kmpc_atomic_cmplx16_mul_a16(ident_t *id_ref, int gtid,
                                   kmp_cmplx128_a16_t *lhs,
                                   kmp_cmplx128_a16_t rhs);
void __kmpc_atomic_cmplx16_div_a16(ident_t *id_ref, int gtid,
                                   kmp_cmplx128_a16_t *lhs,
                                   kmp_cmplx128_a16_t rhs);
#endif
#endif

// OpenMP 4.0: x = expr binop x for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
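// Reversed-operand sketch (illustrative, compiler-dependent): for
//   #pragma omp atomic
//   x = expr - x; // kmp_int32 x; operand order matters for '-'
// a compiler may emit
//   __kmpc_atomic_fixed4_sub_rev(&loc, gtid, &x, expr);
// i.e. the *_rev entry points compute "rhs binop lhs" rather than
// "lhs binop rhs".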
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

void __kmpc_atomic_fixed1_sub_rev(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs);
void __kmpc_atomic_fixed1_div_rev(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs);
void __kmpc_atomic_fixed1u_div_rev(ident_t *id_ref, int gtid,
                                   unsigned char *lhs, unsigned char rhs);
void __kmpc_atomic_fixed1_shl_rev(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs);
void __kmpc_atomic_fixed1_shr_rev(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs);
void __kmpc_atomic_fixed1u_shr_rev(ident_t *id_ref, int gtid,
                                   unsigned char *lhs, unsigned char rhs);
void __kmpc_atomic_fixed2_sub_rev(ident_t *id_ref, int gtid, short *lhs,
                                  short rhs);
void __kmpc_atomic_fixed2_div_rev(ident_t *id_ref, int gtid, short *lhs,
                                  short rhs);
void __kmpc_atomic_fixed2u_div_rev(ident_t *id_ref, int gtid,
                                   unsigned short *lhs, unsigned short rhs);
void __kmpc_atomic_fixed2_shl_rev(ident_t *id_ref, int gtid, short *lhs,
                                  short rhs);
void __kmpc_atomic_fixed2_shr_rev(ident_t *id_ref, int gtid, short *lhs,
                                  short rhs);
void __kmpc_atomic_fixed2u_shr_rev(ident_t *id_ref, int gtid,
                                   unsigned short *lhs, unsigned short rhs);
void __kmpc_atomic_fixed4_sub_rev(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                  kmp_int32 rhs);
void __kmpc_atomic_fixed4_div_rev(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                  kmp_int32 rhs);
void __kmpc_atomic_fixed4u_div_rev(ident_t *id_ref, int gtid, kmp_uint32 *lhs,
                                   kmp_uint32 rhs);
void __kmpc_atomic_fixed4_shl_rev(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                  kmp_int32 rhs);
void __kmpc_atomic_fixed4_shr_rev(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                  kmp_int32 rhs);
void __kmpc_atomic_fixed4u_shr_rev(ident_t *id_ref, int gtid, kmp_uint32 *lhs,
                                   kmp_uint32 rhs);
void __kmpc_atomic_fixed8_sub_rev(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                  kmp_int64 rhs);
void __kmpc_atomic_fixed8_div_rev(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                  kmp_int64 rhs);
void __kmpc_atomic_fixed8u_div_rev(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
                                   kmp_uint64 rhs);
void __kmpc_atomic_fixed8_shl_rev(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                  kmp_int64 rhs);
void __kmpc_atomic_fixed8_shr_rev(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                  kmp_int64 rhs);
void __kmpc_atomic_fixed8u_shr_rev(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
                                   kmp_uint64 rhs);
void __kmpc_atomic_float4_sub_rev(ident_t *id_ref, int gtid, float *lhs,
                                  float rhs);
void __kmpc_atomic_float4_div_rev(ident_t *id_ref, int gtid, float *lhs,
                                  float rhs);
void __kmpc_atomic_float8_sub_rev(ident_t *id_ref, int gtid, double *lhs,
                                  double rhs);
void __kmpc_atomic_float8_div_rev(ident_t *id_ref, int gtid, double *lhs,
                                  double rhs);
void __kmpc_atomic_float10_sub_rev(ident_t *id_ref, int gtid, long double *lhs,
                                   long double rhs);
void __kmpc_atomic_float10_div_rev(ident_t *id_ref, int gtid, long double *lhs,
                                   long double rhs);
#if KMP_HAVE_QUAD
void __kmpc_atomic_float16_sub_rev(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs,
                                   QUAD_LEGACY rhs);
void __kmpc_atomic_float16_div_rev(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs,
                                   QUAD_LEGACY rhs);
#endif
void __kmpc_atomic_cmplx4_sub_rev(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                                  kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx4_div_rev(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                                  kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx8_sub_rev(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs,
                                  kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx8_div_rev(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs,
                                  kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx10_sub_rev(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs,
                                   kmp_cmplx80 rhs);
void __kmpc_atomic_cmplx10_div_rev(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs,
                                   kmp_cmplx80 rhs);
#if KMP_HAVE_QUAD
void __kmpc_atomic_cmplx16_sub_rev(ident_t *id_ref, int gtid, CPLX128_LEG *lhs,
                                   CPLX128_LEG rhs);
void __kmpc_atomic_cmplx16_div_rev(ident_t *id_ref, int gtid, CPLX128_LEG *lhs,
                                   CPLX128_LEG rhs);
#if (KMP_ARCH_X86)
// Routines with 16-byte arguments aligned to 16-byte boundary
void __kmpc_atomic_float16_sub_a16_rev(ident_t *id_ref, int gtid,
                                       Quad_a16_t *lhs, Quad_a16_t rhs);
void __kmpc_atomic_float16_div_a16_rev(ident_t *id_ref, int gtid,
                                       Quad_a16_t *lhs, Quad_a16_t rhs);
void __kmpc_atomic_cmplx16_sub_a16_rev(ident_t *id_ref, int gtid,
                                       kmp_cmplx128_a16_t *lhs,
                                       kmp_cmplx128_a16_t rhs);
void __kmpc_atomic_cmplx16_div_a16_rev(ident_t *id_ref, int gtid,
                                       kmp_cmplx128_a16_t *lhs,
                                       kmp_cmplx128_a16_t rhs);
#endif
#endif // KMP_HAVE_QUAD

#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64

// routines for mixed types
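// Mixed-type sketch (illustrative, compiler-dependent): when the update
// expression promotes the target, e.g.
//   char c; double d;
//   #pragma omp atomic
//   c *= d;
// a compiler may call the widened entry point
//   __kmpc_atomic_fixed1_mul_float8(&loc, gtid, &c, d);
// so the multiply is performed in double precision before storing back.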
// RHS=float8
void __kmpc_atomic_fixed1_mul_float8(ident_t *id_ref, int gtid, char *lhs,
                                     kmp_real64 rhs);
void __kmpc_atomic_fixed1_div_float8(ident_t *id_ref, int gtid, char *lhs,
                                     kmp_real64 rhs);
void __kmpc_atomic_fixed2_mul_float8(ident_t *id_ref, int gtid, short *lhs,
                                     kmp_real64 rhs);
void __kmpc_atomic_fixed2_div_float8(ident_t *id_ref, int gtid, short *lhs,
                                     kmp_real64 rhs);
void __kmpc_atomic_fixed4_mul_float8(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                     kmp_real64 rhs);
void __kmpc_atomic_fixed4_div_float8(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                     kmp_real64 rhs);
void __kmpc_atomic_fixed8_mul_float8(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                     kmp_real64 rhs);
void __kmpc_atomic_fixed8_div_float8(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                     kmp_real64 rhs);
void __kmpc_atomic_float4_add_float8(ident_t *id_ref, int gtid,
                                     kmp_real32 *lhs, kmp_real64 rhs);
void __kmpc_atomic_float4_sub_float8(ident_t *id_ref, int gtid,
                                     kmp_real32 *lhs, kmp_real64 rhs);
void __kmpc_atomic_float4_mul_float8(ident_t *id_ref, int gtid,
                                     kmp_real32 *lhs, kmp_real64 rhs);
void __kmpc_atomic_float4_div_float8(ident_t *id_ref, int gtid,
                                     kmp_real32 *lhs, kmp_real64 rhs);

// RHS=float16 (deprecated, to be removed when we are sure the compiler does
// not use them)
#if KMP_HAVE_QUAD
void __kmpc_atomic_fixed1_add_fp(ident_t *id_ref, int gtid, char *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed1u_add_fp(ident_t *id_ref, int gtid,
                                  unsigned char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1_sub_fp(ident_t *id_ref, int gtid, char *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed1u_sub_fp(ident_t *id_ref, int gtid,
                                  unsigned char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1_mul_fp(ident_t *id_ref, int gtid, char *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed1u_mul_fp(ident_t *id_ref, int gtid,
                                  unsigned char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1_div_fp(ident_t *id_ref, int gtid, char *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed1u_div_fp(ident_t *id_ref, int gtid,
                                  unsigned char *lhs, _Quad rhs);

void __kmpc_atomic_fixed2_add_fp(ident_t *id_ref, int gtid, short *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed2u_add_fp(ident_t *id_ref, int gtid,
                                  unsigned short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2_sub_fp(ident_t *id_ref, int gtid, short *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed2u_sub_fp(ident_t *id_ref, int gtid,
                                  unsigned short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2_mul_fp(ident_t *id_ref, int gtid, short *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed2u_mul_fp(ident_t *id_ref, int gtid,
                                  unsigned short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2_div_fp(ident_t *id_ref, int gtid, short *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed2u_div_fp(ident_t *id_ref, int gtid,
                                  unsigned short *lhs, _Quad rhs);
void __kmpc_atomic_fixed4_add_fp(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed4u_add_fp(ident_t *id_ref, int gtid, kmp_uint32 *lhs,
                                  _Quad rhs);
void __kmpc_atomic_fixed4_sub_fp(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed4u_sub_fp(ident_t *id_ref, int gtid, kmp_uint32 *lhs,
                                  _Quad rhs);
void __kmpc_atomic_fixed4_mul_fp(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed4u_mul_fp(ident_t *id_ref, int gtid, kmp_uint32 *lhs,
                                  _Quad rhs);
void __kmpc_atomic_fixed4_div_fp(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed4u_div_fp(ident_t *id_ref, int gtid, kmp_uint32 *lhs,
                                  _Quad rhs);

void __kmpc_atomic_fixed8_add_fp(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed8u_add_fp(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
                                  _Quad rhs);
void __kmpc_atomic_fixed8_sub_fp(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed8u_sub_fp(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
                                  _Quad rhs);
void __kmpc_atomic_fixed8_mul_fp(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed8u_mul_fp(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
                                  _Quad rhs);
void __kmpc_atomic_fixed8_div_fp(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_fixed8u_div_fp(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
                                  _Quad rhs);

void __kmpc_atomic_float4_add_fp(ident_t *id_ref, int gtid, kmp_real32 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_float4_sub_fp(ident_t *id_ref, int gtid, kmp_real32 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_float4_mul_fp(ident_t *id_ref, int gtid, kmp_real32 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_float4_div_fp(ident_t *id_ref, int gtid, kmp_real32 *lhs,
                                 _Quad rhs);

void __kmpc_atomic_float8_add_fp(ident_t *id_ref, int gtid, kmp_real64 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_float8_sub_fp(ident_t *id_ref, int gtid, kmp_real64 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_float8_mul_fp(ident_t *id_ref, int gtid, kmp_real64 *lhs,
                                 _Quad rhs);
void __kmpc_atomic_float8_div_fp(ident_t *id_ref, int gtid, kmp_real64 *lhs,
                                 _Quad rhs);

void __kmpc_atomic_float10_add_fp(ident_t *id_ref, int gtid, long double *lhs,
                                  _Quad rhs);
void __kmpc_atomic_float10_sub_fp(ident_t *id_ref, int gtid, long double *lhs,
                                  _Quad rhs);
void __kmpc_atomic_float10_mul_fp(ident_t *id_ref, int gtid, long double *lhs,
                                  _Quad rhs);
void __kmpc_atomic_float10_div_fp(ident_t *id_ref, int gtid, long double *lhs,
                                  _Quad rhs);
// Reverse operations
void __kmpc_atomic_fixed1_sub_rev_fp(ident_t *id_ref, int gtid, char *lhs,
                                     _Quad rhs);
void __kmpc_atomic_fixed1u_sub_rev_fp(ident_t *id_ref, int gtid,
                                      unsigned char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1_div_rev_fp(ident_t *id_ref, int gtid, char *lhs,
                                     _Quad rhs);
void __kmpc_atomic_fixed1u_div_rev_fp(ident_t *id_ref, int gtid,
                                      unsigned char *lhs, _Quad rhs);
void __kmpc_atomic_fixed2_sub_rev_fp(ident_t *id_ref, int gtid, short *lhs,
                                     _Quad rhs);
void __kmpc_atomic_fixed2u_sub_rev_fp(ident_t *id_ref, int gtid,
                                      unsigned short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2_div_rev_fp(ident_t *id_ref, int gtid, short *lhs,
                                     _Quad rhs);
void __kmpc_atomic_fixed2u_div_rev_fp(ident_t *id_ref, int gtid,
                                      unsigned short *lhs, _Quad rhs);
void __kmpc_atomic_fixed4_sub_rev_fp(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                     _Quad rhs);
void __kmpc_atomic_fixed4u_sub_rev_fp(ident_t *id_ref, int gtid,
                                      kmp_uint32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed4_div_rev_fp(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                     _Quad rhs);
void __kmpc_atomic_fixed4u_div_rev_fp(ident_t *id_ref, int gtid,
                                      kmp_uint32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8_sub_rev_fp(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                     _Quad rhs);
void __kmpc_atomic_fixed8u_sub_rev_fp(ident_t *id_ref, int gtid,
                                      kmp_uint64 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8_div_rev_fp(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                     _Quad rhs);
void __kmpc_atomic_fixed8u_div_rev_fp(ident_t *id_ref, int gtid,
                                      kmp_uint64 *lhs, _Quad rhs);
void __kmpc_atomic_float4_sub_rev_fp(ident_t *id_ref, int gtid, float *lhs,
                                     _Quad rhs);
void __kmpc_atomic_float4_div_rev_fp(ident_t *id_ref, int gtid, float *lhs,
                                     _Quad rhs);
void __kmpc_atomic_float8_sub_rev_fp(ident_t *id_ref, int gtid, double *lhs,
                                     _Quad rhs);
void __kmpc_atomic_float8_div_rev_fp(ident_t *id_ref, int gtid, double *lhs,
                                     _Quad rhs);
void __kmpc_atomic_float10_sub_rev_fp(ident_t *id_ref, int gtid,
                                      long double *lhs, _Quad rhs);
void __kmpc_atomic_float10_div_rev_fp(ident_t *id_ref, int gtid,
                                      long double *lhs, _Quad rhs);

#endif // KMP_HAVE_QUAD

// RHS=cmplx8
void __kmpc_atomic_cmplx4_add_cmplx8(ident_t *id_ref, int gtid,
                                     kmp_cmplx32 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx4_sub_cmplx8(ident_t *id_ref, int gtid,
                                     kmp_cmplx32 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx4_mul_cmplx8(ident_t *id_ref, int gtid,
                                     kmp_cmplx32 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx4_div_cmplx8(ident_t *id_ref, int gtid,
                                     kmp_cmplx32 *lhs, kmp_cmplx64 rhs);

// generic atomic routines
void __kmpc_atomic_1(ident_t *id_ref, int gtid, void *lhs, void *rhs,
                     void (*f)(void *, void *, void *));
void __kmpc_atomic_2(ident_t *id_ref, int gtid, void *lhs, void *rhs,
                     void (*f)(void *, void *, void *));
void __kmpc_atomic_4(ident_t *id_ref, int gtid, void *lhs, void *rhs,
                     void (*f)(void *, void *, void *));
void __kmpc_atomic_8(ident_t *id_ref, int gtid, void *lhs, void *rhs,
                     void (*f)(void *, void *, void *));
void __kmpc_atomic_10(ident_t *id_ref, int gtid, void *lhs, void *rhs,
                      void (*f)(void *, void *, void *));
void __kmpc_atomic_16(ident_t *id_ref, int gtid, void *lhs, void *rhs,
                      void (*f)(void *, void *, void *));
void __kmpc_atomic_20(ident_t *id_ref, int gtid, void *lhs, void *rhs,
                      void (*f)(void *, void *, void *));
void __kmpc_atomic_32(ident_t *id_ref, int gtid, void *lhs, void *rhs,
                      void (*f)(void *, void *, void *));
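// Callback sketch (illustrative; the authoritative calling convention is in
// kmp_atomic.cpp, which roughly invokes f(lhs, lhs, rhs) under a lock or a
// compare-and-swap loop, i.e. f(out, in1, in2)). A compiler-generated helper
// for an opaque 8-byte type could look like:
//   static void my_add(void *out, void *a, void *b) {
//     *(kmp_int64 *)out = *(kmp_int64 *)a + *(kmp_int64 *)b;
//   }
//   __kmpc_atomic_8(&loc, gtid, &x, &y, my_add);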

// READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

// Below routines for atomic READ are listed
char __kmpc_atomic_fixed1_rd(ident_t *id_ref, int gtid, char *loc);
short __kmpc_atomic_fixed2_rd(ident_t *id_ref, int gtid, short *loc);
kmp_int32 __kmpc_atomic_fixed4_rd(ident_t *id_ref, int gtid, kmp_int32 *loc);
kmp_int64 __kmpc_atomic_fixed8_rd(ident_t *id_ref, int gtid, kmp_int64 *loc);
kmp_real32 __kmpc_atomic_float4_rd(ident_t *id_ref, int gtid, kmp_real32 *loc);
kmp_real64 __kmpc_atomic_float8_rd(ident_t *id_ref, int gtid, kmp_real64 *loc);
long double __kmpc_atomic_float10_rd(ident_t *id_ref, int gtid,
                                     long double *loc);
#if KMP_HAVE_QUAD
QUAD_LEGACY __kmpc_atomic_float16_rd(ident_t *id_ref, int gtid,
                                     QUAD_LEGACY *loc);
#endif
// Fix for CQ220361: cmplx4 READ will return void on Windows* OS; the read
// value will be returned through an additional parameter
#if (KMP_OS_WINDOWS)
void __kmpc_atomic_cmplx4_rd(kmp_cmplx32 *out, ident_t *id_ref, int gtid,
                             kmp_cmplx32 *loc);
#else
kmp_cmplx32 __kmpc_atomic_cmplx4_rd(ident_t *id_ref, int gtid,
                                    kmp_cmplx32 *loc);
#endif
kmp_cmplx64 __kmpc_atomic_cmplx8_rd(ident_t *id_ref, int gtid,
                                    kmp_cmplx64 *loc);
kmp_cmplx80 __kmpc_atomic_cmplx10_rd(ident_t *id_ref, int gtid,
                                     kmp_cmplx80 *loc);
#if KMP_HAVE_QUAD
CPLX128_LEG __kmpc_atomic_cmplx16_rd(ident_t *id_ref, int gtid,
                                     CPLX128_LEG *loc);
#if (KMP_ARCH_X86)
// Routines with 16-byte arguments aligned to 16-byte boundary
Quad_a16_t __kmpc_atomic_float16_a16_rd(ident_t *id_ref, int gtid,
                                        Quad_a16_t *loc);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_a16_rd(ident_t *id_ref, int gtid,
                                                kmp_cmplx128_a16_t *loc);
#endif
#endif

// Below routines for atomic WRITE are listed
void __kmpc_atomic_fixed1_wr(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed2_wr(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed4_wr(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                             kmp_int32 rhs);
void __kmpc_atomic_fixed8_wr(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                             kmp_int64 rhs);
void __kmpc_atomic_float4_wr(ident_t *id_ref, int gtid, kmp_real32 *lhs,
                             kmp_real32 rhs);
void __kmpc_atomic_float8_wr(ident_t *id_ref, int gtid, kmp_real64 *lhs,
                             kmp_real64 rhs);
void __kmpc_atomic_float10_wr(ident_t *id_ref, int gtid, long double *lhs,
                              long double rhs);
#if KMP_HAVE_QUAD
void __kmpc_atomic_float16_wr(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs,
                              QUAD_LEGACY rhs);
#endif
void __kmpc_atomic_cmplx4_wr(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                             kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx8_wr(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs,
                             kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx10_wr(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs,
                              kmp_cmplx80 rhs);
#if KMP_HAVE_QUAD
void __kmpc_atomic_cmplx16_wr(ident_t *id_ref, int gtid, CPLX128_LEG *lhs,
                              CPLX128_LEG rhs);
#if (KMP_ARCH_X86)
// Routines with 16-byte arguments aligned to 16-byte boundary
void __kmpc_atomic_float16_a16_wr(ident_t *id_ref, int gtid, Quad_a16_t *lhs,
                                  Quad_a16_t rhs);
void __kmpc_atomic_cmplx16_a16_wr(ident_t *id_ref, int gtid,
                                  kmp_cmplx128_a16_t *lhs,
                                  kmp_cmplx128_a16_t rhs);
#endif
#endif

// Below routines for atomic CAPTURE are listed
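// Capture sketch (illustrative; see the capture macros in kmp_atomic.cpp for
// the authoritative semantics): the trailing 'flag' selects which value is
// handed back, e.g. for
//   #pragma omp atomic capture
//   { v = x; x += e; } // capture the OLD value: flag == 0
//   { x += e; v = x; } // capture the NEW value: flag == 1
//   v = __kmpc_atomic_fixed4_add_cpt(&loc, gtid, &x, e, /*flag=*/1);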
// 1-byte
char __kmpc_atomic_fixed1_add_cpt(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs, int flag);
char __kmpc_atomic_fixed1_andb_cpt(ident_t *id_ref, int gtid, char *lhs,
                                   char rhs, int flag);
char __kmpc_atomic_fixed1_div_cpt(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_div_cpt(ident_t *id_ref, int gtid,
                                            unsigned char *lhs,
                                            unsigned char rhs, int flag);
char __kmpc_atomic_fixed1_mul_cpt(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs, int flag);
char __kmpc_atomic_fixed1_orb_cpt(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs, int flag);
char __kmpc_atomic_fixed1_shl_cpt(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs, int flag);
char __kmpc_atomic_fixed1_shr_cpt(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_shr_cpt(ident_t *id_ref, int gtid,
                                            unsigned char *lhs,
                                            unsigned char rhs, int flag);
char __kmpc_atomic_fixed1_sub_cpt(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs, int flag);
char __kmpc_atomic_fixed1_xor_cpt(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs, int flag);
// 2-byte
short __kmpc_atomic_fixed2_add_cpt(ident_t *id_ref, int gtid, short *lhs,
                                   short rhs, int flag);
short __kmpc_atomic_fixed2_andb_cpt(ident_t *id_ref, int gtid, short *lhs,
                                    short rhs, int flag);
short __kmpc_atomic_fixed2_div_cpt(ident_t *id_ref, int gtid, short *lhs,
                                   short rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_div_cpt(ident_t *id_ref, int gtid,
                                             unsigned short *lhs,
                                             unsigned short rhs, int flag);
short __kmpc_atomic_fixed2_mul_cpt(ident_t *id_ref, int gtid, short *lhs,
                                   short rhs, int flag);
short __kmpc_atomic_fixed2_orb_cpt(ident_t *id_ref, int gtid, short *lhs,
                                   short rhs, int flag);
short __kmpc_atomic_fixed2_shl_cpt(ident_t *id_ref, int gtid, short *lhs,
                                   short rhs, int flag);
short __kmpc_atomic_fixed2_shr_cpt(ident_t *id_ref, int gtid, short *lhs,
                                   short rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_shr_cpt(ident_t *id_ref, int gtid,
                                             unsigned short *lhs,
                                             unsigned short rhs, int flag);
short __kmpc_atomic_fixed2_sub_cpt(ident_t *id_ref, int gtid, short *lhs,
                                   short rhs, int flag);
short __kmpc_atomic_fixed2_xor_cpt(ident_t *id_ref, int gtid, short *lhs,
                                   short rhs, int flag);
// 4-byte add / sub fixed
kmp_int32 __kmpc_atomic_fixed4_add_cpt(ident_t *id_ref, int gtid,
                                       kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_sub_cpt(ident_t *id_ref, int gtid,
                                       kmp_int32 *lhs, kmp_int32 rhs, int flag);
// 4-byte add / sub float
kmp_real32 __kmpc_atomic_float4_add_cpt(ident_t *id_ref, int gtid,
                                        kmp_real32 *lhs, kmp_real32 rhs,
                                        int flag);
kmp_real32 __kmpc_atomic_float4_sub_cpt(ident_t *id_ref, int gtid,
                                        kmp_real32 *lhs, kmp_real32 rhs,
                                        int flag);
// 8-byte add / sub fixed
kmp_int64 __kmpc_atomic_fixed8_add_cpt(ident_t *id_ref, int gtid,
                                       kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_sub_cpt(ident_t *id_ref, int gtid,
                                       kmp_int64 *lhs, kmp_int64 rhs, int flag);
// 8-byte add / sub float
kmp_real64 __kmpc_atomic_float8_add_cpt(ident_t *id_ref, int gtid,
                                        kmp_real64 *lhs, kmp_real64 rhs,
                                        int flag);
kmp_real64 __kmpc_atomic_float8_sub_cpt(ident_t *id_ref, int gtid,
                                        kmp_real64 *lhs, kmp_real64 rhs,
                                        int flag);
// 4-byte fixed
kmp_int32 __kmpc_atomic_fixed4_andb_cpt(ident_t *id_ref, int gtid,
                                        kmp_int32 *lhs, kmp_int32 rhs,
                                        int flag);
kmp_int32 __kmpc_atomic_fixed4_div_cpt(ident_t *id_ref, int gtid,
                                       kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_uint32 __kmpc_atomic_fixed4u_div_cpt(ident_t *id_ref, int gtid,
                                         kmp_uint32 *lhs, kmp_uint32 rhs,
                                         int flag);
kmp_int32 __kmpc_atomic_fixed4_mul_cpt(ident_t *id_ref, int gtid,
                                       kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_orb_cpt(ident_t *id_ref, int gtid,
                                       kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_shl_cpt(ident_t *id_ref, int gtid,
                                       kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_shr_cpt(ident_t *id_ref, int gtid,
                                       kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_uint32 __kmpc_atomic_fixed4u_shr_cpt(ident_t *id_ref, int gtid,
                                         kmp_uint32 *lhs, kmp_uint32 rhs,
                                         int flag);
kmp_int32 __kmpc_atomic_fixed4_xor_cpt(ident_t *id_ref, int gtid,
                                       kmp_int32 *lhs, kmp_int32 rhs, int flag);
// 8-byte fixed
kmp_int64 __kmpc_atomic_fixed8_andb_cpt(ident_t *id_ref, int gtid,
                                        kmp_int64 *lhs, kmp_int64 rhs,
                                        int flag);
kmp_int64 __kmpc_atomic_fixed8_div_cpt(ident_t *id_ref, int gtid,
                                       kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_uint64 __kmpc_atomic_fixed8u_div_cpt(ident_t *id_ref, int gtid,
                                         kmp_uint64 *lhs, kmp_uint64 rhs,
                                         int flag);
kmp_int64 __kmpc_atomic_fixed8_mul_cpt(ident_t *id_ref, int gtid,
                                       kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_orb_cpt(ident_t *id_ref, int gtid,
                                       kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_shl_cpt(ident_t *id_ref, int gtid,
                                       kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_shr_cpt(ident_t *id_ref, int gtid,
                                       kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_uint64 __kmpc_atomic_fixed8u_shr_cpt(ident_t *id_ref, int gtid,
                                         kmp_uint64 *lhs, kmp_uint64 rhs,
                                         int flag);
kmp_int64 __kmpc_atomic_fixed8_xor_cpt(ident_t *id_ref, int gtid,
                                       kmp_int64 *lhs, kmp_int64 rhs, int flag);
// 4-byte float
kmp_real32 __kmpc_atomic_float4_div_cpt(ident_t *id_ref, int gtid,
                                        kmp_real32 *lhs, kmp_real32 rhs,
                                        int flag);
kmp_real32 __kmpc_atomic_float4_mul_cpt(ident_t *id_ref, int gtid,
                                        kmp_real32 *lhs, kmp_real32 rhs,
                                        int flag);
// 8-byte float
kmp_real64 __kmpc_atomic_float8_div_cpt(ident_t *id_ref, int gtid,
                                        kmp_real64 *lhs, kmp_real64 rhs,
                                        int flag);
kmp_real64 __kmpc_atomic_float8_mul_cpt(ident_t *id_ref, int gtid,
                                        kmp_real64 *lhs, kmp_real64 rhs,
                                        int flag);
// 1-, 2-, 4-, 8-byte logical (&&, ||)
char __kmpc_atomic_fixed1_andl_cpt(ident_t *id_ref, int gtid, char *lhs,
                                   char rhs, int flag);
char __kmpc_atomic_fixed1_orl_cpt(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs, int flag);
short __kmpc_atomic_fixed2_andl_cpt(ident_t *id_ref, int gtid, short *lhs,
                                    short rhs, int flag);
short __kmpc_atomic_fixed2_orl_cpt(ident_t *id_ref, int gtid, short *lhs,
                                   short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_andl_cpt(ident_t *id_ref, int gtid,
                                        kmp_int32 *lhs, kmp_int32 rhs,
                                        int flag);
kmp_int32 __kmpc_atomic_fixed4_orl_cpt(ident_t *id_ref, int gtid,
                                       kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_andl_cpt(ident_t *id_ref, int gtid,
                                        kmp_int64 *lhs, kmp_int64 rhs,
                                        int flag);
kmp_int64 __kmpc_atomic_fixed8_orl_cpt(ident_t *id_ref, int gtid,
                                       kmp_int64 *lhs, kmp_int64 rhs, int flag);
// MIN / MAX
char __kmpc_atomic_fixed1_max_cpt(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs, int flag);
char __kmpc_atomic_fixed1_min_cpt(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs, int flag);
short __kmpc_atomic_fixed2_max_cpt(ident_t *id_ref, int gtid, short *lhs,
                                   short rhs, int flag);
short __kmpc_atomic_fixed2_min_cpt(ident_t *id_ref, int gtid, short *lhs,
                                   short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_max_cpt(ident_t *id_ref, int gtid,
                                       kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_min_cpt(ident_t *id_ref, int gtid,
                                       kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_max_cpt(ident_t *id_ref, int gtid,
                                       kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_min_cpt(ident_t *id_ref, int gtid,
                                       kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_real32 __kmpc_atomic_float4_max_cpt(ident_t *id_ref, int gtid,
                                        kmp_real32 *lhs, kmp_real32 rhs,
                                        int flag);
kmp_real32 __kmpc_atomic_float4_min_cpt(ident_t *id_ref, int gtid,
                                        kmp_real32 *lhs, kmp_real32 rhs,
                                        int flag);
kmp_real64 __kmpc_atomic_float8_max_cpt(ident_t *id_ref, int gtid,
                                        kmp_real64 *lhs, kmp_real64 rhs,
                                        int flag);
kmp_real64 __kmpc_atomic_float8_min_cpt(ident_t *id_ref, int gtid,
                                        kmp_real64 *lhs, kmp_real64 rhs,
                                        int flag);
#if KMP_HAVE_QUAD
QUAD_LEGACY __kmpc_atomic_float16_max_cpt(ident_t *id_ref, int gtid,
                                          QUAD_LEGACY *lhs, QUAD_LEGACY rhs,
                                          int flag);
QUAD_LEGACY __kmpc_atomic_float16_min_cpt(ident_t *id_ref, int gtid,
                                          QUAD_LEGACY *lhs, QUAD_LEGACY rhs,
                                          int flag);
#endif
// .NEQV. (same as xor)
char __kmpc_atomic_fixed1_neqv_cpt(ident_t *id_ref, int gtid, char *lhs,
                                   char rhs, int flag);
short __kmpc_atomic_fixed2_neqv_cpt(ident_t *id_ref, int gtid, short *lhs,
                                    short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_neqv_cpt(ident_t *id_ref, int gtid,
                                        kmp_int32 *lhs, kmp_int32 rhs,
                                        int flag);
kmp_int64 __kmpc_atomic_fixed8_neqv_cpt(ident_t *id_ref, int gtid,
                                        kmp_int64 *lhs, kmp_int64 rhs,
                                        int flag);
// .EQV. (same as ~xor)
char __kmpc_atomic_fixed1_eqv_cpt(ident_t *id_ref, int gtid, char *lhs,
                                  char rhs, int flag);
short __kmpc_atomic_fixed2_eqv_cpt(ident_t *id_ref, int gtid, short *lhs,
                                   short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_eqv_cpt(ident_t *id_ref, int gtid,
                                       kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_eqv_cpt(ident_t *id_ref, int gtid,
                                       kmp_int64 *lhs, kmp_int64 rhs, int flag);
// long double type
long double __kmpc_atomic_float10_add_cpt(ident_t *id_ref, int gtid,
                                          long double *lhs, long double rhs,
                                          int flag);
long double __kmpc_atomic_float10_sub_cpt(ident_t *id_ref, int gtid,
                                          long double *lhs, long double rhs,
                                          int flag);
long double __kmpc_atomic_float10_mul_cpt(ident_t *id_ref, int gtid,
                                          long double *lhs, long double rhs,
                                          int flag);
long double __kmpc_atomic_float10_div_cpt(ident_t *id_ref, int gtid,
                                          long double *lhs, long double rhs,
                                          int flag);
#if KMP_HAVE_QUAD
// _Quad type
QUAD_LEGACY __kmpc_atomic_float16_add_cpt(ident_t *id_ref, int gtid,
                                          QUAD_LEGACY *lhs, QUAD_LEGACY rhs,
                                          int flag);
QUAD_LEGACY __kmpc_atomic_float16_sub_cpt(ident_t *id_ref, int gtid,
                                          QUAD_LEGACY *lhs, QUAD_LEGACY rhs,
                                          int flag);
QUAD_LEGACY __kmpc_atomic_float16_mul_cpt(ident_t *id_ref, int gtid,
                                          QUAD_LEGACY *lhs, QUAD_LEGACY rhs,
                                          int flag);
QUAD_LEGACY __kmpc_atomic_float16_div_cpt(ident_t *id_ref, int gtid,
                                          QUAD_LEGACY *lhs, QUAD_LEGACY rhs,
                                          int flag);
#endif
// routines for complex types
// Workaround for cmplx4 routines - return void; captured value is returned via
// the argument
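// Out-parameter sketch (illustrative): on the cmplx4 capture paths the result
// comes back through 'out' rather than the return value, e.g.
//   kmp_cmplx32 v;
//   __kmpc_atomic_cmplx4_add_cpt(&loc, gtid, &x, e, &v, /*flag=*/1);
//   // v now holds the captured value of x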
void __kmpc_atomic_cmplx4_add_cpt(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                                  kmp_cmplx32 rhs, kmp_cmplx32 *out, int flag);
void __kmpc_atomic_cmplx4_sub_cpt(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                                  kmp_cmplx32 rhs, kmp_cmplx32 *out, int flag);
void __kmpc_atomic_cmplx4_mul_cpt(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                                  kmp_cmplx32 rhs, kmp_cmplx32 *out, int flag);
void __kmpc_atomic_cmplx4_div_cpt(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                                  kmp_cmplx32 rhs, kmp_cmplx32 *out, int flag);

kmp_cmplx64 __kmpc_atomic_cmplx8_add_cpt(ident_t *id_ref, int gtid,
                                         kmp_cmplx64 *lhs, kmp_cmplx64 rhs,
                                         int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_sub_cpt(ident_t *id_ref, int gtid,
                                         kmp_cmplx64 *lhs, kmp_cmplx64 rhs,
                                         int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_mul_cpt(ident_t *id_ref, int gtid,
                                         kmp_cmplx64 *lhs, kmp_cmplx64 rhs,
                                         int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_div_cpt(ident_t *id_ref, int gtid,
                                         kmp_cmplx64 *lhs, kmp_cmplx64 rhs,
                                         int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_add_cpt(ident_t *id_ref, int gtid,
                                          kmp_cmplx80 *lhs, kmp_cmplx80 rhs,
                                          int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_sub_cpt(ident_t *id_ref, int gtid,
                                          kmp_cmplx80 *lhs, kmp_cmplx80 rhs,
                                          int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_mul_cpt(ident_t *id_ref, int gtid,
                                          kmp_cmplx80 *lhs, kmp_cmplx80 rhs,
                                          int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_div_cpt(ident_t *id_ref, int gtid,
                                          kmp_cmplx80 *lhs, kmp_cmplx80 rhs,
                                          int flag);
#if KMP_HAVE_QUAD
CPLX128_LEG __kmpc_atomic_cmplx16_add_cpt(ident_t *id_ref, int gtid,
                                          CPLX128_LEG *lhs, CPLX128_LEG rhs,
                                          int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_sub_cpt(ident_t *id_ref, int gtid,
                                          CPLX128_LEG *lhs, CPLX128_LEG rhs,
                                          int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_mul_cpt(ident_t *id_ref, int gtid,
                                          CPLX128_LEG *lhs, CPLX128_LEG rhs,
                                          int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_div_cpt(ident_t *id_ref, int gtid,
                                          CPLX128_LEG *lhs, CPLX128_LEG rhs,
                                          int flag);
#if (KMP_ARCH_X86)
// Routines with 16-byte arguments aligned to 16-byte boundary
Quad_a16_t __kmpc_atomic_float16_add_a16_cpt(ident_t *id_ref, int gtid,
                                             Quad_a16_t *lhs, Quad_a16_t rhs,
                                             int flag);
Quad_a16_t __kmpc_atomic_float16_sub_a16_cpt(ident_t *id_ref, int gtid,
                                             Quad_a16_t *lhs, Quad_a16_t rhs,
                                             int flag);
Quad_a16_t __kmpc_atomic_float16_mul_a16_cpt(ident_t *id_ref, int gtid,
                                             Quad_a16_t *lhs, Quad_a16_t rhs,
                                             int flag);
Quad_a16_t __kmpc_atomic_float16_div_a16_cpt(ident_t *id_ref, int gtid,
                                             Quad_a16_t *lhs, Quad_a16_t rhs,
                                             int flag);
Quad_a16_t __kmpc_atomic_float16_max_a16_cpt(ident_t *id_ref, int gtid,
                                             Quad_a16_t *lhs, Quad_a16_t rhs,
                                             int flag);
Quad_a16_t __kmpc_atomic_float16_min_a16_cpt(ident_t *id_ref, int gtid,
                                             Quad_a16_t *lhs, Quad_a16_t rhs,
                                             int flag);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_add_a16_cpt(ident_t *id_ref, int gtid,
                                                     kmp_cmplx128_a16_t *lhs,
                                                     kmp_cmplx128_a16_t rhs,
                                                     int flag);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_sub_a16_cpt(ident_t *id_ref, int gtid,
                                                     kmp_cmplx128_a16_t *lhs,
                                                     kmp_cmplx128_a16_t rhs,
                                                     int flag);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_mul_a16_cpt(ident_t *id_ref, int gtid,
                                                     kmp_cmplx128_a16_t *lhs,
                                                     kmp_cmplx128_a16_t rhs,
                                                     int flag);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_div_a16_cpt(ident_t *id_ref, int gtid,
                                                     kmp_cmplx128_a16_t *lhs,
                                                     kmp_cmplx128_a16_t rhs,
                                                     int flag);
#endif
#endif

void __kmpc_atomic_start(void);
void __kmpc_atomic_end(void);

// OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr
// binop x; v = x; } for non-commutative operations.
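// Reversed capture sketch (illustrative, compiler-dependent): for
//   #pragma omp atomic capture
//   { x = e / x; v = x; } // 'x' is the right-hand operand of '/'
// a compiler may emit
//   v = __kmpc_atomic_float8_div_cpt_rev(&loc, gtid, &x, e, /*flag=*/1);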

char __kmpc_atomic_fixed1_sub_cpt_rev(ident_t *id_ref, int gtid, char *lhs,
                                      char rhs, int flag);
char __kmpc_atomic_fixed1_div_cpt_rev(ident_t *id_ref, int gtid, char *lhs,
                                      char rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_div_cpt_rev(ident_t *id_ref, int gtid,
                                                unsigned char *lhs,
                                                unsigned char rhs, int flag);
char __kmpc_atomic_fixed1_shl_cpt_rev(ident_t *id_ref, int gtid, char *lhs,
                                      char rhs, int flag);
char __kmpc_atomic_fixed1_shr_cpt_rev(ident_t *id_ref, int gtid, char *lhs,
                                      char rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_shr_cpt_rev(ident_t *id_ref, int gtid,
                                                unsigned char *lhs,
                                                unsigned char rhs, int flag);
short __kmpc_atomic_fixed2_sub_cpt_rev(ident_t *id_ref, int gtid, short *lhs,
                                       short rhs, int flag);
short __kmpc_atomic_fixed2_div_cpt_rev(ident_t *id_ref, int gtid, short *lhs,
                                       short rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_div_cpt_rev(ident_t *id_ref, int gtid,
                                                 unsigned short *lhs,
                                                 unsigned short rhs, int flag);
short __kmpc_atomic_fixed2_shl_cpt_rev(ident_t *id_ref, int gtid, short *lhs,
                                       short rhs, int flag);
short __kmpc_atomic_fixed2_shr_cpt_rev(ident_t *id_ref, int gtid, short *lhs,
                                       short rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_shr_cpt_rev(ident_t *id_ref, int gtid,
                                                 unsigned short *lhs,
                                                 unsigned short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_sub_cpt_rev(ident_t *id_ref, int gtid,
                                           kmp_int32 *lhs, kmp_int32 rhs,
                                           int flag);
kmp_int32 __kmpc_atomic_fixed4_div_cpt_rev(ident_t *id_ref, int gtid,
                                           kmp_int32 *lhs, kmp_int32 rhs,
                                           int flag);
kmp_uint32 __kmpc_atomic_fixed4u_div_cpt_rev(ident_t *id_ref, int gtid,
                                             kmp_uint32 *lhs, kmp_uint32 rhs,
                                             int flag);
kmp_int32 __kmpc_atomic_fixed4_shl_cpt_rev(ident_t *id_ref, int gtid,
                                           kmp_int32 *lhs, kmp_int32 rhs,
                                           int flag);
kmp_int32 __kmpc_atomic_fixed4_shr_cpt_rev(ident_t *id_ref, int gtid,
                                           kmp_int32 *lhs, kmp_int32 rhs,
                                           int flag);
kmp_uint32 __kmpc_atomic_fixed4u_shr_cpt_rev(ident_t *id_ref, int gtid,
                                             kmp_uint32 *lhs, kmp_uint32 rhs,
                                             int flag);
kmp_int64 __kmpc_atomic_fixed8_sub_cpt_rev(ident_t *id_ref, int gtid,
                                           kmp_int64 *lhs, kmp_int64 rhs,
                                           int flag);
kmp_int64 __kmpc_atomic_fixed8_div_cpt_rev(ident_t *id_ref, int gtid,
                                           kmp_int64 *lhs, kmp_int64 rhs,
                                           int flag);
kmp_uint64 __kmpc_atomic_fixed8u_div_cpt_rev(ident_t *id_ref, int gtid,
                                             kmp_uint64 *lhs, kmp_uint64 rhs,
                                             int flag);
kmp_int64 __kmpc_atomic_fixed8_shl_cpt_rev(ident_t *id_ref, int gtid,
                                           kmp_int64 *lhs, kmp_int64 rhs,
                                           int flag);
kmp_int64 __kmpc_atomic_fixed8_shr_cpt_rev(ident_t *id_ref, int gtid,
                                           kmp_int64 *lhs, kmp_int64 rhs,
                                           int flag);
kmp_uint64 __kmpc_atomic_fixed8u_shr_cpt_rev(ident_t *id_ref, int gtid,
                                             kmp_uint64 *lhs, kmp_uint64 rhs,
                                             int flag);
float __kmpc_atomic_float4_sub_cpt_rev(ident_t *id_ref, int gtid, float *lhs,
                                       float rhs, int flag);
float __kmpc_atomic_float4_div_cpt_rev(ident_t *id_ref, int gtid, float *lhs,
                                       float rhs, int flag);
double __kmpc_atomic_float8_sub_cpt_rev(ident_t *id_ref, int gtid, double *lhs,
                                        double rhs, int flag);
double __kmpc_atomic_float8_div_cpt_rev(ident_t *id_ref, int gtid, double *lhs,
                                        double rhs, int flag);
long double __kmpc_atomic_float10_sub_cpt_rev(ident_t *id_ref, int gtid,
                                              long double *lhs,
                                              long double rhs, int flag);
long double __kmpc_atomic_float10_div_cpt_rev(ident_t *id_ref, int gtid,
                                              long double *lhs,
                                              long double rhs, int flag);
#if KMP_HAVE_QUAD
QUAD_LEGACY __kmpc_atomic_float16_sub_cpt_rev(ident_t *id_ref, int gtid,
                                              QUAD_LEGACY *lhs,
                                              QUAD_LEGACY rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_div_cpt_rev(ident_t *id_ref, int gtid,
                                              QUAD_LEGACY *lhs,
                                              QUAD_LEGACY rhs, int flag);
#endif
// Workaround for cmplx4 routines - return void; captured value is returned via
// the argument
void __kmpc_atomic_cmplx4_sub_cpt_rev(ident_t *id_ref, int gtid,
                                      kmp_cmplx32 *lhs, kmp_cmplx32 rhs,
                                      kmp_cmplx32 *out, int flag);
void __kmpc_atomic_cmplx4_div_cpt_rev(ident_t *id_ref, int gtid,
                                      kmp_cmplx32 *lhs, kmp_cmplx32 rhs,
                                      kmp_cmplx32 *out, int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_sub_cpt_rev(ident_t *id_ref, int gtid,
                                             kmp_cmplx64 *lhs, kmp_cmplx64 rhs,
                                             int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_div_cpt_rev(ident_t *id_ref, int gtid,
                                             kmp_cmplx64 *lhs, kmp_cmplx64 rhs,
                                             int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_sub_cpt_rev(ident_t *id_ref, int gtid,
                                              kmp_cmplx80 *lhs,
                                              kmp_cmplx80 rhs, int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_div_cpt_rev(ident_t *id_ref, int gtid,
                                              kmp_cmplx80 *lhs,
                                              kmp_cmplx80 rhs, int flag);
#if KMP_HAVE_QUAD
CPLX128_LEG __kmpc_atomic_cmplx16_sub_cpt_rev(ident_t *id_ref, int gtid,
                                              CPLX128_LEG *lhs,
                                              CPLX128_LEG rhs, int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_div_cpt_rev(ident_t *id_ref, int gtid,
                                              CPLX128_LEG *lhs,
                                              CPLX128_LEG rhs, int flag);
#if (KMP_ARCH_X86)
Quad_a16_t __kmpc_atomic_float16_sub_a16_cpt_rev(ident_t *id_ref, int gtid,
                                                 Quad_a16_t *lhs,
                                                 Quad_a16_t rhs, int flag);
Quad_a16_t __kmpc_atomic_float16_div_a16_cpt_rev(ident_t *id_ref, int gtid,
                                                 Quad_a16_t *lhs,
                                                 Quad_a16_t rhs, int flag);
kmp_cmplx128_a16_t
__kmpc_atomic_cmplx16_sub_a16_cpt_rev(ident_t *id_ref, int gtid,
                                      kmp_cmplx128_a16_t *lhs,
                                      kmp_cmplx128_a16_t rhs, int flag);
kmp_cmplx128_a16_t
__kmpc_atomic_cmplx16_div_a16_cpt_rev(ident_t *id_ref, int gtid,
                                      kmp_cmplx128_a16_t *lhs,
                                      kmp_cmplx128_a16_t rhs, int flag);
#endif
#endif

// OpenMP 4.0 Capture-write (swap): {v = x; x = expr;}
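// Illustrative sketch only (hypothetical variable names 'loc', 'x', 'v',
// 'expr'): an atomic capture-write
//   #pragma omp atomic capture
//   { v = x; x = expr; }   // kmp_int32 x, v, expr;
// is expected to lower to
//   v = __kmpc_atomic_fixed4_swp(&loc, gtid, &x, expr);
// i.e. the old value of *lhs is returned and *lhs is overwritten with rhs.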
char __kmpc_atomic_fixed1_swp(ident_t *id_ref, int gtid, char *lhs, char rhs);
short __kmpc_atomic_fixed2_swp(ident_t *id_ref, int gtid, short *lhs,
                               short rhs);
kmp_int32 __kmpc_atomic_fixed4_swp(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                   kmp_int32 rhs);
kmp_int64 __kmpc_atomic_fixed8_swp(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                   kmp_int64 rhs);
float __kmpc_atomic_float4_swp(ident_t *id_ref, int gtid, float *lhs,
                               float rhs);
double __kmpc_atomic_float8_swp(ident_t *id_ref, int gtid, double *lhs,
                                double rhs);
long double __kmpc_atomic_float10_swp(ident_t *id_ref, int gtid,
                                      long double *lhs, long double rhs);
#if KMP_HAVE_QUAD
QUAD_LEGACY __kmpc_atomic_float16_swp(ident_t *id_ref, int gtid,
                                      QUAD_LEGACY *lhs, QUAD_LEGACY rhs);
#endif
// !!! TODO: check if we need a workaround here
void __kmpc_atomic_cmplx4_swp(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                              kmp_cmplx32 rhs, kmp_cmplx32 *out);
// kmp_cmplx32 __kmpc_atomic_cmplx4_swp( ident_t *id_ref, int gtid,
// kmp_cmplx32 * lhs, kmp_cmplx32 rhs );

kmp_cmplx64 __kmpc_atomic_cmplx8_swp(ident_t *id_ref, int gtid,
                                     kmp_cmplx64 *lhs, kmp_cmplx64 rhs);
kmp_cmplx80 __kmpc_atomic_cmplx10_swp(ident_t *id_ref, int gtid,
                                      kmp_cmplx80 *lhs, kmp_cmplx80 rhs);
#if KMP_HAVE_QUAD
CPLX128_LEG __kmpc_atomic_cmplx16_swp(ident_t *id_ref, int gtid,
                                      CPLX128_LEG *lhs, CPLX128_LEG rhs);
#if (KMP_ARCH_X86)
Quad_a16_t __kmpc_atomic_float16_a16_swp(ident_t *id_ref, int gtid,
                                         Quad_a16_t *lhs, Quad_a16_t rhs);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_a16_swp(ident_t *id_ref, int gtid,
                                                 kmp_cmplx128_a16_t *lhs,
                                                 kmp_cmplx128_a16_t rhs);
#endif
#endif

// Capture routines for mixed types (RHS=float16)
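// Illustrative sketch only (hypothetical variable names 'loc', 'x', 'q',
// 'v'): these entries implement captured updates whose right-hand side has a
// wider floating-point type (_Quad) than the updated location, e.g.
//   kmp_int32 x, v; _Quad q;
//   #pragma omp atomic capture
//   { x = x + q; v = x; }
// which is expected to lower to
//   v = __kmpc_atomic_fixed4_add_cpt_fp(&loc, gtid, &x, q, 1);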
#if KMP_HAVE_QUAD

char __kmpc_atomic_fixed1_add_cpt_fp(ident_t *id_ref, int gtid, char *lhs,
                                     _Quad rhs, int flag);
char __kmpc_atomic_fixed1_sub_cpt_fp(ident_t *id_ref, int gtid, char *lhs,
                                     _Quad rhs, int flag);
char __kmpc_atomic_fixed1_mul_cpt_fp(ident_t *id_ref, int gtid, char *lhs,
                                     _Quad rhs, int flag);
char __kmpc_atomic_fixed1_div_cpt_fp(ident_t *id_ref, int gtid, char *lhs,
                                     _Quad rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_add_cpt_fp(ident_t *id_ref, int gtid,
                                               unsigned char *lhs, _Quad rhs,
                                               int flag);
unsigned char __kmpc_atomic_fixed1u_sub_cpt_fp(ident_t *id_ref, int gtid,
                                               unsigned char *lhs, _Quad rhs,
                                               int flag);
unsigned char __kmpc_atomic_fixed1u_mul_cpt_fp(ident_t *id_ref, int gtid,
                                               unsigned char *lhs, _Quad rhs,
                                               int flag);
unsigned char __kmpc_atomic_fixed1u_div_cpt_fp(ident_t *id_ref, int gtid,
                                               unsigned char *lhs, _Quad rhs,
                                               int flag);

short __kmpc_atomic_fixed2_add_cpt_fp(ident_t *id_ref, int gtid, short *lhs,
                                      _Quad rhs, int flag);
short __kmpc_atomic_fixed2_sub_cpt_fp(ident_t *id_ref, int gtid, short *lhs,
                                      _Quad rhs, int flag);
short __kmpc_atomic_fixed2_mul_cpt_fp(ident_t *id_ref, int gtid, short *lhs,
                                      _Quad rhs, int flag);
short __kmpc_atomic_fixed2_div_cpt_fp(ident_t *id_ref, int gtid, short *lhs,
                                      _Quad rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_add_cpt_fp(ident_t *id_ref, int gtid,
                                                unsigned short *lhs, _Quad rhs,
                                                int flag);
unsigned short __kmpc_atomic_fixed2u_sub_cpt_fp(ident_t *id_ref, int gtid,
                                                unsigned short *lhs, _Quad rhs,
                                                int flag);
unsigned short __kmpc_atomic_fixed2u_mul_cpt_fp(ident_t *id_ref, int gtid,
                                                unsigned short *lhs, _Quad rhs,
                                                int flag);
unsigned short __kmpc_atomic_fixed2u_div_cpt_fp(ident_t *id_ref, int gtid,
                                                unsigned short *lhs, _Quad rhs,
                                                int flag);

kmp_int32 __kmpc_atomic_fixed4_add_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int32 *lhs, _Quad rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_sub_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int32 *lhs, _Quad rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_mul_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int32 *lhs, _Quad rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_div_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int32 *lhs, _Quad rhs, int flag);
kmp_uint32 __kmpc_atomic_fixed4u_add_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint32 *lhs, _Quad rhs,
                                            int flag);
kmp_uint32 __kmpc_atomic_fixed4u_sub_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint32 *lhs, _Quad rhs,
                                            int flag);
kmp_uint32 __kmpc_atomic_fixed4u_mul_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint32 *lhs, _Quad rhs,
                                            int flag);
kmp_uint32 __kmpc_atomic_fixed4u_div_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint32 *lhs, _Quad rhs,
                                            int flag);

kmp_int64 __kmpc_atomic_fixed8_add_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int64 *lhs, _Quad rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_sub_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int64 *lhs, _Quad rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_mul_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int64 *lhs, _Quad rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_div_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int64 *lhs, _Quad rhs, int flag);
kmp_uint64 __kmpc_atomic_fixed8u_add_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint64 *lhs, _Quad rhs,
                                            int flag);
kmp_uint64 __kmpc_atomic_fixed8u_sub_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint64 *lhs, _Quad rhs,
                                            int flag);
kmp_uint64 __kmpc_atomic_fixed8u_mul_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint64 *lhs, _Quad rhs,
                                            int flag);
kmp_uint64 __kmpc_atomic_fixed8u_div_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint64 *lhs, _Quad rhs,
                                            int flag);

float __kmpc_atomic_float4_add_cpt_fp(ident_t *id_ref, int gtid,
                                      kmp_real32 *lhs, _Quad rhs, int flag);
float __kmpc_atomic_float4_sub_cpt_fp(ident_t *id_ref, int gtid,
                                      kmp_real32 *lhs, _Quad rhs, int flag);
float __kmpc_atomic_float4_mul_cpt_fp(ident_t *id_ref, int gtid,
                                      kmp_real32 *lhs, _Quad rhs, int flag);
float __kmpc_atomic_float4_div_cpt_fp(ident_t *id_ref, int gtid,
                                      kmp_real32 *lhs, _Quad rhs, int flag);

double __kmpc_atomic_float8_add_cpt_fp(ident_t *id_ref, int gtid,
                                       kmp_real64 *lhs, _Quad rhs, int flag);
double __kmpc_atomic_float8_sub_cpt_fp(ident_t *id_ref, int gtid,
                                       kmp_real64 *lhs, _Quad rhs, int flag);
double __kmpc_atomic_float8_mul_cpt_fp(ident_t *id_ref, int gtid,
                                       kmp_real64 *lhs, _Quad rhs, int flag);
double __kmpc_atomic_float8_div_cpt_fp(ident_t *id_ref, int gtid,
                                       kmp_real64 *lhs, _Quad rhs, int flag);

long double __kmpc_atomic_float10_add_cpt_fp(ident_t *id_ref, int gtid,
                                             long double *lhs, _Quad rhs,
                                             int flag);
long double __kmpc_atomic_float10_sub_cpt_fp(ident_t *id_ref, int gtid,
                                             long double *lhs, _Quad rhs,
                                             int flag);
long double __kmpc_atomic_float10_mul_cpt_fp(ident_t *id_ref, int gtid,
                                             long double *lhs, _Quad rhs,
                                             int flag);
long double __kmpc_atomic_float10_div_cpt_fp(ident_t *id_ref, int gtid,
                                             long double *lhs, _Quad rhs,
                                             int flag);

char __kmpc_atomic_fixed1_sub_cpt_rev_fp(ident_t *id_ref, int gtid, char *lhs,
                                         _Quad rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                   unsigned char *lhs,
                                                   _Quad rhs, int flag);
char __kmpc_atomic_fixed1_div_cpt_rev_fp(ident_t *id_ref, int gtid, char *lhs,
                                         _Quad rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                   unsigned char *lhs,
                                                   _Quad rhs, int flag);
short __kmpc_atomic_fixed2_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                          short *lhs, _Quad rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                    unsigned short *lhs,
                                                    _Quad rhs, int flag);
short __kmpc_atomic_fixed2_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                          short *lhs, _Quad rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                    unsigned short *lhs,
                                                    _Quad rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                              kmp_int32 *lhs, _Quad rhs,
                                              int flag);
kmp_uint32 __kmpc_atomic_fixed4u_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                kmp_uint32 *lhs, _Quad rhs,
                                                int flag);
kmp_int32 __kmpc_atomic_fixed4_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                              kmp_int32 *lhs, _Quad rhs,
                                              int flag);
kmp_uint32 __kmpc_atomic_fixed4u_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                kmp_uint32 *lhs, _Quad rhs,
                                                int flag);
kmp_int64 __kmpc_atomic_fixed8_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                              kmp_int64 *lhs, _Quad rhs,
                                              int flag);
kmp_uint64 __kmpc_atomic_fixed8u_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                kmp_uint64 *lhs, _Quad rhs,
                                                int flag);
kmp_int64 __kmpc_atomic_fixed8_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                              kmp_int64 *lhs, _Quad rhs,
                                              int flag);
kmp_uint64 __kmpc_atomic_fixed8u_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                kmp_uint64 *lhs, _Quad rhs,
                                                int flag);
float __kmpc_atomic_float4_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                          float *lhs, _Quad rhs, int flag);
float __kmpc_atomic_float4_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                          float *lhs, _Quad rhs, int flag);
double __kmpc_atomic_float8_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                           double *lhs, _Quad rhs, int flag);
double __kmpc_atomic_float8_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                           double *lhs, _Quad rhs, int flag);
long double __kmpc_atomic_float10_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                 long double *lhs, _Quad rhs,
                                                 int flag);
long double __kmpc_atomic_float10_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                 long double *lhs, _Quad rhs,
                                                 int flag);

#endif // KMP_HAVE_QUAD

// End of OpenMP 4.0 capture

#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64

/* ------------------------------------------------------------------------ */

#ifdef __cplusplus
} // extern "C"
#endif

#endif /* KMP_ATOMIC_H */

// end of file