/*
 * kmp_gsupport.cpp
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_atomic.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

#define MKLOC(loc, routine) \
  static ident_t loc = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};

#include "kmp_ftn_os.h"

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_barrier");
  KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmpc_barrier(&loc, gtid);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
}

// Mutual exclusion

// The symbol that icc/ifort generates for unnamed critical sections
// - .gomp_critical_user_ - is defined using .comm in any objects that
// reference it. We can't reference it directly here in C code, as the symbol
// contains a ".".
//
// The RTL contains an assembly language definition of .gomp_critical_user_
// with another symbol __kmp_unnamed_critical_addr initialized with its
// address.
extern kmp_critical_name *__kmp_unnamed_critical_addr;

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_critical_start");
  KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_END)(void) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_critical_end");
  KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_critical_name_start");
  KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
  __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_critical_name_end");
  KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
  __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
}

// The Gnu codegen tries to use locked operations to perform atomic updates
// inline. If it can't, then it calls GOMP_atomic_start() before performing
// the update and GOMP_atomic_end() afterward, regardless of the data type.
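//
// As an illustrative sketch (not something this file defines or emits), an
// atomic update on a type the codegen cannot handle inline, e.g.
//
//   #pragma omp atomic
//   x *= 2.0L; // x is a long double
//
// is lowered by gcc to roughly
//
//   GOMP_atomic_start();
//   x *= 2.0L;
//   GOMP_atomic_end();
//
// so the two entry points below simply acquire and release the single global
// __kmp_atomic_lock, independent of the operand type.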
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_START)(void) {
  int gtid = __kmp_entry_gtid();
  KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));

#if OMPT_SUPPORT
  __ompt_thread_assign_wait_id(0);
#endif

  __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_atomic_end: T#%d\n", gtid));
  __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}

int KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_single_start");
  KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();
  __kmp_resume_if_soft_paused();

  // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
  // workshare when USE_CHECKS is defined. We need to avoid the push,
  // as there is no corresponding GOMP_single_end() call.
  kmp_int32 rc = __kmp_enter_single(gtid, &loc, FALSE);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  kmp_info_t *this_thr = __kmp_threads[gtid];
  kmp_team_t *team = this_thr->th.th_team;
  int tid = __kmp_tid_from_gtid(gtid);

  if (ompt_enabled.enabled) {
    if (rc) {
      if (ompt_enabled.ompt_callback_work) {
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_executor, ompt_scope_begin,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
      }
    } else {
      if (ompt_enabled.ompt_callback_work) {
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_other, ompt_scope_begin,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_other, ompt_scope_end,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
      }
    }
  }
#endif

  return rc;
}

void *KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void) {
  void *retval;
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_single_copy_start");
  KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();
  __kmp_resume_if_soft_paused();

  // If this is the first thread to enter, return NULL. The generated code will
  // then call GOMP_single_copy_end() for this thread only, with the
  // copyprivate data pointer as an argument.
  if (__kmp_enter_single(gtid, &loc, FALSE))
    return NULL;

  // Wait for the first thread to set the copyprivate data pointer,
  // and for all other threads to reach this point.

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

  // Retrieve the value of the copyprivate data pointer, and wait for all
  // threads to do likewise, then return.
  retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
  return retval;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

  // Set the copyprivate data pointer for the team, then hit the barrier so
  // that the other threads will continue on and read it. Hit another barrier
  // before continuing, so that they know the copyprivate data pointer has been
  // propagated to all threads before trying to reuse the t_copypriv_data
  // field.
  __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_ordered_start");
  KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_ordered(&loc, gtid);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_END)(void) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_ordered_end");
  KA_TRACE(20, ("GOMP_ordered_end: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_end_ordered(&loc, gtid);
}

// Dispatch macro defs
//
// They come in two flavors: 64-bit unsigned, and either 32-bit signed
// (IA-32 architecture) or 64-bit signed (Intel(R) 64).
#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
#define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4
#define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4
#define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4
#else
#define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_8
#define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_8
#define KMP_DISPATCH_NEXT __kmpc_dispatch_next_8
#endif /* KMP_ARCH_X86 */

#define KMP_DISPATCH_INIT_ULL __kmp_aux_dispatch_init_8u
#define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u
#define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u

// The parallel construct

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
                                 void *data) {
#if OMPT_SUPPORT
  kmp_info_t *thr;
  ompt_frame_t *ompt_frame;
  ompt_state_t enclosing_state;

  if (ompt_enabled.enabled) {
    // get pointer to thread data structure
    thr = __kmp_threads[*gtid];

    // save enclosing task state; set current state for task
    enclosing_state = thr->th.ompt_thread_info.state;
    thr->th.ompt_thread_info.state = ompt_state_work_parallel;

    // set task frame
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  task(data);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // clear task frame
    ompt_frame->exit_frame = ompt_data_none;

    // restore enclosing state
    thr->th.ompt_thread_info.state = enclosing_state;
  }
#endif
}

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
                                          void (*task)(void *), void *data,
                                          unsigned num_threads, ident_t *loc,
                                          enum sched_type schedule, long start,
                                          long end, long incr,
                                          long chunk_sz) {
  // Initialize the loop worksharing construct.

  KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_sz,
                    schedule != kmp_sch_static);

#if OMPT_SUPPORT
  kmp_info_t *thr;
  ompt_frame_t *ompt_frame;
  ompt_state_t enclosing_state;

  if (ompt_enabled.enabled) {
    thr = __kmp_threads[*gtid];
    // save enclosing task state; set current state for task
    enclosing_state = thr->th.ompt_thread_info.state;
    thr->th.ompt_thread_info.state = ompt_state_work_parallel;

    // set task frame
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  // Now invoke the microtask.
  task(data);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // clear task frame
    ompt_frame->exit_frame = ompt_data_none;

    // reset enclosing state
    thr->th.ompt_thread_info.state = enclosing_state;
  }
#endif
}

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_fork_call(ident_t *loc, int gtid,
                         void (*unwrapped_task)(void *), microtask_t wrapper,
                         int argc, ...) {
  int rc;
  kmp_info_t *thr = __kmp_threads[gtid];
  kmp_team_t *team = thr->th.th_team;
  int tid = __kmp_tid_from_gtid(gtid);

  va_list ap;
  va_start(ap, argc);

  rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc, wrapper,
                       __kmp_invoke_task_func,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
                       &ap
#else
                       ap
#endif
                       );

  va_end(ap);

  if (rc) {
    __kmp_run_before_invoked_task(gtid, tid, thr, team);
  }

#if OMPT_SUPPORT
  int ompt_team_size;
  if (ompt_enabled.enabled) {
    ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
    ompt_task_info_t *task_info = __ompt_get_task_info_object(0);

    // implicit task callback
    if (ompt_enabled.ompt_callback_implicit_task) {
      ompt_team_size = __kmp_team_from_gtid(gtid)->t.t_nproc;
      ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
          ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), ompt_team_size, __kmp_tid_from_gtid(gtid),
          ompt_task_implicit); // TODO: Can this be ompt_task_initial?
      task_info->thread_num = __kmp_tid_from_gtid(gtid);
    }
    thr->th.ompt_thread_info.state = ompt_state_work_parallel;
  }
#endif
}

static void __kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid,
                                            void (*task)(void *)) {
#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmp_serialized_parallel(loc, gtid);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *),
                                                        void *data,
                                                        unsigned num_threads) {
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  ompt_frame_t *parent_frame, *frame;

  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
    parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif

  MKLOC(loc, "GOMP_parallel_start");
  KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));

  if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
    if (num_threads != 0) {
      __kmp_push_num_threads(&loc, gtid, num_threads);
    }
    __kmp_GOMP_fork_call(&loc, gtid, task,
                         (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task,
                         data);
  } else {
    __kmp_GOMP_serialized_parallel(&loc, gtid, task);
  }

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &frame, NULL, NULL);
    frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(void) {
  int gtid = __kmp_get_gtid();
  kmp_info_t *thr;

  thr = __kmp_threads[gtid];

  MKLOC(loc, "GOMP_parallel_end");
  KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));

  if (!thr->th.th_team->t.t_serialized) {
    __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
                                 thr->th.th_team);

#if OMPT_SUPPORT
    if (ompt_enabled.enabled) {
      // The implicit task is finished here; in the barrier we might schedule
      // deferred tasks, and these don't see the implicit task on the stack.
      OMPT_CUR_TASK_INFO(thr)->frame.exit_frame = ompt_data_none;
    }
#endif

    __kmp_join_call(&loc, gtid
#if OMPT_SUPPORT
                    ,
                    fork_context_gnu
#endif
                    );
  } else {
    __kmpc_end_serialized_parallel(&loc, gtid);
  }
}

// Loop worksharing constructs

// The Gnu codegen passes in an exclusive upper bound for the overall range,
// but the libguide dispatch code expects an inclusive upper bound, hence the
// "end - incr" 5th argument to KMP_DISPATCH_INIT (and the "ub - str" 11th
// argument to __kmp_GOMP_fork_call).
//
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
// but the Gnu codegen expects an exclusive upper bound, so the adjustment
// "*p_ub += stride" compensates for the discrepancy.
//
// Correction: the gnu codegen always adjusts the upper bound by +-1, not the
// stride value. We adjust the dispatch parameters accordingly (by +-1), but
// we still adjust p_ub by the actual stride value.
//
// The "runtime" versions do not take a chunk_sz parameter.
//
// The profile lib cannot support construct checking of unordered loops that
// are predetermined by the compiler to be statically scheduled, as the gcc
// codegen will not always emit calls to GOMP_loop_static_next() to get the
// next iteration. Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result. It doesn't do this
// with ordered static loops, so they can be checked.

#if OMPT_SUPPORT
#define IF_OMPT_SUPPORT(code) code
#else
#define IF_OMPT_SUPPORT(code)
#endif

#define LOOP_START(func, schedule) \
  int func(long lb, long ub, long str, long chunk_sz, long *p_lb, \
           long *p_ub) { \
    int status; \
    long stride; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
         gtid, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                        (schedule) != kmp_sch_static); \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define LOOP_RUNTIME_START(func, schedule) \
  int func(long lb, long ub, long str, long *p_lb, long *p_ub) { \
    int status; \
    long stride; \
    long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR(func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \
         gtid, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define KMP_DOACROSS_FINI(status, gtid) \
  if (!status && __kmp_threads[gtid]->th.th_dispatch->th_doacross_flags) { \
    __kmpc_doacross_fini(NULL, gtid); \
  }

#define LOOP_NEXT(func, fini_code) \
  int func(long *p_lb, long *p_ub) { \
    int status; \
    long stride; \
    int gtid = __kmp_get_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE(20, (KMP_STR(func) ": T#%d\n", gtid)); \
 \
    IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
    fini_code status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                         (kmp_int *)p_ub, (kmp_int *)&stride); \
    if (status) { \
      *p_ub += (stride > 0) ? 1 : -1; \
    } \
    KMP_DOACROSS_FINI(status, gtid) \
 \
    KA_TRACE( \
        20, \
        (KMP_STR(func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \
                       "returning %d\n", \
         gtid, *p_lb, *p_ub, stride, status)); \
    return status; \
  }

LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START),
           kmp_sch_dynamic_chunked)
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START),
           kmp_sch_dynamic_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT), {})
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_START),
           kmp_sch_guided_chunked)
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START),
           kmp_sch_guided_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT), {})
LOOP_RUNTIME_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_START),
                   kmp_sch_runtime)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})

LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START),
           kmp_ord_static)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START),
           kmp_ord_dynamic_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START),
           kmp_ord_guided_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START),
    kmp_ord_runtime)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })

#define LOOP_DOACROSS_START(func, schedule) \
  bool func(unsigned ncounts, long *counts, long chunk_sz, long *p_lb, \
            long *p_ub) { \
    int status; \
    long stride, lb, ub, str; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, ncounts %u, lb 0x%lx, ub 0x%lx, str " \
                                "0x%lx, chunk_sz " \
                                "0x%lx\n", \
                  gtid, ncounts, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                        (schedule) != kmp_sch_static); \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

#define LOOP_DOACROSS_RUNTIME_START(func, schedule) \
  int func(unsigned ncounts, long *counts, long *p_lb, long *p_ub) { \
    int status; \
    long stride, lb, ub, str; \
    long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
    KA_TRACE( \
        20, \
        (KMP_STR(func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \
         gtid, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

LOOP_DOACROSS_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START),
    kmp_sch_static)
LOOP_DOACROSS_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START),
    kmp_sch_dynamic_chunked)
LOOP_DOACROSS_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START),
    kmp_sch_guided_chunked)
LOOP_DOACROSS_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START),
    kmp_sch_runtime)

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif

  KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void) {
  KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}

// Unsigned long long loop worksharing constructs
//
// These are new with gcc 4.4

#define LOOP_START_ULL(func, schedule) \
  int func(int up, unsigned long long lb, unsigned long long ub, \
           unsigned long long str, unsigned long long chunk_sz, \
           unsigned long long *p_lb, unsigned long long *p_ub) { \
    int status; \
    long long str2 = up ? ((long long)str) : -((long long)str); \
    long long stride; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
 \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, up, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
                            (schedule) != kmp_sch_static); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str2); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define LOOP_RUNTIME_START_ULL(func, schedule) \
  int func(int up, unsigned long long lb, unsigned long long ub, \
           unsigned long long str, unsigned long long *p_lb, \
           unsigned long long *p_ub) { \
    int status; \
    long long str2 = up ? ((long long)str) : -((long long)str); \
    unsigned long long stride; \
    unsigned long long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
 \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, up, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
                            TRUE); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT((long long)stride == str2); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define LOOP_NEXT_ULL(func, fini_code) \
  int func(unsigned long long *p_lb, unsigned long long *p_ub) { \
    int status; \
    long long stride; \
    int gtid = __kmp_get_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE(20, (KMP_STR(func) ": T#%d\n", gtid)); \
 \
    fini_code status = \
        KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                              (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
    if (status) { \
      *p_ub += (stride > 0) ? 1 : -1; \
    } \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
                   "returning %d\n", \
         gtid, *p_lb, *p_ub, stride, status)); \
    return status; \
  }

LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START),
               kmp_sch_static)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START),
               kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START),
               kmp_sch_guided_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START),
    kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT), {})
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START),
    kmp_sch_guided_chunked)
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT), {})
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})

LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START),
               kmp_ord_static)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START),
    kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START),
               kmp_ord_guided_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START),
    kmp_ord_runtime)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })

#define LOOP_DOACROSS_START_ULL(func, schedule) \
  int func(unsigned ncounts, unsigned long long *counts, \
           unsigned long long chunk_sz, unsigned long long *p_lb, \
           unsigned long long *p_ub) { \
    int status; \
    long long stride, str, lb, ub; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
 \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                            (schedule) != kmp_sch_static); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

#define LOOP_DOACROSS_RUNTIME_START_ULL(func, schedule) \
  int func(unsigned ncounts, unsigned long long *counts, \
           unsigned long long *p_lb, unsigned long long *p_ub) { \
    int status; \
    unsigned long long stride, str, lb, ub; \
    unsigned long long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                            TRUE); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

LOOP_DOACROSS_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START),
    kmp_sch_static)
LOOP_DOACROSS_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START),
    kmp_sch_dynamic_chunked)
LOOP_DOACROSS_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START),
    kmp_sch_guided_chunked)
LOOP_DOACROSS_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START),
    kmp_sch_runtime)

// Combined parallel / loop worksharing constructs
//
// There are no ull versions (yet).

#define PARALLEL_LOOP_START(func, schedule, ompt_pre, ompt_post) \
  void func(void (*task)(void *), void *data, unsigned num_threads, long lb, \
            long ub, long str, long chunk_sz) { \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
         gtid, lb, ub, str, chunk_sz)); \
 \
    ompt_pre(); \
 \
    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
      if (num_threads != 0) { \
        __kmp_push_num_threads(&loc, gtid, num_threads); \
      } \
      __kmp_GOMP_fork_call(&loc, gtid, task, \
                           (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \
                           9, task, data, num_threads, &loc, (schedule), lb, \
                           (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid)); \
    } else { \
      __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid)); \
    } \
 \
    KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                      (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                      (schedule) != kmp_sch_static); \
 \
    ompt_post(); \
 \
    KA_TRACE(20, (KMP_STR(func) " exit: T#%d\n", gtid)); \
  }

#if OMPT_SUPPORT && OMPT_OPTIONAL

#define OMPT_LOOP_PRE() \
  ompt_frame_t *parent_frame; \
  if (ompt_enabled.enabled) { \
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL); \
    parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); \
    OMPT_STORE_RETURN_ADDRESS(gtid); \
  }

#define OMPT_LOOP_POST() \
  if (ompt_enabled.enabled) { \
    parent_frame->enter_frame = ompt_data_none; \
  }

#else

#define OMPT_LOOP_PRE()

#define OMPT_LOOP_POST()

#endif

PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START),
    kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START),
    kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START),
    kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)

// Tasking constructs

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data,
                                             void (*copy_func)(void *, void *),
                                             long arg_size, long arg_align,
                                             bool if_cond, unsigned gomp_flags,
                                             void **depend) {
  MKLOC(loc, "GOMP_task");
  int gtid = __kmp_entry_gtid();
  kmp_int32 flags = 0;
  kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;

  KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));

  // The low-order bit is the "untied" flag
  if (!(gomp_flags & 1)) {
    input_flags->tiedness = 1;
  }
  // The second low-order bit is the "final" flag
  if (gomp_flags & 2) {
    input_flags->final = 1;
  }
  input_flags->native = 1;
  // __kmp_task_alloc() sets up all other flags

  if (!if_cond) {
    arg_size = 0;
  }

  kmp_task_t *task = __kmp_task_alloc(
      &loc, gtid, input_flags, sizeof(kmp_task_t),
      arg_size ? arg_size + arg_align - 1 : 0, (kmp_routine_entry_t)func);

  if (arg_size > 0) {
    if (arg_align > 0) {
      task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) /
                               arg_align * arg_align);
    }
    // else error??
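    // A worked example of the rounding above (illustrative only): with
    // arg_align == 16, a shareds pointer of 0x1008 becomes
    // (0x1008 + 15) / 16 * 16 == 0x1010, i.e. the next 16-byte boundary.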

    if (copy_func) {
      (*copy_func)(task->shareds, data);
    } else {
      KMP_MEMCPY(task->shareds, data, arg_size);
    }
  }

#if OMPT_SUPPORT
  kmp_taskdata_t *current_task;
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    current_task = __kmp_threads[gtid]->th.th_current_task;
    current_task->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  if (if_cond) {
    if (gomp_flags & 8) {
      KMP_ASSERT(depend);
      const size_t ndeps = (kmp_intptr_t)depend[0];
      const size_t nout = (kmp_intptr_t)depend[1];
      kmp_depend_info_t dep_list[ndeps];

      for (size_t i = 0U; i < ndeps; i++) {
        dep_list[i].base_addr = (kmp_intptr_t)depend[2U + i];
        dep_list[i].len = 0U;
        dep_list[i].flags.in = 1;
        dep_list[i].flags.out = (i < nout);
      }
      __kmpc_omp_task_with_deps(&loc, gtid, task, ndeps, dep_list, 0, NULL);
    } else {
      __kmpc_omp_task(&loc, gtid, task);
    }
  } else {
#if OMPT_SUPPORT
    ompt_thread_info_t oldInfo;
    kmp_info_t *thread;
    kmp_taskdata_t *taskdata;
    if (ompt_enabled.enabled) {
      // Store the thread's state and restore it after the task
      thread = __kmp_threads[gtid];
      taskdata = KMP_TASK_TO_TASKDATA(task);
      oldInfo = thread->th.ompt_thread_info;
      thread->th.ompt_thread_info.wait_id = 0;
      thread->th.ompt_thread_info.state = ompt_state_work_parallel;
      taskdata->ompt_task_info.frame.exit_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
      OMPT_STORE_RETURN_ADDRESS(gtid);
    }
#endif

    __kmpc_omp_task_begin_if0(&loc, gtid, task);
    func(data);
    __kmpc_omp_task_complete_if0(&loc, gtid, task);

#if OMPT_SUPPORT
    if (ompt_enabled.enabled) {
      thread->th.ompt_thread_info = oldInfo;
      taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
    }
#endif
  }
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif

  KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKWAIT)(void) {
  MKLOC(loc, "GOMP_taskwait");
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  if (ompt_enabled.enabled)
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));

  __kmpc_omp_taskwait(&loc, gtid);

  KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
}

// Sections worksharing constructs
//
// For the sections construct, we initialize a dynamically scheduled loop
// worksharing construct with lb 1 and stride 1, and use the iteration #'s
// that it returns as section ids.
//
// There are no special entry points for ordered sections, so we always use
// the dynamically scheduled workshare, even if the sections aren't ordered.
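//
// As an illustrative sketch (not code emitted by this file), gcc lowers a
// three-way sections construct roughly as
//
//   for (unsigned id = GOMP_sections_start(3); id != 0;
//        id = GOMP_sections_next()) {
//     switch (id) {
//     case 1: /* first section */ break;
//     case 2: /* second section */ break;
//     case 3: /* third section */ break;
//     }
//   }
//   GOMP_sections_end();
//
// so each nonzero value handed back by the dynamic dispatch below selects one
// section, and 0 means no sections remain.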

unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count) {
  int status;
  kmp_int lb, ub, stride;
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_sections_start");
  KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));

  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

  status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
  if (status) {
    KMP_DEBUG_ASSERT(stride == 1);
    KMP_DEBUG_ASSERT(lb > 0);
    KMP_ASSERT(lb == ub);
  } else {
    lb = 0;
  }

  KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
                (unsigned)lb));
  return (unsigned)lb;
}

unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void) {
  int status;
  kmp_int lb, ub, stride;
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_sections_next");
  KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));

#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
  if (status) {
    KMP_DEBUG_ASSERT(stride == 1);
    KMP_DEBUG_ASSERT(lb > 0);
    KMP_ASSERT(lb == ub);
  } else {
    lb = 0;
  }

  KA_TRACE(
      20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid, (unsigned)lb));
  return (unsigned)lb;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(
    void (*task)(void *), void *data, unsigned num_threads, unsigned count) {
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  ompt_frame_t *parent_frame;

  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
    parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif

  MKLOC(loc, "GOMP_parallel_sections_start");
  KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));

  if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
    if (num_threads != 0) {
      __kmp_push_num_threads(&loc, gtid, num_threads);
    }
    __kmp_GOMP_fork_call(&loc, gtid, task,
                         (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
                         task, data, num_threads, &loc, kmp_nm_dynamic_chunked,
                         (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1);
  } else {
    __kmp_GOMP_serialized_parallel(&loc, gtid, task);
  }

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    parent_frame->enter_frame = ompt_data_none;
  }
#endif

  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

  KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))

#if OMPT_SUPPORT
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif

  KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void) {
  KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid())) 1367 } 1368 1369 // libgomp has an empty function for GOMP_taskyield as of 2013-10-10 1370 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKYIELD)(void) { 1371 KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid())) 1372 return; 1373 } 1374 1375 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *), 1376 void *data, 1377 unsigned num_threads, 1378 unsigned int flags) { 1379 int gtid = __kmp_entry_gtid(); 1380 MKLOC(loc, "GOMP_parallel"); 1381 KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid)); 1382 1383 #if OMPT_SUPPORT 1384 ompt_task_info_t *parent_task_info, *task_info; 1385 if (ompt_enabled.enabled) { 1386 parent_task_info = __ompt_get_task_info_object(0); 1387 parent_task_info->frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 1388 OMPT_STORE_RETURN_ADDRESS(gtid); 1389 } 1390 #endif 1391 if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { 1392 if (num_threads != 0) { 1393 __kmp_push_num_threads(&loc, gtid, num_threads); 1394 } 1395 if (flags != 0) { 1396 __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); 1397 } 1398 __kmp_GOMP_fork_call(&loc, gtid, task, 1399 (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, 1400 data); 1401 } else { 1402 __kmp_GOMP_serialized_parallel(&loc, gtid, task); 1403 } 1404 #if OMPT_SUPPORT 1405 if (ompt_enabled.enabled) { 1406 task_info = __ompt_get_task_info_object(0); 1407 task_info->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 1408 } 1409 #endif 1410 task(data); 1411 #if OMPT_SUPPORT 1412 if (ompt_enabled.enabled) { 1413 OMPT_STORE_RETURN_ADDRESS(gtid); 1414 } 1415 #endif 1416 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); 1417 #if OMPT_SUPPORT 1418 if (ompt_enabled.enabled) { 1419 task_info->frame.exit_frame = ompt_data_none; 1420 parent_task_info->frame.enter_frame = ompt_data_none; 1421 } 1422 #endif 1423 } 1424 1425 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task)(void *), 1426 void *data, 1427 unsigned num_threads, 1428 unsigned count, 1429 unsigned flags) { 1430 int gtid = __kmp_entry_gtid(); 1431 MKLOC(loc, "GOMP_parallel_sections"); 1432 KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid)); 1433 1434 #if OMPT_SUPPORT 1435 OMPT_STORE_RETURN_ADDRESS(gtid); 1436 #endif 1437 1438 if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { 1439 if (num_threads != 0) { 1440 __kmp_push_num_threads(&loc, gtid, num_threads); 1441 } 1442 if (flags != 0) { 1443 __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); 1444 } 1445 __kmp_GOMP_fork_call(&loc, gtid, task, 1446 (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, 1447 task, data, num_threads, &loc, kmp_nm_dynamic_chunked, 1448 (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1); 1449 } else { 1450 __kmp_GOMP_serialized_parallel(&loc, gtid, task); 1451 } 1452 1453 #if OMPT_SUPPORT 1454 OMPT_STORE_RETURN_ADDRESS(gtid); 1455 #endif 1456 1457 KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE); 1458 1459 task(data); 1460 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); 1461 KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid)); 1462 } 1463 1464 #define PARALLEL_LOOP(func, schedule, ompt_pre, ompt_post) \ 1465 void func(void (*task)(void *), void *data, unsigned num_threads, long lb, \ 1466 long ub, long str, long chunk_sz, unsigned flags) { \ 1467 int gtid = __kmp_entry_gtid(); \ 1468 MKLOC(loc, KMP_STR(func)); \ 1469 KA_TRACE( \ 1470 20, \ 1471 (KMP_STR( \ 1472 func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \ 1473 gtid, lb, ub, str, chunk_sz)); \ 
 \
    ompt_pre(); \
    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
      if (num_threads != 0) { \
        __kmp_push_num_threads(&loc, gtid, num_threads); \
      } \
      if (flags != 0) { \
        __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); \
      } \
      __kmp_GOMP_fork_call(&loc, gtid, task, \
                           (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \
                           9, task, data, num_threads, &loc, (schedule), lb, \
                           (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
    } else { \
      __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
    } \
 \
    IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
    KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                      (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                      (schedule) != kmp_sch_static); \
    task(data); \
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); \
    ompt_post(); \
 \
    KA_TRACE(20, (KMP_STR(func) " exit: T#%d\n", gtid)); \
  }

PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC),
              kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC),
              kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED),
    kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC),
    kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED),
              kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME),
              kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_taskgroup_start");
  KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid));

#if OMPT_SUPPORT
  if (ompt_enabled.enabled)
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  __kmpc_taskgroup(&loc, gtid);

  return;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_END)(void) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_taskgroup_end");
  KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid));

#if OMPT_SUPPORT
  if (ompt_enabled.enabled)
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  __kmpc_end_taskgroup(&loc, gtid);

  return;
}

static kmp_int32 __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) {
  kmp_int32 cncl_kind = 0;
  switch (gomp_kind) {
  case 1:
    cncl_kind = cancel_parallel;
    break;
  case 2:
    cncl_kind = cancel_loop;
    break;
  case 4:
    cncl_kind = cancel_sections;
    break;
  case 8:
    cncl_kind = cancel_taskgroup;
    break;
  }
  return cncl_kind;
}

// Return true if cancellation should take place, false otherwise
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_cancellation_point");
  KA_TRACE(20, ("GOMP_cancellation_point: T#%d which:%d\n", gtid, which));
  kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);
  return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
}

// Return true if cancellation should take place, false otherwise
bool
KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_cancel");
  KA_TRACE(20, ("GOMP_cancel: T#%d which:%d do_cancel:%d\n", gtid, which,
                (int)do_cancel));
  kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);

  if (do_cancel == FALSE) {
    return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
  } else {
    return __kmpc_cancel(&loc, gtid, cncl_kind);
  }
}

// Return true if cancellation should take place, false otherwise
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid));
  return __kmp_barrier_gomp_cancel(gtid);
}

// Return true if cancellation should take place, false otherwise
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid));
  return __kmp_barrier_gomp_cancel(gtid);
}

// Return true if cancellation should take place, false otherwise
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid));
  return __kmp_barrier_gomp_cancel(gtid);
}

// All target functions are empty as of 2014-05-29
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn)(void *),
                                               const void *openmp_target,
                                               size_t mapnum, void **hostaddrs,
                                               size_t *sizes,
                                               unsigned char *kinds) {
  return;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_DATA)(
    int device, const void *openmp_target, size_t mapnum, void **hostaddrs,
    size_t *sizes, unsigned char *kinds) {
  return;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_END_DATA)(void) { return; }

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_UPDATE)(
    int device, const void *openmp_target, size_t mapnum, void **hostaddrs,
    size_t *sizes, unsigned char *kinds) {
  return;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams,
                                              unsigned int thread_limit) {
  return;
}

// Task duplication function which copies src to dest (both are
// preallocated task structures)
static void __kmp_gomp_task_dup(kmp_task_t *dest, kmp_task_t *src,
                                kmp_int32 last_private) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(src);
  if (taskdata->td_copy_func) {
    (taskdata->td_copy_func)(dest->shareds, src->shareds);
  }
}

#ifdef __cplusplus
} // extern "C"
#endif

template <typename T>
void __GOMP_taskloop(void (*func)(void *), void *data,
                     void (*copy_func)(void *, void *), long arg_size,
                     long arg_align, unsigned gomp_flags,
                     unsigned long num_tasks, int priority, T start, T end,
                     T step) {
  typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
  MKLOC(loc, "GOMP_taskloop");
  int sched;
  T *loop_bounds;
  int gtid = __kmp_entry_gtid();
  kmp_int32 flags = 0;
  int if_val = gomp_flags & (1u << 10);
  int nogroup = gomp_flags & (1u << 11);
  int up = gomp_flags & (1u << 8);
  p_task_dup_t task_dup = NULL;
  kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
#ifdef KMP_DEBUG
  {
    char *buff;
    buff = __kmp_str_format(
        "GOMP_taskloop: T#%%d: func:%%p data:%%p copy_func:%%p "
" 1675 "arg_size:%%ld arg_align:%%ld gomp_flags:0x%%x num_tasks:%%lu " 1676 "priority:%%d start:%%%s end:%%%s step:%%%s\n", 1677 traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec); 1678 KA_TRACE(20, (buff, gtid, func, data, copy_func, arg_size, arg_align, 1679 gomp_flags, num_tasks, priority, start, end, step)); 1680 __kmp_str_free(&buff); 1681 } 1682 #endif 1683 KMP_ASSERT((size_t)arg_size >= 2 * sizeof(T)); 1684 KMP_ASSERT(arg_align > 0); 1685 // The low-order bit is the "untied" flag 1686 if (!(gomp_flags & 1)) { 1687 input_flags->tiedness = 1; 1688 } 1689 // The second low-order bit is the "final" flag 1690 if (gomp_flags & 2) { 1691 input_flags->final = 1; 1692 } 1693 // Negative step flag 1694 if (!up) { 1695 // If step is flagged as negative, but isn't properly sign extended 1696 // Then manually sign extend it. Could be a short, int, char embedded 1697 // in a long. So cannot assume any cast. 1698 if (step > 0) { 1699 for (int i = sizeof(T) * CHAR_BIT - 1; i >= 0L; --i) { 1700 // break at the first 1 bit 1701 if (step & ((T)1 << i)) 1702 break; 1703 step |= ((T)1 << i); 1704 } 1705 } 1706 } 1707 input_flags->native = 1; 1708 // Figure out if none/grainsize/num_tasks clause specified 1709 if (num_tasks > 0) { 1710 if (gomp_flags & (1u << 9)) 1711 sched = 1; // grainsize specified 1712 else 1713 sched = 2; // num_tasks specified 1714 // neither grainsize nor num_tasks specified 1715 } else { 1716 sched = 0; 1717 } 1718 1719 // __kmp_task_alloc() sets up all other flags 1720 kmp_task_t *task = 1721 __kmp_task_alloc(&loc, gtid, input_flags, sizeof(kmp_task_t), 1722 arg_size + arg_align - 1, (kmp_routine_entry_t)func); 1723 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task); 1724 taskdata->td_copy_func = copy_func; 1725 taskdata->td_size_loop_bounds = sizeof(T); 1726 1727 // re-align shareds if needed and setup firstprivate copy constructors 1728 // through the task_dup mechanism 1729 task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) / 1730 arg_align * arg_align); 1731 if (copy_func) { 1732 task_dup = __kmp_gomp_task_dup; 1733 } 1734 KMP_MEMCPY(task->shareds, data, arg_size); 1735 1736 loop_bounds = (T *)task->shareds; 1737 loop_bounds[0] = start; 1738 loop_bounds[1] = end + (up ? 
  __kmpc_taskloop(&loc, gtid, task, if_val, (kmp_uint64 *)&(loop_bounds[0]),
                  (kmp_uint64 *)&(loop_bounds[1]), (kmp_int64)step, nogroup,
                  sched, (kmp_uint64)num_tasks, (void *)task_dup);
}

// 4 byte version of GOMP_doacross_post
// This version needs to create a temporary array which converts 4 byte
// integers into 8 byte integers
template <typename T, bool need_conversion = (sizeof(long) == 4)>
void __kmp_GOMP_doacross_post(T *count);

template <> void __kmp_GOMP_doacross_post<long, true>(long *count) {
  int gtid = __kmp_entry_gtid();
  kmp_info_t *th = __kmp_threads[gtid];
  MKLOC(loc, "GOMP_doacross_post");
  kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];
  kmp_int64 *vec =
      (kmp_int64 *)__kmp_thread_malloc(th, sizeof(kmp_int64) * num_dims);
  for (kmp_int64 i = 0; i < num_dims; ++i) {
    vec[i] = (kmp_int64)count[i];
  }
  __kmpc_doacross_post(&loc, gtid, vec);
  __kmp_thread_free(th, vec);
}

// 8 byte version of GOMP_doacross_post
// This version can just pass in the count array directly instead of creating
// a temporary array
template <> void __kmp_GOMP_doacross_post<long, false>(long *count) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_doacross_post");
  __kmpc_doacross_post(&loc, gtid, RCAST(kmp_int64 *, count));
}

template <typename T> void __kmp_GOMP_doacross_wait(T first, va_list args) {
  int gtid = __kmp_entry_gtid();
  kmp_info_t *th = __kmp_threads[gtid];
  MKLOC(loc, "GOMP_doacross_wait");
  kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];
  kmp_int64 *vec =
      (kmp_int64 *)__kmp_thread_malloc(th, sizeof(kmp_int64) * num_dims);
  vec[0] = (kmp_int64)first;
  for (kmp_int64 i = 1; i < num_dims; ++i) {
    T item = va_arg(args, T);
    vec[i] = (kmp_int64)item;
  }
  __kmpc_doacross_wait(&loc, gtid, vec);
  __kmp_thread_free(th, vec);
  return;
}

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKLOOP)(
    void (*func)(void *), void *data, void (*copy_func)(void *, void *),
    long arg_size, long arg_align, unsigned gomp_flags, unsigned long num_tasks,
    int priority, long start, long end, long step) {
  __GOMP_taskloop<long>(func, data, copy_func, arg_size, arg_align, gomp_flags,
                        num_tasks, priority, start, end, step);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKLOOP_ULL)(
    void (*func)(void *), void *data, void (*copy_func)(void *, void *),
    long arg_size, long arg_align, unsigned gomp_flags, unsigned long num_tasks,
    int priority, unsigned long long start, unsigned long long end,
    unsigned long long step) {
  __GOMP_taskloop<unsigned long long>(func, data, copy_func, arg_size,
                                      arg_align, gomp_flags, num_tasks,
                                      priority, start, end, step);
}
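// Usage sketch (illustrative only; the exact calls emitted depend on the
// compiler doing the codegen): for a doacross loop with ordered(2), a
// depend(sink: i-1, j) clause is expected to lower to something like
//   GOMP_doacross_wait(i - 1, j);   // indices passed as varargs
// and the matching depend(source) to
//   long counts[2] = {i, j};
//   GOMP_doacross_post(counts);     // indices passed as an array
// The wrappers below forward these indices (converting to kmp_int64 where the
// native width differs) to the __kmpc_doacross_* entry points used above.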
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_POST)(long *count) {
  __kmp_GOMP_doacross_post(count);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_WAIT)(long first, ...) {
  va_list args;
  va_start(args, first);
  __kmp_GOMP_doacross_wait<long>(first, args);
  va_end(args);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_ULL_POST)(
    unsigned long long *count) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_doacross_ull_post");
  __kmpc_doacross_post(&loc, gtid, RCAST(kmp_int64 *, count));
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_ULL_WAIT)(
    unsigned long long first, ...) {
  va_list args;
  va_start(args, first);
  __kmp_GOMP_doacross_wait<unsigned long long>(first, args);
  va_end(args);
}

/* The following sections of code create aliases for the GOMP_* functions, then
   create versioned symbols using the assembler directive .symver. This is only
   pertinent for an ELF .so library. The KMP_VERSION_SYMBOL macro is defined in
   kmp_os.h. */
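// Illustrative expansion (approximate; see kmp_os.h for the real macro): an
// entry such as
//   KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
// defines an alias of the GOMP_barrier implementation and emits a .symver
// directive binding that alias to GOMP_barrier@GOMP_1.0, so the shared
// library exports the versioned names that libgomp-linked binaries expect.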
#ifdef KMP_USE_VERSION_SYMBOLS
// GOMP_1.0 versioned symbols
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");

// GOMP_2.0 versioned symbols
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");

// GOMP_3.0 versioned symbols
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");

// GOMP_4.0 versioned symbols
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
"GOMP_4.0"); 1940 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0"); 1941 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0"); 1942 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0"); 1943 1944 // GOMP_4.5 versioned symbols 1945 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKLOOP, 45, "GOMP_4.5"); 1946 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKLOOP_ULL, 45, "GOMP_4.5"); 1947 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_POST, 45, "GOMP_4.5"); 1948 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_WAIT, 45, "GOMP_4.5"); 1949 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START, 45, 1950 "GOMP_4.5"); 1951 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START, 45, 1952 "GOMP_4.5"); 1953 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START, 45, 1954 "GOMP_4.5"); 1955 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START, 45, 1956 "GOMP_4.5"); 1957 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_ULL_POST, 45, "GOMP_4.5"); 1958 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_ULL_WAIT, 45, "GOMP_4.5"); 1959 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START, 45, 1960 "GOMP_4.5"); 1961 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START, 45, 1962 "GOMP_4.5"); 1963 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START, 45, 1964 "GOMP_4.5"); 1965 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START, 45, 1966 "GOMP_4.5"); 1967 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START, 45, 1968 "GOMP_4.5"); 1969 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT, 45, 1970 "GOMP_4.5"); 1971 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START, 45, 1972 "GOMP_4.5"); 1973 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT, 45, 1974 "GOMP_4.5"); 1975 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START, 45, 1976 "GOMP_4.5"); 1977 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT, 45, 1978 "GOMP_4.5"); 1979 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START, 45, 1980 "GOMP_4.5"); 1981 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT, 45, 1982 "GOMP_4.5"); 1983 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC, 45, 1984 "GOMP_4.5"); 1985 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED, 45, 1986 "GOMP_4.5"); 1987 1988 #endif // KMP_USE_VERSION_SYMBOLS 1989 1990 #ifdef __cplusplus 1991 } // extern "C" 1992 #endif // __cplusplus 1993