1 /* 2 * kmp_gsupport.cpp 3 */ 4 5 //===----------------------------------------------------------------------===// 6 // 7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 8 // See https://llvm.org/LICENSE.txt for license information. 9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "kmp.h" 14 #include "kmp_atomic.h" 15 16 #if OMPT_SUPPORT 17 #include "ompt-specific.h" 18 #endif 19 20 #ifdef __cplusplus 21 extern "C" { 22 #endif // __cplusplus 23 24 #define MKLOC(loc, routine) \ 25 static ident_t loc = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"}; 26 27 #include "kmp_ftn_os.h" 28 29 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER)(void) { 30 int gtid = __kmp_entry_gtid(); 31 MKLOC(loc, "GOMP_barrier"); 32 KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid)); 33 #if OMPT_SUPPORT && OMPT_OPTIONAL 34 ompt_frame_t *ompt_frame; 35 if (ompt_enabled.enabled) { 36 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL); 37 ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 38 OMPT_STORE_RETURN_ADDRESS(gtid); 39 } 40 #endif 41 __kmpc_barrier(&loc, gtid); 42 #if OMPT_SUPPORT && OMPT_OPTIONAL 43 if (ompt_enabled.enabled) { 44 ompt_frame->enter_frame = ompt_data_none; 45 } 46 #endif 47 } 48 49 // Mutual exclusion 50 51 // The symbol that icc/ifort generates for unnamed for unnamed critical sections 52 // - .gomp_critical_user_ - is defined using .comm in any objects reference it. 53 // We can't reference it directly here in C code, as the symbol contains a ".". 54 // 55 // The RTL contains an assembly language definition of .gomp_critical_user_ 56 // with another symbol __kmp_unnamed_critical_addr initialized with it's 57 // address. 58 extern kmp_critical_name *__kmp_unnamed_critical_addr; 59 60 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_START)(void) { 61 int gtid = __kmp_entry_gtid(); 62 MKLOC(loc, "GOMP_critical_start"); 63 KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid)); 64 #if OMPT_SUPPORT && OMPT_OPTIONAL 65 OMPT_STORE_RETURN_ADDRESS(gtid); 66 #endif 67 __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr); 68 } 69 70 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_END)(void) { 71 int gtid = __kmp_get_gtid(); 72 MKLOC(loc, "GOMP_critical_end"); 73 KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid)); 74 #if OMPT_SUPPORT && OMPT_OPTIONAL 75 OMPT_STORE_RETURN_ADDRESS(gtid); 76 #endif 77 __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr); 78 } 79 80 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr) { 81 int gtid = __kmp_entry_gtid(); 82 MKLOC(loc, "GOMP_critical_name_start"); 83 KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid)); 84 __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr); 85 } 86 87 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr) { 88 int gtid = __kmp_get_gtid(); 89 MKLOC(loc, "GOMP_critical_name_end"); 90 KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid)); 91 __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr); 92 } 93 94 // The Gnu codegen tries to use locked operations to perform atomic updates 95 // inline. If it can't, then it calls GOMP_atomic_start() before performing 96 // the update and GOMP_atomic_end() afterward, regardless of the data type. 97 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_START)(void) { 98 int gtid = __kmp_entry_gtid(); 99 KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid)); 100 101 #if OMPT_SUPPORT 102 __ompt_thread_assign_wait_id(0); 103 #endif 104 105 __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid); 106 } 107 108 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_END)(void) { 109 int gtid = __kmp_get_gtid(); 110 KA_TRACE(20, ("GOMP_atomic_end: T#%d\n", gtid)); 111 __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid); 112 } 113 114 int KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_START)(void) { 115 int gtid = __kmp_entry_gtid(); 116 MKLOC(loc, "GOMP_single_start"); 117 KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid)); 118 119 if (!TCR_4(__kmp_init_parallel)) 120 __kmp_parallel_initialize(); 121 __kmp_resume_if_soft_paused(); 122 123 // 3rd parameter == FALSE prevents kmp_enter_single from pushing a 124 // workshare when USE_CHECKS is defined. We need to avoid the push, 125 // as there is no corresponding GOMP_single_end() call. 126 kmp_int32 rc = __kmp_enter_single(gtid, &loc, FALSE); 127 128 #if OMPT_SUPPORT && OMPT_OPTIONAL 129 kmp_info_t *this_thr = __kmp_threads[gtid]; 130 kmp_team_t *team = this_thr->th.th_team; 131 int tid = __kmp_tid_from_gtid(gtid); 132 133 if (ompt_enabled.enabled) { 134 if (rc) { 135 if (ompt_enabled.ompt_callback_work) { 136 ompt_callbacks.ompt_callback(ompt_callback_work)( 137 ompt_work_single_executor, ompt_scope_begin, 138 &(team->t.ompt_team_info.parallel_data), 139 &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data), 140 1, OMPT_GET_RETURN_ADDRESS(0)); 141 } 142 } else { 143 if (ompt_enabled.ompt_callback_work) { 144 ompt_callbacks.ompt_callback(ompt_callback_work)( 145 ompt_work_single_other, ompt_scope_begin, 146 &(team->t.ompt_team_info.parallel_data), 147 &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data), 148 1, OMPT_GET_RETURN_ADDRESS(0)); 149 ompt_callbacks.ompt_callback(ompt_callback_work)( 150 ompt_work_single_other, ompt_scope_end, 151 &(team->t.ompt_team_info.parallel_data), 152 &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data), 153 1, OMPT_GET_RETURN_ADDRESS(0)); 154 } 155 } 156 } 157 #endif 158 159 return rc; 160 } 161 162 void *KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void) { 163 void *retval; 164 int gtid = __kmp_entry_gtid(); 165 MKLOC(loc, "GOMP_single_copy_start"); 166 KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid)); 167 168 if (!TCR_4(__kmp_init_parallel)) 169 __kmp_parallel_initialize(); 170 __kmp_resume_if_soft_paused(); 171 172 // If this is the first thread to enter, return NULL. The generated code will 173 // then call GOMP_single_copy_end() for this thread only, with the 174 // copyprivate data pointer as an argument. 175 if (__kmp_enter_single(gtid, &loc, FALSE)) 176 return NULL; 177 178 // Wait for the first thread to set the copyprivate data pointer, 179 // and for all other threads to reach this point. 180 181 #if OMPT_SUPPORT && OMPT_OPTIONAL 182 ompt_frame_t *ompt_frame; 183 if (ompt_enabled.enabled) { 184 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL); 185 ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 186 OMPT_STORE_RETURN_ADDRESS(gtid); 187 } 188 #endif 189 __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); 190 191 // Retrieve the value of the copyprivate data point, and wait for all 192 // threads to do likewise, then return. 193 retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data; 194 #if OMPT_SUPPORT && OMPT_OPTIONAL 195 if (ompt_enabled.enabled) { 196 OMPT_STORE_RETURN_ADDRESS(gtid); 197 } 198 #endif 199 __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); 200 #if OMPT_SUPPORT && OMPT_OPTIONAL 201 if (ompt_enabled.enabled) { 202 ompt_frame->enter_frame = ompt_data_none; 203 } 204 #endif 205 return retval; 206 } 207 208 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data) { 209 int gtid = __kmp_get_gtid(); 210 KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid)); 211 212 // Set the copyprivate data pointer fo the team, then hit the barrier so that 213 // the other threads will continue on and read it. Hit another barrier before 214 // continuing, so that the know that the copyprivate data pointer has been 215 // propagated to all threads before trying to reuse the t_copypriv_data field. 216 __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data; 217 #if OMPT_SUPPORT && OMPT_OPTIONAL 218 ompt_frame_t *ompt_frame; 219 if (ompt_enabled.enabled) { 220 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL); 221 ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 222 OMPT_STORE_RETURN_ADDRESS(gtid); 223 } 224 #endif 225 __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); 226 #if OMPT_SUPPORT && OMPT_OPTIONAL 227 if (ompt_enabled.enabled) { 228 OMPT_STORE_RETURN_ADDRESS(gtid); 229 } 230 #endif 231 __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); 232 #if OMPT_SUPPORT && OMPT_OPTIONAL 233 if (ompt_enabled.enabled) { 234 ompt_frame->enter_frame = ompt_data_none; 235 } 236 #endif 237 } 238 239 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_START)(void) { 240 int gtid = __kmp_entry_gtid(); 241 MKLOC(loc, "GOMP_ordered_start"); 242 KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid)); 243 #if OMPT_SUPPORT && OMPT_OPTIONAL 244 OMPT_STORE_RETURN_ADDRESS(gtid); 245 #endif 246 __kmpc_ordered(&loc, gtid); 247 } 248 249 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_END)(void) { 250 int gtid = __kmp_get_gtid(); 251 MKLOC(loc, "GOMP_ordered_end"); 252 KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid)); 253 #if OMPT_SUPPORT && OMPT_OPTIONAL 254 OMPT_STORE_RETURN_ADDRESS(gtid); 255 #endif 256 __kmpc_end_ordered(&loc, gtid); 257 } 258 259 // Dispatch macro defs 260 // 261 // They come in two flavors: 64-bit unsigned, and either 32-bit signed 262 // (IA-32 architecture) or 64-bit signed (Intel(R) 64). 263 264 #if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS 265 #define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4 266 #define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4 267 #define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4 268 #else 269 #define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_8 270 #define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_8 271 #define KMP_DISPATCH_NEXT __kmpc_dispatch_next_8 272 #endif /* KMP_ARCH_X86 */ 273 274 #define KMP_DISPATCH_INIT_ULL __kmp_aux_dispatch_init_8u 275 #define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u 276 #define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u 277 278 // The parallel construct 279 280 #ifndef KMP_DEBUG 281 static 282 #endif /* KMP_DEBUG */ 283 void 284 __kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *), 285 void *data) { 286 #if OMPT_SUPPORT 287 kmp_info_t *thr; 288 ompt_frame_t *ompt_frame; 289 ompt_state_t enclosing_state; 290 291 if (ompt_enabled.enabled) { 292 // get pointer to thread data structure 293 thr = __kmp_threads[*gtid]; 294 295 // save enclosing task state; set current state for task 296 enclosing_state = thr->th.ompt_thread_info.state; 297 thr->th.ompt_thread_info.state = ompt_state_work_parallel; 298 299 // set task frame 300 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL); 301 ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 302 } 303 #endif 304 305 task(data); 306 307 #if OMPT_SUPPORT 308 if (ompt_enabled.enabled) { 309 // clear task frame 310 ompt_frame->exit_frame = ompt_data_none; 311 312 // restore enclosing state 313 thr->th.ompt_thread_info.state = enclosing_state; 314 } 315 #endif 316 } 317 318 #ifndef KMP_DEBUG 319 static 320 #endif /* KMP_DEBUG */ 321 void 322 __kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr, 323 void (*task)(void *), void *data, 324 unsigned num_threads, ident_t *loc, 325 enum sched_type schedule, long start, 326 long end, long incr, 327 long chunk_size) { 328 // Initialize the loop worksharing construct. 329 330 KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size, 331 schedule != kmp_sch_static); 332 333 #if OMPT_SUPPORT 334 kmp_info_t *thr; 335 ompt_frame_t *ompt_frame; 336 ompt_state_t enclosing_state; 337 338 if (ompt_enabled.enabled) { 339 thr = __kmp_threads[*gtid]; 340 // save enclosing task state; set current state for task 341 enclosing_state = thr->th.ompt_thread_info.state; 342 thr->th.ompt_thread_info.state = ompt_state_work_parallel; 343 344 // set task frame 345 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL); 346 ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 347 } 348 #endif 349 350 // Now invoke the microtask. 351 task(data); 352 353 #if OMPT_SUPPORT 354 if (ompt_enabled.enabled) { 355 // clear task frame 356 ompt_frame->exit_frame = ompt_data_none; 357 358 // reset enclosing state 359 thr->th.ompt_thread_info.state = enclosing_state; 360 } 361 #endif 362 } 363 364 #ifndef KMP_DEBUG 365 static 366 #endif /* KMP_DEBUG */ 367 void 368 __kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *), 369 microtask_t wrapper, int argc, ...) { 370 int rc; 371 kmp_info_t *thr = __kmp_threads[gtid]; 372 kmp_team_t *team = thr->th.th_team; 373 int tid = __kmp_tid_from_gtid(gtid); 374 375 va_list ap; 376 va_start(ap, argc); 377 378 rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc, wrapper, 379 __kmp_invoke_task_func, kmp_va_addr_of(ap)); 380 381 va_end(ap); 382 383 if (rc) { 384 __kmp_run_before_invoked_task(gtid, tid, thr, team); 385 } 386 387 #if OMPT_SUPPORT 388 int ompt_team_size; 389 if (ompt_enabled.enabled) { 390 ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL); 391 ompt_task_info_t *task_info = __ompt_get_task_info_object(0); 392 393 // implicit task callback 394 if (ompt_enabled.ompt_callback_implicit_task) { 395 ompt_team_size = __kmp_team_from_gtid(gtid)->t.t_nproc; 396 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)( 397 ompt_scope_begin, &(team_info->parallel_data), 398 &(task_info->task_data), ompt_team_size, __kmp_tid_from_gtid(gtid), ompt_task_implicit); // TODO: Can this be ompt_task_initial? 399 task_info->thread_num = __kmp_tid_from_gtid(gtid); 400 } 401 thr->th.ompt_thread_info.state = ompt_state_work_parallel; 402 } 403 #endif 404 } 405 406 static void __kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid, 407 void (*task)(void *)) { 408 #if OMPT_SUPPORT 409 OMPT_STORE_RETURN_ADDRESS(gtid); 410 #endif 411 __kmp_serialized_parallel(loc, gtid); 412 } 413 414 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *), 415 void *data, 416 unsigned num_threads) { 417 int gtid = __kmp_entry_gtid(); 418 419 #if OMPT_SUPPORT 420 ompt_frame_t *parent_frame, *frame; 421 422 if (ompt_enabled.enabled) { 423 __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL); 424 parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 425 OMPT_STORE_RETURN_ADDRESS(gtid); 426 } 427 #endif 428 429 MKLOC(loc, "GOMP_parallel_start"); 430 KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid)); 431 432 if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { 433 if (num_threads != 0) { 434 __kmp_push_num_threads(&loc, gtid, num_threads); 435 } 436 __kmp_GOMP_fork_call(&loc, gtid, task, 437 (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, 438 data); 439 } else { 440 __kmp_GOMP_serialized_parallel(&loc, gtid, task); 441 } 442 443 #if OMPT_SUPPORT 444 if (ompt_enabled.enabled) { 445 __ompt_get_task_info_internal(0, NULL, NULL, &frame, NULL, NULL); 446 frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 447 } 448 #endif 449 } 450 451 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(void) { 452 int gtid = __kmp_get_gtid(); 453 kmp_info_t *thr; 454 455 thr = __kmp_threads[gtid]; 456 457 MKLOC(loc, "GOMP_parallel_end"); 458 KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid)); 459 460 if (!thr->th.th_team->t.t_serialized) { 461 __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr, 462 thr->th.th_team); 463 464 #if OMPT_SUPPORT 465 if (ompt_enabled.enabled) { 466 // Implicit task is finished here, in the barrier we might schedule 467 // deferred tasks, 468 // these don't see the implicit task on the stack 469 OMPT_CUR_TASK_INFO(thr)->frame.exit_frame = ompt_data_none; 470 } 471 #endif 472 473 __kmp_join_call(&loc, gtid 474 #if OMPT_SUPPORT 475 , 476 fork_context_gnu 477 #endif 478 ); 479 } else { 480 __kmpc_end_serialized_parallel(&loc, gtid); 481 } 482 } 483 484 // Loop worksharing constructs 485 486 // The Gnu codegen passes in an exclusive upper bound for the overall range, 487 // but the libguide dispatch code expects an inclusive upper bound, hence the 488 // "end - incr" 5th argument to KMP_DISPATCH_INIT (and the " ub - str" 11th 489 // argument to __kmp_GOMP_fork_call). 490 // 491 // Conversely, KMP_DISPATCH_NEXT returns and inclusive upper bound in *p_ub, 492 // but the Gnu codegen expects an exclusive upper bound, so the adjustment 493 // "*p_ub += stride" compensates for the discrepancy. 494 // 495 // Correction: the gnu codegen always adjusts the upper bound by +-1, not the 496 // stride value. We adjust the dispatch parameters accordingly (by +-1), but 497 // we still adjust p_ub by the actual stride value. 498 // 499 // The "runtime" versions do not take a chunk_sz parameter. 500 // 501 // The profile lib cannot support construct checking of unordered loops that 502 // are predetermined by the compiler to be statically scheduled, as the gcc 503 // codegen will not always emit calls to GOMP_loop_static_next() to get the 504 // next iteration. Instead, it emits inline code to call omp_get_thread_num() 505 // num and calculate the iteration space using the result. It doesn't do this 506 // with ordered static loop, so they can be checked. 507 508 #if OMPT_SUPPORT 509 #define IF_OMPT_SUPPORT(code) code 510 #else 511 #define IF_OMPT_SUPPORT(code) 512 #endif 513 514 #define LOOP_START(func, schedule) \ 515 int func(long lb, long ub, long str, long chunk_sz, long *p_lb, \ 516 long *p_ub) { \ 517 int status; \ 518 long stride; \ 519 int gtid = __kmp_entry_gtid(); \ 520 MKLOC(loc, KMP_STR(func)); \ 521 KA_TRACE( \ 522 20, \ 523 (KMP_STR( \ 524 func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \ 525 gtid, lb, ub, str, chunk_sz)); \ 526 \ 527 if ((str > 0) ? (lb < ub) : (lb > ub)) { \ 528 IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ 529 KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ 530 (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ 531 (schedule) != kmp_sch_static); \ 532 IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ 533 status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ 534 (kmp_int *)p_ub, (kmp_int *)&stride); \ 535 if (status) { \ 536 KMP_DEBUG_ASSERT(stride == str); \ 537 *p_ub += (str > 0) ? 1 : -1; \ 538 } \ 539 } else { \ 540 status = 0; \ 541 } \ 542 \ 543 KA_TRACE( \ 544 20, \ 545 (KMP_STR( \ 546 func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \ 547 gtid, *p_lb, *p_ub, status)); \ 548 return status; \ 549 } 550 551 #define LOOP_RUNTIME_START(func, schedule) \ 552 int func(long lb, long ub, long str, long *p_lb, long *p_ub) { \ 553 int status; \ 554 long stride; \ 555 long chunk_sz = 0; \ 556 int gtid = __kmp_entry_gtid(); \ 557 MKLOC(loc, KMP_STR(func)); \ 558 KA_TRACE( \ 559 20, \ 560 (KMP_STR(func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \ 561 gtid, lb, ub, str, chunk_sz)); \ 562 \ 563 if ((str > 0) ? (lb < ub) : (lb > ub)) { \ 564 IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ 565 KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ 566 (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \ 567 IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ 568 status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ 569 (kmp_int *)p_ub, (kmp_int *)&stride); \ 570 if (status) { \ 571 KMP_DEBUG_ASSERT(stride == str); \ 572 *p_ub += (str > 0) ? 1 : -1; \ 573 } \ 574 } else { \ 575 status = 0; \ 576 } \ 577 \ 578 KA_TRACE( \ 579 20, \ 580 (KMP_STR( \ 581 func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \ 582 gtid, *p_lb, *p_ub, status)); \ 583 return status; \ 584 } 585 586 #define KMP_DOACROSS_FINI(status, gtid) \ 587 if (!status && __kmp_threads[gtid]->th.th_dispatch->th_doacross_flags) { \ 588 __kmpc_doacross_fini(NULL, gtid); \ 589 } 590 591 #define LOOP_NEXT(func, fini_code) \ 592 int func(long *p_lb, long *p_ub) { \ 593 int status; \ 594 long stride; \ 595 int gtid = __kmp_get_gtid(); \ 596 MKLOC(loc, KMP_STR(func)); \ 597 KA_TRACE(20, (KMP_STR(func) ": T#%d\n", gtid)); \ 598 \ 599 IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ 600 fini_code status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ 601 (kmp_int *)p_ub, (kmp_int *)&stride); \ 602 if (status) { \ 603 *p_ub += (stride > 0) ? 1 : -1; \ 604 } \ 605 KMP_DOACROSS_FINI(status, gtid) \ 606 \ 607 KA_TRACE( \ 608 20, \ 609 (KMP_STR(func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \ 610 "returning %d\n", \ 611 gtid, *p_lb, *p_ub, stride, status)); \ 612 return status; \ 613 } 614 615 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static) 616 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {}) 617 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START), 618 kmp_sch_dynamic_chunked) 619 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START), 620 kmp_sch_dynamic_chunked) 621 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {}) 622 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT), {}) 623 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_START), 624 kmp_sch_guided_chunked) 625 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START), 626 kmp_sch_guided_chunked) 627 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {}) 628 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT), {}) 629 LOOP_RUNTIME_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_START), 630 kmp_sch_runtime) 631 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {}) 632 LOOP_RUNTIME_START( 633 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START), 634 kmp_sch_runtime) 635 LOOP_RUNTIME_START( 636 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_START), 637 kmp_sch_runtime) 638 LOOP_NEXT( 639 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT), {}) 640 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT), {}) 641 642 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START), 643 kmp_ord_static) 644 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT), 645 { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); }) 646 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START), 647 kmp_ord_dynamic_chunked) 648 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT), 649 { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); }) 650 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START), 651 kmp_ord_guided_chunked) 652 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT), 653 { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); }) 654 LOOP_RUNTIME_START( 655 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START), 656 kmp_ord_runtime) 657 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT), 658 { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); }) 659 660 #define LOOP_DOACROSS_START(func, schedule) \ 661 bool func(unsigned ncounts, long *counts, long chunk_sz, long *p_lb, \ 662 long *p_ub) { \ 663 int status; \ 664 long stride, lb, ub, str; \ 665 int gtid = __kmp_entry_gtid(); \ 666 struct kmp_dim *dims = \ 667 (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \ 668 MKLOC(loc, KMP_STR(func)); \ 669 for (unsigned i = 0; i < ncounts; ++i) { \ 670 dims[i].lo = 0; \ 671 dims[i].up = counts[i] - 1; \ 672 dims[i].st = 1; \ 673 } \ 674 __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \ 675 lb = 0; \ 676 ub = counts[0]; \ 677 str = 1; \ 678 KA_TRACE(20, (KMP_STR(func) ": T#%d, ncounts %u, lb 0x%lx, ub 0x%lx, str " \ 679 "0x%lx, chunk_sz " \ 680 "0x%lx\n", \ 681 gtid, ncounts, lb, ub, str, chunk_sz)); \ 682 \ 683 if ((str > 0) ? (lb < ub) : (lb > ub)) { \ 684 KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ 685 (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ 686 (schedule) != kmp_sch_static); \ 687 status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ 688 (kmp_int *)p_ub, (kmp_int *)&stride); \ 689 if (status) { \ 690 KMP_DEBUG_ASSERT(stride == str); \ 691 *p_ub += (str > 0) ? 1 : -1; \ 692 } \ 693 } else { \ 694 status = 0; \ 695 } \ 696 KMP_DOACROSS_FINI(status, gtid); \ 697 \ 698 KA_TRACE( \ 699 20, \ 700 (KMP_STR( \ 701 func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \ 702 gtid, *p_lb, *p_ub, status)); \ 703 __kmp_free(dims); \ 704 return status; \ 705 } 706 707 #define LOOP_DOACROSS_RUNTIME_START(func, schedule) \ 708 int func(unsigned ncounts, long *counts, long *p_lb, long *p_ub) { \ 709 int status; \ 710 long stride, lb, ub, str; \ 711 long chunk_sz = 0; \ 712 int gtid = __kmp_entry_gtid(); \ 713 struct kmp_dim *dims = \ 714 (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \ 715 MKLOC(loc, KMP_STR(func)); \ 716 for (unsigned i = 0; i < ncounts; ++i) { \ 717 dims[i].lo = 0; \ 718 dims[i].up = counts[i] - 1; \ 719 dims[i].st = 1; \ 720 } \ 721 __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \ 722 lb = 0; \ 723 ub = counts[0]; \ 724 str = 1; \ 725 KA_TRACE( \ 726 20, \ 727 (KMP_STR(func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \ 728 gtid, lb, ub, str, chunk_sz)); \ 729 \ 730 if ((str > 0) ? (lb < ub) : (lb > ub)) { \ 731 KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ 732 (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \ 733 status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ 734 (kmp_int *)p_ub, (kmp_int *)&stride); \ 735 if (status) { \ 736 KMP_DEBUG_ASSERT(stride == str); \ 737 *p_ub += (str > 0) ? 1 : -1; \ 738 } \ 739 } else { \ 740 status = 0; \ 741 } \ 742 KMP_DOACROSS_FINI(status, gtid); \ 743 \ 744 KA_TRACE( \ 745 20, \ 746 (KMP_STR( \ 747 func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \ 748 gtid, *p_lb, *p_ub, status)); \ 749 __kmp_free(dims); \ 750 return status; \ 751 } 752 753 LOOP_DOACROSS_START( 754 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START), 755 kmp_sch_static) 756 LOOP_DOACROSS_START( 757 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START), 758 kmp_sch_dynamic_chunked) 759 LOOP_DOACROSS_START( 760 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START), 761 kmp_sch_guided_chunked) 762 LOOP_DOACROSS_RUNTIME_START( 763 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START), 764 kmp_sch_runtime) 765 766 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END)(void) { 767 int gtid = __kmp_get_gtid(); 768 KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid)) 769 770 #if OMPT_SUPPORT && OMPT_OPTIONAL 771 ompt_frame_t *ompt_frame; 772 if (ompt_enabled.enabled) { 773 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL); 774 ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 775 OMPT_STORE_RETURN_ADDRESS(gtid); 776 } 777 #endif 778 __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); 779 #if OMPT_SUPPORT && OMPT_OPTIONAL 780 if (ompt_enabled.enabled) { 781 ompt_frame->enter_frame = ompt_data_none; 782 } 783 #endif 784 785 KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid)) 786 } 787 788 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void) { 789 KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid())) 790 } 791 792 // Unsigned long long loop worksharing constructs 793 // 794 // These are new with gcc 4.4 795 796 #define LOOP_START_ULL(func, schedule) \ 797 int func(int up, unsigned long long lb, unsigned long long ub, \ 798 unsigned long long str, unsigned long long chunk_sz, \ 799 unsigned long long *p_lb, unsigned long long *p_ub) { \ 800 int status; \ 801 long long str2 = up ? ((long long)str) : -((long long)str); \ 802 long long stride; \ 803 int gtid = __kmp_entry_gtid(); \ 804 MKLOC(loc, KMP_STR(func)); \ 805 \ 806 KA_TRACE(20, (KMP_STR(func) ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str " \ 807 "0x%llx, chunk_sz 0x%llx\n", \ 808 gtid, up, lb, ub, str, chunk_sz)); \ 809 \ 810 if ((str > 0) ? (lb < ub) : (lb > ub)) { \ 811 KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \ 812 (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \ 813 (schedule) != kmp_sch_static); \ 814 status = \ 815 KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \ 816 (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ 817 if (status) { \ 818 KMP_DEBUG_ASSERT(stride == str2); \ 819 *p_ub += (str > 0) ? 1 : -1; \ 820 } \ 821 } else { \ 822 status = 0; \ 823 } \ 824 \ 825 KA_TRACE( \ 826 20, \ 827 (KMP_STR( \ 828 func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \ 829 gtid, *p_lb, *p_ub, status)); \ 830 return status; \ 831 } 832 833 #define LOOP_RUNTIME_START_ULL(func, schedule) \ 834 int func(int up, unsigned long long lb, unsigned long long ub, \ 835 unsigned long long str, unsigned long long *p_lb, \ 836 unsigned long long *p_ub) { \ 837 int status; \ 838 long long str2 = up ? ((long long)str) : -((long long)str); \ 839 unsigned long long stride; \ 840 unsigned long long chunk_sz = 0; \ 841 int gtid = __kmp_entry_gtid(); \ 842 MKLOC(loc, KMP_STR(func)); \ 843 \ 844 KA_TRACE(20, (KMP_STR(func) ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str " \ 845 "0x%llx, chunk_sz 0x%llx\n", \ 846 gtid, up, lb, ub, str, chunk_sz)); \ 847 \ 848 if ((str > 0) ? (lb < ub) : (lb > ub)) { \ 849 KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \ 850 (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \ 851 TRUE); \ 852 status = \ 853 KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \ 854 (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ 855 if (status) { \ 856 KMP_DEBUG_ASSERT((long long)stride == str2); \ 857 *p_ub += (str > 0) ? 1 : -1; \ 858 } \ 859 } else { \ 860 status = 0; \ 861 } \ 862 \ 863 KA_TRACE( \ 864 20, \ 865 (KMP_STR( \ 866 func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \ 867 gtid, *p_lb, *p_ub, status)); \ 868 return status; \ 869 } 870 871 #define LOOP_NEXT_ULL(func, fini_code) \ 872 int func(unsigned long long *p_lb, unsigned long long *p_ub) { \ 873 int status; \ 874 long long stride; \ 875 int gtid = __kmp_get_gtid(); \ 876 MKLOC(loc, KMP_STR(func)); \ 877 KA_TRACE(20, (KMP_STR(func) ": T#%d\n", gtid)); \ 878 \ 879 fini_code status = \ 880 KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \ 881 (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ 882 if (status) { \ 883 *p_ub += (stride > 0) ? 1 : -1; \ 884 } \ 885 \ 886 KA_TRACE( \ 887 20, \ 888 (KMP_STR( \ 889 func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \ 890 "returning %d\n", \ 891 gtid, *p_lb, *p_ub, stride, status)); \ 892 return status; \ 893 } 894 895 LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START), 896 kmp_sch_static) 897 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {}) 898 LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START), 899 kmp_sch_dynamic_chunked) 900 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {}) 901 LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START), 902 kmp_sch_guided_chunked) 903 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {}) 904 LOOP_START_ULL( 905 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START), 906 kmp_sch_dynamic_chunked) 907 LOOP_NEXT_ULL( 908 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT), {}) 909 LOOP_START_ULL( 910 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START), 911 kmp_sch_guided_chunked) 912 LOOP_NEXT_ULL( 913 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT), {}) 914 LOOP_RUNTIME_START_ULL( 915 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime) 916 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {}) 917 LOOP_RUNTIME_START_ULL( 918 KMP_EXPAND_NAME( 919 KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START), 920 kmp_sch_runtime) 921 LOOP_RUNTIME_START_ULL( 922 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START), 923 kmp_sch_runtime) 924 LOOP_NEXT_ULL( 925 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT), 926 {}) 927 LOOP_NEXT_ULL( 928 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT), {}) 929 930 LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START), 931 kmp_ord_static) 932 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT), 933 { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); }) 934 LOOP_START_ULL( 935 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START), 936 kmp_ord_dynamic_chunked) 937 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT), 938 { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); }) 939 LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START), 940 kmp_ord_guided_chunked) 941 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT), 942 { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); }) 943 LOOP_RUNTIME_START_ULL( 944 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START), 945 kmp_ord_runtime) 946 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT), 947 { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); }) 948 949 #define LOOP_DOACROSS_START_ULL(func, schedule) \ 950 int func(unsigned ncounts, unsigned long long *counts, \ 951 unsigned long long chunk_sz, unsigned long long *p_lb, \ 952 unsigned long long *p_ub) { \ 953 int status; \ 954 long long stride, str, lb, ub; \ 955 int gtid = __kmp_entry_gtid(); \ 956 struct kmp_dim *dims = \ 957 (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \ 958 MKLOC(loc, KMP_STR(func)); \ 959 for (unsigned i = 0; i < ncounts; ++i) { \ 960 dims[i].lo = 0; \ 961 dims[i].up = counts[i] - 1; \ 962 dims[i].st = 1; \ 963 } \ 964 __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \ 965 lb = 0; \ 966 ub = counts[0]; \ 967 str = 1; \ 968 \ 969 KA_TRACE(20, (KMP_STR(func) ": T#%d, lb 0x%llx, ub 0x%llx, str " \ 970 "0x%llx, chunk_sz 0x%llx\n", \ 971 gtid, lb, ub, str, chunk_sz)); \ 972 \ 973 if ((str > 0) ? (lb < ub) : (lb > ub)) { \ 974 KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \ 975 (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ 976 (schedule) != kmp_sch_static); \ 977 status = \ 978 KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \ 979 (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ 980 if (status) { \ 981 KMP_DEBUG_ASSERT(stride == str); \ 982 *p_ub += (str > 0) ? 1 : -1; \ 983 } \ 984 } else { \ 985 status = 0; \ 986 } \ 987 KMP_DOACROSS_FINI(status, gtid); \ 988 \ 989 KA_TRACE( \ 990 20, \ 991 (KMP_STR( \ 992 func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \ 993 gtid, *p_lb, *p_ub, status)); \ 994 __kmp_free(dims); \ 995 return status; \ 996 } 997 998 #define LOOP_DOACROSS_RUNTIME_START_ULL(func, schedule) \ 999 int func(unsigned ncounts, unsigned long long *counts, \ 1000 unsigned long long *p_lb, unsigned long long *p_ub) { \ 1001 int status; \ 1002 unsigned long long stride, str, lb, ub; \ 1003 unsigned long long chunk_sz = 0; \ 1004 int gtid = __kmp_entry_gtid(); \ 1005 struct kmp_dim *dims = \ 1006 (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \ 1007 MKLOC(loc, KMP_STR(func)); \ 1008 for (unsigned i = 0; i < ncounts; ++i) { \ 1009 dims[i].lo = 0; \ 1010 dims[i].up = counts[i] - 1; \ 1011 dims[i].st = 1; \ 1012 } \ 1013 __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \ 1014 lb = 0; \ 1015 ub = counts[0]; \ 1016 str = 1; \ 1017 KA_TRACE(20, (KMP_STR(func) ": T#%d, lb 0x%llx, ub 0x%llx, str " \ 1018 "0x%llx, chunk_sz 0x%llx\n", \ 1019 gtid, lb, ub, str, chunk_sz)); \ 1020 \ 1021 if ((str > 0) ? (lb < ub) : (lb > ub)) { \ 1022 KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \ 1023 (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ 1024 TRUE); \ 1025 status = \ 1026 KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \ 1027 (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ 1028 if (status) { \ 1029 KMP_DEBUG_ASSERT(stride == str); \ 1030 *p_ub += (str > 0) ? 1 : -1; \ 1031 } \ 1032 } else { \ 1033 status = 0; \ 1034 } \ 1035 KMP_DOACROSS_FINI(status, gtid); \ 1036 \ 1037 KA_TRACE( \ 1038 20, \ 1039 (KMP_STR( \ 1040 func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \ 1041 gtid, *p_lb, *p_ub, status)); \ 1042 __kmp_free(dims); \ 1043 return status; \ 1044 } 1045 1046 LOOP_DOACROSS_START_ULL( 1047 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START), 1048 kmp_sch_static) 1049 LOOP_DOACROSS_START_ULL( 1050 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START), 1051 kmp_sch_dynamic_chunked) 1052 LOOP_DOACROSS_START_ULL( 1053 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START), 1054 kmp_sch_guided_chunked) 1055 LOOP_DOACROSS_RUNTIME_START_ULL( 1056 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START), 1057 kmp_sch_runtime) 1058 1059 // Combined parallel / loop worksharing constructs 1060 // 1061 // There are no ull versions (yet). 1062 1063 #define PARALLEL_LOOP_START(func, schedule, ompt_pre, ompt_post) \ 1064 void func(void (*task)(void *), void *data, unsigned num_threads, long lb, \ 1065 long ub, long str, long chunk_sz) { \ 1066 int gtid = __kmp_entry_gtid(); \ 1067 MKLOC(loc, KMP_STR(func)); \ 1068 KA_TRACE( \ 1069 20, \ 1070 (KMP_STR( \ 1071 func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \ 1072 gtid, lb, ub, str, chunk_sz)); \ 1073 \ 1074 ompt_pre(); \ 1075 \ 1076 if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \ 1077 if (num_threads != 0) { \ 1078 __kmp_push_num_threads(&loc, gtid, num_threads); \ 1079 } \ 1080 __kmp_GOMP_fork_call(&loc, gtid, task, \ 1081 (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \ 1082 9, task, data, num_threads, &loc, (schedule), lb, \ 1083 (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \ 1084 IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid)); \ 1085 } else { \ 1086 __kmp_GOMP_serialized_parallel(&loc, gtid, task); \ 1087 IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid)); \ 1088 } \ 1089 \ 1090 KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ 1091 (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ 1092 (schedule) != kmp_sch_static); \ 1093 \ 1094 ompt_post(); \ 1095 \ 1096 KA_TRACE(20, (KMP_STR(func) " exit: T#%d\n", gtid)); \ 1097 } 1098 1099 #if OMPT_SUPPORT && OMPT_OPTIONAL 1100 1101 #define OMPT_LOOP_PRE() \ 1102 ompt_frame_t *parent_frame; \ 1103 if (ompt_enabled.enabled) { \ 1104 __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL); \ 1105 parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); \ 1106 OMPT_STORE_RETURN_ADDRESS(gtid); \ 1107 } 1108 1109 #define OMPT_LOOP_POST() \ 1110 if (ompt_enabled.enabled) { \ 1111 parent_frame->enter_frame = ompt_data_none; \ 1112 } 1113 1114 #else 1115 1116 #define OMPT_LOOP_PRE() 1117 1118 #define OMPT_LOOP_POST() 1119 1120 #endif 1121 1122 PARALLEL_LOOP_START( 1123 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START), 1124 kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST) 1125 PARALLEL_LOOP_START( 1126 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START), 1127 kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) 1128 PARALLEL_LOOP_START( 1129 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START), 1130 kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) 1131 PARALLEL_LOOP_START( 1132 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START), 1133 kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST) 1134 1135 // Tasking constructs 1136 1137 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data, 1138 void (*copy_func)(void *, void *), 1139 long arg_size, long arg_align, 1140 bool if_cond, unsigned gomp_flags, 1141 void **depend) { 1142 MKLOC(loc, "GOMP_task"); 1143 int gtid = __kmp_entry_gtid(); 1144 kmp_int32 flags = 0; 1145 kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags; 1146 1147 KA_TRACE(20, ("GOMP_task: T#%d\n", gtid)); 1148 1149 // The low-order bit is the "untied" flag 1150 if (!(gomp_flags & 1)) { 1151 input_flags->tiedness = 1; 1152 } 1153 // The second low-order bit is the "final" flag 1154 if (gomp_flags & 2) { 1155 input_flags->final = 1; 1156 } 1157 input_flags->native = 1; 1158 // __kmp_task_alloc() sets up all other flags 1159 1160 if (!if_cond) { 1161 arg_size = 0; 1162 } 1163 1164 kmp_task_t *task = __kmp_task_alloc( 1165 &loc, gtid, input_flags, sizeof(kmp_task_t), 1166 arg_size ? arg_size + arg_align - 1 : 0, (kmp_routine_entry_t)func); 1167 1168 if (arg_size > 0) { 1169 if (arg_align > 0) { 1170 task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) / 1171 arg_align * arg_align); 1172 } 1173 // else error?? 1174 1175 if (copy_func) { 1176 (*copy_func)(task->shareds, data); 1177 } else { 1178 KMP_MEMCPY(task->shareds, data, arg_size); 1179 } 1180 } 1181 1182 #if OMPT_SUPPORT 1183 kmp_taskdata_t *current_task; 1184 if (ompt_enabled.enabled) { 1185 OMPT_STORE_RETURN_ADDRESS(gtid); 1186 current_task = __kmp_threads[gtid]->th.th_current_task; 1187 current_task->ompt_task_info.frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 1188 } 1189 #endif 1190 1191 if (if_cond) { 1192 if (gomp_flags & 8) { 1193 KMP_ASSERT(depend); 1194 const size_t ndeps = (kmp_intptr_t)depend[0]; 1195 const size_t nout = (kmp_intptr_t)depend[1]; 1196 kmp_depend_info_t dep_list[ndeps]; 1197 1198 for (size_t i = 0U; i < ndeps; i++) { 1199 dep_list[i].base_addr = (kmp_intptr_t)depend[2U + i]; 1200 dep_list[i].len = 0U; 1201 dep_list[i].flags.in = 1; 1202 dep_list[i].flags.out = (i < nout); 1203 } 1204 __kmpc_omp_task_with_deps(&loc, gtid, task, ndeps, dep_list, 0, NULL); 1205 } else { 1206 __kmpc_omp_task(&loc, gtid, task); 1207 } 1208 } else { 1209 #if OMPT_SUPPORT 1210 ompt_thread_info_t oldInfo; 1211 kmp_info_t *thread; 1212 kmp_taskdata_t *taskdata; 1213 if (ompt_enabled.enabled) { 1214 // Store the threads states and restore them after the task 1215 thread = __kmp_threads[gtid]; 1216 taskdata = KMP_TASK_TO_TASKDATA(task); 1217 oldInfo = thread->th.ompt_thread_info; 1218 thread->th.ompt_thread_info.wait_id = 0; 1219 thread->th.ompt_thread_info.state = ompt_state_work_parallel; 1220 taskdata->ompt_task_info.frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 1221 OMPT_STORE_RETURN_ADDRESS(gtid); 1222 } 1223 #endif 1224 1225 __kmpc_omp_task_begin_if0(&loc, gtid, task); 1226 func(data); 1227 __kmpc_omp_task_complete_if0(&loc, gtid, task); 1228 1229 #if OMPT_SUPPORT 1230 if (ompt_enabled.enabled) { 1231 thread->th.ompt_thread_info = oldInfo; 1232 taskdata->ompt_task_info.frame.exit_frame = ompt_data_none; 1233 } 1234 #endif 1235 } 1236 #if OMPT_SUPPORT 1237 if (ompt_enabled.enabled) { 1238 current_task->ompt_task_info.frame.enter_frame = ompt_data_none; 1239 } 1240 #endif 1241 1242 KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid)); 1243 } 1244 1245 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKWAIT)(void) { 1246 MKLOC(loc, "GOMP_taskwait"); 1247 int gtid = __kmp_entry_gtid(); 1248 1249 #if OMPT_SUPPORT 1250 if (ompt_enabled.enabled) 1251 OMPT_STORE_RETURN_ADDRESS(gtid); 1252 #endif 1253 1254 KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid)); 1255 1256 __kmpc_omp_taskwait(&loc, gtid); 1257 1258 KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid)); 1259 } 1260 1261 // Sections worksharing constructs 1262 // 1263 // For the sections construct, we initialize a dynamically scheduled loop 1264 // worksharing construct with lb 1 and stride 1, and use the iteration #'s 1265 // that its returns as sections ids. 1266 // 1267 // There are no special entry points for ordered sections, so we always use 1268 // the dynamically scheduled workshare, even if the sections aren't ordered. 1269 1270 unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count) { 1271 int status; 1272 kmp_int lb, ub, stride; 1273 int gtid = __kmp_entry_gtid(); 1274 MKLOC(loc, "GOMP_sections_start"); 1275 KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid)); 1276 1277 KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE); 1278 1279 status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride); 1280 if (status) { 1281 KMP_DEBUG_ASSERT(stride == 1); 1282 KMP_DEBUG_ASSERT(lb > 0); 1283 KMP_ASSERT(lb == ub); 1284 } else { 1285 lb = 0; 1286 } 1287 1288 KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid, 1289 (unsigned)lb)); 1290 return (unsigned)lb; 1291 } 1292 1293 unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void) { 1294 int status; 1295 kmp_int lb, ub, stride; 1296 int gtid = __kmp_get_gtid(); 1297 MKLOC(loc, "GOMP_sections_next"); 1298 KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid)); 1299 1300 #if OMPT_SUPPORT 1301 OMPT_STORE_RETURN_ADDRESS(gtid); 1302 #endif 1303 1304 status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride); 1305 if (status) { 1306 KMP_DEBUG_ASSERT(stride == 1); 1307 KMP_DEBUG_ASSERT(lb > 0); 1308 KMP_ASSERT(lb == ub); 1309 } else { 1310 lb = 0; 1311 } 1312 1313 KA_TRACE( 1314 20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid, (unsigned)lb)); 1315 return (unsigned)lb; 1316 } 1317 1318 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)( 1319 void (*task)(void *), void *data, unsigned num_threads, unsigned count) { 1320 int gtid = __kmp_entry_gtid(); 1321 1322 #if OMPT_SUPPORT 1323 ompt_frame_t *parent_frame; 1324 1325 if (ompt_enabled.enabled) { 1326 __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL); 1327 parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 1328 OMPT_STORE_RETURN_ADDRESS(gtid); 1329 } 1330 #endif 1331 1332 MKLOC(loc, "GOMP_parallel_sections_start"); 1333 KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid)); 1334 1335 if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { 1336 if (num_threads != 0) { 1337 __kmp_push_num_threads(&loc, gtid, num_threads); 1338 } 1339 __kmp_GOMP_fork_call(&loc, gtid, task, 1340 (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, 1341 task, data, num_threads, &loc, kmp_nm_dynamic_chunked, 1342 (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1); 1343 } else { 1344 __kmp_GOMP_serialized_parallel(&loc, gtid, task); 1345 } 1346 1347 #if OMPT_SUPPORT 1348 if (ompt_enabled.enabled) { 1349 parent_frame->enter_frame = ompt_data_none; 1350 } 1351 #endif 1352 1353 KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE); 1354 1355 KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid)); 1356 } 1357 1358 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END)(void) { 1359 int gtid = __kmp_get_gtid(); 1360 KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid)) 1361 1362 #if OMPT_SUPPORT 1363 ompt_frame_t *ompt_frame; 1364 if (ompt_enabled.enabled) { 1365 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL); 1366 ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 1367 OMPT_STORE_RETURN_ADDRESS(gtid); 1368 } 1369 #endif 1370 __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); 1371 #if OMPT_SUPPORT 1372 if (ompt_enabled.enabled) { 1373 ompt_frame->enter_frame = ompt_data_none; 1374 } 1375 #endif 1376 1377 KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid)) 1378 } 1379 1380 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void) { 1381 KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid())) 1382 } 1383 1384 // libgomp has an empty function for GOMP_taskyield as of 2013-10-10 1385 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKYIELD)(void) { 1386 KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid())) 1387 return; 1388 } 1389 1390 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *), 1391 void *data, 1392 unsigned num_threads, 1393 unsigned int flags) { 1394 int gtid = __kmp_entry_gtid(); 1395 MKLOC(loc, "GOMP_parallel"); 1396 KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid)); 1397 1398 #if OMPT_SUPPORT 1399 ompt_task_info_t *parent_task_info, *task_info; 1400 if (ompt_enabled.enabled) { 1401 parent_task_info = __ompt_get_task_info_object(0); 1402 parent_task_info->frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 1403 OMPT_STORE_RETURN_ADDRESS(gtid); 1404 } 1405 #endif 1406 if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { 1407 if (num_threads != 0) { 1408 __kmp_push_num_threads(&loc, gtid, num_threads); 1409 } 1410 if (flags != 0) { 1411 __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); 1412 } 1413 __kmp_GOMP_fork_call(&loc, gtid, task, 1414 (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, 1415 data); 1416 } else { 1417 __kmp_GOMP_serialized_parallel(&loc, gtid, task); 1418 } 1419 #if OMPT_SUPPORT 1420 if (ompt_enabled.enabled) { 1421 task_info = __ompt_get_task_info_object(0); 1422 task_info->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 1423 } 1424 #endif 1425 task(data); 1426 #if OMPT_SUPPORT 1427 if (ompt_enabled.enabled) { 1428 OMPT_STORE_RETURN_ADDRESS(gtid); 1429 } 1430 #endif 1431 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); 1432 #if OMPT_SUPPORT 1433 if (ompt_enabled.enabled) { 1434 task_info->frame.exit_frame = ompt_data_none; 1435 parent_task_info->frame.enter_frame = ompt_data_none; 1436 } 1437 #endif 1438 } 1439 1440 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task)(void *), 1441 void *data, 1442 unsigned num_threads, 1443 unsigned count, 1444 unsigned flags) { 1445 int gtid = __kmp_entry_gtid(); 1446 MKLOC(loc, "GOMP_parallel_sections"); 1447 KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid)); 1448 1449 #if OMPT_SUPPORT 1450 OMPT_STORE_RETURN_ADDRESS(gtid); 1451 #endif 1452 1453 if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { 1454 if (num_threads != 0) { 1455 __kmp_push_num_threads(&loc, gtid, num_threads); 1456 } 1457 if (flags != 0) { 1458 __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); 1459 } 1460 __kmp_GOMP_fork_call(&loc, gtid, task, 1461 (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, 1462 task, data, num_threads, &loc, kmp_nm_dynamic_chunked, 1463 (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1); 1464 } else { 1465 __kmp_GOMP_serialized_parallel(&loc, gtid, task); 1466 } 1467 1468 #if OMPT_SUPPORT 1469 OMPT_STORE_RETURN_ADDRESS(gtid); 1470 #endif 1471 1472 KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE); 1473 1474 task(data); 1475 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); 1476 KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid)); 1477 } 1478 1479 #define PARALLEL_LOOP(func, schedule, ompt_pre, ompt_post) \ 1480 void func(void (*task)(void *), void *data, unsigned num_threads, long lb, \ 1481 long ub, long str, long chunk_sz, unsigned flags) { \ 1482 int gtid = __kmp_entry_gtid(); \ 1483 MKLOC(loc, KMP_STR(func)); \ 1484 KA_TRACE( \ 1485 20, \ 1486 (KMP_STR( \ 1487 func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \ 1488 gtid, lb, ub, str, chunk_sz)); \ 1489 \ 1490 ompt_pre(); \ 1491 if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \ 1492 if (num_threads != 0) { \ 1493 __kmp_push_num_threads(&loc, gtid, num_threads); \ 1494 } \ 1495 if (flags != 0) { \ 1496 __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); \ 1497 } \ 1498 __kmp_GOMP_fork_call(&loc, gtid, task, \ 1499 (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \ 1500 9, task, data, num_threads, &loc, (schedule), lb, \ 1501 (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \ 1502 } else { \ 1503 __kmp_GOMP_serialized_parallel(&loc, gtid, task); \ 1504 } \ 1505 \ 1506 IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ 1507 KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ 1508 (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ 1509 (schedule) != kmp_sch_static); \ 1510 task(data); \ 1511 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); \ 1512 ompt_post(); \ 1513 \ 1514 KA_TRACE(20, (KMP_STR(func) " exit: T#%d\n", gtid)); \ 1515 } 1516 1517 PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC), 1518 kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST) 1519 PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC), 1520 kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) 1521 PARALLEL_LOOP( 1522 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED), 1523 kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) 1524 PARALLEL_LOOP( 1525 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC), 1526 kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) 1527 PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED), 1528 kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) 1529 PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME), 1530 kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST) 1531 PARALLEL_LOOP( 1532 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME), 1533 kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST) 1534 PARALLEL_LOOP( 1535 KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME), 1536 kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST) 1537 1538 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_START)(void) { 1539 int gtid = __kmp_entry_gtid(); 1540 MKLOC(loc, "GOMP_taskgroup_start"); 1541 KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid)); 1542 1543 #if OMPT_SUPPORT 1544 if (ompt_enabled.enabled) 1545 OMPT_STORE_RETURN_ADDRESS(gtid); 1546 #endif 1547 1548 __kmpc_taskgroup(&loc, gtid); 1549 1550 return; 1551 } 1552 1553 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_END)(void) { 1554 int gtid = __kmp_get_gtid(); 1555 MKLOC(loc, "GOMP_taskgroup_end"); 1556 KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid)); 1557 1558 #if OMPT_SUPPORT 1559 if (ompt_enabled.enabled) 1560 OMPT_STORE_RETURN_ADDRESS(gtid); 1561 #endif 1562 1563 __kmpc_end_taskgroup(&loc, gtid); 1564 1565 return; 1566 } 1567 1568 static kmp_int32 __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) { 1569 kmp_int32 cncl_kind = 0; 1570 switch (gomp_kind) { 1571 case 1: 1572 cncl_kind = cancel_parallel; 1573 break; 1574 case 2: 1575 cncl_kind = cancel_loop; 1576 break; 1577 case 4: 1578 cncl_kind = cancel_sections; 1579 break; 1580 case 8: 1581 cncl_kind = cancel_taskgroup; 1582 break; 1583 } 1584 return cncl_kind; 1585 } 1586 1587 // Return true if cancellation should take place, false otherwise 1588 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which) { 1589 int gtid = __kmp_get_gtid(); 1590 MKLOC(loc, "GOMP_cancellation_point"); 1591 KA_TRACE(20, ("GOMP_cancellation_point: T#%d which:%d\n", gtid, which)); 1592 kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which); 1593 return __kmpc_cancellationpoint(&loc, gtid, cncl_kind); 1594 } 1595 1596 // Return true if cancellation should take place, false otherwise 1597 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel) { 1598 int gtid = __kmp_get_gtid(); 1599 MKLOC(loc, "GOMP_cancel"); 1600 KA_TRACE(20, ("GOMP_cancel: T#%d which:%d do_cancel:%d\n", gtid, which, 1601 (int)do_cancel)); 1602 kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which); 1603 1604 if (do_cancel == FALSE) { 1605 return __kmpc_cancellationpoint(&loc, gtid, cncl_kind); 1606 } else { 1607 return __kmpc_cancel(&loc, gtid, cncl_kind); 1608 } 1609 } 1610 1611 // Return true if cancellation should take place, false otherwise 1612 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void) { 1613 int gtid = __kmp_get_gtid(); 1614 KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid)); 1615 return __kmp_barrier_gomp_cancel(gtid); 1616 } 1617 1618 // Return true if cancellation should take place, false otherwise 1619 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void) { 1620 int gtid = __kmp_get_gtid(); 1621 KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid)); 1622 return __kmp_barrier_gomp_cancel(gtid); 1623 } 1624 1625 // Return true if cancellation should take place, false otherwise 1626 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void) { 1627 int gtid = __kmp_get_gtid(); 1628 KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid)); 1629 return __kmp_barrier_gomp_cancel(gtid); 1630 } 1631 1632 // All target functions are empty as of 2014-05-29 1633 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn)(void *), 1634 const void *openmp_target, 1635 size_t mapnum, void **hostaddrs, 1636 size_t *sizes, 1637 unsigned char *kinds) { 1638 return; 1639 } 1640 1641 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_DATA)( 1642 int device, const void *openmp_target, size_t mapnum, void **hostaddrs, 1643 size_t *sizes, unsigned char *kinds) { 1644 return; 1645 } 1646 1647 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_END_DATA)(void) { return; } 1648 1649 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_UPDATE)( 1650 int device, const void *openmp_target, size_t mapnum, void **hostaddrs, 1651 size_t *sizes, unsigned char *kinds) { 1652 return; 1653 } 1654 1655 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams, 1656 unsigned int thread_limit) { 1657 return; 1658 } 1659 1660 // Task duplication function which copies src to dest (both are 1661 // preallocated task structures) 1662 static void __kmp_gomp_task_dup(kmp_task_t *dest, kmp_task_t *src, 1663 kmp_int32 last_private) { 1664 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(src); 1665 if (taskdata->td_copy_func) { 1666 (taskdata->td_copy_func)(dest->shareds, src->shareds); 1667 } 1668 } 1669 1670 #ifdef __cplusplus 1671 } // extern "C" 1672 #endif 1673 1674 template <typename T> 1675 void __GOMP_taskloop(void (*func)(void *), void *data, 1676 void (*copy_func)(void *, void *), long arg_size, 1677 long arg_align, unsigned gomp_flags, 1678 unsigned long num_tasks, int priority, T start, T end, 1679 T step) { 1680 typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32); 1681 MKLOC(loc, "GOMP_taskloop"); 1682 int sched; 1683 T *loop_bounds; 1684 int gtid = __kmp_entry_gtid(); 1685 kmp_int32 flags = 0; 1686 int if_val = gomp_flags & (1u << 10); 1687 int nogroup = gomp_flags & (1u << 11); 1688 int up = gomp_flags & (1u << 8); 1689 p_task_dup_t task_dup = NULL; 1690 kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags; 1691 #ifdef KMP_DEBUG 1692 { 1693 char *buff; 1694 buff = __kmp_str_format( 1695 "GOMP_taskloop: T#%%d: func:%%p data:%%p copy_func:%%p " 1696 "arg_size:%%ld arg_align:%%ld gomp_flags:0x%%x num_tasks:%%lu " 1697 "priority:%%d start:%%%s end:%%%s step:%%%s\n", 1698 traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec); 1699 KA_TRACE(20, (buff, gtid, func, data, copy_func, arg_size, arg_align, 1700 gomp_flags, num_tasks, priority, start, end, step)); 1701 __kmp_str_free(&buff); 1702 } 1703 #endif 1704 KMP_ASSERT((size_t)arg_size >= 2 * sizeof(T)); 1705 KMP_ASSERT(arg_align > 0); 1706 // The low-order bit is the "untied" flag 1707 if (!(gomp_flags & 1)) { 1708 input_flags->tiedness = 1; 1709 } 1710 // The second low-order bit is the "final" flag 1711 if (gomp_flags & 2) { 1712 input_flags->final = 1; 1713 } 1714 // Negative step flag 1715 if (!up) { 1716 // If step is flagged as negative, but isn't properly sign extended 1717 // Then manually sign extend it. Could be a short, int, char embedded 1718 // in a long. So cannot assume any cast. 1719 if (step > 0) { 1720 for (int i = sizeof(T) * CHAR_BIT - 1; i >= 0L; --i) { 1721 // break at the first 1 bit 1722 if (step & ((T)1 << i)) 1723 break; 1724 step |= ((T)1 << i); 1725 } 1726 } 1727 } 1728 input_flags->native = 1; 1729 // Figure out if none/grainsize/num_tasks clause specified 1730 if (num_tasks > 0) { 1731 if (gomp_flags & (1u << 9)) 1732 sched = 1; // grainsize specified 1733 else 1734 sched = 2; // num_tasks specified 1735 // neither grainsize nor num_tasks specified 1736 } else { 1737 sched = 0; 1738 } 1739 1740 // __kmp_task_alloc() sets up all other flags 1741 kmp_task_t *task = 1742 __kmp_task_alloc(&loc, gtid, input_flags, sizeof(kmp_task_t), 1743 arg_size + arg_align - 1, (kmp_routine_entry_t)func); 1744 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task); 1745 taskdata->td_copy_func = copy_func; 1746 taskdata->td_size_loop_bounds = sizeof(T); 1747 1748 // re-align shareds if needed and setup firstprivate copy constructors 1749 // through the task_dup mechanism 1750 task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) / 1751 arg_align * arg_align); 1752 if (copy_func) { 1753 task_dup = __kmp_gomp_task_dup; 1754 } 1755 KMP_MEMCPY(task->shareds, data, arg_size); 1756 1757 loop_bounds = (T *)task->shareds; 1758 loop_bounds[0] = start; 1759 loop_bounds[1] = end + (up ? -1 : 1); 1760 __kmpc_taskloop(&loc, gtid, task, if_val, (kmp_uint64 *)&(loop_bounds[0]), 1761 (kmp_uint64 *)&(loop_bounds[1]), (kmp_int64)step, nogroup, 1762 sched, (kmp_uint64)num_tasks, (void *)task_dup); 1763 } 1764 1765 // 4 byte version of GOMP_doacross_post 1766 // This verison needs to create a temporary array which converts 4 byte 1767 // integers into 8 byte integers 1768 template <typename T, bool need_conversion = (sizeof(long) == 4)> 1769 void __kmp_GOMP_doacross_post(T *count); 1770 1771 template <> void __kmp_GOMP_doacross_post<long, true>(long *count) { 1772 int gtid = __kmp_entry_gtid(); 1773 kmp_info_t *th = __kmp_threads[gtid]; 1774 MKLOC(loc, "GOMP_doacross_post"); 1775 kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0]; 1776 kmp_int64 *vec = 1777 (kmp_int64 *)__kmp_thread_malloc(th, sizeof(kmp_int64) * num_dims); 1778 for (kmp_int64 i = 0; i < num_dims; ++i) { 1779 vec[i] = (kmp_int64)count[i]; 1780 } 1781 __kmpc_doacross_post(&loc, gtid, vec); 1782 __kmp_thread_free(th, vec); 1783 } 1784 1785 // 8 byte versions of GOMP_doacross_post 1786 // This version can just pass in the count array directly instead of creating 1787 // a temporary array 1788 template <> void __kmp_GOMP_doacross_post<long, false>(long *count) { 1789 int gtid = __kmp_entry_gtid(); 1790 MKLOC(loc, "GOMP_doacross_post"); 1791 __kmpc_doacross_post(&loc, gtid, RCAST(kmp_int64 *, count)); 1792 } 1793 1794 template <typename T> void __kmp_GOMP_doacross_wait(T first, va_list args) { 1795 int gtid = __kmp_entry_gtid(); 1796 kmp_info_t *th = __kmp_threads[gtid]; 1797 MKLOC(loc, "GOMP_doacross_wait"); 1798 kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0]; 1799 kmp_int64 *vec = 1800 (kmp_int64 *)__kmp_thread_malloc(th, sizeof(kmp_int64) * num_dims); 1801 vec[0] = (kmp_int64)first; 1802 for (kmp_int64 i = 1; i < num_dims; ++i) { 1803 T item = va_arg(args, T); 1804 vec[i] = (kmp_int64)item; 1805 } 1806 __kmpc_doacross_wait(&loc, gtid, vec); 1807 __kmp_thread_free(th, vec); 1808 return; 1809 } 1810 1811 #ifdef __cplusplus 1812 extern "C" { 1813 #endif // __cplusplus 1814 1815 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKLOOP)( 1816 void (*func)(void *), void *data, void (*copy_func)(void *, void *), 1817 long arg_size, long arg_align, unsigned gomp_flags, unsigned long num_tasks, 1818 int priority, long start, long end, long step) { 1819 __GOMP_taskloop<long>(func, data, copy_func, arg_size, arg_align, gomp_flags, 1820 num_tasks, priority, start, end, step); 1821 } 1822 1823 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKLOOP_ULL)( 1824 void (*func)(void *), void *data, void (*copy_func)(void *, void *), 1825 long arg_size, long arg_align, unsigned gomp_flags, unsigned long num_tasks, 1826 int priority, unsigned long long start, unsigned long long end, 1827 unsigned long long step) { 1828 __GOMP_taskloop<unsigned long long>(func, data, copy_func, arg_size, 1829 arg_align, gomp_flags, num_tasks, 1830 priority, start, end, step); 1831 } 1832 1833 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_POST)(long *count) { 1834 __kmp_GOMP_doacross_post(count); 1835 } 1836 1837 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_WAIT)(long first, ...) { 1838 va_list args; 1839 va_start(args, first); 1840 __kmp_GOMP_doacross_wait<long>(first, args); 1841 va_end(args); 1842 } 1843 1844 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_ULL_POST)( 1845 unsigned long long *count) { 1846 int gtid = __kmp_entry_gtid(); 1847 MKLOC(loc, "GOMP_doacross_ull_post"); 1848 __kmpc_doacross_post(&loc, gtid, RCAST(kmp_int64 *, count)); 1849 } 1850 1851 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_ULL_WAIT)( 1852 unsigned long long first, ...) { 1853 va_list args; 1854 va_start(args, first); 1855 __kmp_GOMP_doacross_wait<unsigned long long>(first, args); 1856 va_end(args); 1857 } 1858 1859 /* The following sections of code create aliases for the GOMP_* functions, then 1860 create versioned symbols using the assembler directive .symver. This is only 1861 pertinent for ELF .so library. The KMP_VERSION_SYMBOL macro is defined in 1862 kmp_os.h */ 1863 1864 #ifdef KMP_USE_VERSION_SYMBOLS 1865 // GOMP_1.0 versioned symbols 1866 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0"); 1867 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0"); 1868 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0"); 1869 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0"); 1870 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0"); 1871 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0"); 1872 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0"); 1873 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0"); 1874 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0"); 1875 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0"); 1876 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0"); 1877 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0"); 1878 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0"); 1879 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0"); 1880 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10, 1881 "GOMP_1.0"); 1882 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0"); 1883 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0"); 1884 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0"); 1885 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10, 1886 "GOMP_1.0"); 1887 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0"); 1888 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0"); 1889 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0"); 1890 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0"); 1891 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0"); 1892 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0"); 1893 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0"); 1894 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0"); 1895 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0"); 1896 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10, 1897 "GOMP_1.0"); 1898 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10, 1899 "GOMP_1.0"); 1900 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10, 1901 "GOMP_1.0"); 1902 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10, 1903 "GOMP_1.0"); 1904 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0"); 1905 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0"); 1906 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0"); 1907 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0"); 1908 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0"); 1909 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0"); 1910 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0"); 1911 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0"); 1912 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0"); 1913 1914 // GOMP_2.0 versioned symbols 1915 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0"); 1916 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0"); 1917 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0"); 1918 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0"); 1919 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0"); 1920 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0"); 1921 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20, 1922 "GOMP_2.0"); 1923 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20, 1924 "GOMP_2.0"); 1925 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20, 1926 "GOMP_2.0"); 1927 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20, 1928 "GOMP_2.0"); 1929 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20, 1930 "GOMP_2.0"); 1931 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20, 1932 "GOMP_2.0"); 1933 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20, 1934 "GOMP_2.0"); 1935 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20, 1936 "GOMP_2.0"); 1937 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0"); 1938 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0"); 1939 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0"); 1940 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0"); 1941 1942 // GOMP_3.0 versioned symbols 1943 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0"); 1944 1945 // GOMP_4.0 versioned symbols 1946 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0"); 1947 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0"); 1948 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0"); 1949 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0"); 1950 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0"); 1951 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0"); 1952 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0"); 1953 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0"); 1954 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0"); 1955 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0"); 1956 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0"); 1957 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0"); 1958 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0"); 1959 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0"); 1960 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0"); 1961 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0"); 1962 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0"); 1963 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0"); 1964 1965 // GOMP_4.5 versioned symbols 1966 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKLOOP, 45, "GOMP_4.5"); 1967 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKLOOP_ULL, 45, "GOMP_4.5"); 1968 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_POST, 45, "GOMP_4.5"); 1969 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_WAIT, 45, "GOMP_4.5"); 1970 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START, 45, 1971 "GOMP_4.5"); 1972 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START, 45, 1973 "GOMP_4.5"); 1974 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START, 45, 1975 "GOMP_4.5"); 1976 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START, 45, 1977 "GOMP_4.5"); 1978 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_ULL_POST, 45, "GOMP_4.5"); 1979 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_ULL_WAIT, 45, "GOMP_4.5"); 1980 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START, 45, 1981 "GOMP_4.5"); 1982 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START, 45, 1983 "GOMP_4.5"); 1984 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START, 45, 1985 "GOMP_4.5"); 1986 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START, 45, 1987 "GOMP_4.5"); 1988 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START, 45, 1989 "GOMP_4.5"); 1990 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT, 45, 1991 "GOMP_4.5"); 1992 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START, 45, 1993 "GOMP_4.5"); 1994 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT, 45, 1995 "GOMP_4.5"); 1996 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START, 45, 1997 "GOMP_4.5"); 1998 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT, 45, 1999 "GOMP_4.5"); 2000 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START, 45, 2001 "GOMP_4.5"); 2002 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT, 45, 2003 "GOMP_4.5"); 2004 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC, 45, 2005 "GOMP_4.5"); 2006 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED, 45, 2007 "GOMP_4.5"); 2008 2009 // GOMP_5.0 versioned symbols 2010 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT, 50, 2011 "GOMP_5.0"); 2012 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START, 50, 2013 "GOMP_5.0"); 2014 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT, 50, 2015 "GOMP_5.0"); 2016 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_START, 50, 2017 "GOMP_5.0"); 2018 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT, 2019 50, "GOMP_5.0"); 2020 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START, 2021 50, "GOMP_5.0"); 2022 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT, 50, 2023 "GOMP_5.0"); 2024 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START, 50, 2025 "GOMP_5.0"); 2026 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME, 50, 2027 "GOMP_5.0"); 2028 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME, 2029 50, "GOMP_5.0"); 2030 2031 #endif // KMP_USE_VERSION_SYMBOLS 2032 2033 #ifdef __cplusplus 2034 } // extern "C" 2035 #endif // __cplusplus 2036