/*
 * kmp_taskdeps.cpp
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//#define KMP_SUPPORT_GRAPH_OUTPUT 1

#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

// TODO: Improve memory allocation? keep a list of pre-allocated structures?
// allocate in blocks? re-use finished list entries?
// TODO: don't use atomic ref counters for stack-allocated nodes.
// TODO: find an alternative to atomic refs for heap-allocated nodes?
// TODO: Finish graph output support
// TODO: kmp_lock_t seems a tad too big (and heavy weight) for this. Check
// other runtime locks
// TODO: Any ITT support needed?

#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
#endif

static void __kmp_init_node(kmp_depnode_t *node) {
  node->dn.successors = NULL;
  node->dn.task = NULL; // will point to the right task
  // once dependences have been processed
  for (int i = 0; i < MAX_MTX_DEPS; ++i)
    node->dn.mtx_locks[i] = NULL;
  node->dn.mtx_num_locks = 0;
  __kmp_init_lock(&node->dn.lock);
  KMP_ATOMIC_ST_RLX(&node->dn.nrefs, 1); // init creates the first reference
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
#endif
}

static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
  KMP_ATOMIC_INC(&node->dn.nrefs);
  return node;
}

enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };

size_t sizes[] = {997, 2003, 4001, 8191, 16001, 32003, 64007, 131071, 270029};
const size_t MAX_GEN = 8;

static inline size_t __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
  // TODO alternative to try: set = (((Addr64)(addrUsefulBits * 9.618)) %
  // m_num_sets );
  return ((addr >> 6) ^ (addr >> 2)) % hsize;
}
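
// Note on the hash: the shifts drop the two low bits of the address entirely
// (dependence addresses are typically at least 4-byte aligned) and xor-fold
// higher bits so that nearby addresses spread across buckets, e.g. for
// hsize = 997:
//   bucket = ((addr >> 6) ^ (addr >> 2)) % 997
// The sizes[] table holds the prime bucket counts used by successive resizes;
// __kmp_dephash_extend() below stops growing once the generation reaches
// MAX_GEN, so the last entry (270029) is never actually selected.
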
static kmp_dephash_t *__kmp_dephash_extend(kmp_info_t *thread,
                                           kmp_dephash_t *current_dephash) {
  kmp_dephash_t *h;

  size_t gen = current_dephash->generation + 1;
  if (gen >= MAX_GEN)
    return current_dephash;
  size_t new_size = sizes[gen];

  size_t size_to_allocate =
      new_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size_to_allocate);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size_to_allocate);
#endif

  h->size = new_size;
  h->nelements = current_dephash->nelements;
  h->buckets = (kmp_dephash_entry **)(h + 1);
  h->generation = gen;
  h->nconflicts = 0;

  // make sure buckets are properly initialized
  for (size_t i = 0; i < new_size; i++) {
    h->buckets[i] = NULL;
  }

  // insert existing elements in the new table
  for (size_t i = 0; i < current_dephash->size; i++) {
    kmp_dephash_entry_t *next, *entry;
    for (entry = current_dephash->buckets[i]; entry; entry = next) {
      next = entry->next_in_bucket;
      // Compute the new hash using the new size, and insert the entry in
      // the new bucket.
      size_t new_bucket = __kmp_dephash_hash(entry->addr, h->size);
      entry->next_in_bucket = h->buckets[new_bucket];
      if (entry->next_in_bucket) {
        h->nconflicts++;
      }
      h->buckets[new_bucket] = entry;
    }
  }

  // Free old hash table
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, current_dephash);
#else
  __kmp_thread_free(thread, current_dephash);
#endif

  return h;
}

static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                           kmp_taskdata_t *current_task) {
  kmp_dephash_t *h;

  size_t h_size;

  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
    h_size = KMP_DEPHASH_MASTER_SIZE;
  else
    h_size = KMP_DEPHASH_OTHER_SIZE;

  size_t size = h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
#endif
  h->size = h_size;

  h->generation = 0;
  h->nelements = 0;
  h->nconflicts = 0;
  h->buckets = (kmp_dephash_entry **)(h + 1);

  for (size_t i = 0; i < h_size; i++)
    h->buckets[i] = 0;

  return h;
}

static kmp_dephash_entry *__kmp_dephash_find(kmp_info_t *thread,
                                             kmp_dephash_t **hash,
                                             kmp_intptr_t addr) {
  kmp_dephash_t *h = *hash;
  if (h->nelements != 0 && h->nconflicts / h->size >= 1) {
    *hash = __kmp_dephash_extend(thread, h);
    h = *hash;
  }
  size_t bucket = __kmp_dephash_hash(addr, h->size);

  kmp_dephash_entry_t *entry;
  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
    if (entry->addr == addr)
      break;

  if (entry == NULL) {
    // create entry. This is only done by one thread so no locking required
#if USE_FAST_MEMORY
    entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
        thread, sizeof(kmp_dephash_entry_t));
#else
    entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
        thread, sizeof(kmp_dephash_entry_t));
#endif
    entry->addr = addr;
    entry->last_out = NULL;
    entry->last_set = NULL;
    entry->prev_set = NULL;
    entry->last_flag = 0;
    entry->mtx_lock = NULL;
    entry->next_in_bucket = h->buckets[bucket];
    h->buckets[bucket] = entry;
    h->nelements++;
    if (entry->next_in_bucket)
      h->nconflicts++;
  }
  return entry;
}

static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
                                          kmp_depnode_list_t *list,
                                          kmp_depnode_t *node) {
  kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
      thread, sizeof(kmp_depnode_list_t));
#else
  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
      thread, sizeof(kmp_depnode_list_t));
#endif

  new_head->node = __kmp_node_ref(node);
  new_head->next = list;

  return new_head;
}
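
// A short guide to the per-address bookkeeping used below: each
// kmp_dephash_entry_t tracks
//   last_out  - the node of the last OUT/INOUT task registered,
//   last_set  - the nodes of the trailing run of same-kind IN/MTX/SET tasks,
//   prev_set  - the set that preceded last_set (necessarily of a different
//               kind),
//   last_flag - the dependence kind stored in last_set (0 if none).
// __kmp_process_deps() updates these fields to encode the ordering rules
// between dependence kinds.
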
static inline void __kmp_track_dependence(kmp_int32 gtid, kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  // do not use sink->dn.task as that is only filled after the dependences
  // are already processed!
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
               task_source->td_ident->psource, sink->dn.id,
               task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT tracks dependences between tasks (a=source, b=sink) in which task a
     blocks the execution of b through the ompt_callback_task_dependence
     callback */
  if (ompt_enabled.ompt_callback_task_dependence) {
    kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    ompt_data_t *sink_data;
    if (sink_task)
      sink_data = &(KMP_TASK_TO_TASKDATA(sink_task)->ompt_task_info.task_data);
    else
      sink_data = &__kmp_threads[gtid]->th.ompt_thread_info.task_data;

    ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
        &(task_source->ompt_task_info.task_data), sink_data);
  }
#endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
}

static inline kmp_int32
__kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread,
                             kmp_task_t *task, kmp_depnode_t *node,
                             kmp_depnode_list_t *plist) {
  if (!plist)
    return 0;
  kmp_int32 npredecessors = 0;
  // link node as successor of list elements
  for (kmp_depnode_list_t *p = plist; p; p = p->next) {
    kmp_depnode_t *dep = p->node;
    if (dep->dn.task) {
      KMP_ACQUIRE_DEPNODE(gtid, dep);
      if (dep->dn.task) {
        __kmp_track_dependence(gtid, dep, node, task);
        dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
        KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                      "%p\n",
                      gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
                      KMP_TASK_TO_TASKDATA(task)));
        npredecessors++;
      }
      KMP_RELEASE_DEPNODE(gtid, dep);
    }
  }
  return npredecessors;
}

static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid,
                                                     kmp_info_t *thread,
                                                     kmp_task_t *task,
                                                     kmp_depnode_t *source,
                                                     kmp_depnode_t *sink) {
  if (!sink)
    return 0;
  kmp_int32 npredecessors = 0;
  if (sink->dn.task) {
    // synchronously add source to sink's list of successors
    KMP_ACQUIRE_DEPNODE(gtid, sink);
    if (sink->dn.task) {
      __kmp_track_dependence(gtid, sink, source, task);
      sink->dn.successors = __kmp_add_node(thread, sink->dn.successors, source);
      KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                    "%p\n",
                    gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
                    KMP_TASK_TO_TASKDATA(task)));
      npredecessors++;
    }
    KMP_RELEASE_DEPNODE(gtid, sink);
  }
  return npredecessors;
}
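
// Both overloads above follow the same check / lock / re-check pattern: the
// unlocked test of dn.task cheaply skips predecessors that have already
// completed, while the second test under KMP_ACQUIRE_DEPNODE guards against a
// predecessor finishing (its dn.task is cleared under the same lock when its
// dependences are released) between the first check and the lock acquisition.
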
template <bool filter>
static inline kmp_int32
__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t **hash,
                   bool dep_barrier, kmp_int32 ndeps,
                   kmp_depend_info_t *dep_list, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependences : "
                "dep_barrier = %d\n",
                filter, gtid, ndeps, dep_barrier));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;
  for (kmp_int32 i = 0; i < ndeps; i++) {
    const kmp_depend_info_t *dep = &dep_list[i];

    if (filter && dep->base_addr == 0)
      continue; // skip filtered entries

    kmp_dephash_entry_t *info =
        __kmp_dephash_find(thread, hash, dep->base_addr);
    kmp_depnode_t *last_out = info->last_out;
    kmp_depnode_list_t *last_set = info->last_set;
    kmp_depnode_list_t *prev_set = info->prev_set;

    if (dep->flags.out) { // out or inout --> clean lists if any
      if (last_set) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        __kmp_depnode_list_free(thread, last_set);
        __kmp_depnode_list_free(thread, prev_set);
        info->last_set = NULL;
        info->prev_set = NULL;
        info->last_flag = 0; // no sets in this dephash entry
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (!dep_barrier) {
        info->last_out = __kmp_node_ref(node);
      } else {
        // if this is a sync point in the serial sequence, then the previous
        // outputs are guaranteed to be completed after the execution of this
        // task so the previous output nodes can be cleared.
        info->last_out = NULL;
      }
    } else { // either IN or MTX or SET
      if (info->last_flag == 0 || info->last_flag == dep->flag) {
        // last_set either didn't exist or is of the same dep kind
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
        // link node as successor of all nodes in the prev_set if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, prev_set);
        if (dep_barrier) {
          // clean last_out and prev_set if any; don't touch last_set
          __kmp_node_deref(thread, last_out);
          info->last_out = NULL;
          __kmp_depnode_list_free(thread, prev_set);
          info->prev_set = NULL;
        }
      } else { // last_set is of a different dep kind, make it prev_set
        // link node as successor of all nodes in the last_set
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        // clean last_out if any
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        // clean prev_set if any
        __kmp_depnode_list_free(thread, prev_set);
        if (!dep_barrier) {
          // move last_set to prev_set, new last_set will be allocated
          info->prev_set = last_set;
        } else {
          info->prev_set = NULL;
          info->last_flag = 0;
        }
        info->last_set = NULL;
      }
      // for dep_barrier last_flag value should remain:
      // 0 if last_set is empty, unchanged otherwise
      if (!dep_barrier) {
        info->last_flag = dep->flag; // store dep kind of the last_set
        info->last_set = __kmp_add_node(thread, info->last_set, node);
      }
      // check if we are processing MTX dependency
      if (dep->flag == KMP_DEP_MTX) {
        if (info->mtx_lock == NULL) {
          info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));
          __kmp_init_lock(info->mtx_lock);
        }
        KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS);
        kmp_int32 m;
        // Save lock in node's array
        for (m = 0; m < MAX_MTX_DEPS; ++m) {
          // sort pointers in decreasing order to avoid potential livelock
          if (node->dn.mtx_locks[m] < info->mtx_lock) {
            KMP_DEBUG_ASSERT(!node->dn.mtx_locks[node->dn.mtx_num_locks]);
            for (int n = node->dn.mtx_num_locks; n > m; --n) {
              // shift right all lesser non-NULL pointers
              KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL);
              node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1];
            }
            node->dn.mtx_locks[m] = info->mtx_lock;
            break;
          }
        }
        KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS); // must break from loop
        node->dn.mtx_num_locks++;
      }
    }
  }
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
                gtid, npredecessors));
  return npredecessors;
}

#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)
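
// Worked example for the bookkeeping above, with three tasks on the same
// address x:
//   task A depend(out: x)   -> last_out = A
//   task B depend(in: x)    -> B linked as successor of A; last_set = {B},
//                              last_flag = KMP_DEP_IN
//   task C depend(inout: x) -> C linked as successor of B (the last_set);
//                              last_set/prev_set freed, last_out = C
// With DEP_BARRIER (used by __kmpc_omp_wait_deps) the waiting node is only
// linked as a successor, never recorded in the hash, and entries that are
// guaranteed complete once the wait finishes are cleared.
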
// returns true if the task has any outstanding dependence
static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
                             kmp_task_t *task, kmp_dephash_t **hash,
                             bool dep_barrier, kmp_int32 ndeps,
                             kmp_depend_info_t *dep_list,
                             kmp_int32 ndeps_noalias,
                             kmp_depend_info_t *noalias_dep_list) {
  int i, n_mtxs = 0;
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
#endif
  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependences for task %p : %d "
                "possibly aliased dependences, %d non-aliased dependences : "
                "dep_barrier=%d .\n",
                gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));

  // Filter deps in dep_list
  // TODO: Different algorithm for large dep_list ( > 10 ? )
  for (i = 0; i < ndeps; i++) {
    if (dep_list[i].base_addr != 0) {
      KMP_DEBUG_ASSERT(
          dep_list[i].flag == KMP_DEP_IN || dep_list[i].flag == KMP_DEP_OUT ||
          dep_list[i].flag == KMP_DEP_INOUT ||
          dep_list[i].flag == KMP_DEP_MTX || dep_list[i].flag == KMP_DEP_SET);
      for (int j = i + 1; j < ndeps; j++) {
        if (dep_list[i].base_addr == dep_list[j].base_addr) {
          if (dep_list[i].flag != dep_list[j].flag) {
            // two different dependences on the same address work identically
            // to an OUT dependence
            dep_list[i].flag = KMP_DEP_OUT;
          }
          dep_list[j].base_addr = 0; // Mark j element as void
        }
      }
      if (dep_list[i].flag == KMP_DEP_MTX) {
        // limit number of mtx deps to MAX_MTX_DEPS per node
        if (n_mtxs < MAX_MTX_DEPS && task != NULL) {
          ++n_mtxs;
        } else {
          dep_list[i].flag = KMP_DEP_OUT; // downgrade mutexinoutset to inout
        }
      }
    }
  }

  // doesn't need to be atomic as no other thread is going to be accessing this
  // node just yet.
  // npredecessors is set to -1 to ensure that none of the releasing tasks
  // queues this task before we have finished processing all the dependences
  node->dn.npredecessors = -1;

  // used to pack all npredecessors additions into a single atomic operation at
  // the end
  int npredecessors;

  npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps,
                                           dep_list, task);
  npredecessors += __kmp_process_deps<false>(
      gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);

  node->dn.task = task;
  KMP_MB();

  // Account for our initial fake value
  npredecessors++;

  // Update predecessors and obtain current value to check if there are still
  // any outstanding dependences (some tasks may have finished while we
  // processed the dependences)
  npredecessors =
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;

  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
                gtid, npredecessors, taskdata));

  // beyond this point the task could be queued (and executed) by a releasing
  // task...
  return npredecessors > 0;
}
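
// For context: a construct such as
//   #pragma omp task depend(inout: x)
// is typically outlined by the compiler into a task thunk plus a call to the
// entry point below, with the depend items packed into dep_list /
// noalias_dep_list (how items are split between the two lists depends on what
// the front end can prove about aliasing).
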
/*!
@ingroup TASKING
@param loc_ref location of the original task directive
@param gtid Global Thread ID of encountering thread
@param new_task task thunk allocated by __kmp_omp_task_alloc() for the ''new
task''
@param ndeps Number of depend items with possible aliasing
@param dep_list List of depend items with possible aliasing
@param ndeps_noalias Number of depend items with no aliasing
@param noalias_dep_list List of depend items with no aliasing

@return Returns either TASK_CURRENT_NOT_QUEUED if the current task was not
suspended and queued, or TASK_CURRENT_QUEUED if it was suspended and queued

Schedule a non-thread-switchable task with dependences for execution
*/
kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_task_t *new_task, kmp_int32 ndeps,
                                    kmp_depend_info_t *dep_list,
                                    kmp_int32 ndeps_noalias,
                                    kmp_depend_info_t *noalias_dep_list) {

  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, new_taskdata));
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(current_task->ompt_task_info.task_data),
          &(current_task->ompt_task_info.frame),
          &(new_taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
          OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
    }

    new_taskdata->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    int ompt_ndeps = ndeps + ndeps_noalias;
    ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
        thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(ompt_deps != NULL);

    for (i = 0; i < ndeps; i++) {
      ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
      else if (dep_list[i].flags.set)
        ompt_deps[i].dependence_type = ompt_dependence_type_inoutset;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
      else if (noalias_dep_list[i].flags.mtx)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_mutexinoutset;
      else if (noalias_dep_list[i].flags.set)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inoutset;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        &(new_taskdata->ompt_task_info.task_data), ompt_deps, ompt_ndeps);
    /* We can now free the allocated memory for the dependences */
    /* For OMPD we might want to delay the free until the end of this
       function */
    KMP_OMPT_DEPS_FREE(thread, ompt_deps);
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  bool serial = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  kmp_task_team_t *task_team = thread->th.th_task_team;
  serial = serial &&
           !(task_team && (task_team->tt.tt_found_proxy_tasks ||
                           task_team->tt.tt_hidden_helper_task_encountered));

  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
    /* if no dependences have been tracked yet, create the dependence hash */
    if (current_task->td_dephash == NULL)
      current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
#else
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
#endif

    __kmp_init_node(node);
    new_taskdata->td_depnode = node;

    if (__kmp_check_deps(gtid, node, new_task, &current_task->td_dephash,
                         NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                         noalias_dep_list)) {
      KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
                    "dependences: "
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
                    gtid, loc_ref, new_taskdata));
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
      }
#endif
      return TASK_CURRENT_NOT_QUEUED;
    }
  } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependences "
                  "for task (serialized) loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
  }

  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
                "dependences : "
                "loc=%p task=%p, transferring to __kmp_omp_task\n",
                gtid, loc_ref, new_taskdata));

  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif
  return ret;
}

#if OMPT_SUPPORT
void __ompt_taskwait_dep_finish(kmp_taskdata_t *current_task,
                                ompt_data_t *taskwait_task_data) {
  if (ompt_enabled.ompt_callback_task_schedule) {
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        taskwait_task_data, ompt_taskwait_complete, NULL);
  }
  current_task->ompt_task_info.frame.enter_frame.ptr = NULL;
  *taskwait_task_data = ompt_data_none;
}
#endif /* OMPT_SUPPORT */
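
// A taskwait with a depend clause, e.g.
//   #pragma omp taskwait depend(in: x)
// is commonly lowered to a call to __kmpc_omp_wait_deps() below: no new task
// is created; the encountering task simply blocks until the listed
// dependences are fulfilled.
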
/*!
@ingroup TASKING
@param loc_ref location of the original task directive
@param gtid Global Thread ID of encountering thread
@param ndeps Number of depend items with possible aliasing
@param dep_list List of depend items with possible aliasing
@param ndeps_noalias Number of depend items with no aliasing
@param noalias_dep_list List of depend items with no aliasing

Blocks the current task until all specified dependences have been fulfilled.
*/
void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
                          kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
                          kmp_depend_info_t *noalias_dep_list) {
  KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));

  if (ndeps == 0 && ndeps_noalias == 0) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependences to "
                  "wait upon : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  // this function represents a taskwait construct with depend clause
  // We signal 4 events:
  //  - creation of the taskwait task
  //  - dependences of the taskwait task
  //  - schedule and finish of the taskwait task
  ompt_data_t *taskwait_task_data = &thread->th.ompt_thread_info.task_data;
  KMP_ASSERT(taskwait_task_data->ptr == NULL);
  if (ompt_enabled.enabled) {
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(current_task->ompt_task_info.task_data),
          &(current_task->ompt_task_info.frame), taskwait_task_data,
          ompt_task_taskwait | ompt_task_undeferred | ompt_task_mergeable, 1,
          OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
    }
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    int ompt_ndeps = ndeps + ndeps_noalias;
    ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
        thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(ompt_deps != NULL);

    for (i = 0; i < ndeps; i++) {
      ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
      else if (dep_list[i].flags.set)
        ompt_deps[i].dependence_type = ompt_dependence_type_inoutset;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
      else if (noalias_dep_list[i].flags.mtx)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_mutexinoutset;
      else if (noalias_dep_list[i].flags.set)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inoutset;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        taskwait_task_data, ompt_deps, ompt_ndeps);
    /* We can now free the allocated memory for the dependences */
    /* For OMPD we might want to delay the free until the end of this
       function */
    KMP_OMPT_DEPS_FREE(thread, ompt_deps);
    ompt_deps = NULL;
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */
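
  // The wait below registers a temporary stack-allocated depnode against the
  // same dependence hash, then executes other tasks until the node's
  // predecessor count drains to zero; nothing is ever enqueued on its behalf.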
this function */ 746 KMP_OMPT_DEPS_FREE(thread, ompt_deps); 747 ompt_deps = NULL; 748 } 749 #endif /* OMPT_OPTIONAL */ 750 #endif /* OMPT_SUPPORT */ 751 752 // We can return immediately as: 753 // - dependences are not computed in serial teams (except with proxy tasks) 754 // - if the dephash is not yet created it means we have nothing to wait for 755 bool ignore = current_task->td_flags.team_serial || 756 current_task->td_flags.tasking_ser || 757 current_task->td_flags.final; 758 ignore = ignore && thread->th.th_task_team != NULL && 759 thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE; 760 ignore = ignore || current_task->td_dephash == NULL; 761 762 if (ignore) { 763 KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking " 764 "dependences : loc=%p\n", 765 gtid, loc_ref)); 766 #if OMPT_SUPPORT 767 __ompt_taskwait_dep_finish(current_task, taskwait_task_data); 768 #endif /* OMPT_SUPPORT */ 769 return; 770 } 771 772 kmp_depnode_t node = {0}; 773 __kmp_init_node(&node); 774 775 if (!__kmp_check_deps(gtid, &node, NULL, ¤t_task->td_dephash, 776 DEP_BARRIER, ndeps, dep_list, ndeps_noalias, 777 noalias_dep_list)) { 778 KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking " 779 "dependences : loc=%p\n", 780 gtid, loc_ref)); 781 #if OMPT_SUPPORT 782 __ompt_taskwait_dep_finish(current_task, taskwait_task_data); 783 #endif /* OMPT_SUPPORT */ 784 return; 785 } 786 787 int thread_finished = FALSE; 788 kmp_flag_32<false, false> flag( 789 (std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U); 790 while (node.dn.npredecessors > 0) { 791 flag.execute_tasks(thread, gtid, FALSE, 792 &thread_finished USE_ITT_BUILD_ARG(NULL), 793 __kmp_task_stealing_constraint); 794 } 795 796 #if OMPT_SUPPORT 797 __ompt_taskwait_dep_finish(current_task, taskwait_task_data); 798 #endif /* OMPT_SUPPORT */ 799 KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n", 800 gtid, loc_ref)); 801 } 802