/*
 * kmp_taskdeps.cpp
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//#define KMP_SUPPORT_GRAPH_OUTPUT 1

#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

// TODO: Improve memory allocation? keep a list of pre-allocated structures?
// allocate in blocks? re-use finished list entries?
// TODO: don't use atomic ref counters for stack-allocated nodes.
// TODO: find an alternative to atomic refs for heap-allocated nodes?
// TODO: Finish graph output support
// TODO: kmp_lock_t seems a tad too big (and heavy weight) for this. Check
// other runtime locks
// TODO: Any ITT support needed?

#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
#endif

static void __kmp_init_node(kmp_depnode_t *node) {
  node->dn.successors = NULL;
  node->dn.task = NULL; // will point to the right task
  // once dependences have been processed
  for (int i = 0; i < MAX_MTX_DEPS; ++i)
    node->dn.mtx_locks[i] = NULL;
  node->dn.mtx_num_locks = 0;
  __kmp_init_lock(&node->dn.lock);
  KMP_ATOMIC_ST_RLX(&node->dn.nrefs, 1); // init creates the first reference
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
#endif
}

static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
  KMP_ATOMIC_INC(&node->dn.nrefs);
  return node;
}

enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };

size_t sizes[] = {997, 2003, 4001, 8191, 16001, 32003, 64007, 131071, 270029};
const size_t MAX_GEN = 8;

static inline kmp_int32 __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
  // TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) %
  // m_num_sets );
  return ((addr >> 6) ^ (addr >> 2)) % hsize;
}

static kmp_dephash_t *__kmp_dephash_extend(kmp_info_t *thread,
                                           kmp_dephash_t *current_dephash) {
  kmp_dephash_t *h;

  size_t gen = current_dephash->generation + 1;
  if (gen >= MAX_GEN)
    return current_dephash;
  size_t new_size = sizes[gen];

  size_t size_to_allocate =
      new_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size_to_allocate);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size_to_allocate);
#endif

  h->size = new_size;
  h->nelements = current_dephash->nelements;
  h->buckets = (kmp_dephash_entry **)(h + 1);
  h->generation = gen;
  h->nconflicts = 0;
  // insert existing elements in the new table
  for (size_t i = 0; i < current_dephash->size; i++) {
    kmp_dephash_entry_t *next, *entry;
    for (entry = current_dephash->buckets[i]; entry; entry = next) {
      next = entry->next_in_bucket;
      // Compute the new hash using the new size, and insert the entry in
      // the new bucket.
      kmp_int32 new_bucket = __kmp_dephash_hash(entry->addr, h->size);
      entry->next_in_bucket = h->buckets[new_bucket];
      if (entry->next_in_bucket) {
        h->nconflicts++;
      }
      h->buckets[new_bucket] = entry;
    }
  }

  // Free old hash table
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, current_dephash);
#else
  __kmp_thread_free(thread, current_dephash);
#endif

  return h;
}
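
// Note: __kmp_dephash_find (below) grows the table once the number of
// conflicting insertions reaches the number of buckets (an average load
// factor of 1), up to MAX_GEN generations; past that point lookups simply
// tolerate longer chains.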

static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                           kmp_taskdata_t *current_task) {
  kmp_dephash_t *h;

  size_t h_size;

  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
    h_size = KMP_DEPHASH_MASTER_SIZE;
  else
    h_size = KMP_DEPHASH_OTHER_SIZE;

  size_t size = h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
#endif
  h->size = h_size;

  h->generation = 0;
  h->nelements = 0;
  h->nconflicts = 0;
  h->buckets = (kmp_dephash_entry **)(h + 1);

  for (size_t i = 0; i < h_size; i++)
    h->buckets[i] = 0;

  return h;
}

#define ENTRY_LAST_INS 0
#define ENTRY_LAST_MTXS 1

static kmp_dephash_entry *__kmp_dephash_find(kmp_info_t *thread,
                                             kmp_dephash_t **hash,
                                             kmp_intptr_t addr) {
  kmp_dephash_t *h = *hash;
  if (h->nelements != 0 && h->nconflicts / h->size >= 1) {
    *hash = __kmp_dephash_extend(thread, h);
    h = *hash;
  }
  kmp_int32 bucket = __kmp_dephash_hash(addr, h->size);

  kmp_dephash_entry_t *entry;
  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
    if (entry->addr == addr)
      break;

  if (entry == NULL) {
    // create entry. This is only done by one thread so no locking required
#if USE_FAST_MEMORY
    entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
        thread, sizeof(kmp_dephash_entry_t));
#else
    entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
        thread, sizeof(kmp_dephash_entry_t));
#endif
    entry->addr = addr;
    entry->last_out = NULL;
    entry->last_ins = NULL;
    entry->last_mtxs = NULL;
    entry->last_flag = ENTRY_LAST_INS;
    entry->mtx_lock = NULL;
    entry->next_in_bucket = h->buckets[bucket];
    h->buckets[bucket] = entry;
    h->nelements++;
    if (entry->next_in_bucket)
      h->nconflicts++;
  }
  return entry;
}

static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
                                          kmp_depnode_list_t *list,
                                          kmp_depnode_t *node) {
  kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
      thread, sizeof(kmp_depnode_list_t));
#else
  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
      thread, sizeof(kmp_depnode_list_t));
#endif

  new_head->node = __kmp_node_ref(node);
  new_head->next = list;

  return new_head;
}

static inline void __kmp_track_dependence(kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  // do not use sink->dn.task as that is only filled after the dependencies
  // are already processed!
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
               task_source->td_ident->psource, sink->dn.id,
               task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT tracks dependences between tasks (a=source, b=sink) in which
     task a blocks the execution of b through the
     ompt_callback_task_dependence callback */
  if (ompt_enabled.ompt_callback_task_dependence) {
    kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

    ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
        &(task_source->ompt_task_info.task_data),
        &(task_sink->ompt_task_info.task_data));
  }
#endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
}
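
// The two __kmp_depnode_link_successor overloads below register the new
// task's depnode as a successor of either a list of predecessors
// (last_ins/last_mtxs) or a single predecessor (last_out). dn.task is
// re-checked under the depnode lock because a predecessor may complete
// concurrently and clear its task pointer, in which case no edge is needed.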

static inline kmp_int32
__kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread,
                             kmp_task_t *task, kmp_depnode_t *node,
                             kmp_depnode_list_t *plist) {
  if (!plist)
    return 0;
  kmp_int32 npredecessors = 0;
  // link node as successor of list elements
  for (kmp_depnode_list_t *p = plist; p; p = p->next) {
    kmp_depnode_t *dep = p->node;
    if (dep->dn.task) {
      KMP_ACQUIRE_DEPNODE(gtid, dep);
      if (dep->dn.task) {
        __kmp_track_dependence(dep, node, task);
        dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
        KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                      "%p\n",
                      gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
                      KMP_TASK_TO_TASKDATA(task)));
        npredecessors++;
      }
      KMP_RELEASE_DEPNODE(gtid, dep);
    }
  }
  return npredecessors;
}

static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid,
                                                     kmp_info_t *thread,
                                                     kmp_task_t *task,
                                                     kmp_depnode_t *source,
                                                     kmp_depnode_t *sink) {
  if (!sink)
    return 0;
  kmp_int32 npredecessors = 0;
  if (sink->dn.task) {
    // synchronously add source to sink's list of successors
    KMP_ACQUIRE_DEPNODE(gtid, sink);
    if (sink->dn.task) {
      __kmp_track_dependence(sink, source, task);
      sink->dn.successors = __kmp_add_node(thread, sink->dn.successors, source);
      KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                    "%p\n",
                    gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
                    KMP_TASK_TO_TASKDATA(task)));
      npredecessors++;
    }
    KMP_RELEASE_DEPNODE(gtid, sink);
  }
  return npredecessors;
}
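
// Per-address state kept in the dephash entry: last_out is the depnode of
// the most recent out/inout task; last_ins and last_mtxs are the lists of
// "in" and "mutexinoutset" tasks recorded since then; last_flag remembers
// which of the two lists was extended last. __kmp_process_deps links the new
// node to its predecessors according to this state, then updates the entry
// for subsequent tasks.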

template <bool filter>
static inline kmp_int32
__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t **hash,
                   bool dep_barrier, kmp_int32 ndeps,
                   kmp_depend_info_t *dep_list, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : "
                "dep_barrier = %d\n",
                filter, gtid, ndeps, dep_barrier));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;
  for (kmp_int32 i = 0; i < ndeps; i++) {
    const kmp_depend_info_t *dep = &dep_list[i];

    if (filter && dep->base_addr == 0)
      continue; // skip filtered entries

    kmp_dephash_entry_t *info =
        __kmp_dephash_find(thread, hash, dep->base_addr);
    kmp_depnode_t *last_out = info->last_out;
    kmp_depnode_list_t *last_ins = info->last_ins;
    kmp_depnode_list_t *last_mtxs = info->last_mtxs;

    if (dep->flags.out) { // out --> clean lists of ins and mtxs if any
      if (last_ins || last_mtxs) {
        if (info->last_flag == ENTRY_LAST_INS) { // INS were last
          npredecessors +=
              __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
        } else { // MTXS were last
          npredecessors += __kmp_depnode_link_successor(gtid, thread, task,
                                                        node, last_mtxs);
        }
        __kmp_depnode_list_free(thread, last_ins);
        __kmp_depnode_list_free(thread, last_mtxs);
        info->last_ins = NULL;
        info->last_mtxs = NULL;
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (dep_barrier) {
        // if this is a sync point in the serial sequence, then the previous
        // outputs are guaranteed to be completed after the execution of this
        // task so the previous output nodes can be cleared.
        info->last_out = NULL;
      } else {
        info->last_out = __kmp_node_ref(node);
      }
    } else if (dep->flags.in) {
      // in --> link node to either last_out or last_mtxs, clean earlier deps
      if (last_mtxs) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        if (info->last_flag == ENTRY_LAST_MTXS && last_ins) { // MTXS were last
          // clean old INS before creating new list
          __kmp_depnode_list_free(thread, last_ins);
          info->last_ins = NULL;
        }
      } else {
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      info->last_flag = ENTRY_LAST_INS;
      info->last_ins = __kmp_add_node(thread, info->last_ins, node);
    } else {
      KMP_DEBUG_ASSERT(dep->flags.mtx == 1);
      // mtx --> link node to either last_out or last_ins, clean earlier deps
      if (last_ins) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        if (info->last_flag == ENTRY_LAST_INS && last_mtxs) { // INS were last
          // clean old MTXS before creating new list
          __kmp_depnode_list_free(thread, last_mtxs);
          info->last_mtxs = NULL;
        }
      } else {
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      info->last_flag = ENTRY_LAST_MTXS;
      info->last_mtxs = __kmp_add_node(thread, info->last_mtxs, node);
      if (info->mtx_lock == NULL) {
        info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));
        __kmp_init_lock(info->mtx_lock);
      }
      KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS);
      kmp_int32 m;
      // Save lock in node's array
      for (m = 0; m < MAX_MTX_DEPS; ++m) {
        // sort pointers in decreasing order to avoid potential livelock
        if (node->dn.mtx_locks[m] < info->mtx_lock) {
          KMP_DEBUG_ASSERT(node->dn.mtx_locks[node->dn.mtx_num_locks] == NULL);
          for (int n = node->dn.mtx_num_locks; n > m; --n) {
            // shift right all lesser non-NULL pointers
            KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL);
            node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1];
          }
          node->dn.mtx_locks[m] = info->mtx_lock;
          break;
        }
      }
      KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS); // must break from loop
      node->dn.mtx_num_locks++;
    }
  }
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
                gtid, npredecessors));
  return npredecessors;
}

#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)
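
// __kmp_check_deps first merges duplicate addresses in dep_list. For
// example, depend(in : x) combined with depend(mutexinoutset : x) on the
// same task merges into an inout dependence on x (out is set, mtx is
// dropped), and the redundant entry is voided by zeroing its base_addr so
// that __kmp_process_deps<true> skips it.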

// returns true if the task has any outstanding dependence
static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
                             kmp_task_t *task, kmp_dephash_t **hash,
                             bool dep_barrier, kmp_int32 ndeps,
                             kmp_depend_info_t *dep_list,
                             kmp_int32 ndeps_noalias,
                             kmp_depend_info_t *noalias_dep_list) {
  int i, n_mtxs = 0;
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
#endif
  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : "
                "%d possibly aliased dependencies, %d non-aliased "
                "dependencies : dep_barrier=%d .\n",
                gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));

  // Filter deps in dep_list
  // TODO: Different algorithm for large dep_list ( > 10 ? )
  for (i = 0; i < ndeps; i++) {
    if (dep_list[i].base_addr != 0) {
      for (int j = i + 1; j < ndeps; j++) {
        if (dep_list[i].base_addr == dep_list[j].base_addr) {
          dep_list[i].flags.in |= dep_list[j].flags.in;
          dep_list[i].flags.out |=
              (dep_list[j].flags.out ||
               (dep_list[i].flags.in && dep_list[j].flags.mtx) ||
               (dep_list[i].flags.mtx && dep_list[j].flags.in));
          dep_list[i].flags.mtx =
              (dep_list[i].flags.mtx | dep_list[j].flags.mtx) &&
              !dep_list[i].flags.out;
          dep_list[j].base_addr = 0; // Mark j element as void
        }
      }
      if (dep_list[i].flags.mtx) {
        // limit number of mtx deps to MAX_MTX_DEPS per node
        if (n_mtxs < MAX_MTX_DEPS && task != NULL) {
          ++n_mtxs;
        } else {
          dep_list[i].flags.in = 1; // downgrade mutexinoutset to inout
          dep_list[i].flags.out = 1;
          dep_list[i].flags.mtx = 0;
        }
      }
    }
  }

  // doesn't need to be atomic as no other thread is going to be accessing this
  // node just yet.
  // npredecessors is set -1 to ensure that none of the releasing tasks queues
  // this task before we have finished processing all the dependencies
  node->dn.npredecessors = -1;

  // used to pack all npredecessors additions into a single atomic operation at
  // the end
  int npredecessors;

  npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps,
                                           dep_list, task);
  npredecessors += __kmp_process_deps<false>(
      gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);

  node->dn.task = task;
  KMP_MB();

  // Account for our initial fake value
  npredecessors++;

  // Update predecessors and obtain current value to check if there are still
  // any outstanding dependences (some tasks may have finished while we
  // processed the dependences)
  npredecessors =
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;

  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
                gtid, npredecessors, taskdata));

  // beyond this point the task could be queued (and executed) by a releasing
  // task...
  return npredecessors > 0;
}
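
// For reference, a compiler lowers a construct such as
//   #pragma omp task depend(in : x) depend(out : y)
// into a call of roughly this shape (a sketch only, not exact codegen; the
// names loc, gtid and new_task are placeholders):
//   kmp_depend_info_t deps[2] = {};
//   deps[0].base_addr = (kmp_intptr_t)&x;
//   deps[0].len = sizeof(x);
//   deps[0].flags.in = 1;
//   deps[1].base_addr = (kmp_intptr_t)&y;
//   deps[1].len = sizeof(y);
//   deps[1].flags.out = 1;
//   __kmpc_omp_task_with_deps(&loc, gtid, new_task, 2, deps, 0, NULL);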

/*!
@ingroup TASKING
@param loc_ref location of the original task directive
@param gtid Global Thread ID of encountering thread
@param new_task task thunk allocated by __kmp_omp_task_alloc() for the ''new
task''
@param ndeps Number of depend items with possible aliasing
@param dep_list List of depend items with possible aliasing
@param ndeps_noalias Number of depend items with no aliasing
@param noalias_dep_list List of depend items with no aliasing

@return Returns either TASK_CURRENT_NOT_QUEUED if the current task was not
suspended and queued, or TASK_CURRENT_QUEUED if it was suspended and queued

Schedule a non-thread-switchable task with dependences for execution
*/
kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_task_t *new_task, kmp_int32 ndeps,
                                    kmp_depend_info_t *dep_list,
                                    kmp_int32 ndeps_noalias,
                                    kmp_depend_info_t *noalias_dep_list) {

  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, new_taskdata));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_data_t task_data = ompt_data_none;
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          current_task ? &(current_task->ompt_task_info.task_data) : &task_data,
          current_task ? &(current_task->ompt_task_info.frame) : NULL,
          &(new_taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
          OMPT_LOAD_RETURN_ADDRESS(gtid));
    }

    new_taskdata->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    new_taskdata->ompt_task_info.ndeps = ndeps + ndeps_noalias;
    new_taskdata->ompt_task_info.deps =
        (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
            thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(new_taskdata->ompt_task_info.deps != NULL);

    for (i = 0; i < ndeps; i++) {
      new_taskdata->ompt_task_info.deps[i].variable.ptr =
          (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_dependence_type_in;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      new_taskdata->ompt_task_info.deps[ndeps + i].variable.ptr =
          (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_dependence_type_in;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        &(new_taskdata->ompt_task_info.task_data),
        new_taskdata->ompt_task_info.deps, new_taskdata->ompt_task_info.ndeps);
    /* We can now free the allocated memory for the dependencies */
    /* For OMPD we might want to delay the free until task_end */
    KMP_OMPT_DEPS_FREE(thread, new_taskdata->ompt_task_info.deps);
    new_taskdata->ompt_task_info.deps = NULL;
    new_taskdata->ompt_task_info.ndeps = 0;
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  bool serial = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  kmp_task_team_t *task_team = thread->th.th_task_team;
  serial = serial && !(task_team && task_team->tt.tt_found_proxy_tasks);

  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
    /* if no dependencies have been tracked yet, create the dependence hash */
    if (current_task->td_dephash == NULL)
      current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
#else
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
#endif

    __kmp_init_node(node);
    new_taskdata->td_depnode = node;

    if (__kmp_check_deps(gtid, node, new_task, &current_task->td_dephash,
                         NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                         noalias_dep_list)) {
      KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
                    "dependencies: "
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
                    gtid, loc_ref, new_taskdata));
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
      }
#endif
      return TASK_CURRENT_NOT_QUEUED;
    }
  } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies "
                  "for task (serialized) "
                  "loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
  }

  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
                "dependencies : "
                "loc=%p task=%p, transferring to __kmp_omp_task\n",
                gtid, loc_ref, new_taskdata));

  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif
  return ret;
}
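
// __kmpc_omp_wait_deps is emitted when the encountering thread itself must
// wait on a set of dependences before proceeding, e.g. (as a sketch) for an
// undeferred task such as
//   #pragma omp task if(0) depend(inout : x)
// where the thread first waits for the dependences to be fulfilled and then
// runs the task body inline.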

/*!
@ingroup TASKING
@param loc_ref location of the original task directive
@param gtid Global Thread ID of encountering thread
@param ndeps Number of depend items with possible aliasing
@param dep_list List of depend items with possible aliasing
@param ndeps_noalias Number of depend items with no aliasing
@param noalias_dep_list List of depend items with no aliasing

Blocks the current task until all specified dependencies have been fulfilled.
*/
void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
                          kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
                          kmp_depend_info_t *noalias_dep_list) {
  KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));

  if (ndeps == 0 && ndeps_noalias == 0) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to "
                  "wait upon : loc=%p\n",
                  gtid, loc_ref));
    return;
  }

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

  // We can return immediately as:
  // - dependences are not computed in serial teams (except with proxy tasks)
  // - if the dephash is not yet created it means we have nothing to wait for
  bool ignore = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  ignore = ignore && thread->th.th_task_team != NULL &&
           thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
  ignore = ignore || current_task->td_dephash == NULL;

  if (ignore) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
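
  // The dependence node can live on this thread's stack: __kmp_check_deps
  // registers it as a successor of every outstanding predecessor, and the
  // loop below keeps executing other tasks until npredecessors drops to
  // zero, i.e. until every predecessor has released it.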

  kmp_depnode_t node = {0};
  __kmp_init_node(&node);

  if (!__kmp_check_deps(gtid, &node, NULL, &current_task->td_dephash,
                        DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                        noalias_dep_list)) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
    return;
  }

  int thread_finished = FALSE;
  kmp_flag_32 flag((std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
  while (node.dn.npredecessors > 0) {
    flag.execute_tasks(thread, gtid, FALSE,
                       &thread_finished USE_ITT_BUILD_ARG(NULL),
                       __kmp_task_stealing_constraint);
  }

  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n",
                gtid, loc_ref));
}