/*
 * kmp_taskdeps.h -- inline helpers for task-dependence bookkeeping:
 * reference-counted dependence nodes, the per-task dependence hash,
 * and successor notification when a task with dependences completes.
 */


//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//


#ifndef KMP_TASKDEPS_H
#define KMP_TASKDEPS_H

#include "kmp.h"

// Guard a dependence node's successor list / task pointer with its
// embedded per-node lock (n)->dn.lock.
#define KMP_ACQUIRE_DEPNODE(gtid, n) __kmp_acquire_lock(&(n)->dn.lock, (gtid))
#define KMP_RELEASE_DEPNODE(gtid, n) __kmp_release_lock(&(n)->dn.lock, (gtid))

// Drop one reference on a dependence node; free it when the count reaches
// zero. Safe to call with node == NULL (no-op). KMP_ATOMIC_DEC returns the
// pre-decrement value, so n is the post-decrement count (the assert below
// relies on that).
static inline void __kmp_node_deref(kmp_info_t *thread, kmp_depnode_t *node) {
  if (!node)
    return;

  kmp_int32 n = KMP_ATOMIC_DEC(&node->dn.nrefs) - 1;
  if (n == 0) {
    // Last reference: no other thread may touch this node anymore.
    KMP_ASSERT(node->dn.nrefs == 0);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, node);
#else
    __kmp_thread_free(thread, node);
#endif
  }
}

// Free an entire singly-linked list of dependence-node references: drop one
// node reference per list cell, then free the cell itself.
static inline void __kmp_depnode_list_free(kmp_info_t *thread,
                                           kmp_depnode_list *list) {
  kmp_depnode_list *next;

  for (; list; list = next) {
    next = list->next; // save before the cell is freed

    __kmp_node_deref(thread, list->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, list);
#else
    __kmp_thread_free(thread, list);
#endif
  }
}

// Drain every bucket of a dependence hash table: for each entry, release its
// recorded last-in/last-mtx lists and last-out node, destroy the optional
// mutexinoutset lock, and free the entry. Leaves the table itself allocated
// with all buckets emptied.
static inline void __kmp_dephash_free_entries(kmp_info_t *thread,
                                              kmp_dephash_t *h) {
  for (size_t i = 0; i < h->size; i++) {
    if (h->buckets[i]) {
      kmp_dephash_entry_t *next;
      for (kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next) {
        next = entry->next_in_bucket; // save before entry is freed
        __kmp_depnode_list_free(thread, entry->last_ins);
        __kmp_depnode_list_free(thread, entry->last_mtxs);
        __kmp_node_deref(thread, entry->last_out);
        if (entry->mtx_lock) {
          __kmp_destroy_lock(entry->mtx_lock);
          __kmp_free(entry->mtx_lock);
        }
#if USE_FAST_MEMORY
        __kmp_fast_free(thread, entry);
#else
        __kmp_thread_free(thread, entry);
#endif
      }
      h->buckets[i] = 0;
    }
  }
}

// Free a dependence hash table: first all entries, then the table itself.
static inline void __kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h) {
  __kmp_dephash_free_entries(thread, h);
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, h);
#else
  __kmp_thread_free(thread, h);
#endif
}

// Called when 'task' finishes: release any mutexinoutset locks it held, free
// its dependence hash, then notify all successor nodes — decrementing each
// successor's predecessor count and scheduling any successor that becomes
// ready. Finally drops the task's own reference on its dependence node.
static inline void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task) {
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_depnode_t *node = task->td_depnode;

  // Check mutexinoutset dependencies, release locks
  if (UNLIKELY(node && (node->dn.mtx_num_locks < 0))) {
    // negative num_locks means all locks were acquired
    node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
    // Release in reverse order of acquisition.
    for (int i = node->dn.mtx_num_locks - 1; i >= 0; --i) {
      KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
      __kmp_release_lock(node->dn.mtx_locks[i], gtid);
    }
  }

  if (task->td_dephash) {
    KA_TRACE(
        40, ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n",
             gtid, task));
    __kmp_dephash_free(thread, task->td_dephash);
    task->td_dephash = NULL;
  }

  if (!node)
    return;

  KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n",
                gtid, task));

  KMP_ACQUIRE_DEPNODE(gtid, node);
  node->dn.task =
      NULL; // mark this task as finished, so no new dependencies are generated
  KMP_RELEASE_DEPNODE(gtid, node);

  kmp_depnode_list_t *next;
  kmp_taskdata_t *next_taskdata;
  for (kmp_depnode_list_t *p = node->dn.successors; p; p = next) {
    kmp_depnode_t *successor = p->node;
    // KMP_ATOMIC_DEC returns the pre-decrement value; npredecessors is the
    // remaining predecessor count after this task is accounted for.
    kmp_int32 npredecessors = KMP_ATOMIC_DEC(&successor->dn.npredecessors) - 1;

    // successor task can be NULL for wait_depends or because deps are still
    // being processed
    if (npredecessors == 0) {
      // Full barrier before reading dn.task, so the count update above is
      // ordered with the successor-state read.
      KMP_MB();
      if (successor->dn.task) {
        KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled "
                      "for execution.\n",
                      gtid, successor->dn.task, task));
        // If a regular task depending on a hidden helper task, when the
        // hidden helper task is done, the regular task should be executed by
        // its encountering team.
        if (KMP_HIDDEN_HELPER_THREAD(gtid)) {
          // Hidden helper thread can only execute hidden helper tasks
          KMP_ASSERT(task->td_flags.hidden_helper);
          next_taskdata = KMP_TASK_TO_TASKDATA(successor->dn.task);
          // If the dependent task is a regular task, we need to push to its
          // encountering thread's queue; otherwise, it can be pushed to its own
          // queue.
          if (!next_taskdata->td_flags.hidden_helper) {
            __kmp_omp_task(task->encountering_gtid, successor->dn.task, false);
          } else {
            __kmp_omp_task(gtid, successor->dn.task, false);
          }
        } else {
          __kmp_omp_task(gtid, successor->dn.task, false);
        }
      }
    }

    next = p->next; // save before the list cell is freed
    __kmp_node_deref(thread, p->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, p);
#else
    __kmp_thread_free(thread, p);
#endif
  }

  // Drop this task's own reference on its dependence node.
  __kmp_node_deref(thread, node);

  KA_TRACE(
      20,
      ("__kmp_release_deps: T#%d all successors of %p notified of completion\n",
       gtid, task));
}

#endif // KMP_TASKDEPS_H