/*
 * z_Linux_util.cpp -- platform specific routines.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_itt.h"
#include "kmp_lock.h"
#include "kmp_stats.h"
#include "kmp_str.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <alloca.h>
#endif
#include <math.h> // HUGE_VAL.
#if KMP_OS_LINUX
#include <semaphore.h>
#endif // KMP_OS_LINUX
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/times.h>
#include <unistd.h>

#if KMP_OS_LINUX
#include <sys/sysinfo.h>
#if KMP_USE_FUTEX
// We should really include <futex.h>, but that causes compatibility problems
// on different Linux* OS distributions that either require <pci/types.h> or
// break when it is included. Since all we need is the two macros below (which
// are part of the kernel ABI, so can't change), we just define the constants
// here and don't include <futex.h>.
#ifndef FUTEX_WAIT
#define FUTEX_WAIT 0
#endif
#ifndef FUTEX_WAKE
#define FUTEX_WAKE 1
#endif
#endif
#elif KMP_OS_DARWIN
#include <mach/mach.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <pthread_np.h>
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

#include <ctype.h>
#include <dirent.h>
#include <fcntl.h>

struct kmp_sys_timer {
  struct timespec start;
};

// Convert timespec to nanoseconds.
#define TS2NS(timespec)                                                        \
  (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)
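// Example: a timespec of {tv_sec = 2, tv_nsec = 500} converts to
// 2 * 1000000000 + 500 = 2000000500 nanoseconds.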

static struct kmp_sys_timer __kmp_sys_timer_data;

#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
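// Ticks of the internal time base per millisecond; the default of 1000000
// matches a nanosecond-resolution clock (1 ms = 1,000,000 ns).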

#ifdef DEBUG_SUSPEND
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#endif

#if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)

/* Affinity support */

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}

/* Determine if we can access affinity functionality on this version of
 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
 * __kmp_affin_mask_size to the appropriate value (0 means not capable). */
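// On Linux* OS the kernel is probed via sched_getaffinity, first with a
// cache-line-sized mask and then with doubling sizes up to a fixed limit;
// on FreeBSD the fixed sizeof(cpuset_t) is used instead.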
void __kmp_affinity_determine_capable(const char *env_var) {
  // Check and see if the OS supports thread affinity.

#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
#elif KMP_OS_FREEBSD
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#endif

  int verbose = __kmp_affinity.flags.verbose;
  int warnings = __kmp_affinity.flags.warnings;
  enum affinity_type type = __kmp_affinity.type;

#if KMP_OS_LINUX
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // If the syscall returns a suggestion for the size,
  // then we don't have to search for an appropriate size.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));

  if (gCode < 0 && errno != EINVAL) {
    // System call not supported
    if (verbose ||
        (warnings && (type != affinity_none) && (type != affinity_default) &&
         (type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  } else if (gCode > 0) {
    // The optimal situation: the OS returns the size of the buffer it expects.
    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }

  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %d returned %ld errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (verbose ||
            (warnings && (type != affinity_none) &&
             (type != affinity_default) && (type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#elif KMP_OS_FREEBSD
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#endif
  KMP_INTERNAL_FREE(buf);

  // Affinity is not supported
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (verbose || (warnings && (type != affinity_none) &&
                  (type != affinity_default) && (type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}

#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED

#if KMP_USE_FUTEX

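// Probe for futex support by issuing a harmless FUTEX_WAKE on a local word:
// it wakes at most one waiter (there is none), so it either succeeds or fails
// with ENOSYS if the futex syscall is unavailable.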
int __kmp_futex_determine_capable() {
  int loc = 0;
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %ld errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}

#endif // KMP_USE_FUTEX

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)
/* The IA-32 architecture only provides a 32-bit "add-exchange" instruction,
   so we implement these routines with compare_and_store instead. */
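// Each routine below emulates an atomic fetch-and-op: load the current value,
// compute the updated value, and retry the compare-and-store until no other
// thread has intervened, returning the value observed before the update.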

kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#if KMP_ARCH_X86
kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}
#endif /* KMP_ARCH_X86 */

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */

void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
} // __kmp_terminate_thread

/* Set thread stack info according to values returned by pthread_getattr_np().
   If values are unreasonable, assume call failed and use incremental stack
   refinement method instead. Returns TRUE if the stack parameters could be
   determined exactly, FALSE if incremental refinement is necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_HURD
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation, so
     pthread_attr_getstack may cause thread gtid aliasing. */
  if (!KMP_UBER_GTID(gtid)) {

    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif /* KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD  \
          || KMP_OS_HURD */
  /* Use incremental refinement starting from initial conservative estimate */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}

static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_OPENBSD || KMP_OS_HURD
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
  KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_set_init_mask(gtid, FALSE);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_OPENBSD
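  // Stagger this worker's stack by gtid * __kmp_stkoffset bytes with a dummy
  // alloca, so identically laid-out thread stacks do not map to the same
  // cache/page offsets and thrash each other.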
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
    (void)padding;
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}

#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex */

static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // This is a potential fix which allows applications with a real-time
  // scheduling policy to work. However, a decision about the fix has not been
  // made yet, so it is disabled by default.
  { // Was the program started with a real-time scheduling policy?
    int sched = sched_getscheduler(0);
    if (sched == SCHED_FIFO || sched == SCHED_RR) {
      // Yes, we are part of a real-time application. Try to increase the
      // priority of the monitor.
      struct sched_param param;
      int max_priority = sched_get_priority_max(sched);
      int rc;
      KMP_WARNING(RealTimeSchedNotSupported);
      sched_getparam(0, &param);
      if (param.sched_priority < max_priority) {
        param.sched_priority += 1;
        rc = sched_setscheduler(0, sched, &param);
        if (rc != 0) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                    err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
      } else {
        // We cannot abort here, because the number of CPUs may be enough for
        // all the threads, including the monitor thread, so the application
        // could potentially work...
        __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                  KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                  __kmp_msg_null);
      }
    }
    // AC: free the thread that waits for the monitor to start
    TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
  }
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates.  */

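  // Convert the monitor wakeup rate (__kmp_monitor_wakeups checks per second)
  // into a timespec interval between wakeups.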
  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /*  This thread monitors the state of the system */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // AC: the monitor should not fall asleep if g_done has been set
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates.  */
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads  */
    /* the value of t_abort is the signal we caught */

    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    /* TODO this is not valid for sibling threads!!
     * the uber master might not be 0 anymore.. */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    __kmp_cleanup();

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR

void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // sets up worker thread stats
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats is used to transfer thread-specific stats-pointer to
  // __kmp_launch_worker. So when thread is created (goes into
  // __kmp_launch_worker) it will set its thread local pointer to
  // th->th.th_stats
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, __kmp_stats_thread_ptr is set in __kmp_register_root(),
    // so set the th->th.th_stats field to it.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates.  */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now.
     The multiple of 2 is there because on some machines, requesting an unusual
     stacksize causes the thread to have an offset before the dummy alloca()
     takes place to create the offset.  Since we want the user to have a
     sufficient stacksize AND support a stack offset, we alloca() twice the
     offset so that the upcoming alloca() does not eliminate any premade offset,
     and also gives the user the stack space they requested for all threads */
  stack_size += gtid * __kmp_stkoffset * 2;

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) { // ??? Why do we check handle??
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker

#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need monitor thread in case of MAX_BLOCKTIME
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes, "
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

/* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes\n",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, // should this be fatal?  BB
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait until the monitor thread has really started and set its *priority*.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR

void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
} // __kmp_exit_thread

#if KMP_USE_MONITOR
void __kmp_resume_monitor();

extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  /* First, check to see whether the monitor thread exists to wake it up. This
     avoids a performance problem when the monitor is asleep during a
     blocktime-sized interval */
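  // pthread_kill with signal 0 sends no signal; it only checks whether the
  // target thread still exists (ESRCH means it is already gone).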

  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates.  */
}
#else
// Empty symbol to export (see exports_so.txt) when
// monitor thread feature is disabled
extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
  (void)th;
}
#endif // KMP_USE_MONITOR

void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates.  */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#else
  (void)status; // unused variable
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates.  */
}

#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  //  Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      __kmp_unregister_library(); // cleanup shared memory
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler

static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep user's handler if one previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save initial/system signal handlers to see if user handlers installed.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler

void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    // If !parallel_init, we do not install handlers, just save the original
    // handlers. Let us do it even if __kmp_handle_signals is 0.
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals

#endif // KMP_HANDLE_SIGNALS

void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}

static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}

/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states.  Don't worry about freeing memory
   allocated by parent, just abandon it to be safe. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
  /* TODO make sure this is done right for nested/sibling */
  // ATT:  Memory leaks are here? TODO: Check it and fix.
  /* KMP_ASSERT( 0 ); */

  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD
  // reset the affinity in the child to the initial thread
  // affinity in the parent
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
  for (kmp_affinity_t *affinity : __kmp_affinities)
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  __kmp_affin_fullMask = nullptr;
  __kmp_affin_origMask = nullptr;
#endif // KMP_AFFINITY_SUPPORTED

#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  __kmp_all_nth = 0;
  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {

    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));

      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* reset statically initialized locks */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

#if USE_ITT_BUILD
  __kmp_itt_reset(); // reset ITT's global state
#endif /* USE_ITT_BUILD */

  {
    // Child processes often get terminated without any use of OpenMP. That
    // might cause the mapped shared memory file to be left unattended. Thus
    // we postpone library registration till middle initialization in the
    // child process.
    __kmp_need_register_serial = FALSE;
    __kmp_serial_initialize();
  }

  /* This is necessary to make sure no stale data is left around */
  /* AC: customers complain that we use unsafe routines in the atfork
     handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
     in dynamic_link when checking the presence of the shared tbbmalloc
     library. The suggestion is to make the library initialization lazier,
     similar to what is done for __kmpc_begin(). */
  // TODO: synchronize all static initializations with regular library
  //       startup; look at kmp_global.cpp and etc.
  //__kmp_internal_begin ();
}

void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}

void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}

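// th_suspend_init_count encodes the state of this thread's suspend cv/mutex
// pair: __kmp_fork_count + 1 means initialized in this process instance, -1
// means initialization is in progress, and the compare-and-store below elects
// a single initializing thread while the others spin-wait.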
void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
  // Return if already initialized
  if (old_value == new_value)
    return;
  // Wait, then return if being initialized
  if (old_value == -1 || !__kmp_atomic_compare_store(
                             &th->th.th_suspend_init_count, old_value, -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do initializations
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
  }
}

void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* this means we have initialized the suspension pthread objects for this
       thread in this instance of the process */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}

// return true if lock obtained, false otherwise
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

/* This routine puts the calling thread to sleep after setting the
   sleep bit for the indicated flag variable to true. */
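/* The sleep bit is set and re-checked while holding th_suspend_mx, and a
   resuming thread is expected to clear it under the same mutex before
   signaling, so the wait loop below cannot miss a wakeup; it only has to
   tolerate spurious returns from pthread_cond_wait. */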
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  /* TODO: shouldn't this use release semantics to ensure that
     __kmp_suspend_initialize_thread gets called first? */
  old_spin = flag->set_sleeping();
  TCW_PTR(th->th.th_sleep_loc, (void *)flag);
  th->th.th_sleep_loc_type = flag->get_type();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    __kmp_unlock_suspend_mx(th);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin) || flag->done_check()) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may
       "with low probability" return when the condition variable has
       not been signaled or broadcast */
    int deactivated = FALSE;

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of the
      // loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

      KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
      KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000000; // milliseconds to nanoseconds

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }

      KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());

      if (!flag->is_sleeping() &&
          ((status == EINTR) || (status == ETIMEDOUT))) {
        // if interrupt or timeout, and thread is no longer sleeping, we need to
        // make sure sleep_loc gets reset; however, this shouldn't be needed if
        // we woke up with resume
        flag->unset_sleeping();
        TCW_PTR(th->th.th_sleep_loc, NULL);
        th->th.th_sleep_loc_type = flag_unset;
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
          TCW_PTR(th->th.th_sleep_loc, NULL);
          th->th.th_sleep_loc_type = flag_unset;
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

1527     // Mark the thread as active again (if it was previously marked inactive)
1528     if (deactivated) {
1529       th->th.th_active = TRUE;
1530       if (TCR_4(th->th.th_in_pool)) {
1531         KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
1532         th->th.th_active_in_pool = TRUE;
1533       }
1534     }
1535   }
1536   // th_sleep_loc may have been set before we entered the wait loop above,
1537   // so always reset it here.
1538   TCW_PTR(th->th.th_sleep_loc, NULL);
1539   th->th.th_sleep_loc_type = flag_unset;
1540 
1541   KMP_DEBUG_ASSERT(!flag->is_sleeping());
1542   KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
1543 #ifdef DEBUG_SUSPEND
1544   {
1545     char buffer[128];
1546     __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1547     __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
1548                  buffer);
1549   }
1550 #endif
1551 
1552   __kmp_unlock_suspend_mx(th);
1553   KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
1554 }
1555 
1556 template <bool C, bool S>
1557 void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
1558   __kmp_suspend_template(th_gtid, flag);
1559 }
1560 template <bool C, bool S>
1561 void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
1562   __kmp_suspend_template(th_gtid, flag);
1563 }
1564 template <bool C, bool S>
1565 void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
1566   __kmp_suspend_template(th_gtid, flag);
1567 }
1568 void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
1569   __kmp_suspend_template(th_gtid, flag);
1570 }
1571 
1572 template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
1573 template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
1574 template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
1575 template void
1576 __kmp_atomic_suspend_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
1577 template void
1578 __kmp_atomic_suspend_64<true, false>(int, kmp_atomic_flag_64<true, false> *);
1579 
1580 /* This routine wakes the thread specified by target_gtid after clearing
1581    the sleep bit indicated by the flag argument. The target thread must
1582    already have called __kmp_suspend_template(). */
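/* Ordering sketch (informal): both sides touch the sleep bit only while
   holding the target thread's suspend mutex, so the wakeup cannot be lost:

     suspender: lock(mx); set bit; while (bit set) cond_wait(cv, mx); unlock(mx);
     resumer:   lock(mx); clear bit; cond_signal(cv); unlock(mx);

   If the resumer runs first, the suspender sees the bit already clear and
   never blocks; if the suspender blocks first, the signal reaches it while
   it waits on cv. */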
1583 template <class C>
1584 static inline void __kmp_resume_template(int target_gtid, C *flag) {
1585   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1586   kmp_info_t *th = __kmp_threads[target_gtid];
1587   int status;
1588 
1589 #ifdef KMP_DEBUG
1590   int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1591 #endif
1592 
1593   KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
1594                 gtid, target_gtid));
1595   KMP_DEBUG_ASSERT(gtid != target_gtid);
1596 
1597   __kmp_suspend_initialize_thread(th);
1598 
1599   __kmp_lock_suspend_mx(th);
1600 
1601   if (!flag || flag != th->th.th_sleep_loc) {
1602     // coming from __kmp_null_resume_wrapper, or thread is now sleeping on a
1603     // different location; wake up at new location
1604     flag = (C *)CCAST(void *, th->th.th_sleep_loc);
1605   }
1606 
1607   // First, check if the flag is null or its type has changed. If so, someone
1608   // else woke it up.
1609   if (!flag) { // Thread doesn't appear to be sleeping on anything
1610     KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1611                  "awake: flag(%p)\n",
1612                  gtid, target_gtid, (void *)NULL));
1613     __kmp_unlock_suspend_mx(th);
1614     return;
1615   } else if (flag->get_type() != th->th.th_sleep_loc_type) {
1616     // Flag type does not appear to match this function template; possibly the
1617     // thread is sleeping on something else. Try null resume again.
1618     KF_TRACE(
1619         5,
1620         ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
1621          "spin(%p) type=%d ptr_type=%d\n",
1622          gtid, target_gtid, flag, flag->get(), flag->get_type(),
1623          th->th.th_sleep_loc_type));
1624     __kmp_unlock_suspend_mx(th);
1625     __kmp_null_resume_wrapper(th);
1626     return;
1627   } else { // if multiple threads are sleeping, flag should be internally
1628     // referring to a specific thread here
1629     if (!flag->is_sleeping()) {
1630       KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1631                    "awake: flag(%p): %u\n",
1632                    gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1633       __kmp_unlock_suspend_mx(th);
1634       return;
1635     }
1636   }
1637   KMP_DEBUG_ASSERT(flag);
1638   flag->unset_sleeping();
1639   TCW_PTR(th->th.th_sleep_loc, NULL);
1640   th->th.th_sleep_loc_type = flag_unset;
1641 
1642   KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
1643                "sleep bit for flag's loc(%p): %u\n",
1644                gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1645 
1646 #ifdef DEBUG_SUSPEND
1647   {
1648     char buffer[128];
1649     __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1650     __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
1651                  target_gtid, buffer);
1652   }
1653 #endif
1654   status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
1655   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1656   __kmp_unlock_suspend_mx(th);
1657   KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
1658                 " for T#%d\n",
1659                 gtid, target_gtid));
1660 }
1661 
1662 template <bool C, bool S>
1663 void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
1664   __kmp_resume_template(target_gtid, flag);
1665 }
1666 template <bool C, bool S>
1667 void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
1668   __kmp_resume_template(target_gtid, flag);
1669 }
1670 template <bool C, bool S>
1671 void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
1672   __kmp_resume_template(target_gtid, flag);
1673 }
1674 void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
1675   __kmp_resume_template(target_gtid, flag);
1676 }
1677 
1678 template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
1679 template void __kmp_resume_32<false, false>(int, kmp_flag_32<false, false> *);
1680 template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
1681 template void
1682 __kmp_atomic_resume_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
1683 
1684 #if KMP_USE_MONITOR
1685 void __kmp_resume_monitor() {
1686   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1687   int status;
1688 #ifdef KMP_DEBUG
1689   int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1690   KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
1691                 KMP_GTID_MONITOR));
1692   KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
1693 #endif
1694   status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
1695   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
1696 #ifdef DEBUG_SUSPEND
1697   {
1698     char buffer[128];
1699     __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
1700     __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
1701                  KMP_GTID_MONITOR, buffer);
1702   }
1703 #endif
1704   status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
1705   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1706   status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
1707   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
1708   KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
1709                 " for T#%d\n",
1710                 gtid, KMP_GTID_MONITOR));
1711 }
1712 #endif // KMP_USE_MONITOR
1713 
1714 void __kmp_yield() { sched_yield(); }
1715 
1716 void __kmp_gtid_set_specific(int gtid) {
1717   if (__kmp_init_gtid) {
1718     int status;
1719     status = pthread_setspecific(__kmp_gtid_threadprivate_key,
1720                                  (void *)(intptr_t)(gtid + 1));
1721     KMP_CHECK_SYSFAIL("pthread_setspecific", status);
1722   } else {
1723     KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
1724   }
1725 }
1726 
1727 int __kmp_gtid_get_specific() {
1728   int gtid;
1729   if (!__kmp_init_gtid) {
1730     KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
1731                   "KMP_GTID_SHUTDOWN\n"));
1732     return KMP_GTID_SHUTDOWN;
1733   }
1734   gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
1735   if (gtid == 0) {
1736     gtid = KMP_GTID_DNE;
1737   } else {
1738     gtid--;
1739   }
1740   KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
1741                 __kmp_gtid_threadprivate_key, gtid));
1742   return gtid;
1743 }
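/* The +1 on store / -1 on load distinguishes gtid 0 from "key never set",
   because pthread_getspecific() returns NULL (i.e. 0) for a key that was
   never written in this thread. A stand-alone sketch of the round trip
   (hypothetical key, error handling elided):

     pthread_key_t key;
     pthread_key_create(&key, NULL);
     pthread_setspecific(key, (void *)(intptr_t)(0 + 1)); // store gtid 0 as 1
     int g = (int)(intptr_t)pthread_getspecific(key) - 1; // reads back 0
*/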
1744 
1745 double __kmp_read_cpu_time(void) {
1746   struct tms buffer;
1747 
1748   times(&buffer);
1749 
1750   // times() reports clock ticks of sysconf(_SC_CLK_TCK), not CLOCKS_PER_SEC.
1751   return (double)(buffer.tms_utime + buffer.tms_cutime) /
1752          (double)sysconf(_SC_CLK_TCK);
1753 }
1754 
1755 int __kmp_read_system_info(struct kmp_sys_info *info) {
1756   int status;
1757   struct rusage r_usage;
1758 
1759   memset(info, 0, sizeof(*info));
1760 
1761   status = getrusage(RUSAGE_SELF, &r_usage);
1762   KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);
1763 
1764   // The maximum resident set size utilized (in kilobytes)
1765   info->maxrss = r_usage.ru_maxrss;
1766   // The number of page faults serviced without any I/O
1767   info->minflt = r_usage.ru_minflt;
1768   // The number of page faults serviced that required I/O
1769   info->majflt = r_usage.ru_majflt;
1770   // The number of times a process was "swapped" out of memory
1771   info->nswap = r_usage.ru_nswap;
1772   // The number of times the file system had to perform input
1773   info->inblock = r_usage.ru_inblock;
1774   // The number of times the file system had to perform output
1775   info->oublock = r_usage.ru_oublock;
1776   // The number of times a context switch was voluntarily
1777   info->nvcsw = r_usage.ru_nvcsw;
1778   // The number of times a context switch was forced
1779   info->nivcsw = r_usage.ru_nivcsw;
1780 
1781   return (status != 0);
1782 }
1783 
1784 void __kmp_read_system_time(double *delta) {
1785   double t_ns;
1786   struct timeval tval;
1787   struct timespec stop;
1788   int status;
1789 
1790   status = gettimeofday(&tval, NULL);
1791   KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1792   TIMEVAL_TO_TIMESPEC(&tval, &stop);
1793   t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
1794   *delta = (t_ns * 1e-9);
1795 }
1796 
1797 void __kmp_clear_system_time(void) {
1798   struct timeval tval;
1799   int status;
1800   status = gettimeofday(&tval, NULL);
1801   KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1802   TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
1803 }
1804 
1805 static int __kmp_get_xproc(void) {
1806 
1807   int r = 0;
1808 
1809 #if KMP_OS_LINUX
1810 
1811   __kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));
1812 
1813 #elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
1814     KMP_OS_HURD
1815 
1816   __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
1817 
1818 #elif KMP_OS_DARWIN
1819 
1820   // Bug C77011 High "OpenMP Threads and number of active cores".
1821 
1822   // Find the number of available CPUs.
1823   kern_return_t rc;
1824   host_basic_info_data_t info;
1825   mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
1826   rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
1827   if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
1828     // Cannot use KA_TRACE() here because this code works before trace support
1829     // is initialized.
1830     r = info.avail_cpus;
1831   } else {
1832     KMP_WARNING(CantGetNumAvailCPU);
1833     KMP_INFORM(AssumedNumCPU);
1834   }
1835 
1836 #else
1837 
1838 #error "Unknown or unsupported OS."
1839 
1840 #endif
1841 
1842   return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
1843 
1844 } // __kmp_get_xproc
1845 
1846 int __kmp_read_from_file(char const *path, char const *format, ...) {
1847   int result;
1848   va_list args;
1849 
1850   va_start(args, format);
1851   FILE *f = fopen(path, "rb");
1852   if (f == NULL) {
1853     va_end(args);
1854     return 0;
1855   }
1856   result = vfscanf(f, format, args);
1857   fclose(f);
1858   va_end(args);
1859 
1860   return result;
1861 }
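/* Hypothetical usage sketch: pull a single integer out of a procfs file.

     int nmax = 0;
     if (__kmp_read_from_file("/proc/sys/kernel/threads-max", "%d", &nmax) == 1) {
       // nmax now holds the system-wide thread limit
     }

   The result is whatever vfscanf() returned (the number of converted
   fields), or 0 if the file could not be opened. */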
1862 
1863 void __kmp_runtime_initialize(void) {
1864   int status;
1865   pthread_mutexattr_t mutex_attr;
1866   pthread_condattr_t cond_attr;
1867 
1868   if (__kmp_init_runtime) {
1869     return;
1870   }
1871 
1872 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
1873   if (!__kmp_cpuinfo.initialized) {
1874     __kmp_query_cpuid(&__kmp_cpuinfo);
1875   }
1876 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1877 
1878   __kmp_xproc = __kmp_get_xproc();
1879 
1880 #if !KMP_32_BIT_ARCH
1881   struct rlimit rlim;
1882   // read stack size of calling thread, save it as default for worker threads;
1883   // this should be done before reading environment variables
1884   status = getrlimit(RLIMIT_STACK, &rlim);
1885   if (status == 0) { // success?
1886     __kmp_stksize = rlim.rlim_cur;
1887     __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
1888   }
1889 #endif /* KMP_32_BIT_ARCH */
1890 
1891   if (sysconf(_SC_THREADS)) {
1892 
1893     /* Query the maximum number of threads */
1894     __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
1895     if (__kmp_sys_max_nth == -1) {
1896       /* Unlimited threads for NPTL */
1897       __kmp_sys_max_nth = INT_MAX;
1898     } else if (__kmp_sys_max_nth <= 1) {
1899       /* Can't tell, just use PTHREAD_THREADS_MAX */
1900       __kmp_sys_max_nth = KMP_MAX_NTH;
1901     }
1902 
1903     /* Query the minimum stack size */
1904     __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
1905     if (__kmp_sys_min_stksize <= 1) {
1906       __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
1907     }
1908   }
1909 
1910   /* Set up minimum number of threads to switch to TLS gtid */
1911   __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;
1912 
1913   status = pthread_key_create(&__kmp_gtid_threadprivate_key,
1914                               __kmp_internal_end_dest);
1915   KMP_CHECK_SYSFAIL("pthread_key_create", status);
1916   status = pthread_mutexattr_init(&mutex_attr);
1917   KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
1918   status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
1919   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
1920   status = pthread_mutexattr_destroy(&mutex_attr);
1921   KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
1922   status = pthread_condattr_init(&cond_attr);
1923   KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
1924   status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
1925   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
1926   status = pthread_condattr_destroy(&cond_attr);
1927   KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
1928 #if USE_ITT_BUILD
1929   __kmp_itt_initialize();
1930 #endif /* USE_ITT_BUILD */
1931 
1932   __kmp_init_runtime = TRUE;
1933 }
1934 
1935 void __kmp_runtime_destroy(void) {
1936   int status;
1937 
1938   if (!__kmp_init_runtime) {
1939     return; // Nothing to do.
1940   }
1941 
1942 #if USE_ITT_BUILD
1943   __kmp_itt_destroy();
1944 #endif /* USE_ITT_BUILD */
1945 
1946   status = pthread_key_delete(__kmp_gtid_threadprivate_key);
1947   KMP_CHECK_SYSFAIL("pthread_key_delete", status);
1948 
1949   status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
1950   if (status != 0 && status != EBUSY) {
1951     KMP_SYSFAIL("pthread_mutex_destroy", status);
1952   }
1953   status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
1954   if (status != 0 && status != EBUSY) {
1955     KMP_SYSFAIL("pthread_cond_destroy", status);
1956   }
1957 #if KMP_AFFINITY_SUPPORTED
1958   __kmp_affinity_uninitialize();
1959 #endif
1960 
1961   __kmp_init_runtime = FALSE;
1962 }
1963 
1964 /* Put the thread to sleep for a time period */
1965 /* NOTE: not currently used anywhere */
1966 void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }
1967 
1968 /* Calculate the elapsed wall clock time for the user */
1969 void __kmp_elapsed(double *t) {
1970   int status;
1971 #ifdef FIX_SGI_CLOCK
1972   struct timespec ts;
1973 
1974   status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
1975   KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
1976   *t =
1977       (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
1978 #else
1979   struct timeval tv;
1980 
1981   status = gettimeofday(&tv, NULL);
1982   KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1983   *t =
1984       (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
1985 #endif
1986 }
1987 
1988 /* Calculate the elapsed wall clock tick for the user */
1989 void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
1990 
1991 /* Return the current time stamp in nsec */
1992 kmp_uint64 __kmp_now_nsec() {
1993   struct timeval t;
1994   gettimeofday(&t, NULL);
1995   kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
1996                     (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
1997   return nsec;
1998 }
1999 
2000 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2001 /* Measure clock ticks per millisecond */
2002 void __kmp_initialize_system_tick() {
2003   kmp_uint64 now, nsec2, diff;
2004   kmp_uint64 delay = 100000; // 50~100 usec on most machines.
2005   kmp_uint64 nsec = __kmp_now_nsec();
2006   kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
2007   while ((now = __kmp_hardware_timestamp()) < goal)
2008     ;
2009   nsec2 = __kmp_now_nsec();
2010   diff = nsec2 - nsec;
2011   if (diff > 0) {
2012     kmp_uint64 tpms = ((kmp_uint64)1e6 * (delay + (now - goal)) / diff);
2013     if (tpms > 0)
2014       __kmp_ticks_per_msec = tpms;
2015   }
2016 }
2017 #endif
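/* Worked example with illustrative numbers: on a 2 GHz invariant TSC,
   spinning for delay = 100000 ticks takes ~50 usec, so diff is ~50000 ns.
   With now - goal ~= 0 this yields
     tpms = 1e6 * 100000 / 50000 = 2000000 ticks per msec,
   i.e. 2 GHz, as expected. */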
2018 
2019 /* Determine whether the given address is mapped into the current address
2020    space. */
2021 
2022 int __kmp_is_address_mapped(void *addr) {
2023 
2024   int found = 0;
2025   int rc;
2026 
2027 #if KMP_OS_LINUX || KMP_OS_HURD
2028 
2029   /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
2030      address ranges mapped into the address space. */
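/* A typical maps line looks like (illustrative):

     7f0e8a200000-7f0e8a221000 rw-p 00000000 00:00 0

   The fscanf() pattern used below, "%p-%p %4s %*[^\n]\n", extracts the two
   boundary addresses and the four-character permission field ("rw-p" here)
   and skips the rest of the line. */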
2031 
2032   char *name = __kmp_str_format("/proc/%d/maps", getpid());
2033   FILE *file = NULL;
2034 
2035   file = fopen(name, "r");
2036   KMP_ASSERT(file != NULL);
2037 
2038   for (;;) {
2039 
2040     void *beginning = NULL;
2041     void *ending = NULL;
2042     char perms[5];
2043 
2044     rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
2045     if (rc == EOF) {
2046       break;
2047     }
2048     KMP_ASSERT(rc == 3 &&
2049                KMP_STRLEN(perms) == 4); // Make sure all fields are read.
2050 
2051     // Ending address is not included in the region, but beginning is.
2052     if ((addr >= beginning) && (addr < ending)) {
2053       perms[2] = 0; // The 3rd and 4th characters do not matter.
2054       if (strcmp(perms, "rw") == 0) {
2055         // Memory we are looking for should be readable and writable.
2056         found = 1;
2057       }
2058       break;
2059     }
2060   }
2061 
2062   // Free resources.
2063   fclose(file);
2064   KMP_INTERNAL_FREE(name);
2065 #elif KMP_OS_FREEBSD
2066   char *buf;
2067   size_t lstsz;
2068   int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
2069   rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
2070   if (rc < 0)
2071     return 0;
2072   // The first sysctl call reported the size needed for the map list; pad
2073   // it by one third in case more entries appear before the second call.
2074   lstsz = lstsz * 4 / 3;
2075   buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
2076   rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
2077   if (rc < 0) {
2078     kmpc_free(buf);
2079     return 0;
2080   }
2081 
2082   char *lw = buf;
2083   char *up = buf + lstsz;
2084 
2085   while (lw < up) {
2086     struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
2087     size_t cursz = cur->kve_structsize;
2088     if (cursz == 0)
2089       break;
2090     void *start = reinterpret_cast<void *>(cur->kve_start);
2091     void *end = reinterpret_cast<void *>(cur->kve_end);
2092     // Readable/Writable addresses within current map entry
2093     if ((addr >= start) && (addr < end)) {
2094       if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
2095           (cur->kve_protection & KVME_PROT_WRITE) != 0) {
2096         found = 1;
2097         break;
2098       }
2099     }
2100     lw += cursz;
2101   }
2102   kmpc_free(buf);
2103 
2104 #elif KMP_OS_DARWIN
2105 
2106   /* On OS X*, the /proc pseudo filesystem is not available. Try to read
2107      memory using the vm interface instead. */
2108 
2109   int buffer;
2110   vm_size_t count;
2111   rc = vm_read_overwrite(
2112       mach_task_self(), // Task to read memory of.
2113       (vm_address_t)(addr), // Address to read from.
2114       1, // Number of bytes to be read.
2115       (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
2116       &count // Address of var to save number of read bytes in.
2117   );
2118   if (rc == 0) {
2119     // Memory successfully read.
2120     found = 1;
2121   }
2122 
2123 #elif KMP_OS_NETBSD
2124 
2125   int mib[5];
2126   mib[0] = CTL_VM;
2127   mib[1] = VM_PROC;
2128   mib[2] = VM_PROC_MAP;
2129   mib[3] = getpid();
2130   mib[4] = sizeof(struct kinfo_vmentry);
2131 
2132   size_t size;
2133   rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
2134   KMP_ASSERT(!rc);
2135   KMP_ASSERT(size);
2136 
2137   size = size * 4 / 3;
2138   struct kinfo_vmentry *kiv = (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
2139   KMP_ASSERT(kiv);
2140 
2141   rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
2142   KMP_ASSERT(!rc);
2143   KMP_ASSERT(size);
2144 
2145   // Note: size is in bytes; entry end addresses are exclusive.
2146   for (size_t i = 0; i < size / sizeof(struct kinfo_vmentry); i++) {
2147     if (kiv[i].kve_start <= (uint64_t)addr && (uint64_t)addr < kiv[i].kve_end) {
2148       found = 1;
2149       break;
2150     }
2151   }
2152   KMP_INTERNAL_FREE(kiv);
2153 #elif KMP_OS_OPENBSD
2154 
2155   int mib[3];
2156   mib[0] = CTL_KERN;
2157   mib[1] = KERN_PROC_VMMAP;
2158   mib[2] = getpid();
2159 
2160   size_t size;
2161   uint64_t end;
2162   rc = sysctl(mib, 3, NULL, &size, NULL, 0);
2163   KMP_ASSERT(!rc);
2164   KMP_ASSERT(size);
2165   end = size;
2166 
2167   struct kinfo_vmentry kiv = {.kve_start = 0};
2168 
2169   while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
2170     KMP_ASSERT(size);
2171     if (kiv.kve_end == end)
2172       break;
2173 
2174     if (kiv.kve_start <= (uint64_t)addr && (uint64_t)addr < kiv.kve_end) {
2175       found = 1;
2176       break;
2177     }
2178     kiv.kve_start += 1;
2179   }
2180 #elif KMP_OS_DRAGONFLY
2181 
2182   // FIXME(DragonFly): Implement this
2183   found = 1;
2184 
2185 #else
2186 
2187 #error "Unknown or unsupported OS"
2188 
2189 #endif
2190 
2191   return found;
2192 
2193 } // __kmp_is_address_mapped
2194 
2195 #ifdef USE_LOAD_BALANCE
2196 
2197 #if KMP_OS_DARWIN || KMP_OS_NETBSD
2198 
2199 // The function returns the system load average, truncated to an int.
2200 // The averaging interval depends on the __kmp_load_balance_interval
2201 // variable (default is 60 sec; other meaningful values are 300 sec
2202 // and 900 sec).
2203 // It returns -1 in case of error.
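// For example, with __kmp_load_balance_interval = 300 and all three samples
// available (res == 3), averages[1] (the 5-minute average) is the one used.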
2204 int __kmp_get_load_balance(int max) {
2205   double averages[3];
2206   int ret_avg = 0;
2207 
2208   int res = getloadavg(averages, 3);
2209 
2210   // Check __kmp_load_balance_interval to determine which average to use.
2211   // getloadavg() may return fewer samples than requested, i.e. fewer
2212   // than 3.
2213   if (__kmp_load_balance_interval < 180 && (res >= 1)) {
2214     ret_avg = (int)averages[0]; // 1 min
2215   } else if ((__kmp_load_balance_interval >= 180 &&
2216               __kmp_load_balance_interval < 600) &&
2217              (res >= 2)) {
2218     ret_avg = (int)averages[1]; // 5 min
2219   } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
2220     ret_avg = (int)averages[2]; // 15 min
2221   } else { // Error occurred
2222     return -1;
2223   }
2224 
2225   return ret_avg;
2226 }
2227 
2228 #else // Linux* OS
2229 
2230 // The function returns the number of running (not sleeping) threads, or -1
2231 // in case of error. An error is reported if the Linux* OS kernel is too old
2232 // (no "/proc" support). Counting stops once max running threads have been
2233 // encountered.
2234 int __kmp_get_load_balance(int max) {
2235   static int permanent_error = 0;
2236   static int glb_running_threads = 0; // Saved count of the running threads for
2237   // the thread balance algorithm
2238   static double glb_call_time = 0; /* Thread balance algorithm call time */
2239 
2240   int running_threads = 0; // Number of running threads in the system.
2241 
2242   DIR *proc_dir = NULL; // Handle of "/proc/" directory.
2243   struct dirent *proc_entry = NULL;
2244 
2245   kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
2246   DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
2247   struct dirent *task_entry = NULL;
2248   int task_path_fixed_len;
2249 
2250   kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
2251   int stat_file = -1;
2252   int stat_path_fixed_len;
2253 
2254 #ifdef KMP_DEBUG
2255   int total_processes = 0; // Total number of processes in system.
2256 #endif
2257 
2258   double call_time = 0.0;
2259 
2260   __kmp_str_buf_init(&task_path);
2261   __kmp_str_buf_init(&stat_path);
2262 
2263   __kmp_elapsed(&call_time);
2264 
2265   if (glb_call_time &&
2266       (call_time - glb_call_time < __kmp_load_balance_interval)) {
2267     running_threads = glb_running_threads;
2268     goto finish;
2269   }
2270 
2271   glb_call_time = call_time;
2272 
2273   // Do not spend time on scanning "/proc/" if we have a permanent error.
2274   if (permanent_error) {
2275     running_threads = -1;
2276     goto finish;
2277   }
2278 
2279   if (max <= 0) {
2280     max = INT_MAX;
2281   }
2282 
2283   // Open "/proc/" directory.
2284   proc_dir = opendir("/proc");
2285   if (proc_dir == NULL) {
2286     // Cannot open "/proc/". Probably the kernel does not support it. Return
2287     // an error now and in subsequent calls.
2288     running_threads = -1;
2289     permanent_error = 1;
2290     goto finish;
2291   }
2292 
2293   // Initialize fixed part of task_path. This part will not change.
2294   __kmp_str_buf_cat(&task_path, "/proc/", 6);
2295   task_path_fixed_len = task_path.used; // Remember number of used characters.
2296 
2297   proc_entry = readdir(proc_dir);
2298   while (proc_entry != NULL) {
2299     // Proc entry is a directory and name starts with a digit. Assume it is a
2300     // process' directory.
2301     if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
2302 
2303 #ifdef KMP_DEBUG
2304       ++total_processes;
2305 #endif
2306       // Make sure init process is the very first in "/proc", so we can replace
2307       // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes ==
2308       // 1. We are going to check that total_processes == 1 => d_name == "1" is
2309       // true (where "=>" is implication). Since C++ does not have => operator,
2310       // let us replace it with its equivalent: a => b == ! a || b.
2311       KMP_DEBUG_ASSERT(total_processes != 1 ||
2312                        strcmp(proc_entry->d_name, "1") == 0);
2313 
2314       // Construct task_path.
2315       task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
2316       __kmp_str_buf_cat(&task_path, proc_entry->d_name,
2317                         KMP_STRLEN(proc_entry->d_name));
2318       __kmp_str_buf_cat(&task_path, "/task", 5);
2319 
2320       task_dir = opendir(task_path.str);
2321       if (task_dir == NULL) {
2322         // Process can finish between reading "/proc/" directory entry and
2323         // opening process' "task/" directory. So, in general case we should not
2324         // complain, but have to skip this process and read the next one. But on
2325         // systems with no "task/" support we will spend lot of time to scan
2326         // "/proc/" tree again and again without any benefit. "init" process
2327         // (its pid is 1) should exist always, so, if we cannot open
2328         // "/proc/1/task/" directory, it means "task/" is not supported by
2329         // kernel. Report an error now and in the future.
2330         if (strcmp(proc_entry->d_name, "1") == 0) {
2331           running_threads = -1;
2332           permanent_error = 1;
2333           goto finish;
2334         }
2335       } else {
2336         // Construct fixed part of stat file path.
2337         __kmp_str_buf_clear(&stat_path);
2338         __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
2339         __kmp_str_buf_cat(&stat_path, "/", 1);
2340         stat_path_fixed_len = stat_path.used;
2341 
2342         task_entry = readdir(task_dir);
2343         while (task_entry != NULL) {
2344           // It is a directory and name starts with a digit.
2345           if (task_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
2346 
2347             // Construct complete stat file path. Easiest way would be:
2348             //  __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str,
2349             //  task_entry->d_name );
2350             // but a series of __kmp_str_buf_cat() calls works a bit faster.
2351             stat_path.used =
2352                 stat_path_fixed_len; // Reset stat path to its fixed part.
2353             __kmp_str_buf_cat(&stat_path, task_entry->d_name,
2354                               KMP_STRLEN(task_entry->d_name));
2355             __kmp_str_buf_cat(&stat_path, "/stat", 5);
2356 
2357             // Note: Low-level API (open/read/close) is used. High-level API
2358             // (fopen/fclose) works ~30% slower.
2359             stat_file = open(stat_path.str, O_RDONLY);
2360             if (stat_file == -1) {
2361               // We cannot report an error because task (thread) can terminate
2362               // just before reading this file.
2363             } else {
2364               /* Content of the "stat" file looks like:
2365                  24285 (program) S ...
2366 
2367                  It is a single line (if the program name does not include
2368                  funny symbols). The first number is the thread id, then the
2369                  executable file name in parentheses, then the state of the
2370                  thread. We need just the thread state.
2371 
2372                  Good news: The program name is at most 15 characters long;
2373                  longer names are truncated.
2374 
2375                  Thus, we need a rather short buffer: 15 chars for the program
2376                  name + 2 parentheses + 3 spaces + ~7 digits of pid = 37.
2377 
2378                  Bad news: The program name may contain special symbols like
2379                  space, closing parenthesis, or even new line. This makes
2380                  parsing the "stat" file not 100% reliable. For such program
2381                  names, parsing may fail (reporting an incorrect thread state).
2382 
2383                  Parsing the "status" file looks more promising (due to its
2384                  different structure and escaping of special symbols), but
2385                  reading and parsing "status" works slower.
2386                   -- ln
2387               */
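              /* For example, a thread whose name itself contains ") " can
                 defeat the search below: for the (contrived) name "a) R x"
                 the stat line reads
                   42 (a) R x) S 1 ...
                 and the first ") " occurs inside the name, so the state is
                 mis-read as 'R' although the thread is sleeping ('S'). */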
2388               char buffer[65];
2389               ssize_t len;
2390               len = read(stat_file, buffer, sizeof(buffer) - 1);
2391               if (len >= 0) {
2392                 buffer[len] = 0;
2393                 // Using scanf:
2394                 //     sscanf( buffer, "%*d (%*s) %c ", & state );
2395                 // looks very nice, but searching for a closing parenthesis
2396                 // works a bit faster.
2397                 char *close_parent = strstr(buffer, ") ");
2398                 if (close_parent != NULL) {
2399                   char state = *(close_parent + 2);
2400                   if (state == 'R') {
2401                     ++running_threads;
2402                     if (running_threads >= max) {
2403                       goto finish;
2404                     }
2405                   }
2406                 }
2407               }
2408               close(stat_file);
2409               stat_file = -1;
2410             }
2411           }
2412           task_entry = readdir(task_dir);
2413         }
2414         closedir(task_dir);
2415         task_dir = NULL;
2416       }
2417     }
2418     proc_entry = readdir(proc_dir);
2419   }
2420 
2421   // There _might_ be a timing hole where the thread executing this
2422   // code gets skipped in the load balance, and running_threads is 0.
2423   // Assert only in debug builds.
2424   KMP_DEBUG_ASSERT(running_threads > 0);
2425   if (running_threads <= 0) {
2426     running_threads = 1;
2427   }
2428 
2429 finish: // Clean up and exit.
2430   if (proc_dir != NULL) {
2431     closedir(proc_dir);
2432   }
2433   __kmp_str_buf_free(&task_path);
2434   if (task_dir != NULL) {
2435     closedir(task_dir);
2436   }
2437   __kmp_str_buf_free(&stat_path);
2438   if (stat_file != -1) {
2439     close(stat_file);
2440   }
2441 
2442   glb_running_threads = running_threads;
2443 
2444   return running_threads;
2445 
2446 } // __kmp_get_load_balance
2447 
2448 #endif // KMP_OS_DARWIN || KMP_OS_NETBSD
2449 
2450 #endif // USE_LOAD_BALANCE
2451 
2452 #if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
2453       ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                 \
2454       KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||            \
2455       KMP_ARCH_ARM)
2456 
2457 // We really only need the case with 1 argument, because clang always builds
2458 // a struct of pointers to shared variables referenced in the outlined function.
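// For example (a sketch of typical clang codegen, not code from this file):
// for "#pragma omp parallel shared(a, b)" the compiler emits roughly
//   struct anon { int *a; double *b; };                 // captured context
//   void outlined(int *gtid, int *tid, struct anon *ctx);
// and the runtime passes the single context pointer, so argc == 1 here.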
2459 int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
2460                            void *p_argv[]
2461 #if OMPT_SUPPORT
2462                            ,
2463                            void **exit_frame_ptr
2464 #endif
2465 ) {
2466 #if OMPT_SUPPORT
2467   *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
2468 #endif
2469 
2470   switch (argc) {
2471   default:
2472     fprintf(stderr, "Too many args to microtask: %d!\n", argc);
2473     fflush(stderr);
2474     exit(-1);
2475   case 0:
2476     (*pkfn)(&gtid, &tid);
2477     break;
2478   case 1:
2479     (*pkfn)(&gtid, &tid, p_argv[0]);
2480     break;
2481   case 2:
2482     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
2483     break;
2484   case 3:
2485     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
2486     break;
2487   case 4:
2488     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
2489     break;
2490   case 5:
2491     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
2492     break;
2493   case 6:
2494     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2495             p_argv[5]);
2496     break;
2497   case 7:
2498     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2499             p_argv[5], p_argv[6]);
2500     break;
2501   case 8:
2502     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2503             p_argv[5], p_argv[6], p_argv[7]);
2504     break;
2505   case 9:
2506     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2507             p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
2508     break;
2509   case 10:
2510     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2511             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
2512     break;
2513   case 11:
2514     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2515             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
2516     break;
2517   case 12:
2518     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2519             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2520             p_argv[11]);
2521     break;
2522   case 13:
2523     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2524             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2525             p_argv[11], p_argv[12]);
2526     break;
2527   case 14:
2528     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2529             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2530             p_argv[11], p_argv[12], p_argv[13]);
2531     break;
2532   case 15:
2533     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2534             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2535             p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
2536     break;
2537   }
2538 
2539   return 1;
2540 }
2541 
2542 #endif
2543 
2544 #if KMP_OS_LINUX
2545 // Functions for hidden helper task
2546 namespace {
2547 // Condition variable for initializing hidden helper team
2548 pthread_cond_t hidden_helper_threads_initz_cond_var;
2549 pthread_mutex_t hidden_helper_threads_initz_lock;
2550 volatile int hidden_helper_initz_signaled = FALSE;
2551 
2552 // Condition variable for deinitializing hidden helper team
2553 pthread_cond_t hidden_helper_threads_deinitz_cond_var;
2554 pthread_mutex_t hidden_helper_threads_deinitz_lock;
2555 volatile int hidden_helper_deinitz_signaled = FALSE;
2556 
2557 // Condition variable for the wrapper function of main thread
2558 pthread_cond_t hidden_helper_main_thread_cond_var;
2559 pthread_mutex_t hidden_helper_main_thread_lock;
2560 volatile int hidden_helper_main_thread_signaled = FALSE;
2561 
2562 // Semaphore for worker threads. We don't use a condition variable here
2563 // because if multiple signals are sent at the same time, only one thread
2564 // might be woken.
2565 sem_t hidden_helper_task_sem;
2566 } // namespace
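/* A minimal sketch of why a counting semaphore fits (hypothetical
   stand-alone fragment, error handling elided): each post is "banked", so
   N posts release exactly N waiting workers, even if every post happens
   before any worker blocks.

     sem_t tasks;
     sem_init(&tasks, 0, 0);
     for (int i = 0; i < n_ready; ++i)
       sem_post(&tasks); // one ticket per ready task
     // ... in each worker:
     sem_wait(&tasks); // consumes exactly one ticket

   By contrast, N pthread_cond_signal() calls made before any thread waits
   would all be lost. */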
2567 
2568 void __kmp_hidden_helper_worker_thread_wait() {
2569   int status = sem_wait(&hidden_helper_task_sem);
2570   KMP_CHECK_SYSFAIL("sem_wait", status);
2571 }
2572 
2573 void __kmp_do_initialize_hidden_helper_threads() {
2574   // Initialize condition variable
2575   int status =
2576       pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
2577   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2578 
2579   status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
2580   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2581 
2582   status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
2583   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2584 
2585   status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
2586   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2587 
2588   status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
2589   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2590 
2591   status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
2592   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2593 
2594   // Initialize the semaphore
2595   status = sem_init(&hidden_helper_task_sem, 0, 0);
2596   KMP_CHECK_SYSFAIL("sem_init", status);
2597 
2598   // Create a new thread to finish initialization
2599   pthread_t handle;
2600   status = pthread_create(
2601       &handle, nullptr,
2602       [](void *) -> void * {
2603         __kmp_hidden_helper_threads_initz_routine();
2604         return nullptr;
2605       },
2606       nullptr);
2607   KMP_CHECK_SYSFAIL("pthread_create", status);
2608 }
2609 
2610 void __kmp_hidden_helper_threads_initz_wait() {
2611   // The initial thread waits here for initialization to complete. The
2612   // condition variable is signaled by the main thread of the hidden helper team.
2613   int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2614   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2615 
2616   while (!TCR_4(hidden_helper_initz_signaled)) { // guard against spurious wakeups
2617     status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
2618                                &hidden_helper_threads_initz_lock);
2619     KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2620   }
2621 
2622   status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2623   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2624 }
2625 
2626 void __kmp_hidden_helper_initz_release() {
2627   // Initialization is complete; release the initial thread waiting for it.
2628   int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2629   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2630 
2631   status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
2632   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2633 
2634   TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);
2635 
2636   status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2637   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2638 }
2639 
2640 void __kmp_hidden_helper_main_thread_wait() {
2641   // The main thread of the hidden helper team blocks here. The condition
2642   // variable can only be signaled in the destructor of the RTL.
2643   int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2644   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2645 
2646   while (!TCR_4(hidden_helper_main_thread_signaled)) { // guard against spurious wakeups
2647     status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
2648                                &hidden_helper_main_thread_lock);
2649     KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2650   }
2651 
2652   status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2653   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2654 }
2655 
2656 void __kmp_hidden_helper_main_thread_release() {
2657   // The initial thread of OpenMP RTL should call this function to wake up the
2658   // main thread of hidden helper team.
2659   int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2660   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2661 
2662   status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
2663   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2664 
2665   // The hidden helper team is done here
2666   TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);
2667 
2668   status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2669   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2670 }
2671 
2672 void __kmp_hidden_helper_worker_thread_signal() {
2673   int status = sem_post(&hidden_helper_task_sem);
2674   KMP_CHECK_SYSFAIL("sem_post", status);
2675 }
2676 
2677 void __kmp_hidden_helper_threads_deinitz_wait() {
2678   // The initial thread waits here for deinitialization to complete. The
2679   // condition variable is signaled by the main thread of the hidden helper team.
2680   int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2681   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2682 
2683   while (!TCR_4(hidden_helper_deinitz_signaled)) { // guard against spurious wakeups
2684     status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
2685                                &hidden_helper_threads_deinitz_lock);
2686     KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2687   }
2688 
2689   status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2690   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2691 }
2692 
2693 void __kmp_hidden_helper_threads_deinitz_release() {
2694   int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2695   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2696 
2697   status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
2698   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2699 
2700   TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);
2701 
2702   status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2703   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2704 }
2705 #else // KMP_OS_LINUX
2706 void __kmp_hidden_helper_worker_thread_wait() {
2707   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2708 }
2709 
2710 void __kmp_do_initialize_hidden_helper_threads() {
2711   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2712 }
2713 
2714 void __kmp_hidden_helper_threads_initz_wait() {
2715   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2716 }
2717 
2718 void __kmp_hidden_helper_initz_release() {
2719   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2720 }
2721 
2722 void __kmp_hidden_helper_main_thread_wait() {
2723   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2724 }
2725 
2726 void __kmp_hidden_helper_main_thread_release() {
2727   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2728 }
2729 
2730 void __kmp_hidden_helper_worker_thread_signal() {
2731   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2732 }
2733 
2734 void __kmp_hidden_helper_threads_deinitz_wait() {
2735   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2736 }
2737 
2738 void __kmp_hidden_helper_threads_deinitz_release() {
2739   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2740 }
2741 #endif // KMP_OS_LINUX
2742 
2743 // end of file //
2744