xref: /freebsd/contrib/llvm-project/openmp/runtime/src/ompt-specific.cpp (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
1 /*
2  * ompt-specific.cpp -- OMPT internal functions
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 //******************************************************************************
14 // include files
15 //******************************************************************************
16 
17 #include "kmp.h"
18 #include "ompt-specific.h"
19 
20 #if KMP_OS_UNIX
21 #include <dlfcn.h>
22 #endif
23 
24 #if KMP_OS_WINDOWS
25 #define THREAD_LOCAL __declspec(thread)
26 #else
27 #define THREAD_LOCAL __thread
28 #endif
29 
30 #define OMPT_WEAK_ATTRIBUTE KMP_WEAK_ATTRIBUTE_INTERNAL
31 
32 //******************************************************************************
33 // macros
34 //******************************************************************************
35 
36 #define LWT_FROM_TEAM(team) (team)->t.ompt_serialized_team_info
37 
38 #define OMPT_THREAD_ID_BITS 16
39 
40 //******************************************************************************
41 // private operations
42 //******************************************************************************
43 
44 //----------------------------------------------------------
45 // traverse the team and task hierarchy
46 // note: __ompt_get_teaminfo and __ompt_get_task_info_object
47 //       traverse the hierarchy similarly and need to be
48 //       kept consistent
49 //----------------------------------------------------------
50 
// Walk `depth` levels up the team hierarchy (interleaving lightweight
// serialized teams with heavyweight kmp_team parents) and return the
// ompt_team_info_t at that ancestor level, or NULL if the level does not
// exist. If `size` is non-NULL it receives the team size at that level
// (always 1 for a lightweight/serialized team).
// NOTE: must traverse exactly like __ompt_get_task_info_object below.
ompt_team_info_t *__ompt_get_teaminfo(int depth, int *size) {
  kmp_info_t *thr = ompt_get_thread();

  if (thr) {
    kmp_team *team = thr->th.th_team;
    if (team == NULL)
      return NULL;

    // next_lwt is the lightweight-team chain hanging off the current
    // heavyweight team; lwt is non-NULL while we are inside such a chain.
    ompt_lw_taskteam_t *next_lwt = LWT_FROM_TEAM(team), *lwt = NULL;

    while (depth > 0) {
      // next lightweight team (if any)
      if (lwt)
        lwt = lwt->parent;

      // next heavyweight team (if any) after
      // lightweight teams are exhausted
      if (!lwt && team) {
        if (next_lwt) {
          lwt = next_lwt;
          next_lwt = NULL;
        } else {
          team = team->t.t_parent;
          if (team) {
            next_lwt = LWT_FROM_TEAM(team);
          }
        }
      }

      depth--;
    }

    if (lwt) {
      // lightweight teams have one task
      if (size)
        *size = 1;

      // return team info for lightweight team
      return &lwt->ompt_team_info;
    } else if (team) {
      // extract size from heavyweight team
      if (size)
        *size = team->t.t_nproc;

      // return team info for heavyweight team
      return &team->t.ompt_team_info;
    }
  }

  return NULL;
}
102 
// Walk `depth` levels up the task hierarchy (following td_parent links for
// implicit tasks and interleaving lightweight serialized teams) and return
// the ompt_task_info_t at that ancestor level, or NULL if it does not exist.
// NOTE: mirrors the traversal in __ompt_get_teaminfo above; keep consistent.
ompt_task_info_t *__ompt_get_task_info_object(int depth) {
  ompt_task_info_t *info = NULL;
  kmp_info_t *thr = ompt_get_thread();

  if (thr) {
    kmp_taskdata_t *taskdata = thr->th.th_current_task;
    // Lightweight chain attached to the team of the current task.
    ompt_lw_taskteam_t *lwt = NULL,
                       *next_lwt = LWT_FROM_TEAM(taskdata->td_team);

    while (depth > 0) {
      // next lightweight team (if any)
      if (lwt)
        lwt = lwt->parent;

      // next heavyweight team (if any) after
      // lightweight teams are exhausted
      if (!lwt && taskdata) {
        if (next_lwt) {
          lwt = next_lwt;
          next_lwt = NULL;
        } else {
          taskdata = taskdata->td_parent;
          if (taskdata) {
            next_lwt = LWT_FROM_TEAM(taskdata->td_team);
          }
        }
      }
      depth--;
    }

    if (lwt) {
      info = &lwt->ompt_task_info;
    } else if (taskdata) {
      info = &taskdata->ompt_task_info;
    }
  }

  return info;
}
142 
// Like __ompt_get_task_info_object, but at each level prefers the
// *scheduling* parent (the task on whose stack an explicit task was
// scheduled) over the structural td_parent, as required for OMPT frame
// reporting of explicit tasks.
ompt_task_info_t *__ompt_get_scheduling_taskinfo(int depth) {
  ompt_task_info_t *info = NULL;
  kmp_info_t *thr = ompt_get_thread();

  if (thr) {
    kmp_taskdata_t *taskdata = thr->th.th_current_task;

    ompt_lw_taskteam_t *lwt = NULL,
                       *next_lwt = LWT_FROM_TEAM(taskdata->td_team);

    while (depth > 0) {
      // next lightweight team (if any)
      if (lwt)
        lwt = lwt->parent;

      // next heavyweight team (if any) after
      // lightweight teams are exhausted
      if (!lwt && taskdata) {
        // first try scheduling parent (for explicit task scheduling)
        if (taskdata->ompt_task_info.scheduling_parent) {
          taskdata = taskdata->ompt_task_info.scheduling_parent;
        } else if (next_lwt) {
          lwt = next_lwt;
          next_lwt = NULL;
        } else {
          // then go for implicit tasks
          taskdata = taskdata->td_parent;
          if (taskdata) {
            next_lwt = LWT_FROM_TEAM(taskdata->td_team);
          }
        }
      }
      depth--;
    }

    if (lwt) {
      info = &lwt->ompt_task_info;
    } else if (taskdata) {
      info = &taskdata->ompt_task_info;
    }
  }

  return info;
}
187 
188 //******************************************************************************
189 // interface operations
190 //******************************************************************************
191 //----------------------------------------------------------
192 // initialization support
193 //----------------------------------------------------------
194 
// Force full (serial) runtime initialization so a tool can query the
// runtime before the application's first OpenMP construct.
void __ompt_force_initialization() { __kmp_serial_initialize(); }
196 
197 //----------------------------------------------------------
198 // thread support
199 //----------------------------------------------------------
200 
__ompt_get_thread_data_internal()201 ompt_data_t *__ompt_get_thread_data_internal() {
202   if (__kmp_get_gtid() >= 0) {
203     kmp_info_t *thread = ompt_get_thread();
204     if (thread == NULL)
205       return NULL;
206     return &(thread->th.ompt_thread_info.thread_data);
207   }
208   return NULL;
209 }
210 
211 //----------------------------------------------------------
212 // state support
213 //----------------------------------------------------------
214 
__ompt_thread_assign_wait_id(void * variable)215 void __ompt_thread_assign_wait_id(void *variable) {
216   kmp_info_t *ti = ompt_get_thread();
217 
218   if (ti)
219     ti->th.ompt_thread_info.wait_id = (ompt_wait_id_t)(uintptr_t)variable;
220 }
221 
__ompt_get_state_internal(ompt_wait_id_t * omp_wait_id)222 int __ompt_get_state_internal(ompt_wait_id_t *omp_wait_id) {
223   kmp_info_t *ti = ompt_get_thread();
224 
225   if (ti) {
226     if (omp_wait_id)
227       *omp_wait_id = ti->th.ompt_thread_info.wait_id;
228     return ti->th.ompt_thread_info.state;
229   }
230   return ompt_state_undefined;
231 }
232 
233 //----------------------------------------------------------
234 // parallel region support
235 //----------------------------------------------------------
236 
__ompt_get_parallel_info_internal(int ancestor_level,ompt_data_t ** parallel_data,int * team_size)237 int __ompt_get_parallel_info_internal(int ancestor_level,
238                                       ompt_data_t **parallel_data,
239                                       int *team_size) {
240   if (__kmp_get_gtid() >= 0) {
241     ompt_team_info_t *info;
242     if (team_size) {
243       info = __ompt_get_teaminfo(ancestor_level, team_size);
244     } else {
245       info = __ompt_get_teaminfo(ancestor_level, NULL);
246     }
247     if (parallel_data) {
248       *parallel_data = info ? &(info->parallel_data) : NULL;
249     }
250     return info ? 2 : 0;
251   } else {
252     return 0;
253   }
254 }
255 
256 //----------------------------------------------------------
257 // lightweight task team support
258 //----------------------------------------------------------
259 
__ompt_lw_taskteam_init(ompt_lw_taskteam_t * lwt,kmp_info_t * thr,int gtid,ompt_data_t * ompt_pid,void * codeptr)260 void __ompt_lw_taskteam_init(ompt_lw_taskteam_t *lwt, kmp_info_t *thr, int gtid,
261                              ompt_data_t *ompt_pid, void *codeptr) {
262   // initialize parallel_data with input, return address to parallel_data on
263   // exit
264   lwt->ompt_team_info.parallel_data = *ompt_pid;
265   lwt->ompt_team_info.master_return_address = codeptr;
266   lwt->ompt_task_info.task_data.value = 0;
267   lwt->ompt_task_info.frame.enter_frame = ompt_data_none;
268   lwt->ompt_task_info.frame.exit_frame = ompt_data_none;
269   lwt->ompt_task_info.scheduling_parent = NULL;
270   lwt->heap = 0;
271   lwt->parent = 0;
272 }
273 
// Install `lwt` as the current serialized team for `thr`. When nesting
// inside an existing team (or `always`), the new team/task info is swapped
// into the thread's current slots and the displaced info is parked in the
// (possibly heap-allocated) link node, which is pushed onto the
// ompt_serialized_team_info list. Otherwise the values are simply copied
// into the team and the lwt object is dropped.
// NOTE: the OMPD parallel-begin breakpoint is deliberately raised after the
// team-info swap but before the task-info swap; keep this ordering.
void __ompt_lw_taskteam_link(ompt_lw_taskteam_t *lwt, kmp_info_t *thr,
                             int on_heap, bool always) {
  ompt_lw_taskteam_t *link_lwt = lwt;
  if (always ||
      thr->th.th_team->t.t_serialized >
          1) { // we already have a team, so link the new team and swap values
    if (on_heap) { // the lw_taskteam cannot stay on stack, allocate it on heap
      link_lwt =
          (ompt_lw_taskteam_t *)__kmp_allocate(sizeof(ompt_lw_taskteam_t));
    }
    link_lwt->heap = on_heap;

    // would be swap in the (on_stack) case.
    ompt_team_info_t tmp_team = lwt->ompt_team_info;
    link_lwt->ompt_team_info = *OMPT_CUR_TEAM_INFO(thr);
    *OMPT_CUR_TEAM_INFO(thr) = tmp_team;

    // link the taskteam into the list of taskteams:
    ompt_lw_taskteam_t *my_parent =
        thr->th.th_team->t.ompt_serialized_team_info;
    link_lwt->parent = my_parent;
    thr->th.th_team->t.ompt_serialized_team_info = link_lwt;
#if OMPD_SUPPORT
    if (ompd_state & OMPD_ENABLE_BP) {
      ompd_bp_parallel_begin();
    }
#endif

    ompt_task_info_t tmp_task = lwt->ompt_task_info;
    link_lwt->ompt_task_info = *OMPT_CUR_TASK_INFO(thr);
    *OMPT_CUR_TASK_INFO(thr) = tmp_task;
  } else {
    // this is the first serialized team, so we just store the values in the
    // team and drop the taskteam-object
    *OMPT_CUR_TEAM_INFO(thr) = lwt->ompt_team_info;
#if OMPD_SUPPORT
    if (ompd_state & OMPD_ENABLE_BP) {
      ompd_bp_parallel_begin();
    }
#endif
    *OMPT_CUR_TASK_INFO(thr) = lwt->ompt_task_info;
  }
}
317 
// Undo __ompt_lw_taskteam_link: pop the head of the serialized team list,
// swap the parked team/task info back into the thread's current slots, and
// free the node if it was heap-allocated. No-op if the list is empty.
// NOTE: the OMPD parallel-end breakpoint fires after the task-info swap but
// before the team-info swap — the mirror of the ordering in link above.
void __ompt_lw_taskteam_unlink(kmp_info_t *thr) {
  ompt_lw_taskteam_t *lwtask = thr->th.th_team->t.ompt_serialized_team_info;
  if (lwtask) {
    ompt_task_info_t tmp_task = lwtask->ompt_task_info;
    lwtask->ompt_task_info = *OMPT_CUR_TASK_INFO(thr);
    *OMPT_CUR_TASK_INFO(thr) = tmp_task;
#if OMPD_SUPPORT
    if (ompd_state & OMPD_ENABLE_BP) {
      ompd_bp_parallel_end();
    }
#endif
    // Pop this node off the serialized-team list.
    thr->th.th_team->t.ompt_serialized_team_info = lwtask->parent;

    ompt_team_info_t tmp_team = lwtask->ompt_team_info;
    lwtask->ompt_team_info = *OMPT_CUR_TEAM_INFO(thr);
    *OMPT_CUR_TEAM_INFO(thr) = tmp_team;

    if (lwtask->heap) {
      __kmp_free(lwtask);
      lwtask = NULL;
    }
  }
  //    return lwtask;
}
342 
343 //----------------------------------------------------------
344 // task support
345 //----------------------------------------------------------
346 
__ompt_get_task_data()347 ompt_data_t *__ompt_get_task_data() {
348   kmp_info_t *thr = ompt_get_thread();
349   ompt_data_t *task_data = thr ? OMPT_CUR_TASK_DATA(thr) : NULL;
350   return task_data;
351 }
352 
__ompt_get_target_task_data()353 ompt_data_t *__ompt_get_target_task_data() {
354   return &__kmp_threads[__kmp_get_gtid()]->th.ompt_thread_info.target_task_data;
355 }
356 
// Implementation of ompt_get_task_info: resolve the task at
// `ancestor_level`, optionally returning its type, task data, frame,
// enclosing parallel data, and the thread number of the thread that
// executes (or forked) it. Returns 2 when the level exists, 0 otherwise.
// The traversal walks scheduling parents first (explicit tasks), then
// lightweight serialized teams, then implicit-task/team parents — the same
// order as __ompt_get_scheduling_taskinfo; keep them consistent.
int __ompt_get_task_info_internal(int ancestor_level, int *type,
                                  ompt_data_t **task_data,
                                  ompt_frame_t **task_frame,
                                  ompt_data_t **parallel_data,
                                  int *thread_num) {
  if (__kmp_get_gtid() < 0)
    return 0;

  if (ancestor_level < 0)
    return 0;

  // copied from __ompt_get_scheduling_taskinfo
  ompt_task_info_t *info = NULL;
  ompt_team_info_t *team_info = NULL;
  kmp_info_t *thr = ompt_get_thread();
  // Keep the requested level; ancestor_level itself is consumed below.
  int level = ancestor_level;

  if (thr) {
    kmp_taskdata_t *taskdata = thr->th.th_current_task;
    if (taskdata == NULL)
      return 0;
    // prev_team remembers the team we left when stepping to a parent team;
    // needed for the thread_num computation at the end.
    kmp_team *team = thr->th.th_team, *prev_team = NULL;
    if (team == NULL)
      return 0;
    ompt_lw_taskteam_t *lwt = NULL,
                       *next_lwt = LWT_FROM_TEAM(taskdata->td_team);

    while (ancestor_level > 0) {
      // next lightweight team (if any)
      if (lwt)
        lwt = lwt->parent;

      // next heavyweight team (if any) after
      // lightweight teams are exhausted
      if (!lwt && taskdata) {
        // first try scheduling parent (for explicit task scheduling)
        if (taskdata->ompt_task_info.scheduling_parent) {
          taskdata = taskdata->ompt_task_info.scheduling_parent;
        } else if (next_lwt) {
          lwt = next_lwt;
          next_lwt = NULL;
        } else {
          // then go for implicit tasks
          taskdata = taskdata->td_parent;
          if (team == NULL)
            return 0;
          prev_team = team;
          team = team->t.t_parent;
          if (taskdata) {
            next_lwt = LWT_FROM_TEAM(taskdata->td_team);
          }
        }
      }
      ancestor_level--;
    }

    if (lwt) {
      // Lightweight (serialized) teams always carry implicit tasks.
      info = &lwt->ompt_task_info;
      team_info = &lwt->ompt_team_info;
      if (type) {
        *type = ompt_task_implicit;
      }
    } else if (taskdata) {
      info = &taskdata->ompt_task_info;
      team_info = &team->t.ompt_team_info;
      if (type) {
        if (taskdata->td_parent) {
          *type = TASK_TYPE_DETAILS_FORMAT(taskdata);
        } else {
          // A task with no parent is the initial task.
          *type = ompt_task_initial;
        }
      }
    }
    if (task_data) {
      *task_data = info ? &info->task_data : NULL;
    }
    if (task_frame) {
      // OpenMP spec asks for the scheduling task to be returned.
      *task_frame = info ? &info->frame : NULL;
    }
    if (parallel_data) {
      *parallel_data = team_info ? &(team_info->parallel_data) : NULL;
    }
    if (thread_num) {
      if (level == 0)
        *thread_num = __kmp_get_tid();
      else if (lwt)
        *thread_num = 0;
      else if (!prev_team) {
        // The innermost parallel region contains at least one explicit task.
        // The task at level > 0 is either an implicit task that
        // corresponds to the mentioned region or one of the explicit tasks
        // nested inside the same region. Note that the task isn't the
        // innermost explicit tasks (because of condition level > 0).
        // Since the task at this level still belongs to the innermost parallel
        // region, thread_num is determined the same way as for level==0.
        *thread_num = __kmp_get_tid();
      } else
        // An ancestor task ran on the thread that forked prev_team; that
        // thread's number in the ancestor team is the master tid recorded
        // in prev_team.
        *thread_num = prev_team->t.t_master_tid;
      //        *thread_num = team->t.t_master_tid;
    }
    return info ? 2 : 0;
  }
  return 0;
}
462 
__ompt_get_task_memory_internal(void ** addr,size_t * size,int blocknum)463 int __ompt_get_task_memory_internal(void **addr, size_t *size, int blocknum) {
464   *size = 0;
465   if (blocknum != 0)
466     return 0; // support only a single block
467 
468   kmp_info_t *thr = ompt_get_thread();
469   if (!thr)
470     return 0;
471 
472   kmp_taskdata_t *taskdata = thr->th.th_current_task;
473 
474   if (taskdata->td_flags.tasktype != TASK_EXPLICIT)
475     return 0; // support only explicit task
476 
477   *addr = taskdata;
478   *size = taskdata->td_size_alloc;
479   return 0;
480 }
481 
482 //----------------------------------------------------------
483 // team support
484 //----------------------------------------------------------
485 
// Store the tool-supplied parallel data on a heavyweight team so later
// OMPT queries for this team report the same parallel_data.
void __ompt_team_assign_id(kmp_team_t *team, ompt_data_t ompt_pid) {
  team->t.ompt_team_info.parallel_data = ompt_pid;
}
489 
490 //----------------------------------------------------------
491 // misc
492 //----------------------------------------------------------
493 
// Generate process-wide unique 64-bit ids with no synchronization on the
// fast path: the top OMPT_THREAD_ID_BITS bits hold a per-thread number
// claimed atomically on the thread's first call; the remaining low bits
// are a thread-local counter bumped on every call.
static uint64_t __ompt_get_unique_id_internal() {
  static uint64_t thread = 1;
  static THREAD_LOCAL uint64_t ID = 0;
  if (ID == 0) {
    // First call on this thread: atomically claim the next thread number
    // and seed the thread-local counter with it in the high bits.
    uint64_t new_thread = KMP_TEST_THEN_INC64((kmp_int64 *)&thread);
    ID = new_thread << (sizeof(uint64_t) * 8 - OMPT_THREAD_ID_BITS);
  }
  return ++ID;
}
503 
__ompt_get_barrier_kind(enum barrier_type bt,kmp_info_t * thr)504 ompt_sync_region_t __ompt_get_barrier_kind(enum barrier_type bt,
505                                            kmp_info_t *thr) {
506   if (bt == bs_forkjoin_barrier) {
507     if (thr->th.ompt_thread_info.parallel_flags & ompt_parallel_league)
508       return ompt_sync_region_barrier_teams;
509     else
510       return ompt_sync_region_barrier_implicit_parallel;
511   }
512 
513   if (bt != bs_plain_barrier || !thr->th.th_ident)
514     return ompt_sync_region_barrier_implementation;
515 
516   kmp_int32 flags = thr->th.th_ident->flags;
517 
518   if ((flags & KMP_IDENT_BARRIER_EXPL) != 0)
519     return ompt_sync_region_barrier_explicit;
520 
521   if ((flags & KMP_IDENT_BARRIER_IMPL) != 0)
522     return ompt_sync_region_barrier_implicit_workshare;
523 
524   return ompt_sync_region_barrier_implementation;
525 }
526