Searched refs: __kmp_allocate (Results 1 – 20 of 20) sorted by relevance
Result 1:
   91  d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));  in __kmp_init_common_data()
  104  d->data = __kmp_allocate(pc_size);  in __kmp_init_common_data()
  307  d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));  in kmp_threadprivate_insert_private_data()
  342  tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));  in kmp_threadprivate_insert()
  363  d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);  in kmp_threadprivate_insert()
  375  d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);  in kmp_threadprivate_insert()
  385  d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));  in kmp_threadprivate_insert()
  409  tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);  in kmp_threadprivate_insert()
  519  d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));  in __kmpc_threadprivate_register()
  635  KMP_ITT_IGNORE(my_cache = (void **)__kmp_allocate(  in __kmpc_threadprivate_cached()
  [all …]
Result 2:
   63  scheds = (enum sched_type *)__kmp_allocate(sizeof(enum sched_type) *  in append()
   65  small_chunks = (kmp_int32 *)__kmp_allocate(sizeof(kmp_int32) *  in append()
   67  large_chunks = (kmp_int64 *)__kmp_allocate(sizeof(kmp_int64) *  in append()
   69  layers = (kmp_hier_layer_e *)__kmp_allocate(sizeof(kmp_hier_layer_e) *  in append()
  673  info = (kmp_hier_layer_info_t<T> *)__kmp_allocate(  in allocate_hier()
  675  layers = (kmp_hier_top_unit_t<T> **)__kmp_allocate(  in allocate_hier()
  692  layers[i] = (kmp_hier_top_unit_t<T> *)__kmp_allocate(  in allocate_hier()
  984  sh->hier = (kmp_hier_t<T> *)__kmp_allocate(sizeof(kmp_hier_t<T>));  in __kmp_dispatch_init_hierarchy()
  999  th->th.th_hier_bar_data = (kmp_hier_private_bdata_t *)__kmp_allocate(  in __kmp_dispatch_init_hierarchy()
Result 3:
  340  Mask() { mask = (mask_t *)__kmp_allocate(__kmp_affin_mask_size); }  in Mask()
  561  mask = (mask_t *)__kmp_allocate(sizeof(mask_t) * __kmp_num_proc_groups);  in Mask()
 1111  (kmp_hw_subset_t *)__kmp_allocate(sizeof(kmp_hw_subset_t));  in allocate()
 1116  retval->items = (item_t *)__kmp_allocate(sizeof(item_t) * initial_capacity);  in allocate()
 1141  item_t *new_items = (item_t *)__kmp_allocate(sizeof(item_t) * capacity);  in push_back()
 1321  (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));  in init()
 1413  (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));  in resize()
Result 4:
   71  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *  in __kmp_expand_cons_stack()
  141  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));  in __kmp_allocate_cons_stack()
  143  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *  in __kmp_allocate_cons_stack()
Result 5:
  320  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);  in _insert_windows_proc_groups()
  623  char *bytes = (char *)__kmp_allocate(size);  in allocate()
 1405  void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }  in operator new()
 1406  void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }  in operator new[]()
 1409  void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }  in operator new()
 1826  cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate(  in __kmp_affinity_create_hwloc_map()
 2291  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(  in __kmp_affinity_create_apicid_map()
 3007  (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));  in __kmp_affinity_create_cpuinfo_map()
 3011  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));  in __kmp_affinity_create_cpuinfo_map()
 3376  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));  in __kmp_affinity_create_cpuinfo_map()
  [all …]
Result 6:
  323  (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *));
  339  kmp_task_pri_t *l = (kmp_task_pri_t *)__kmp_allocate(sizeof(kmp_task_pri_t));
  346  thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
 2046  kmp_node_info_t *new_record = (kmp_node_info_t *)__kmp_allocate(
 2055  kmp_int32 *successorsList = (kmp_int32 *)__kmp_allocate(
 2559  arr[i].reduce_priv = __kmp_allocate(nth * size);
 2571  arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *));
 2599  __kmp_allocate(sizeof(kmp_task_red_input_t) * num);
 2626  __kmp_allocate(sizeof(kmp_task_red_input_t) * num);
 2712  p_priv[tid] = __kmp_allocate(arr[i].reduce_size);
  [all …]
Result 7:
 2288  polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *  in __kmp_acquire_drdpa_lock_timed_template()
 2306  // of the old polling area to the new area. __kmp_allocate()  in __kmp_acquire_drdpa_lock_timed_template()
 2309  polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *  in __kmp_acquire_drdpa_lock_timed_template()
 2459  lck->lk.polls = (std::atomic<kmp_uint64> *)__kmp_allocate(  in __kmp_init_drdpa_lock()
 3149  lock_table->table[row] = (kmp_indirect_lock_t *)__kmp_allocate(  in __kmp_allocate_indirect_lock()
 3157  (kmp_indirect_lock_table_t *)__kmp_allocate(  in __kmp_allocate_indirect_lock()
 3159  next_table->table = (kmp_indirect_lock_t **)__kmp_allocate(  in __kmp_allocate_indirect_lock()
 3173  lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]);  in __kmp_allocate_indirect_lock()
 3356  __kmp_i_lock_table.table = (kmp_indirect_lock_t **)__kmp_allocate(  in __kmp_init_dynamic_user_locks()
 3358  *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)__kmp_allocate(  in __kmp_init_dynamic_user_locks()
  [all …]
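The hits at 2288–2309 (the DRDPA lock's polling area), like the task deque at result 6 line 323, the stats event vector at result 10 line 678, and the hw-subset items at result 3 line 1141, share one shape: __kmp_allocate appears to have malloc-like rather than realloc-like semantics, so growing a buffer means allocate-larger, copy, publish, free-old. Below is a minimal standalone sketch of that idiom; GrowableU64, grow(), xalloc/xfree are illustrative names, with std::calloc standing in for the runtime allocator.

#include <cstdlib>
#include <cstring>

// Illustrative stand-ins for __kmp_allocate / __kmp_free.
static void *xalloc(size_t n) {
  void *p = std::calloc(1, n); // zero-filled, like the runtime allocator
  if (!p)
    std::abort();
  return p;
}
static void xfree(void *p) { std::free(p); }

struct GrowableU64 {
  unsigned long long *data;
  size_t size, capacity;
};

// Double the capacity: allocate the larger area, copy the live entries,
// publish the new pointer, then retire the old area. Freeing immediately
// is a single-threaded simplification; the DRDPA lock, for instance,
// must defer the free until no thread can still be polling the old area.
static void grow(GrowableU64 *v) {
  size_t new_cap = v->capacity * 2;
  unsigned long long *bigger =
      (unsigned long long *)xalloc(new_cap * sizeof(*bigger));
  std::memcpy(bigger, v->data, v->size * sizeof(*bigger));
  unsigned long long *old = v->data;
  v->data = bigger;
  v->capacity = new_cap;
  xfree(old);
}

int main() {
  GrowableU64 v = {(unsigned long long *)xalloc(4 * sizeof(unsigned long long)), 4, 4};
  grow(&v); // capacity 4 -> 8
  xfree(v.data);
}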
Result 8:
  244  kmp_int32 *new_succ_ids = (kmp_int32 *)__kmp_allocate(  in __kmp_track_dependence()
  538  info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));  in __kmp_process_deps()
  702  kmp_node_info_t *new_record = (kmp_node_info_t *)__kmp_allocate(  in __kmpc_omp_task_with_deps()
  711  kmp_int32 *successorsList = (kmp_int32 *)__kmp_allocate(  in __kmpc_omp_task_with_deps()
Result 9:
 1348  (dispatch_private_info_t *)__kmp_allocate(  in __kmp_serialized_parallel()
 1385  (dispatch_private_info_t *)__kmp_allocate(  in __kmp_serialized_parallel()
 2009  *p_hot_teams = (kmp_hot_team_ptr_t *)__kmp_allocate(  in __kmp_fork_call()
 2777  (kmp_internal_control_t *)__kmp_allocate(  in __kmp_save_internal_controls()
 3245  (kmp_info_t **)__kmp_allocate(sizeof(kmp_info_t *) * max_nth);  in __kmp_allocate_team_arrays()
 3246  team->t.t_disp_buffer = (dispatch_shared_info_t *)__kmp_allocate(  in __kmp_allocate_team_arrays()
 3249  (kmp_disp_t *)__kmp_allocate(sizeof(kmp_disp_t) * max_nth);  in __kmp_allocate_team_arrays()
 3251  (kmp_taskdata_t *)__kmp_allocate(sizeof(kmp_taskdata_t) * max_nth);  in __kmp_allocate_team_arrays()
 3766  newThreads = (kmp_info_t **)__kmp_allocate(  in __kmp_expand_threads()
 3777  (kmp_old_threads_list_t *)__kmp_allocate(sizeof(kmp_old_threads_list_t));  in __kmp_expand_threads()
  [all …]
Result 10:
  667  (kmp_stats_event *)__kmp_allocate(sizeof(kmp_stats_event) * INIT_SIZE);  in kmp_stats_event_vector()
  678  kmp_stats_event *tmp = (kmp_stats_event *)__kmp_allocate(  in push_back()
Result 11:
  282  (ompt_lw_taskteam_t *)__kmp_allocate(sizeof(ompt_lw_taskteam_t));  in __ompt_lw_taskteam_link()
Result 12:
  763  (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
  810  (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
 1053  (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
 1102  (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
 2020  data[2] = (uintptr_t)__kmp_allocate(nthreads * data[1]);  in __kmp_GOMP_taskgroup_reduction_register()
Result 13:
  158  void *operator new(size_t size) { return __kmp_allocate(size); }
  961  void *operator new(size_t size) { return __kmp_allocate(size); }
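These two hits, like the KMPAffinity::Mask overloads in result 5 (lines 1405–1409), show the class-scoped operator new idiom: heap allocations of one class are rerouted through __kmp_allocate so they pick up the runtime's zero-filled, tracked memory. A self-contained sketch of the idiom follows; pool_alloc/pool_free and TrackedNode are illustrative names, not the runtime's.

#include <cstdlib>

// Illustrative stand-ins for __kmp_allocate / __kmp_free.
static void *pool_alloc(size_t n) {
  void *p = std::calloc(1, n); // raw storage comes back zero-filled
  if (!p)
    std::abort(); // the runtime aborts on out-of-memory rather than throwing
  return p;
}
static void pool_free(void *p) { std::free(p); }

struct TrackedNode {
  int payload;
  // Class-scoped overloads: `new TrackedNode` and `new TrackedNode[n]`
  // now go through the pool instead of the global heap, matching the
  // operator new / operator new[] hits above.
  void *operator new(size_t size) { return pool_alloc(size); }
  void *operator new[](size_t size) { return pool_alloc(size); }
  void operator delete(void *p) { pool_free(p); }
  void operator delete[](void *p) { pool_free(p); }
};

int main() {
  TrackedNode *n = new TrackedNode; // allocated via pool_alloc
  delete n;                         // released via pool_free
}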
Result 14:
  388  (kmp_stats_list *)__kmp_allocate(sizeof(kmp_stats_list));  in push_back()
Result 15:
  232  ? __kmp_allocate(sizeof(*data))  in set_thr_data()
 1388  al = (kmp_allocator_t *)__kmp_allocate(sizeof(kmp_allocator_t)); // zeroed  in __kmpc_init_allocator()
Result 16:
   56  pTAlloc = reinterpret_cast<pT>(__kmp_allocate(n * sizeof(T)));  in CollapseAllocator()
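The single CollapseAllocator() hit is the allocation half of an RAII helper: grab the buffer with __kmp_allocate in the constructor and let the destructor release it. Below is a generic standalone sketch of that wrapper shape under assumed names (ScopedBuf, raw_alloc, raw_free); the real CollapseAllocator is more elaborate than this.

#include <cstdlib>

// Illustrative stand-ins for __kmp_allocate / __kmp_free.
static void *raw_alloc(size_t n) {
  void *p = std::calloc(1, n);
  if (!p)
    std::abort();
  return p;
}
static void raw_free(void *p) { std::free(p); }

// RAII wrapper: the constructor performs the cast-and-allocate seen in
// the CollapseAllocator hit; the destructor guarantees the matching free
// on every path out of the enclosing scope.
template <typename T> class ScopedBuf {
  T *ptr;

public:
  explicit ScopedBuf(size_t n)
      : ptr(reinterpret_cast<T *>(raw_alloc(n * sizeof(T)))) {}
  ~ScopedBuf() { raw_free(ptr); }
  ScopedBuf(const ScopedBuf &) = delete;
  ScopedBuf &operator=(const ScopedBuf &) = delete;
  T &operator[](size_t i) { return ptr[i]; }
};

int main() {
  ScopedBuf<int> bounds(8); // freed automatically when main returns
  bounds[0] = 42;
}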
Result 17:
 2201  char *retlist = (char *)__kmp_allocate((len + 1) * sizeof(char));  in __kmp_parse_affinity_proc_id_list()
 3019  char *retlist = (char *)__kmp_allocate((len + 1) * sizeof(char));  in __kmp_parse_place_list()
 6626  ompd_env_block = (char *)__kmp_allocate(buffer.used + 1);  in __kmp_env_dump()
Result 18:
  205  team_icvs = __kmp_allocate(sizeof(kmp_internal_control_t));  in init()
Result 19:
  452  pr->u.p.steal_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));  in __kmp_dispatch_init_algorithm()
Result 20:
 3763  #define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)  (macro definition)
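Result 20 is the definition itself: __kmp_allocate is a macro in kmp.h forwarding to ___kmp_allocate. The odd-looking juxtaposition (size)KMP_SRC_LOC_CURR works because KMP_SRC_LOC_CURR carries its own leading comma, expanding to extra source-location arguments in debug builds and to nothing otherwise. A minimal standalone sketch of that source-location pattern (my_allocate, _my_allocate, and the SRC_LOC_* macros are illustrative names, not the runtime's):

#include <cstdio>
#include <cstdlib>

// Debug builds thread the caller's file/line through every allocation;
// release builds compile the extra arguments away. The leading comma
// lives inside the macro, which is why the real definition reads
// ___kmp_allocate((size)KMP_SRC_LOC_CURR) with nothing in between.
#ifndef NDEBUG
#define SRC_LOC_DECL , const char *file_, int line_
#define SRC_LOC_CURR , __FILE__, __LINE__
#else
#define SRC_LOC_DECL
#define SRC_LOC_CURR
#endif

// Hypothetical stand-in for ___kmp_allocate (which also aligns the
// memory, zero-fills it, and aborts the process on failure).
static void *_my_allocate(size_t size SRC_LOC_DECL) {
#ifndef NDEBUG
  std::fprintf(stderr, "alloc %zu bytes at %s:%d\n", size, file_, line_);
#endif
  void *p = std::calloc(1, size);
  if (!p)
    std::abort();
  return p;
}

// Caller-facing macro, mirroring kmp.h's
//   #define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
#define my_allocate(size) _my_allocate((size)SRC_LOC_CURR)

int main() {
  int *v = (int *)my_allocate(16 * sizeof(int)); // debug: logs this file:line
  std::free(v);
}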