Searched refs: __kmp_allocate (Results 1 – 17 of 17) sorted by relevance

/external/llvm-project/openmp/runtime/src/
kmp_threadprivate.cpp
91 d = (struct private_data *)__kmp_allocate(sizeof(struct private_data)); in __kmp_init_common_data()
104 d->data = __kmp_allocate(pc_size); in __kmp_init_common_data()
308 d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common)); in kmp_threadprivate_insert_private_data()
343 tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common)); in kmp_threadprivate_insert()
364 d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size); in kmp_threadprivate_insert()
376 d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size); in kmp_threadprivate_insert()
386 d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common)); in kmp_threadprivate_insert()
410 tn->par_addr = (void *)__kmp_allocate(tn->cmn_size); in kmp_threadprivate_insert()
519 d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common)); in __kmpc_threadprivate_register()
635 KMP_ITT_IGNORE(my_cache = (void **)__kmp_allocate( in __kmpc_threadprivate_cached()
[all …]
kmp_affinity.cpp
51 void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); } in operator new()
52 void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); } in operator new[]()
55 void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); } in operator new()
257 unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned)); in __kmp_affinity_assign_child_nums()
258 unsigned *lastLabel = (unsigned *)__kmp_allocate(depth * sizeof(unsigned)); in __kmp_affinity_assign_child_nums()
620 (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc); in __kmp_affinity_create_hwloc_map()
622 __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc); in __kmp_affinity_create_hwloc_map()
917 __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc); in __kmp_affinity_create_flat_map()
931 (AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc); in __kmp_affinity_create_flat_map()
980 (AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc); in __kmp_affinity_create_proc_group_map()
[all …]
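
Note on the kmp_affinity.cpp hits at lines 51-55: the runtime routes C++ operator new / operator new[] for affinity masks through __kmp_allocate, so mask objects get the same zero-filled, tracked storage as every other runtime allocation. A minimal sketch of that pattern, with my_alloc/my_free as hypothetical stand-ins for __kmp_allocate/__kmp_free (calloc mirrors the zero-fill noted by the "// zeroed" hit in kmp_alloc.cpp below); the matching operator delete overloads are included here for symmetry, not taken from the hits:

#include <cstddef>
#include <cstdlib>

static void *my_alloc(std::size_t n) { return std::calloc(1, n); } // stand-in for __kmp_allocate
static void my_free(void *p) { std::free(p); }                     // stand-in for __kmp_free

struct Mask {
  // Every `new Mask` / `new Mask[n]` now flows through the custom allocator.
  // The real runtime is built without exceptions, so allocation failure is
  // handled inside the allocator rather than via std::bad_alloc.
  void *operator new(std::size_t n) { return my_alloc(n); }
  void *operator new[](std::size_t n) { return my_alloc(n); }
  void operator delete(void *p) { my_free(p); }
  void operator delete[](void *p) { my_free(p); }
};

Routing the overloads through the runtime allocator keeps affinity masks on the same debug and tracking path as every other __kmp_allocate block, which is presumably the point of those lines.
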
kmp_dispatch_hier.h
63 scheds = (enum sched_type *)__kmp_allocate(sizeof(enum sched_type) * in append()
65 small_chunks = (kmp_int32 *)__kmp_allocate(sizeof(kmp_int32) * in append()
67 large_chunks = (kmp_int64 *)__kmp_allocate(sizeof(kmp_int64) * in append()
69 layers = (kmp_hier_layer_e *)__kmp_allocate(sizeof(kmp_hier_layer_e) * in append()
671 info = (kmp_hier_layer_info_t<T> *)__kmp_allocate( in allocate_hier()
673 layers = (kmp_hier_top_unit_t<T> **)__kmp_allocate( in allocate_hier()
690 layers[i] = (kmp_hier_top_unit_t<T> *)__kmp_allocate( in allocate_hier()
978 sh->hier = (kmp_hier_t<T> *)__kmp_allocate(sizeof(kmp_hier_t<T>)); in __kmp_dispatch_init_hierarchy()
993 th->th.th_hier_bar_data = (kmp_hier_private_bdata_t *)__kmp_allocate( in __kmp_dispatch_init_hierarchy()
kmp_error.cpp
65 p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) * in __kmp_expand_cons_stack()
135 p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header)); in __kmp_allocate_cons_stack()
137 p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) * in __kmp_allocate_cons_stack()
kmp_affinity.h
249 Mask() { mask = (mask_t *)__kmp_allocate(__kmp_affin_mask_size); } in Mask()
378 mask = (mask_t *)__kmp_allocate(sizeof(mask_t) * __kmp_num_proc_groups); in Mask()
721 (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32)); in init()
815 (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32)); in resize()
kmp_lock.cpp
2304 polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls * in __kmp_acquire_drdpa_lock_timed_template()
2325 polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls * in __kmp_acquire_drdpa_lock_timed_template()
2477 lck->lk.polls = (std::atomic<kmp_uint64> *)__kmp_allocate( in __kmp_init_drdpa_lock()
3053 kmp_indirect_lock_t **new_table = (kmp_indirect_lock_t **)__kmp_allocate( in __kmp_allocate_indirect_lock()
3062 *(__kmp_i_lock_table.table + i) = (kmp_indirect_lock_t *)__kmp_allocate( in __kmp_allocate_indirect_lock()
3069 lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]); in __kmp_allocate_indirect_lock()
3256 (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *)); in __kmp_init_dynamic_user_locks()
3257 *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)__kmp_allocate( in __kmp_init_dynamic_user_locks()
3699 table = (kmp_user_lock_p *)__kmp_allocate(sizeof(kmp_user_lock_p) * size); in __kmp_lock_table_insert()
3728 (char *)__kmp_allocate(space_for_locks + sizeof(kmp_block_of_locks)); in __kmp_lock_block_allocate()
[all …]
kmp_runtime.cpp
1308 (dispatch_private_info_t *)__kmp_allocate( in __kmp_serialized_parallel()
1341 (dispatch_private_info_t *)__kmp_allocate( in __kmp_serialized_parallel()
1473 *p_hot_teams = (kmp_hot_team_ptr_t *)__kmp_allocate( in __kmp_fork_call()
2093 new_stack = (kmp_uint8 *)__kmp_allocate(new_size); in __kmp_fork_call()
2609 (kmp_internal_control_t *)__kmp_allocate( in __kmp_save_internal_controls()
3056 (kmp_info_t **)__kmp_allocate(sizeof(kmp_info_t *) * max_nth); in __kmp_allocate_team_arrays()
3057 team->t.t_disp_buffer = (dispatch_shared_info_t *)__kmp_allocate( in __kmp_allocate_team_arrays()
3060 (kmp_disp_t *)__kmp_allocate(sizeof(kmp_disp_t) * max_nth); in __kmp_allocate_team_arrays()
3062 (kmp_taskdata_t *)__kmp_allocate(sizeof(kmp_taskdata_t) * max_nth); in __kmp_allocate_team_arrays()
3570 newThreads = (kmp_info_t **)__kmp_allocate( in __kmp_expand_threads()
[all …]
kmp_stats.h
657 (kmp_stats_event *)__kmp_allocate(sizeof(kmp_stats_event) * INIT_SIZE); in kmp_stats_event_vector()
668 kmp_stats_event *tmp = (kmp_stats_event *)__kmp_allocate( in push_back()
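The two kmp_stats.h hits sketch the event vector's growth path: the constructor allocates INIT_SIZE events up front, and push_back allocates a larger block and copies when the vector fills, since this allocator interface has no realloc counterpart. A rough sketch of that allocate-copy-free idiom; the type names mirror the hits, but the INIT_SIZE value and the doubling policy are assumptions of this sketch:

#include <cstddef>
#include <cstdlib>
#include <cstring>

struct kmp_stats_event { /* start/stop timestamps, nest level, ... */ };

struct kmp_stats_event_vector {
  kmp_stats_event *events;
  std::size_t size, capacity;
  enum { INIT_SIZE = 1024 }; // initial capacity; actual value not shown above

  kmp_stats_event_vector()
      : events((kmp_stats_event *)std::calloc(INIT_SIZE, sizeof(kmp_stats_event))),
        size(0), capacity(INIT_SIZE) {}

  void push_back(const kmp_stats_event &ev) {
    if (size == capacity) {
      // No realloc in this interface: grab a bigger zeroed block, copy the
      // live entries across, then release the old block.
      capacity *= 2; // growth factor assumed for this sketch
      kmp_stats_event *tmp =
          (kmp_stats_event *)std::calloc(capacity, sizeof(kmp_stats_event));
      std::memcpy(tmp, events, size * sizeof(kmp_stats_event));
      std::free(events);
      events = tmp;
    }
    events[size++] = ev;
  }
};
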
ompt-specific.cpp
277 (ompt_lw_taskteam_t *)__kmp_allocate(sizeof(ompt_lw_taskteam_t)); in __ompt_lw_taskteam_link()
kmp_gsupport.cpp
724 (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
771 (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
1014 (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
1063 (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
kmp_tasking.cpp
309 (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *)); in __kmp_realloc_task_deque()
2137 arr[i].reduce_priv = __kmp_allocate(nth * size); in __kmp_task_reduction_init()
2149 arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *)); in __kmp_task_reduction_init()
2254 p_priv[tid] = __kmp_allocate(arr[i].reduce_size); in __kmpc_task_reduction_get_th_data()
3188 thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate( in __kmp_alloc_task_deque()
3261 new_data = (kmp_thread_data_t *)__kmp_allocate( in __kmp_realloc_task_threads_data()
3285 *threads_data_p = (kmp_thread_data_t *)__kmp_allocate( in __kmp_realloc_task_threads_data()
3368 task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t)); in __kmp_allocate_task_team()
kmp_stats.cpp
385 (kmp_stats_list *)__kmp_allocate(sizeof(kmp_stats_list)); in push_back()
kmp_taskdeps.cpp
380 info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t)); in __kmp_process_deps()
kmp_alloc.cpp
232 ? __kmp_allocate(sizeof(*data)) in set_thr_data()
1347 al = (kmp_allocator_t *)__kmp_allocate(sizeof(kmp_allocator_t)); // zeroed in __kmpc_init_allocator()
kmp_settings.cpp
1990 char *retlist = (char *)__kmp_allocate((len + 1) * sizeof(char)); in __kmp_parse_affinity_proc_id_list()
2773 char *retlist = (char *)__kmp_allocate((len + 1) * sizeof(char)); in __kmp_parse_place_list()
kmp_dispatch.cpp
385 (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t)); in __kmp_dispatch_init_algorithm()
kmp.h
3274 #define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR) macro
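
The kmp.h entry is the definition behind every hit above: __kmp_allocate is a macro that forwards to ___kmp_allocate, splicing in the caller's file and line through KMP_SRC_LOC_CURR (a leading-comma macro, so the extra arguments disappear entirely when source-location tracking is compiled out; the macro text itself implies this shape). A self-contained sketch of the mechanism, assuming the debug-build form of the macros; the zero-fill mirrors the "// zeroed" comment in the kmp_alloc.cpp hit, while the fprintf bookkeeping is purely illustrative:

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Debug-build shape assumed: the leading comma lets the macro splice extra
// parameters/arguments into an existing list, or vanish in release builds.
#define KMP_SRC_LOC_DECL , char const *file, int line
#define KMP_SRC_LOC_CURR , __FILE__, __LINE__

static void *___kmp_allocate(std::size_t size KMP_SRC_LOC_DECL) {
  void *ptr = std::malloc(size); // the real allocator also aligns and tracks blocks
  if (ptr)
    std::memset(ptr, 0, size);   // callers rely on zeroed memory
  std::fprintf(stderr, "allocated %zu bytes at %s:%d\n", size, file, line);
  return ptr;
}

#define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)

int main() {
  // The cast-per-call style seen throughout the hits above.
  int *counts = (int *)__kmp_allocate(8 * sizeof(int));
  std::free(counts); // the runtime pairs this with __kmp_free instead
  return 0;
}

Expanding the call site by hand shows why the trick works: __kmp_allocate(8 * sizeof(int)) becomes ___kmp_allocate((8 * sizeof(int)), __FILE__, __LINE__) in a debug build, and would become ___kmp_allocate((8 * sizeof(int))) if both macros expanded to nothing.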