Lines Matching full:team

41 //  thread_data: thread data for task team thread containing stack
97 // thread_data: thread data for task team thread containing stack
156 return; // Don't push anything on stack if team or team tasks are serialized in __kmp_push_task_stack()
216 // Don't pop anything from stack if team or team tasks are serialized in __kmp_pop_task_stack()
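Both guards at 156 and 216 describe the same early return; a minimal sketch of the pattern, assuming the task descriptor is reachable as taskdata and carries the usual td_flags bits:

    // Hedged sketch: skip all task-stack bookkeeping when execution is serial.
    if (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)
      return; // nothing is pushed, so there is nothing to pop either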
348 KA_TRACE(20, ("__kmp_push_task: T#%d team serialized; returning " in __kmp_push_task()
436 // when team ends
456 // new team
459 // team: team for implicit task data
460 // tid: thread within team to set up
461 void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team, in __kmp_push_current_task_to_thread() argument
464 // tasks of new team in __kmp_push_current_task_to_thread()
469 team->t.t_implicit_task_taskdata[tid].td_parent)); in __kmp_push_current_task_to_thread()
474 if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) { in __kmp_push_current_task_to_thread()
475 team->t.t_implicit_task_taskdata[0].td_parent = in __kmp_push_current_task_to_thread()
477 this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0]; in __kmp_push_current_task_to_thread()
480 team->t.t_implicit_task_taskdata[tid].td_parent = in __kmp_push_current_task_to_thread()
481 team->t.t_implicit_task_taskdata[0].td_parent; in __kmp_push_current_task_to_thread()
482 this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid]; in __kmp_push_current_task_to_thread()
489 team->t.t_implicit_task_taskdata[tid].td_parent)); in __kmp_push_current_task_to_thread()
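The fragments at 474-482 imply a master/worker split inside __kmp_push_current_task_to_thread(); a reconstruction of the visible control flow (the tid == 0 test itself falls between the matched lines and is assumed):

    if (tid == 0) {
      // Master: splice the team's slot-0 implicit task under the current task.
      if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
        team->t.t_implicit_task_taskdata[0].td_parent =
            this_thr->th.th_current_task;
        this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
      }
    } else {
      // Workers: inherit the parent recorded in slot 0 by the master.
      team->t.t_implicit_task_taskdata[tid].td_parent =
          team->t.t_implicit_task_taskdata[0].td_parent;
      this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
    }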
509 // TODO: GEH - make sure root team implicit task is initialized properly. in __kmp_task_start()
922 // Only need to keep track of count if team parallel and tasking not in __kmp_task_finish()
959 // TODO: GEH - make sure root team implicit task is initialized properly. in __kmp_task_finish()
1041 // team: team for this_thr
1042 // tid: thread id of given thread within team
1048 kmp_team_t *team, int tid, int set_curr_task) { in __kmp_init_implicit_task() argument
1049 kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid]; in __kmp_init_implicit_task()
1053 ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n", in __kmp_init_implicit_task()
1054 tid, team, task, set_curr_task ? "TRUE" : "FALSE")); in __kmp_init_implicit_task()
1057 task->td_team = team; in __kmp_init_implicit_task()
1072 task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0; in __kmp_init_implicit_task()
1089 __kmp_push_current_task_to_thread(this_thr, team, tid); in __kmp_init_implicit_task()
1100 KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n", tid, in __kmp_init_implicit_task()
1101 team, task)); in __kmp_init_implicit_task()
1174 kmp_team_t *team = thread->th.th_team; in __kmp_task_alloc() local
1191 if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) { in __kmp_task_alloc()
1209 /* This should only happen if the team is serialized in __kmp_task_alloc()
3210 set up a task team and propagate it to the thread */ in __kmp_task_alloc()
1211 KMP_DEBUG_ASSERT(team->t.t_serialized); in __kmp_task_alloc()
1213 ("T#%d creating task team in __kmp_task_alloc for proxy task\n", in __kmp_task_alloc()
1216 thread, team, in __kmp_task_alloc()
1217 1); // 1 indicates setup the current team regardless of nthreads in __kmp_task_alloc()
1218 thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state]; in __kmp_task_alloc()
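Read together, 1209-1218 suggest the following shape for the serialized-team proxy path; a hedged reconstruction (trace arguments elided):

    // Force task-team setup even though the team is serialized, then
    // publish the new pointer to the allocating thread.
    KMP_DEBUG_ASSERT(team->t.t_serialized);
    __kmp_task_team_setup(thread, team,
                          1); // 1 = set up the current team regardless of nthreads
    thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];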
1284 taskdata->td_team = team; in __kmp_task_alloc()
1312 taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0; in __kmp_task_alloc()
1314 // GEH - Note we serialize the task if the team is serialized to make sure in __kmp_task_alloc()
1345 // Only need to keep track of child task counts if team parallel and tasking not in __kmp_task_alloc()
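The bookkeeping rule named at 922 and 1345 reduces to one test on the flags set at 1312; a minimal sketch (the parent_task local is an assumption of this sketch):

    // Count incomplete children only when the team runs in parallel and
    // tasking is not serialized; serial execution cannot race on the counter.
    if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser))
      KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks); // parent_task: assumed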
2336 kmp_team_t *team = thr->th.th_team; in __kmp_task_reduction_modifier_init() local
2339 reduce_data = KMP_ATOMIC_LD_RLX(&team->t.t_tg_reduce_data[is_ws]); in __kmp_task_reduction_modifier_init()
2341 __kmp_atomic_compare_store(&team->t.t_tg_reduce_data[is_ws], reduce_data, in __kmp_task_reduction_modifier_init()
2350 KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[0]) == 0); in __kmp_task_reduction_modifier_init()
2351 KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[1]) == 0); in __kmp_task_reduction_modifier_init()
2352 KMP_ATOMIC_ST_REL(&team->t.t_tg_reduce_data[is_ws], reduce_data); in __kmp_task_reduction_modifier_init()
2355 (reduce_data = KMP_ATOMIC_LD_ACQ(&team->t.t_tg_reduce_data[is_ws])) == in __kmp_task_reduction_modifier_init()
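Lines 2339-2355 outline a first-arriver election over t_tg_reduce_data; a hedged sketch of the pattern, assuming (void *)1 serves as the in-progress sentinel and that nth is a local count from the surrounding function:

    reduce_data = KMP_ATOMIC_LD_RLX(&team->t.t_tg_reduce_data[is_ws]);
    if (reduce_data == NULL &&
        __kmp_atomic_compare_store(&team->t.t_tg_reduce_data[is_ws], reduce_data,
                                   (void *)1)) {
      // Winner: allocate the shared array, then publish the real pointer.
      reduce_data = __kmp_thread_malloc(thr, nth * sizeof(kmp_taskred_data_t));
      KMP_ATOMIC_ST_REL(&team->t.t_tg_reduce_data[is_ws], reduce_data);
    } else {
      // Losers: spin until the winner replaces the sentinel.
      while ((reduce_data = KMP_ATOMIC_LD_ACQ(
                  &team->t.t_tg_reduce_data[is_ws])) == (void *)1)
        KMP_CPU_PAUSE();
    }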
2440 kmp_team_t *team = thread->th.th_team; in __kmpc_taskgroup() local
2443 ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data; in __kmpc_taskgroup()
2462 kmp_team_t *team; in __kmpc_end_taskgroup() local
2467 team = thread->th.th_team; in __kmpc_end_taskgroup()
2470 my_parallel_data = team->t.ompt_team_info.parallel_data; in __kmpc_end_taskgroup()
2536 // check if <priv> data of the first reduction variable shared for the team in __kmpc_end_taskgroup()
2546 // cleanup fields in the team structure: in __kmpc_end_taskgroup()
2564 // cleanup fields in team structure: in __kmpc_end_taskgroup()
2994 // If this thread's task team is NULL, master has recognized that there are in __kmp_execute_tasks_template()
3060 // __kmp_enable_tasking: Allocate task team and resume threads sleeping at the
3062 // First thread in allocates the task team atomically.
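A sketch of the "first thread in allocates" discipline named at 3060-3062 and 3167, assuming the election is serialized by the task team's own bootstrap lock:

    __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
    if (!TCR_4(task_team->tt.tt_found_tasks)) {
      // First thread here: (re)allocate threads_data for this task team.
      // Later arrivals observe tt_found_tasks and skip the allocation.
      TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE);
    }
    __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);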
3129 Utility routines for "task teams". A task team (kmp_task_team_t) is kind of
3134 * master thread may exit the barrier code and free the team data structure,
3139 spawned by a member of the team, and the thread still needs access to all
3140 of the threads in the team, so that it can steal work from them.
3160 // Lock for task team data structures
3167 // per task team since task teams are recycled. No lock is needed during
3214 // Allocates a threads_data array for a task team, either by allocating an
3242 kmp_team_t *team = thread->th.th_team; in __kmp_realloc_task_threads_data() local
3305 thread_data->td.td_thr = team->t.t_threads[i]; in __kmp_realloc_task_threads_data()
3324 // Deallocates a threads_data array for a task team, including any attached
3340 // Allocates a task team associated with a specific team, taking it from
3341 // the global task team free list if possible. Also initializes data
3344 kmp_team_t *team) { in __kmp_allocate_task_team() argument
3348 KA_TRACE(20, ("__kmp_allocate_task_team: T#%d entering; team = %p\n", in __kmp_allocate_task_team()
3349 (thread ? __kmp_gtid_from_thread(thread) : -1), team)); in __kmp_allocate_task_team()
3352 // Take a task team from the task team pool in __kmp_allocate_task_team()
3364 "task team for team %p\n", in __kmp_allocate_task_team()
3365 __kmp_gtid_from_thread(thread), team)); in __kmp_allocate_task_team()
3366 // Allocate a new task team if one is not available. Cannot use in __kmp_allocate_task_team()
3389 task_team->tt.tt_nproc = nthreads = team->t.t_nproc; in __kmp_allocate_task_team()
3402 // Frees the task team associated with a specific thread, and adds it
3403 // to the global task team free list.
3408 // Put task team back on free list in __kmp_free_task_team()
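The handoff at 3408 is a head insertion under the global lock named at 3160; a sketch:

    __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
    KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL);
    task_team->tt.tt_next = __kmp_free_task_teams; // push onto global free list
    TCW_PTR(__kmp_free_task_teams, task_team);
    __kmp_release_bootstrap_lock(&__kmp_task_team_lock);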
3419 // Free all the task teams on the task team free list.
3445 // trying to steal tasks. Wait for each thread to unreference its task team.
3505 // __kmp_task_team_setup: Create a task_team for the current team, but use
3507 void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) { in __kmp_task_team_setup() argument
3512 // If it exists, it is the current task team and shouldn't be touched yet as in __kmp_task_team_setup()
3514 if (team->t.t_task_team[this_thr->th.th_task_state] == NULL && in __kmp_task_team_setup()
3515 (always || team->t.t_nproc > 1)) { in __kmp_task_team_setup()
3516 team->t.t_task_team[this_thr->th.th_task_state] = in __kmp_task_team_setup()
3517 __kmp_allocate_task_team(this_thr, team); in __kmp_task_team_setup()
3519 "for team %d at parity=%d\n", in __kmp_task_team_setup()
3521 team->t.t_task_team[this_thr->th.th_task_state], in __kmp_task_team_setup()
3522 ((team != NULL) ? team->t.t_id : -1), in __kmp_task_team_setup()
3533 if (team->t.t_nproc > 1) { in __kmp_task_team_setup()
3535 if (team->t.t_task_team[other_team] == NULL) { // setup other team as well in __kmp_task_team_setup()
3536 team->t.t_task_team[other_team] = in __kmp_task_team_setup()
3537 __kmp_allocate_task_team(this_thr, team); in __kmp_task_team_setup()
3539 "task_team %p for team %d at parity=%d\n", in __kmp_task_team_setup()
3541 team->t.t_task_team[other_team], in __kmp_task_team_setup()
3542 ((team != NULL) ? team->t.t_id : -1), other_team)); in __kmp_task_team_setup()
3543 } else { // Leave the old task team struct in place for the upcoming region; in __kmp_task_team_setup()
3545 kmp_task_team_t *task_team = team->t.t_task_team[other_team]; in __kmp_task_team_setup()
3547 team->t.t_nproc != task_team->tt.tt_nproc) { in __kmp_task_team_setup()
3548 TCW_4(task_team->tt.tt_nproc, team->t.t_nproc); in __kmp_task_team_setup()
3552 team->t.t_nproc); in __kmp_task_team_setup()
3555 // if team size has changed, the first thread to enable tasking will in __kmp_task_team_setup()
3558 "%p for team %d at parity=%d\n", in __kmp_task_team_setup()
3560 team->t.t_task_team[other_team], in __kmp_task_team_setup()
3561 ((team != NULL) ? team->t.t_id : -1), other_team)); in __kmp_task_team_setup()
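The two slots indexed throughout 3514-3561 implement double buffering keyed on the master's th_task_state bit; a sketch of the indexing (the xor is an assumption inferred from the "parity" traces):

    // One slot serves the region that is starting; the other is pre-armed
    // for the next region. th_task_state flips between 0 and 1 per barrier.
    int this_state = this_thr->th.th_task_state;
    int other_team = this_state ^ 1; // assumed flip, matching "parity" above
    kmp_task_team_t *current = team->t.t_task_team[this_state];
    kmp_task_team_t *next = team->t.t_task_team[other_team];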
3566 // __kmp_task_team_sync: Propagation of task team data from team to threads
3567 // which happens just after the release phase of a team barrier. This may be
3569 void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team) { in __kmp_task_team_sync() argument
3575 // It is now safe to propagate the task team pointer from the team struct to in __kmp_task_team_sync()
3578 team->t.t_task_team[this_thr->th.th_task_state]); in __kmp_task_team_sync()
3580 ("__kmp_task_team_sync: Thread T#%d task team switched to task_team " in __kmp_task_team_sync()
3581 "%p from Team #%d (parity=%d)\n", in __kmp_task_team_sync()
3583 ((team != NULL) ? team->t.t_id : -1), this_thr->th.th_task_state)); in __kmp_task_team_sync()
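Per the trace at 3580-3583, the sync amounts to flipping the thread's parity bit and copying the team's pointer for the new parity into the thread; a sketch (the flip itself falls between the matched lines and is assumed):

    this_thr->th.th_task_state = (kmp_uint8)(1 - this_thr->th.th_task_state);
    TCW_PTR(this_thr->th.th_task_team,
            team->t.t_task_team[this_thr->th.th_task_state]);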
3587 // barrier gather phase. Only called by master thread if #threads in team > 1 or
3595 kmp_team_t *team USE_ITT_BUILD_ARG(void *itt_sync_obj), int wait) { in __kmp_task_team_wait()
3596 kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state]; in __kmp_task_team_wait()
3615 // Deactivate the old task team, so that the worker threads will stop in __kmp_task_team_wait()
3620 "setting active to false, setting local and team's pointer to NULL\n", in __kmp_task_team_wait()
3638 void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid) { in __kmp_tasking_barrier() argument
3641 &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads); in __kmp_tasking_barrier()
3754 - the top half is the one that can be done from a thread outside the team
3755 - the bottom half must be run from a thread within the team
3758 threads of the team. Once the td_incomplete_child_tasks counter of the parent
3820 team. Run first and bottom halves directly.
3845 the team.
3861 // corresponding team in __kmpc_proxy_task_completed_ooo()
3862 kmp_team_t *team = taskdata->td_team; in __kmpc_proxy_task_completed_ooo() local
3863 kmp_int32 nthreads = team->t.t_nproc; in __kmpc_proxy_task_completed_ooo()
3874 thread = team->t.t_threads[k]; in __kmpc_proxy_task_completed_ooo()
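Lines 3861-3874 walk the team looking for a thread that can take the bottom half; a hedged reconstruction of the loop (the give/retry logic between the matched lines, and the exact __kmp_give_task() signature, are assumptions here):

    kmp_int32 k = 0;
    kmp_info_t *thread;
    do {
      // Round-robin over the team until some thread's deque accepts the
      // queued bottom half of the proxy completion.
      thread = team->t.t_threads[k];
      k = (k + 1) % nthreads;
    } while (!__kmp_give_task(thread, k, ptask, 1)); // pass = 1: assumed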
3935 kmp_team_t *team = taskdata->td_team; in __kmp_fulfill_event() local
3937 if (thread->th.th_team == team) { in __kmp_fulfill_event()
3999 // Only need to keep track of child task counts if team parallel and tasking in __kmp_task_dup_alloc()