Lines matching full:team (each entry: the original source line number, the matching source line, and the enclosing function; lines that declare the symbol are additionally tagged local or argument)
50 kmp_team_t *team = this_thr->th.th_team; in __kmp_linear_barrier_gather_template() local
52 kmp_info_t **other_threads = team->t.t_threads; in __kmp_linear_barrier_gather_template()
57 gtid, team->t.t_id, tid, bt)); in __kmp_linear_barrier_gather_template()
73 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(0, team), in __kmp_linear_barrier_gather_template()
74 team->t.t_id, 0, &thr_bar->b_arrived, thr_bar->b_arrived, in __kmp_linear_barrier_gather_template()
77 /* After performing this write, a worker thread may not assume that the team in __kmp_linear_barrier_gather_template()
84 kmp_balign_team_t *team_bar = &team->t.t_bar[bt]; in __kmp_linear_barrier_gather_template()
87 // Don't have to worry about sleep bit here or atomic since team setting in __kmp_linear_barrier_gather_template()
90 // Collect all the worker team member threads. in __kmp_linear_barrier_gather_template()
99 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(i, team), in __kmp_linear_barrier_gather_template()
100 team->t.t_id, i, in __kmp_linear_barrier_gather_template()
126 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(i, team), in __kmp_linear_barrier_gather_template()
127 team->t.t_id, i)); in __kmp_linear_barrier_gather_template()
135 ANNOTATE_REDUCE_BEFORE(&team->t.t_bar); in __kmp_linear_barrier_gather_template()
138 // Don't have to worry about sleep bit here or atomic since team setting in __kmp_linear_barrier_gather_template()
140 KA_TRACE(20, ("__kmp_linear_barrier_gather: T#%d(%d:%d) set team %d " in __kmp_linear_barrier_gather_template()
142 gtid, team->t.t_id, tid, team->t.t_id, &team_bar->b_arrived, in __kmp_linear_barrier_gather_template()
148 gtid, team->t.t_id, tid, bt)); in __kmp_linear_barrier_gather_template()
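The fragments above trace the flat (linear) gather: each worker bumps only its own arrived flag and then stops touching the team, while thread 0 polls every worker's flag and finally publishes the team-wide arrived value. A minimal sketch of that pattern follows; the types and names (ThreadBar, TeamBar, worker_arrive, master_gather) are illustrative stand-ins, not the real kmp_* structures or API.

  // Flat gather sketch; none of these types are the real kmp_* structures.
  #include <atomic>
  #include <cstdint>
  #include <vector>

  struct ThreadBar { std::atomic<uint64_t> arrived{0}; };

  struct TeamBar {
    std::vector<ThreadBar> thr;            // one slot per team member
    std::atomic<uint64_t> team_arrived{0}; // published by thread 0 only
    explicit TeamBar(int nproc) : thr(nproc) {}
  };

  // Worker side: publish arrival, then stop touching shared team state.
  void worker_arrive(TeamBar &tb, int tid, uint64_t next_epoch) {
    tb.thr[tid].arrived.store(next_epoch, std::memory_order_release);
  }

  // Thread 0: wait for every worker, then bump the team-wide counter.
  void master_gather(TeamBar &tb, uint64_t next_epoch) {
    for (size_t i = 1; i < tb.thr.size(); ++i)
      while (tb.thr[i].arrived.load(std::memory_order_acquire) < next_epoch) {
        // spin; the real runtime backs off, sleeps, or executes tasks here
      }
    tb.team_arrived.store(next_epoch, std::memory_order_release);
  }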
158 kmp_team_t *team; in __kmp_linear_barrier_release_template() local
165 team = __kmp_threads[gtid]->th.th_team; in __kmp_linear_barrier_release_template()
166 KMP_DEBUG_ASSERT(team != NULL); in __kmp_linear_barrier_release_template()
167 other_threads = team->t.t_threads; in __kmp_linear_barrier_release_template()
171 gtid, team->t.t_id, tid, bt)); in __kmp_linear_barrier_release_template()
178 ngo_load(&team->t.t_implicit_task_taskdata[0].td_icvs); in __kmp_linear_barrier_release_template()
180 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[i], in __kmp_linear_barrier_release_template()
181 team, i, FALSE); in __kmp_linear_barrier_release_template()
182 ngo_store_icvs(&team->t.t_implicit_task_taskdata[i].td_icvs, in __kmp_linear_barrier_release_template()
183 &team->t.t_implicit_task_taskdata[0].td_icvs); in __kmp_linear_barrier_release_template()
201 gtid, team->t.t_id, tid, other_threads[i]->th.th_info.ds.ds_gtid, in __kmp_linear_barrier_release_template()
202 team->t.t_id, i, &other_threads[i]->th.th_bar[bt].bb.b_go, in __kmp_linear_barrier_release_template()
243 // The worker thread may now assume that the team is valid. in __kmp_linear_barrier_release_template()
246 team = __kmp_threads[gtid]->th.th_team; in __kmp_linear_barrier_release_template()
248 KMP_DEBUG_ASSERT(team != NULL); in __kmp_linear_barrier_release_template()
252 gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE)); in __kmp_linear_barrier_release_template()
258 gtid, team->t.t_id, tid, bt)); in __kmp_linear_barrier_release_template()
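The release side mirrors this: the primary thread pushes any per-thread state (the ICVs) and then raises each worker's go flag, and a worker only re-reads shared team data once its own flag has been raised. A hedged sketch, reusing the illustrative naming from the gather sketch above:

  #include <atomic>
  #include <cstdint>
  #include <vector>

  struct GoFlag { std::atomic<uint64_t> go{0}; };  // illustrative, not the kmp layout

  // Primary thread: wake every worker for the new epoch.
  void master_release(std::vector<GoFlag> &flags, uint64_t go_epoch) {
    for (size_t i = 1; i < flags.size(); ++i)
      flags[i].go.store(go_epoch, std::memory_order_release);
  }

  // Worker: wait on our own flag; only afterwards may the team pointer be
  // re-read and trusted again.
  void worker_wait_for_release(GoFlag &mine, uint64_t go_epoch) {
    while (mine.go.load(std::memory_order_acquire) < go_epoch) {
      // spin / sleep
    }
  }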
296 kmp_team_t *team = this_thr->th.th_team; in __kmp_tree_barrier_gather() local
298 kmp_info_t **other_threads = team->t.t_threads; in __kmp_tree_barrier_gather()
308 gtid, team->t.t_id, tid, bt)); in __kmp_tree_barrier_gather()
323 new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP; in __kmp_tree_barrier_gather()
337 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team), in __kmp_tree_barrier_gather()
338 team->t.t_id, child_tid, &child_bar->b_arrived, new_state)); in __kmp_tree_barrier_gather()
354 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team), in __kmp_tree_barrier_gather()
355 team->t.t_id, child_tid)); in __kmp_tree_barrier_gather()
363 ANNOTATE_REDUCE_BEFORE(&team->t.t_bar); in __kmp_tree_barrier_gather()
376 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(parent_tid, team), in __kmp_tree_barrier_gather()
377 team->t.t_id, parent_tid, &thr_bar->b_arrived, thr_bar->b_arrived, in __kmp_tree_barrier_gather()
381 /* After performing this write, a worker thread may not assume that the team in __kmp_tree_barrier_gather()
388 // Need to update the team arrived pointer if we are the master thread in __kmp_tree_barrier_gather()
390 team->t.t_bar[bt].b_arrived = new_state; in __kmp_tree_barrier_gather()
392 team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP; in __kmp_tree_barrier_gather()
393 KA_TRACE(20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) set team %d " in __kmp_tree_barrier_gather()
395 gtid, team->t.t_id, tid, team->t.t_id, in __kmp_tree_barrier_gather()
396 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived)); in __kmp_tree_barrier_gather()
400 gtid, team->t.t_id, tid, bt)); in __kmp_tree_barrier_gather()
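In the tree variant each thread first waits for its own children and only then reports to its parent, so arrivals propagate up a fixed-branching-factor tree instead of all converging on thread 0. A sketch of one gather step under an assumed power-of-two branching factor (the runtime's actual numbering and branch factor may differ):

  #include <atomic>
  #include <cstdint>
  #include <vector>

  constexpr int kBranchBits = 2;                  // assumed branching factor 4
  struct TreeArr { std::atomic<uint64_t> arrived{0}; };

  // Wait for this thread's children (if any), then report our own arrival so
  // the parent, tid' = (tid - 1) >> kBranchBits, can continue up the tree.
  void tree_gather_step(std::vector<TreeArr> &bar, int tid, uint64_t next_epoch) {
    const int nproc = static_cast<int>(bar.size());
    const int first_child = (tid << kBranchBits) + 1;
    for (int c = first_child;
         c < first_child + (1 << kBranchBits) && c < nproc; ++c)
      while (bar[c].arrived.load(std::memory_order_acquire) < next_epoch) {
        // spin; reduction data would be folded in here once the child arrives
      }
    bar[tid].arrived.store(next_epoch, std::memory_order_release);
  }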
407 kmp_team_t *team; in __kmp_tree_barrier_release() local
417 tid)) { // Handle fork barrier workers who aren't part of a team yet in __kmp_tree_barrier_release()
445 // The worker thread may now assume that the team is valid. in __kmp_tree_barrier_release()
446 team = __kmp_threads[gtid]->th.th_team; in __kmp_tree_barrier_release()
447 KMP_DEBUG_ASSERT(team != NULL); in __kmp_tree_barrier_release()
453 team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE)); in __kmp_tree_barrier_release()
456 team = __kmp_threads[gtid]->th.th_team; in __kmp_tree_barrier_release()
457 KMP_DEBUG_ASSERT(team != NULL); in __kmp_tree_barrier_release()
460 gtid, team->t.t_id, tid, bt)); in __kmp_tree_barrier_release()
466 kmp_info_t **other_threads = team->t.t_threads; in __kmp_tree_barrier_release()
483 __kmp_init_implicit_task(team->t.t_ident, in __kmp_tree_barrier_release()
484 team->t.t_threads[child_tid], team, in __kmp_tree_barrier_release()
486 copy_icvs(&team->t.t_implicit_task_taskdata[child_tid].td_icvs, in __kmp_tree_barrier_release()
487 &team->t.t_implicit_task_taskdata[0].td_icvs); in __kmp_tree_barrier_release()
494 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team), in __kmp_tree_barrier_release()
495 team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go, in __kmp_tree_barrier_release()
507 gtid, team->t.t_id, tid, bt)); in __kmp_tree_barrier_release()
516 kmp_team_t *team = this_thr->th.th_team; in __kmp_hyper_barrier_gather() local
518 kmp_info_t **other_threads = team->t.t_threads; in __kmp_hyper_barrier_gather()
529 gtid, team->t.t_id, tid, bt)); in __kmp_hyper_barrier_gather()
554 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(parent_tid, team), in __kmp_hyper_barrier_gather()
555 team->t.t_id, parent_tid, &thr_bar->b_arrived, in __kmp_hyper_barrier_gather()
560 loop), a worker thread may not assume that the team is valid any more in __kmp_hyper_barrier_gather()
570 new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP; in __kmp_hyper_barrier_gather()
586 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team), in __kmp_hyper_barrier_gather()
587 team->t.t_id, child_tid, &child_bar->b_arrived, new_state)); in __kmp_hyper_barrier_gather()
604 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team), in __kmp_hyper_barrier_gather()
605 team->t.t_id, child_tid)); in __kmp_hyper_barrier_gather()
613 ANNOTATE_REDUCE_BEFORE(&team->t.t_bar); in __kmp_hyper_barrier_gather()
619 // Need to update the team arrived pointer if we are the master thread in __kmp_hyper_barrier_gather()
621 team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP; in __kmp_hyper_barrier_gather()
623 team->t.t_bar[bt].b_arrived = new_state; in __kmp_hyper_barrier_gather()
624 KA_TRACE(20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) set team %d " in __kmp_hyper_barrier_gather()
626 gtid, team->t.t_id, tid, team->t.t_id, in __kmp_hyper_barrier_gather()
627 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived)); in __kmp_hyper_barrier_gather()
631 gtid, team->t.t_id, tid, bt)); in __kmp_hyper_barrier_gather()
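The hypercube ("hyper") gather pairs threads across power-of-two offsets: in each round the higher-numbered member of a pair signals and drops out, while the lower-numbered member absorbs it and moves on, so thread 0 finishes having observed the whole team in roughly log2(nproc) rounds. A simplified sketch that ignores the reduction hook; the names are illustrative only:

  #include <atomic>
  #include <cstdint>
  #include <vector>

  struct HyperArr { std::atomic<uint64_t> arrived{0}; };  // not the kmp layout

  void hyper_gather(std::vector<HyperArr> &bar, int tid, uint64_t next_epoch) {
    const int nproc = static_cast<int>(bar.size());
    for (int offset = 1; offset < nproc; offset <<= 1) {
      if (tid & offset) {            // higher member of the pair: signal, done
        bar[tid].arrived.store(next_epoch, std::memory_order_release);
        return;
      }
      const int partner = tid + offset;   // lower member: absorb the partner
      if (partner < nproc)
        while (bar[partner].arrived.load(std::memory_order_acquire) < next_epoch) {
          // spin
        }
    }
    // Only tid 0 reaches this point; it now owns the fully gathered epoch.
    bar[0].arrived.store(next_epoch, std::memory_order_release);
  }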
640 kmp_team_t *team; in __kmp_hyper_barrier_release() local
656 team = __kmp_threads[gtid]->th.th_team; in __kmp_hyper_barrier_release()
657 KMP_DEBUG_ASSERT(team != NULL); in __kmp_hyper_barrier_release()
660 gtid, team->t.t_id, tid, bt)); in __kmp_hyper_barrier_release()
664 &team->t.t_implicit_task_taskdata[tid].td_icvs); in __kmp_hyper_barrier_release()
667 } else { // Handle fork barrier workers who aren't part of a team yet in __kmp_hyper_barrier_release()
694 // The worker thread may now assume that the team is valid. in __kmp_hyper_barrier_release()
695 team = __kmp_threads[gtid]->th.th_team; in __kmp_hyper_barrier_release()
696 KMP_DEBUG_ASSERT(team != NULL); in __kmp_hyper_barrier_release()
702 gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE)); in __kmp_hyper_barrier_release()
706 other_threads = team->t.t_threads; in __kmp_hyper_barrier_release()
768 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team), in __kmp_hyper_barrier_release()
769 team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go, in __kmp_hyper_barrier_release()
781 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team, tid, in __kmp_hyper_barrier_release()
783 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs, in __kmp_hyper_barrier_release()
790 gtid, team->t.t_id, tid, bt)); in __kmp_hyper_barrier_release()
797 Performs the minimum amount of initialization required based on how the team
799 traditional wake-up mechanisms. For example, if the team size increases,
800 threads already in the team will respond to on-core wakeup on their parent
801 thread, but threads newly added to the team will only be listening on the
806 int tid, kmp_team_t *team) { in __kmp_init_hierarchical_barrier_thread() argument
808 bool uninitialized = thr_bar->team == NULL; in __kmp_init_hierarchical_barrier_thread()
809 bool team_changed = team != thr_bar->team; in __kmp_init_hierarchical_barrier_thread()
844 thr_bar->team = team; in __kmp_init_hierarchical_barrier_thread()
846 &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb; in __kmp_init_hierarchical_barrier_thread()
849 thr_bar->team = team; in __kmp_init_hierarchical_barrier_thread()
851 &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb; in __kmp_init_hierarchical_barrier_thread()
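The doc comment above describes lazy re-initialization: the per-thread barrier record remembers which team it was set up for, and only when that record is uninitialized or points at a different team is the hierarchy information rebuilt; threads that are new to the team have not registered for on-core wake-up yet, so they keep listening on the traditional go flag. A rough sketch of that decision, with illustrative names (HierBarState, WakeUp) rather than the real kmp_bstate_t fields:

  struct HierBarState {
    const void *team = nullptr;  // team this record was last initialized for
    int parent_tid = -1;
  };

  enum class WakeUp { Traditional, OnCore };

  // If the record is stale, rebuild it and fall back to the traditional
  // go-flag wake-up; otherwise the thread can keep using on-core wake-up.
  WakeUp init_hier_barrier_state(HierBarState &st, const void *team,
                                 int new_parent_tid) {
    const bool uninitialized = (st.team == nullptr);
    const bool team_changed = (team != st.team);
    if (uninitialized || team_changed) {
      st.team = team;
      st.parent_tid = new_parent_tid;
      return WakeUp::Traditional;        // not yet wired into on-core wake-up
    }
    return WakeUp::OnCore;
  }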
872 kmp_team_t *team = this_thr->th.th_team; in __kmp_hierarchical_barrier_gather() local
875 kmp_info_t **other_threads = team->t.t_threads; in __kmp_hierarchical_barrier_gather()
878 int level = team->t.t_level; in __kmp_hierarchical_barrier_gather()
890 gtid, team->t.t_id, tid, bt)); in __kmp_hierarchical_barrier_gather()
901 team); in __kmp_hierarchical_barrier_gather()
906 (kmp_uint64)team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP; in __kmp_hierarchical_barrier_gather()
914 : team->t.t_bar[bt].b_arrived | thr_bar->leaf_state; in __kmp_hierarchical_barrier_gather()
917 gtid, team->t.t_id, tid)); in __kmp_hierarchical_barrier_gather()
928 gtid, team->t.t_id, tid, in __kmp_hierarchical_barrier_gather()
929 __kmp_gtid_from_tid(child_tid, team), team->t.t_id, in __kmp_hierarchical_barrier_gather()
937 ANNOTATE_REDUCE_BEFORE(&team->t.t_bar); in __kmp_hierarchical_barrier_gather()
955 gtid, team->t.t_id, tid, in __kmp_hierarchical_barrier_gather()
956 __kmp_gtid_from_tid(child_tid, team), team->t.t_id, in __kmp_hierarchical_barrier_gather()
964 gtid, team->t.t_id, tid, in __kmp_hierarchical_barrier_gather()
965 __kmp_gtid_from_tid(child_tid, team), team->t.t_id, in __kmp_hierarchical_barrier_gather()
971 ANNOTATE_REDUCE_BEFORE(&team->t.t_bar); in __kmp_hierarchical_barrier_gather()
988 gtid, team->t.t_id, tid, in __kmp_hierarchical_barrier_gather()
989 __kmp_gtid_from_tid(child_tid, team), team->t.t_id, in __kmp_hierarchical_barrier_gather()
997 gtid, team->t.t_id, tid, in __kmp_hierarchical_barrier_gather()
998 __kmp_gtid_from_tid(child_tid, team), team->t.t_id, in __kmp_hierarchical_barrier_gather()
1004 ANNOTATE_REDUCE_BEFORE(&team->t.t_bar); in __kmp_hierarchical_barrier_gather()
1015 gtid, team->t.t_id, tid, in __kmp_hierarchical_barrier_gather()
1016 __kmp_gtid_from_tid(thr_bar->parent_tid, team), team->t.t_id, in __kmp_hierarchical_barrier_gather()
1020 not assume that the team is valid any more - it could be deallocated by in __kmp_hierarchical_barrier_gather()
1031 thr_bar->b_arrived = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP; in __kmp_hierarchical_barrier_gather()
1036 } else { // Master thread needs to update the team's b_arrived value in __kmp_hierarchical_barrier_gather()
1037 team->t.t_bar[bt].b_arrived = new_state; in __kmp_hierarchical_barrier_gather()
1038 KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) set team %d " in __kmp_hierarchical_barrier_gather()
1040 gtid, team->t.t_id, tid, team->t.t_id, in __kmp_hierarchical_barrier_gather()
1041 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived)); in __kmp_hierarchical_barrier_gather()
1043 // Is the team access below unsafe or just technically invalid? in __kmp_hierarchical_barrier_gather()
1046 gtid, team->t.t_id, tid, bt)); in __kmp_hierarchical_barrier_gather()
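One detail visible above is the leaf_state word: the children closest to a parent need not each be polled through a separate flag; their arrivals can be ORed into a packed bit mask that the parent checks with a single load. A speculative sketch of that idea (field names and the exact packing are illustrative, not the runtime's layout):

  #include <atomic>
  #include <cstdint>

  struct LeafGroup {
    std::atomic<uint64_t> arrived_bits{0};  // one bit per leaf child
    uint64_t expected_bits;
    explicit LeafGroup(int nleaves)
        : expected_bits(nleaves >= 64 ? ~0ull : (1ull << nleaves) - 1) {}
  };

  void leaf_arrive(LeafGroup &g, int leaf_index) {
    g.arrived_bits.fetch_or(1ull << leaf_index, std::memory_order_release);
  }

  void parent_wait_for_leaves(const LeafGroup &g) {
    while ((g.arrived_bits.load(std::memory_order_acquire) & g.expected_bits)
           != g.expected_bits) {
      // spin; the real runtime folds these bits into the arrived epoch counter
    }
  }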
1053 kmp_team_t *team; in __kmp_hierarchical_barrier_release() local
1059 team = __kmp_threads[gtid]->th.th_team; in __kmp_hierarchical_barrier_release()
1060 KMP_DEBUG_ASSERT(team != NULL); in __kmp_hierarchical_barrier_release()
1063 gtid, team->t.t_id, tid, bt)); in __kmp_hierarchical_barrier_release()
1068 thr_bar->team == NULL) { in __kmp_hierarchical_barrier_release()
1097 // The worker thread may now assume that the team is valid. in __kmp_hierarchical_barrier_release()
1098 team = __kmp_threads[gtid]->th.th_team; in __kmp_hierarchical_barrier_release()
1099 KMP_DEBUG_ASSERT(team != NULL); in __kmp_hierarchical_barrier_release()
1105 gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE)); in __kmp_hierarchical_barrier_release()
1110 int level = team->t.t_level; in __kmp_hierarchical_barrier_release()
1111 if (team->t.t_threads[0] in __kmp_hierarchical_barrier_release()
1113 if (team->t.t_pkfn != (microtask_t)__kmp_teams_master && in __kmp_hierarchical_barrier_release()
1124 // If the team size has increased, we still communicate with old leaves via in __kmp_hierarchical_barrier_release()
1129 tid, team); in __kmp_hierarchical_barrier_release()
1130 // But if the entire team changes, we won't use oncore barrier at all in __kmp_hierarchical_barrier_release()
1136 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team, tid, in __kmp_hierarchical_barrier_release()
1141 &team->t.t_implicit_task_taskdata[tid].td_icvs); in __kmp_hierarchical_barrier_release()
1147 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs, in __kmp_hierarchical_barrier_release()
1155 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs, in __kmp_hierarchical_barrier_release()
1179 &team->t.t_threads[child_tid]->th.th_bar[bt].bb; in __kmp_hierarchical_barrier_release()
1183 gtid, team->t.t_id, tid, in __kmp_hierarchical_barrier_release()
1184 __kmp_gtid_from_tid(child_tid, team), team->t.t_id, in __kmp_hierarchical_barrier_release()
1197 // We test team_change on the off-chance that the level 1 team changed. in __kmp_hierarchical_barrier_release()
1209 kmp_info_t *child_thr = team->t.t_threads[child_tid]; in __kmp_hierarchical_barrier_release()
1215 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team), in __kmp_hierarchical_barrier_release()
1216 team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go, in __kmp_hierarchical_barrier_release()
1236 kmp_info_t *child_thr = team->t.t_threads[child_tid]; in __kmp_hierarchical_barrier_release()
1240 gtid, team->t.t_id, tid, in __kmp_hierarchical_barrier_release()
1241 __kmp_gtid_from_tid(child_tid, team), team->t.t_id, in __kmp_hierarchical_barrier_release()
1254 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs, in __kmp_hierarchical_barrier_release()
1260 gtid, team->t.t_id, tid, bt)); in __kmp_hierarchical_barrier_release()
1300 kmp_team_t *team = this_thr->th.th_team; in __kmp_barrier_template() local
1313 ANNOTATE_BARRIER_BEGIN(&team->t.t_bar); in __kmp_barrier_template()
1339 if (!team->t.t_serialized) { in __kmp_barrier_template()
1349 __kmp_tasking_barrier(team, this_thr, gtid); in __kmp_barrier_template()
1356 access it when the team struct is not guaranteed to exist. */ in __kmp_barrier_template()
1362 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals; in __kmp_barrier_template()
1364 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set; in __kmp_barrier_template()
1366 this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid); in __kmp_barrier_template()
1376 if (KMP_MASTER_TID(tid)) { // Master counter is stored in team structure. in __kmp_barrier_template()
1377 team->t.t_bar[bt].b_master_arrived += 1; in __kmp_barrier_template()
1388 // use 0 to only setup the current team if nthreads > 1 in __kmp_barrier_template()
1389 __kmp_task_team_setup(this_thr, team, 0); in __kmp_barrier_template()
1427 __kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj)); in __kmp_barrier_template()
1432 team->t.t_bar[bt].b_team_arrived += 1; in __kmp_barrier_template()
1436 kmp_int32 cancel_request = KMP_ATOMIC_LD_RLX(&team->t.t_cancel_request); in __kmp_barrier_template()
1440 KMP_ATOMIC_ST_RLX(&team->t.t_cancel_request, cancel_noreq); in __kmp_barrier_template()
1456 this_thr->th.th_teams_size.nteams == 1) && // or inside single team in __kmp_barrier_template()
1457 team->t.t_active_level == 1) { in __kmp_barrier_template()
1460 kmp_info_t **other_threads = team->t.t_threads; in __kmp_barrier_template()
1533 __kmp_task_team_sync(this_thr, team); in __kmp_barrier_template()
1544 } else { // Team is serialized. in __kmp_barrier_template()
1558 __kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj)); in __kmp_barrier_template()
1559 __kmp_task_team_setup(this_thr, team, 0); in __kmp_barrier_template()
1589 ANNOTATE_BARRIER_END(&team->t.t_bar); in __kmp_barrier_template()
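Taken together, the fragments from __kmp_barrier_template outline the driver: do nothing for a serialized team, otherwise cache what the wait code needs, gather, let the primary thread run the reduction and check/reset any pending cancellation request, then release and re-sync task teams. A very rough skeleton of that control flow, with placeholder names rather than the real kmp_* API:

  #include <atomic>

  struct SimpleTeam {
    bool serialized = false;
    std::atomic<int> cancel_request{0};   // 0 means no cancellation requested
  };

  // Returns a nonzero cancellation request on the primary thread, 0 otherwise.
  int barrier_skeleton(SimpleTeam &team, int tid,
                       void (*reduce)(void *), void *reduce_data) {
    if (team.serialized)
      return 0;                           // nothing to synchronize
    // ... gather phase: wait for all team members (see sketches above) ...
    int cancelled = 0;
    if (tid == 0) {                       // primary thread only
      if (reduce)
        reduce(reduce_data);              // combine per-thread contributions
      cancelled = team.cancel_request.exchange(0, std::memory_order_relaxed);
    }
    // ... release phase: wake the team, then re-sync task teams ...
    return cancelled;
  }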
1633 kmp_team_t *team = this_thr->th.th_team; in __kmp_end_split_barrier() local
1635 ANNOTATE_BARRIER_BEGIN(&team->t.t_bar); in __kmp_end_split_barrier()
1636 if (!team->t.t_serialized) { in __kmp_end_split_barrier()
1662 __kmp_task_team_sync(this_thr, team); in __kmp_end_split_barrier()
1666 ANNOTATE_BARRIER_END(&team->t.t_bar); in __kmp_end_split_barrier()
1673 kmp_team_t *team; in __kmp_join_barrier() local
1691 team = this_thr->th.th_team; in __kmp_join_barrier()
1693 KMP_DEBUG_ASSERT((int)nproc == team->t.t_nproc); in __kmp_join_barrier()
1696 team_id = team->t.t_id; in __kmp_join_barrier()
1700 if (master_thread != team->t.t_threads[0]) { in __kmp_join_barrier()
1704 KMP_DEBUG_ASSERT(master_thread == team->t.t_threads[0]); in __kmp_join_barrier()
1711 KMP_DEBUG_ASSERT(this_thr == team->t.t_threads[tid]); in __kmp_join_barrier()
1715 ANNOTATE_BARRIER_BEGIN(&team->t.t_bar); in __kmp_join_barrier()
1726 codeptr = team->t.ompt_team_info.master_return_address; in __kmp_join_barrier()
1747 __kmp_tasking_barrier(team, this_thr, gtid); in __kmp_join_barrier()
1753 KA_TRACE(20, ("__kmp_join_barrier: T#%d, old team = %d, old task_team = " in __kmp_join_barrier()
1756 team->t.t_task_team[this_thr->th.th_task_state], in __kmp_join_barrier()
1759 team->t.t_task_team[this_thr->th.th_task_state]); in __kmp_join_barrier()
1764 access it when the team struct is not guaranteed to exist. Doing these in __kmp_join_barrier()
1771 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals; in __kmp_join_barrier()
1773 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set; in __kmp_join_barrier()
1775 this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid); in __kmp_join_barrier()
1808 /* From this point on, the team data structure may be deallocated at any time in __kmp_join_barrier()
1810 threads. Any per-team data items that need to be referenced before the in __kmp_join_barrier()
1814 __kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj)); in __kmp_join_barrier()
1817 KMP_CHECK_UPDATE(team->t.t_display_affinity, 0); in __kmp_join_barrier()
1823 for (int i = 0; i < team->t.t_nproc; ++i) { in __kmp_join_barrier()
1824 kmp_info_t *team_thread = team->t.t_threads[i]; in __kmp_join_barrier()
1844 this_thr->th.th_teams_size.nteams == 1) && // or inside single team in __kmp_join_barrier()
1845 team->t.t_active_level == 1) { in __kmp_join_barrier()
1847 ident_t *loc = team->t.t_ident; in __kmp_join_barrier()
1848 kmp_info_t **other_threads = team->t.t_threads; in __kmp_join_barrier()
1893 ("__kmp_join_barrier: T#%d(%d:%d) says all %d team threads arrived\n", in __kmp_join_barrier()
1903 ANNOTATE_BARRIER_END(&team->t.t_bar); in __kmp_join_barrier()
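The join-barrier comments spell out the ordering rule the other barriers also rely on: anything a thread will need while waiting (here, the blocktime ICVs) must be copied out of the team into thread-local storage before arrival is signalled, because the primary thread may free the team immediately afterwards. A small sketch of that rule, with simplified stand-in types:

  #include <atomic>
  #include <cstdint>

  struct BlocktimeICVs { int bt_intervals; bool bt_set; };        // illustrative
  struct ThreadLocalCache { int bt_intervals = 0; bool bt_set = false; };

  void arrive_at_join_barrier(ThreadLocalCache &thr,
                              const BlocktimeICVs &team_icvs,
                              std::atomic<uint64_t> &my_arrived,
                              uint64_t next_epoch) {
    thr.bt_intervals = team_icvs.bt_intervals;  // cache before signalling
    thr.bt_set = team_icvs.bt_set;
    my_arrived.store(next_epoch, std::memory_order_release);
    // From here on, only thread-local state (thr.*) may be used while waiting;
    // the team structure must be treated as possibly deallocated.
  }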
1912 kmp_team_t *team = (tid == 0) ? this_thr->th.th_team : NULL; in __kmp_fork_barrier() local
1916 if (team) in __kmp_fork_barrier()
1917 ANNOTATE_BARRIER_END(&team->t.t_bar); in __kmp_fork_barrier()
1920 (team != NULL) ? team->t.t_id : -1, tid)); in __kmp_fork_barrier()
1933 kmp_info_t **other_threads = team->t.t_threads; in __kmp_fork_barrier()
1939 for (i = 1; i < team->t.t_nproc; ++i) { in __kmp_fork_barrier()
1943 gtid, team->t.t_id, other_threads[i]->th.th_info.ds.ds_gtid, in __kmp_fork_barrier()
1944 team->t.t_id, other_threads[i]->th.th_info.ds.ds_tid, in __kmp_fork_barrier()
1949 KMP_DEBUG_ASSERT(other_threads[i]->th.th_team == team); in __kmp_fork_barrier()
1954 // 0 indicates setup current task team if nthreads > 1 in __kmp_fork_barrier()
1955 __kmp_task_team_setup(this_thr, team, 0); in __kmp_fork_barrier()
1960 __kmp_wait_template() can access it when the team struct is not in __kmp_fork_barrier()
1967 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals; in __kmp_fork_barrier()
1969 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set; in __kmp_fork_barrier()
1971 this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid); in __kmp_fork_barrier()
2004 ompt_data_t *task_data = (team) in __kmp_fork_barrier()
2013 codeptr = team->t.ompt_team_info.master_return_address; in __kmp_fork_barrier()
2049 /* We can now assume that a valid team structure has been allocated by the in __kmp_fork_barrier()
2051 may not be part of the team, so we can't blindly assume that the team in __kmp_fork_barrier()
2053 team = (kmp_team_t *)TCR_PTR(this_thr->th.th_team); in __kmp_fork_barrier()
2054 KMP_DEBUG_ASSERT(team != NULL); in __kmp_fork_barrier()
2071 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team, in __kmp_fork_barrier()
2073 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs, in __kmp_fork_barrier()
2074 &team->t.t_threads[0] in __kmp_fork_barrier()
2082 __kmp_task_team_sync(this_thr, team); in __kmp_fork_barrier()
2086 kmp_proc_bind_t proc_bind = team->t.t_proc_bind; in __kmp_fork_barrier()
2089 if (__kmp_affinity_type == affinity_balanced && team->t.t_size_changed) { in __kmp_fork_barrier()
2090 __kmp_balanced_affinity(this_thr, team->t.t_nproc); in __kmp_fork_barrier()
2104 if (team->t.t_display_affinity in __kmp_fork_barrier()
2106 || (__kmp_affinity_type == affinity_balanced && team->t.t_size_changed) in __kmp_fork_barrier()
2111 this_thr->th.th_prev_num_threads = team->t.t_nproc; in __kmp_fork_barrier()
2112 this_thr->th.th_prev_level = team->t.t_level; in __kmp_fork_barrier()
2116 KMP_CHECK_UPDATE(this_thr->th.th_def_allocator, team->t.t_def_allocator); in __kmp_fork_barrier()
2127 ANNOTATE_BARRIER_END(&team->t.t_bar); in __kmp_fork_barrier()
2129 team->t.t_id, tid)); in __kmp_fork_barrier()
2132 void __kmp_setup_icv_copy(kmp_team_t *team, int new_nproc, in __kmp_setup_icv_copy() argument
2136 KMP_DEBUG_ASSERT(team && new_nproc && new_icvs); in __kmp_setup_icv_copy()
2146 KMP_DEBUG_ASSERT(team->t.t_threads[0]); // The threads arrays should be in __kmp_setup_icv_copy()
2149 &team->t.t_threads[0]->th.th_bar[bs_forkjoin_barrier].bb.th_fixed_icvs, in __kmp_setup_icv_copy()
2151 KF_TRACE(10, ("__kmp_setup_icv_copy: PULL: T#%d this_thread=%p team=%p\n", 0, in __kmp_setup_icv_copy()
2152 team->t.t_threads[0], team)); in __kmp_setup_icv_copy()
2156 KF_TRACE(10, ("__kmp_setup_icv_copy: PUSH: T#%d this_thread=%p team=%p\n", 0, in __kmp_setup_icv_copy()
2157 team->t.t_threads[0], team)); in __kmp_setup_icv_copy()
2162 KMP_DEBUG_ASSERT(team->t.t_threads[0]); // The threads arrays should be in __kmp_setup_icv_copy()
2166 KF_TRACE(10, ("__kmp_setup_icv_copy: LINEAR: T#%d this_thread=%p team=%p\n", in __kmp_setup_icv_copy()
2167 f, team->t.t_threads[f], team)); in __kmp_setup_icv_copy()
2168 __kmp_init_implicit_task(loc, team->t.t_threads[f], team, f, FALSE); in __kmp_setup_icv_copy()
2169 ngo_store_icvs(&team->t.t_implicit_task_taskdata[f].td_icvs, new_icvs); in __kmp_setup_icv_copy()
2170 KF_TRACE(10, ("__kmp_setup_icv_copy: LINEAR: T#%d this_thread=%p team=%p\n", in __kmp_setup_icv_copy()
2171 f, team->t.t_threads[f], team)); in __kmp_setup_icv_copy()
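The trace strings above name three propagation strategies for the new ICVs (PULL, PUSH, LINEAR); the loop shown last is the linear push, where the ICV block is written into every member's implicit task one thread at a time. A simplified sketch of that branch only, with stand-in types instead of the runtime's internal-control and taskdata structures:

  #include <vector>

  struct ICVs { int nthreads; int blocktime; /* ... */ };
  struct ImplicitTask { ICVs icvs{}; };

  // Linear push: one store per team member; starting at thread 1 assumes
  // thread 0 already holds the new ICVs, which is an illustrative simplification.
  void setup_icv_copy_linear(std::vector<ImplicitTask> &tasks, int new_nproc,
                             const ICVs &new_icvs) {
    for (int f = 1; f < new_nproc; ++f)
      tasks[f].icvs = new_icvs;
  }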