// Test device global memory data sharing codegen.
///==========================================================================///

// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix CK1 --check-prefix SEQ
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns -fopenmp-cuda-parallel-target-regions | FileCheck %s --check-prefix CK1 --check-prefix PAR

// expected-no-diagnostics
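// The two device RUN lines differ only in -fopenmp-cuda-parallel-target-regions:
// SEQ checks the default scheme that reserves team static (shared) memory for
// globalized locals, while PAR checks the scheme that pushes a frame onto the
// runtime data sharing stack. CK1 lines are common to both modes.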

#ifndef HEADER
#define HEADER

void test_ds(){
  #pragma omp target
  {
    int a = 10;
    #pragma omp parallel
    {
      a = 1000;
    }
    int b = 100;
    int c = 1000;
    #pragma omp parallel private(c)
    {
      int *c1 = &c;
      b = a + 10000;
    }
  }
}
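// `a` and `b` escape into the parallel regions above, so they are globalized
// into a single record; `c` is private and keeps a plain stack slot. A
// minimal sketch of that record, inferred from the 8-byte size and the GEP
// indices checked below (the actual IR type is %struct._globalized_locals_ty):
//
//   struct _globalized_locals_ty { int a; int b; };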
// SEQ: [[MEM_TY:%.+]] = type { [128 x i8] }
// SEQ-DAG: [[SHARED_GLOBAL_RD:@.+]] = weak addrspace(3) global [[MEM_TY]] undef
// SEQ-DAG: [[KERNEL_PTR:@.+]] = internal addrspace(3) global i8* undef
// SEQ-DAG: [[KERNEL_SIZE:@.+]] = internal unnamed_addr constant i64 8
// SEQ-DAG: [[KERNEL_SHARED:@.+]] = internal unnamed_addr constant i16 1
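// The 8-byte KERNEL_SIZE covers the two globalized i32 locals `a` and `b`;
// both KERNEL_SIZE and the KERNEL_SHARED flag are loaded in the kernel and
// forwarded to __kmpc_get_team_static_memory below.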

/// ========= In the worker function ========= ///
// CK1: {{.*}}define internal void @__omp_offloading{{.*}}test_ds{{.*}}_worker()
// CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CK1-NOT: call void @__kmpc_data_sharing_init_stack

/// ========= In the kernel function ========= ///

// CK1: {{.*}}define weak void @__omp_offloading{{.*}}test_ds{{.*}}()
// CK1: [[SHAREDARGS1:%.+]] = alloca i8**
// CK1: [[SHAREDARGS2:%.+]] = alloca i8**
// CK1: call void @__kmpc_kernel_init
// CK1: call void @__kmpc_data_sharing_init_stack
// SEQ: [[SHARED_MEM_FLAG:%.+]] = load i16, i16* [[KERNEL_SHARED]],
// SEQ: [[SIZE:%.+]] = load i64, i64* [[KERNEL_SIZE]],
// SEQ: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds ([[MEM_TY]], [[MEM_TY]] addrspace(3)* [[SHARED_GLOBAL_RD]], i32 0, i32 0, i32 0) to i8*), i64 [[SIZE]], i16 [[SHARED_MEM_FLAG]], i8** addrspacecast (i8* addrspace(3)* [[KERNEL_PTR]] to i8**))
// SEQ: [[KERNEL_RD:%.+]] = load i8*, i8* addrspace(3)* [[KERNEL_PTR]],
// SEQ: [[GLOBALSTACK:%.+]] = getelementptr inbounds i8, i8* [[KERNEL_RD]], i64 0
// PAR: [[GLOBALSTACK:%.+]] = call i8* @__kmpc_data_sharing_push_stack(i{{32|64}} 8, i16 1)
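// Both modes end up with GLOBALSTACK pointing at the globalized frame: SEQ
// carves it out of the statically reserved team buffer, while PAR pushes an
// 8-byte frame onto the runtime's data sharing stack.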
// CK1: [[GLOBALSTACK2:%.+]] = bitcast i8* [[GLOBALSTACK]] to %struct._globalized_locals_ty*
// CK1: [[A:%.+]] = getelementptr inbounds %struct._globalized_locals_ty, %struct._globalized_locals_ty* [[GLOBALSTACK2]], i32 0, i32 0
// CK1: [[B:%.+]] = getelementptr inbounds %struct._globalized_locals_ty, %struct._globalized_locals_ty* [[GLOBALSTACK2]], i32 0, i32 1
// CK1: store i32 10, i32* [[A]]
// CK1: call void @__kmpc_kernel_prepare_parallel({{.*}})
// CK1: call void @__kmpc_begin_sharing_variables(i8*** [[SHAREDARGS1]], i64 1)
// CK1: [[SHARGSTMP1:%.+]] = load i8**, i8*** [[SHAREDARGS1]]
// CK1: [[SHARGSTMP2:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP1]], i64 0
// CK1: [[SHAREDVAR:%.+]] = bitcast i32* [[A]] to i8*
// CK1: store i8* [[SHAREDVAR]], i8** [[SHARGSTMP2]]
// CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CK1: call void @__kmpc_end_sharing_variables()
// CK1: store i32 100, i32* [[B]]
// CK1: call void @__kmpc_kernel_prepare_parallel({{.*}})
// CK1: call void @__kmpc_begin_sharing_variables(i8*** [[SHAREDARGS2]], i64 2)
// CK1: [[SHARGSTMP3:%.+]] = load i8**, i8*** [[SHAREDARGS2]]
// CK1: [[SHARGSTMP4:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP3]], i64 0
// CK1: [[SHAREDVAR1:%.+]] = bitcast i32* [[B]] to i8*
// CK1: store i8* [[SHAREDVAR1]], i8** [[SHARGSTMP4]]
// CK1: [[SHARGSTMP12:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP3]], i64 1
// CK1: [[SHAREDVAR2:%.+]] = bitcast i32* [[A]] to i8*
// CK1: store i8* [[SHAREDVAR2]], i8** [[SHARGSTMP12]]
// CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CK1: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
// CK1: call void @__kmpc_end_sharing_variables()
// SEQ: [[SHARED_MEM_FLAG:%.+]] = load i16, i16* [[KERNEL_SHARED]],
// SEQ: call void @__kmpc_restore_team_static_memory(i16 0, i16 [[SHARED_MEM_FLAG]])
// PAR: call void @__kmpc_data_sharing_pop_stack(i8* [[GLOBALSTACK]])
// CK1: call void @__kmpc_kernel_deinit(i16 1)
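// Teardown mirrors the setup above: SEQ returns the team static memory via
// __kmpc_restore_team_static_memory, PAR pops the frame it pushed, and both
// modes then deinitialize the kernel.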

/// ========= In the data sharing wrapper function ========= ///

// CK1: {{.*}}define internal void @__omp_outlined{{.*}}wrapper({{.*}})
// CK1: [[SHAREDARGS4:%.+]] = alloca i8**
// CK1: call void @__kmpc_get_shared_variables(i8*** [[SHAREDARGS4]])
// CK1: [[SHARGSTMP13:%.+]] = load i8**, i8*** [[SHAREDARGS4]]
// CK1: [[SHARGSTMP14:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP13]], i64 0
// CK1: [[SHARGSTMP15:%.+]] = bitcast i8** [[SHARGSTMP14]] to i32**
// CK1: [[SHARGSTMP16:%.+]] = load i32*, i32** [[SHARGSTMP15]]
// CK1: call void @__omp_outlined__{{.*}}({{.*}}, i32* [[SHARGSTMP16]])
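// The wrapper for the first parallel region retrieves the shared-argument
// array, loads its single entry (a pointer to the globalized `a`), and
// forwards it to the outlined function.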

/// ========= Outlined function for the second parallel region ========= ///

// CK1: define internal void @{{.+}}(i32* noalias %{{.+}}, i32* noalias %{{.+}}, i32* nonnull align {{[0-9]+}} dereferenceable{{.+}}, i32* nonnull align {{[0-9]+}} dereferenceable{{.+}})
// CK1-NOT: call i8* @__kmpc_data_sharing_push_stack(
// CK1: [[C_ADDR:%.+]] = alloca i32,
// CK1: store i32* [[C_ADDR]], i32** %
// CK1-NOT: call void @__kmpc_data_sharing_pop_stack(
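// `c` is listed in private(c), so the outlined function needs no
// globalization traffic for it: `c` gets a local alloca whose address is
// stored for `c1`, with no push/pop of the data sharing stack.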

/// ========= In the data sharing wrapper function ========= ///

// CK1: {{.*}}define internal void @__omp_outlined{{.*}}wrapper({{.*}})
// CK1: [[SHAREDARGS3:%.+]] = alloca i8**
// CK1: call void @__kmpc_get_shared_variables(i8*** [[SHAREDARGS3]])
// CK1: [[SHARGSTMP5:%.+]] = load i8**, i8*** [[SHAREDARGS3]]
// CK1: [[SHARGSTMP6:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP5]], i64 0
// CK1: [[SHARGSTMP7:%.+]] = bitcast i8** [[SHARGSTMP6]] to i32**
// CK1: [[SHARGSTMP8:%.+]] = load i32*, i32** [[SHARGSTMP7]]
// CK1: [[SHARGSTMP9:%.+]] = getelementptr inbounds i8*, i8** [[SHARGSTMP5]], i64 1
// CK1: [[SHARGSTMP10:%.+]] = bitcast i8** [[SHARGSTMP9]] to i32**
// CK1: [[SHARGSTMP11:%.+]] = load i32*, i32** [[SHARGSTMP10]]
// CK1: call void @__omp_outlined__{{.*}}({{.*}}, i32* [[SHARGSTMP8]], i32* [[SHARGSTMP11]])
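// The second wrapper unpacks both shared arguments, matching the i64 2 passed
// to __kmpc_begin_sharing_variables above: pointers to the globalized `b` and
// `a`, in that order.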

#endif