/external/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/

D | pointer-elements.ll
     9   ; CHECK: inttoptr i64 %{{[0-9]+}} to i8 addrspace(1)*
    10   ; CHECK: inttoptr i64 %{{[0-9]+}} to i8 addrspace(1)*
    12   define void @merge_v2p1i8(i8 addrspace(1)* addrspace(1)* nocapture %a, i8 addrspace(1)* addrspace(1…
    14     %a.1 = getelementptr inbounds i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %a, i64 1
    15     %b.1 = getelementptr inbounds i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %b, i64 1
    17     %ld.c = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %b, align 4
    18     %ld.c.idx.1 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %b.1, align 4
    20     store i8 addrspace(1)* null, i8 addrspace(1)* addrspace(1)* %a, align 4
    21     store i8 addrspace(1)* null, i8 addrspace(1)* addrspace(1)* %a.1, align 4
    28   ; CHECK: inttoptr i32 %{{[0-9]+}} to i8 addrspace(3)*
    [all …]

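The CHECK lines above verify that the LoadStoreVectorizer rewrites adjacent pointer-typed loads as a single integer vector load and rebuilds the pointers with inttoptr. A minimal sketch of that before/after shape in the same typed-pointer dialect (function and value names are hypothetical, and the vectorized form is an assumed shape, not the pass's verbatim output):

    ; before: two adjacent loads of i8 addrspace(1)* values
    define void @load_two_ptrs(i8 addrspace(1)* addrspace(1)* %b) {
      %b.1 = getelementptr i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %b, i64 1
      %p0 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %b
      %p1 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* %b.1
      ret void
    }

    ; after: one <2 x i64> load; the lanes become pointers again via inttoptr
    define void @load_two_ptrs_vectorized(i8 addrspace(1)* addrspace(1)* %b) {
      %vp = bitcast i8 addrspace(1)* addrspace(1)* %b to <2 x i64> addrspace(1)*
      %v = load <2 x i64>, <2 x i64> addrspace(1)* %vp, align 8
      %i0 = extractelement <2 x i64> %v, i32 0
      %i1 = extractelement <2 x i64> %v, i32 1
      %p0 = inttoptr i64 %i0 to i8 addrspace(1)*
      %p1 = inttoptr i64 %i1 to i8 addrspace(1)*
      ret void
    }
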
D | merge-stores.ll
    12   ; CHECK: store <2 x i8> <i8 -56, i8 123>, <2 x i8> addrspace(1)* %{{[0-9]+}}, align 2
    13   define void @merge_global_store_2_constants_i8(i8 addrspace(1)* %out) #0 {
    14     %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
    16     store i8 123, i8 addrspace(1)* %out.gep.1
    17     store i8 456, i8 addrspace(1)* %out, align 2
    23   define void @merge_global_store_2_constants_i8_natural_align(i8 addrspace(1)* %out) #0 {
    24     %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
    26     store i8 123, i8 addrspace(1)* %out.gep.1
    27     store i8 456, i8 addrspace(1)* %out
    32   ; CHECK: store <2 x i16> <i16 456, i16 123>, <2 x i16> addrspace(1)* %{{[0-9]+}}, align 4
    [all …]

/external/llvm/test/Assembler/

D | 2007-12-11-AddressSpaces.ll
     1   ; RUN: llvm-as < %s | llvm-dis | grep "addrspace(33)" | count 7
     2   ; RUN: llvm-as < %s | llvm-dis | grep "addrspace(42)" | count 2
     3   ; RUN: llvm-as < %s | llvm-dis | grep "addrspace(66)" | count 2
     4   ; RUN: llvm-as < %s | llvm-dis | grep "addrspace(11)" | count 6
     5   ; RUN: llvm-as < %s | llvm-dis | grep "addrspace(22)" | count 5
     8   %struct.mystruct = type { i32, i32 addrspace(33)*, i32, i32 addrspace(33)* }
     9   @input = weak addrspace(42) global %struct.mystruct zeroinitializer ; <%struct.mystruct addrspac…
    10   @output = addrspace(66) global %struct.mystruct zeroinitializer ; <%struct.mystruct addrspace(66)…
    11   @y = external addrspace(33) global i32 addrspace(11)* addrspace(22)* ; <i32 addrspace(11)* addrsp…
    15   …addrspace(33)*, i32 addrspace(33)* addrspace(42)* getelementptr (%struct.mystruct, %struct.mystruc…
    [all …]

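Each RUN line round-trips the file through llvm-as and llvm-dis and counts how often an address-space number survives. A minimal self-contained variant of the same pattern (a hypothetical file; addrspace(5) is an arbitrary choice):

    ; RUN: llvm-as < %s | llvm-dis | grep "addrspace(5)" | count 2
    @g = addrspace(5) global i32 0
    define i32 @read_g() {
      %v = load i32, i32 addrspace(5)* @g
      ret i32 %v
    }
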
/external/llvm/test/CodeGen/AMDGPU/

D | addrspacecast-constantexpr.ll
     3   declare void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* nocapture, i32 addrspace(4)* nocapture,…
     5   @lds.i32 = unnamed_addr addrspace(3) global i32 undef, align 4
     6   @lds.arr = unnamed_addr addrspace(3) global [256 x i32] undef, align 4
     8   @global.i32 = unnamed_addr addrspace(1) global i32 undef, align 4
     9   @global.arr = unnamed_addr addrspace(1) global [256 x i32] undef, align 4
    13     store i32 7, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
    19     store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*)
    25     store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds.i32 to i32 addrspace(4)*)
    31     … i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32…
    37     store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global.i32 to i32 addrspace(4)*)
    [all …]

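Every store above goes through an addrspacecast constant expression, so no cast instruction appears in the function body. A minimal sketch of the construct (hypothetical names; in this older AMDGPU numbering addrspace(3) is LDS and addrspace(4) is flat):

    @lds = unnamed_addr addrspace(3) global i32 undef, align 4

    define void @store_through_flat_view() {
      store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds to i32 addrspace(4)*)
      ret void
    }
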
D | image-resource-id.ll
    10   define void @test_2d_rd_1_0(%opencl.image2d_t addrspace(1)* %in, ; read_only
    11                               i32 addrspace(1)* %out) {
    14                          %opencl.image2d_t addrspace(1)* %in) #0
    15     store i32 %0, i32 addrspace(1)* %out
    24   define void @test_3d_rd_1_0(%opencl.image3d_t addrspace(1)* %in, ; read_only
    25                               i32 addrspace(1)* %out) {
    28                          %opencl.image3d_t addrspace(1)* %in) #0
    29     store i32 %0, i32 addrspace(1)* %out
    40   define void @test_2d_wr_1_0(%opencl.image2d_t addrspace(1)* %in, ; write_only
    41                               i32 addrspace(1)* %out) {
    [all …]

D | lds-alignment.ll
     3   @lds.align16.0 = internal unnamed_addr addrspace(3) global [38 x i8] undef, align 16
     4   @lds.align16.1 = internal unnamed_addr addrspace(3) global [38 x i8] undef, align 16
     6   @lds.align8.0 = internal unnamed_addr addrspace(3) global [38 x i8] undef, align 8
     7   @lds.align32.0 = internal unnamed_addr addrspace(3) global [38 x i8] undef, align 32
     9   @lds.missing.align.0 = internal unnamed_addr addrspace(3) global [39 x i32] undef
    10   @lds.missing.align.1 = internal unnamed_addr addrspace(3) global [7 x i64] undef
    12   declare void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* nocapture, i8 addrspace(1)* nocapture read…
    13   declare void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* nocapture, i8 addrspace(3)* nocapture read…
    18   define void @test_no_round_size_1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
    19     %lds.align16.0.bc = bitcast [38 x i8] addrspace(3)* @lds.align16.0 to i8 addrspace(3)*
    [all …]

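The truncated declares are the address-space-qualified memcpy intrinsics of this LLVM vintage, where alignment was still a trailing i32 parameter; completing the signature that way is an assumption. A sketch of a matching call (the 38/16 constants mirror the align-16, 38-byte globals above; the function name is hypothetical):

    declare void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* nocapture, i8 addrspace(1)* nocapture readonly, i32, i32, i1)

    define void @copy_38_bytes_to_lds(i8 addrspace(3)* %dst, i8 addrspace(1)* %src) {
      ; args: dest (LDS), src (global), byte count, alignment, isvolatile
      call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %dst, i8 addrspace(1)* %src, i32 38, i32 16, i1 false)
      ret void
    }
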
D | merge-stores.ll
    18   define void @merge_global_store_2_constants_i8(i8 addrspace(1)* %out) #0 {
    19     %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
    21     store i8 123, i8 addrspace(1)* %out.gep.1
    22     store i8 456, i8 addrspace(1)* %out, align 2
    30   define void @merge_global_store_2_constants_i8_natural_align(i8 addrspace(1)* %out) #0 {
    31     %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
    33     store i8 123, i8 addrspace(1)* %out.gep.1
    34     store i8 456, i8 addrspace(1)* %out
    40   define void @merge_global_store_2_constants_i16(i16 addrspace(1)* %out) #0 {
    41     %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
    [all …]

D | load-global-i1.ll
    12   define void @global_load_i1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) #0 {
    13     %load = load i1, i1 addrspace(1)* %in
    14     store i1 %load, i1 addrspace(1)* %out
    19   define void @global_load_v2i1(<2 x i1> addrspace(1)* %out, <2 x i1> addrspace(1)* %in) #0 {
    20     %load = load <2 x i1>, <2 x i1> addrspace(1)* %in
    21     store <2 x i1> %load, <2 x i1> addrspace(1)* %out
    26   define void @global_load_v3i1(<3 x i1> addrspace(1)* %out, <3 x i1> addrspace(1)* %in) #0 {
    27     %load = load <3 x i1>, <3 x i1> addrspace(1)* %in
    28     store <3 x i1> %load, <3 x i1> addrspace(1)* %out
    33   define void @global_load_v4i1(<4 x i1> addrspace(1)* %out, <4 x i1> addrspace(1)* %in) #0 {
    [all …]

D | load-constant-i1.ll
    12   define void @constant_load_i1(i1 addrspace(1)* %out, i1 addrspace(2)* nocapture %in) #0 {
    13     %load = load i1, i1 addrspace(2)* %in
    14     store i1 %load, i1 addrspace(1)* %out
    19   define void @constant_load_v2i1(<2 x i1> addrspace(1)* %out, <2 x i1> addrspace(2)* nocapture %in) …
    20     %load = load <2 x i1>, <2 x i1> addrspace(2)* %in
    21     store <2 x i1> %load, <2 x i1> addrspace(1)* %out
    26   define void @constant_load_v3i1(<3 x i1> addrspace(1)* %out, <3 x i1> addrspace(2)* nocapture %in) …
    27     %load = load <3 x i1>, <3 x i1> addrspace(2)* %in
    28     store <3 x i1> %load, <3 x i1> addrspace(1)* %out
    33   define void @constant_load_v4i1(<4 x i1> addrspace(1)* %out, <4 x i1> addrspace(2)* nocapture %in) …
    [all …]

D | load-local-i1.ll
    13   define void @local_load_i1(i1 addrspace(3)* %out, i1 addrspace(3)* %in) #0 {
    14     %load = load i1, i1 addrspace(3)* %in
    15     store i1 %load, i1 addrspace(3)* %out
    20   define void @local_load_v2i1(<2 x i1> addrspace(3)* %out, <2 x i1> addrspace(3)* %in) #0 {
    21     %load = load <2 x i1>, <2 x i1> addrspace(3)* %in
    22     store <2 x i1> %load, <2 x i1> addrspace(3)* %out
    27   define void @local_load_v3i1(<3 x i1> addrspace(3)* %out, <3 x i1> addrspace(3)* %in) #0 {
    28     %load = load <3 x i1>, <3 x i1> addrspace(3)* %in
    29     store <3 x i1> %load, <3 x i1> addrspace(3)* %out
    34   define void @local_load_v4i1(<4 x i1> addrspace(3)* %out, <4 x i1> addrspace(3)* %in) #0 {
    [all …]

D | flat_atomics_i64.ll
     6   define void @atomic_add_i64_offset(i64 addrspace(4)* %out, i64 %in) {
     8     %gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
     9     %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
    16   define void @atomic_add_i64_ret_offset(i64 addrspace(4)* %out, i64 addrspace(4)* %out2, i64 %in) {
    18     %gep = getelementptr i64, i64 addrspace(4)* %out, i64 4
    19     %tmp0 = atomicrmw volatile add i64 addrspace(4)* %gep, i64 %in seq_cst
    20     store i64 %tmp0, i64 addrspace(4)* %out2
    26   define void @atomic_add_i64_addr64_offset(i64 addrspace(4)* %out, i64 %in, i64 %index) {
    28     %ptr = getelementptr i64, i64 addrspace(4)* %out, i64 %index
    29     %gep = getelementptr i64, i64 addrspace(4)* %ptr, i64 4
    [all …]

D | flat_atomics.ll
     6   define void @atomic_add_i32_offset(i32 addrspace(4)* %out, i32 %in) {
     8     %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
     9     %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
    16   define void @atomic_add_i32_ret_offset(i32 addrspace(4)* %out, i32 addrspace(4)* %out2, i32 %in) {
    18     %gep = getelementptr i32, i32 addrspace(4)* %out, i32 4
    19     %val = atomicrmw volatile add i32 addrspace(4)* %gep, i32 %in seq_cst
    20     store i32 %val, i32 addrspace(4)* %out2
    26   define void @atomic_add_i32_addr64_offset(i32 addrspace(4)* %out, i32 %in, i64 %index) {
    28     %ptr = getelementptr i32, i32 addrspace(4)* %out, i64 %index
    29     %gep = getelementptr i32, i32 addrspace(4)* %ptr, i32 4
    [all …]

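Both flat-atomics files repeat one core pattern: an atomicrmw through a flat (here addrspace(4)) pointer, with or without a constant offset folded into the address. Reduced to its smallest form (hypothetical function name):

    define i32 @flat_fetch_add(i32 addrspace(4)* %p, i32 %v) {
      ; read-modify-write through a flat pointer, returning the old value
      %old = atomicrmw volatile add i32 addrspace(4)* %p, i32 %v seq_cst
      ret i32 %old
    }
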
D | si-triv-disjoint-mem-access.ll
     8   @stored_lds_ptr = addrspace(3) global i32 addrspace(3)* undef, align 4
     9   @stored_constant_ptr = addrspace(3) global i32 addrspace(2)* undef, align 8
    10   @stored_global_ptr = addrspace(3) global i32 addrspace(1)* undef, align 8
    15   define void @reorder_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %…
    16     %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
    18     %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
    19     %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3
    21     %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
    22     store i32 99, i32 addrspace(1)* %gptr, align 4
    23     %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4
    [all …]

D | load-local-i8.ll
    12   define void @local_load_i8(i8 addrspace(3)* %out, i8 addrspace(3)* %in) #0 {
    14     %ld = load i8, i8 addrspace(3)* %in
    15     store i8 %ld, i8 addrspace(3)* %out
    25   define void @local_load_v2i8(<2 x i8> addrspace(3)* %out, <2 x i8> addrspace(3)* %in) #0 {
    27     %ld = load <2 x i8>, <2 x i8> addrspace(3)* %in
    28     store <2 x i8> %ld, <2 x i8> addrspace(3)* %out
    36   define void @local_load_v3i8(<3 x i8> addrspace(3)* %out, <3 x i8> addrspace(3)* %in) #0 {
    38     %ld = load <3 x i8>, <3 x i8> addrspace(3)* %in
    39     store <3 x i8> %ld, <3 x i8> addrspace(3)* %out
    47   define void @local_load_v4i8(<4 x i8> addrspace(3)* %out, <4 x i8> addrspace(3)* %in) #0 {
    [all …]

D | load-constant-i8.ll
    12   define void @constant_load_i8(i8 addrspace(1)* %out, i8 addrspace(2)* %in) #0 {
    14     %ld = load i8, i8 addrspace(2)* %in
    15     store i8 %ld, i8 addrspace(1)* %out
    24   define void @constant_load_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) #0 {
    26     %ld = load <2 x i8>, <2 x i8> addrspace(2)* %in
    27     store <2 x i8> %ld, <2 x i8> addrspace(1)* %out
    35   define void @constant_load_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(2)* %in) #0 {
    37     %ld = load <3 x i8>, <3 x i8> addrspace(2)* %in
    38     store <3 x i8> %ld, <3 x i8> addrspace(1)* %out
    46   define void @constant_load_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(2)* %in) #0 {
    [all …]

D | local-atomics64.ll
     7   define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
     8     %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
     9     store i64 %result, i64 addrspace(1)* %out, align 8
    16   define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwin…
    17     %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
    18     %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
    19     store i64 %result, i64 addrspace(1)* %out, align 8
    26   define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
    27     %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
    28     store i64 %result, i64 addrspace(1)* %out, align 8
    [all …]

D | local-atomics.ll
    14   define void @lds_atomic_xchg_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
    15     %result = atomicrmw xchg i32 addrspace(3)* %ptr, i32 4 seq_cst
    16     store i32 %result, i32 addrspace(1)* %out, align 4
    24   define void @lds_atomic_xchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwin…
    25     %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
    26     %result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
    27     store i32 %result, i32 addrspace(1)* %out, align 4
    40   define void @lds_atomic_add_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
    41     %result = atomicrmw add i32 addrspace(3)* %ptr, i32 4 seq_cst
    42     store i32 %result, i32 addrspace(1)* %out, align 4
    [all …]

/external/llvm/test/Transforms/InstCombine/

D | addrspacecast.ll
     7   declare void @llvm.memcpy.p0i8.p1i8.i32(i8*, i8 addrspace(1)*, i32, i32, i1) nounwind
     8   declare void @llvm.memcpy.p0i8.p2i8.i32(i8*, i8 addrspace(2)*, i32, i32, i1) nounwind
    11   define i32* @combine_redundant_addrspacecast(i32 addrspace(1)* %x) nounwind {
    13   ; CHECK: addrspacecast i32 addrspace(1)* %x to i32*
    15     %y = addrspacecast i32 addrspace(1)* %x to i32 addrspace(3)*
    16     %z = addrspacecast i32 addrspace(3)* %y to i32*
    20   define <4 x i32*> @combine_redundant_addrspacecast_vector(<4 x i32 addrspace(1)*> %x) nounwind {
    22   ; CHECK: addrspacecast <4 x i32 addrspace(1)*> %x to <4 x i32*>
    24     %y = addrspacecast <4 x i32 addrspace(1)*> %x to <4 x i32 addrspace(3)*>
    25     %z = addrspacecast <4 x i32 addrspace(3)*> %y to <4 x i32*>
    [all …]

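The fold being checked: a cast chain that starts and ends in known address spaces collapses to one addrspacecast. A sketch of the expected input/output pair (function names are hypothetical, and the @after body is the assumed result, matching the CHECK line above):

    define i32* @before(i32 addrspace(1)* %x) {
      %y = addrspacecast i32 addrspace(1)* %x to i32 addrspace(3)*
      %z = addrspacecast i32 addrspace(3)* %y to i32*
      ret i32* %z
    }

    ; assumed instcombine output: the intermediate addrspace(3) hop is gone
    define i32* @after(i32 addrspace(1)* %x) {
      %z = addrspacecast i32 addrspace(1)* %x to i32*
      ret i32* %z
    }
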
D | constant-fold-address-space-pointer.ll
     4   @g = addrspace(3) global i32 89
     6   @const_zero_i8_as1 = addrspace(1) constant i8 0
     7   @const_zero_i32_as1 = addrspace(1) constant i32 0
     9   @const_zero_i8_as2 = addrspace(2) constant i8 0
    10   @const_zero_i32_as2 = addrspace(2) constant i32 0
    12   @const_zero_i8_as3 = addrspace(3) constant i8 0
    13   @const_zero_i32_as3 = addrspace(3) constant i32 0
    17   define i32 addrspace(3)* @test_constant_fold_inttoptr_as_pointer_same_size() {
    19   ; CHECK-NEXT: ret i32 addrspace(3)* @const_zero_i32_as3
    20     %x = ptrtoint i32 addrspace(3)* @const_zero_i32_as3 to i32
    [all …]

/external/swiftshader/third_party/LLVM/test/Assembler/

D | 2007-12-11-AddressSpaces.ll
     1   ; RUN: llvm-as < %s | llvm-dis | grep {addrspace(33)} | count 7
     2   ; RUN: llvm-as < %s | llvm-dis | grep {addrspace(42)} | count 2
     3   ; RUN: llvm-as < %s | llvm-dis | grep {addrspace(66)} | count 2
     4   ; RUN: llvm-as < %s | llvm-dis | grep {addrspace(11)} | count 6
     5   ; RUN: llvm-as < %s | llvm-dis | grep {addrspace(22)} | count 5
     7   %struct.mystruct = type { i32, i32 addrspace(33)*, i32, i32 addrspace(33)* }
     8   @input = weak addrspace(42) global %struct.mystruct zeroinitializer ; <%struct.mystruct addrspac…
     9   @output = addrspace(66) global %struct.mystruct zeroinitializer ; <%struct.mystruct addrspace(66)…
    10   @y = external addrspace(33) global i32 addrspace(11)* addrspace(22)* ; <i32 addrspace(11)* addrsp…
    14   …p1 = load i32 addrspace(33)* addrspace(42)* getelementptr (%struct.mystruct addrspace(42)* @input,…
    [all …]

/external/clang/test/CodeGenOpenCL/

D | to_addr_builtin.cl
    16   //CHECK: %[[ARG:.*]] = addrspacecast i32 addrspace(1)* %{{.*}} to i8 addrspace(4)*
    17   //CHECK: %[[RET:.*]] = call i8 addrspace(1)* @to_global(i8 addrspace(4)* %[[ARG]])
    18   //CHECK: %{{.*}} = bitcast i8 addrspace(1)* %[[RET]] to i32 addrspace(1)*
    21   //CHECK: %[[ARG:.*]] = addrspacecast i32 addrspace(3)* %{{.*}} to i8 addrspace(4)*
    22   //CHECK: %[[RET:.*]] = call i8 addrspace(1)* @to_global(i8 addrspace(4)* %[[ARG]])
    23   //CHECK: %{{.*}} = bitcast i8 addrspace(1)* %[[RET]] to i32 addrspace(1)*
    26   //CHECK: %[[ARG:.*]] = addrspacecast i32* %{{.*}} to i8 addrspace(4)*
    27   //CHECK: %[[RET:.*]] = call i8 addrspace(1)* @to_global(i8 addrspace(4)* %[[ARG]])
    28   //CHECK: %{{.*}} = bitcast i8 addrspace(1)* %[[RET]] to i32 addrspace(1)*
    31   //CHECK: %[[ARG:.*]] = bitcast i32 addrspace(4)* %{{.*}} to i8 addrspace(4)*
    [all …]

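In IR terms each to_global call follows one shape: cast the argument to the generic address space (addrspace(4) here), call the builtin, and bitcast the i8 result back to the original element type. A sketch of that shape for a local-pointer argument, with the @to_global signature inferred from the CHECK lines above and a hypothetical wrapper name:

    declare i8 addrspace(1)* @to_global(i8 addrspace(4)*)

    define i32 addrspace(1)* @to_global_from_local(i32 addrspace(3)* %p) {
      %arg = addrspacecast i32 addrspace(3)* %p to i8 addrspace(4)*
      %ret = call i8 addrspace(1)* @to_global(i8 addrspace(4)* %arg)
      %res = bitcast i8 addrspace(1)* %ret to i32 addrspace(1)*
      ret i32 addrspace(1)* %res
    }
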
/external/llvm/test/Other/

D | constant-fold-gep-address-spaces.ll
    10   ; PLAIN: @G8 = global i8 addrspace(1)* getelementptr (i8, i8 addrspace(1)* inttoptr (i32 1 to i8 ad…
    11   @G8 = global i8 addrspace(1)* getelementptr (i8, i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1…
    12   ; PLAIN: @G1 = global i1 addrspace(2)* getelementptr (i1, i1 addrspace(2)* inttoptr (i8 1 to i1 add…
    13   @G1 = global i1 addrspace(2)* getelementptr (i1, i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)…
    14   ; PLAIN: @F8 = global i8 addrspace(1)* getelementptr (i8, i8 addrspace(1)* inttoptr (i32 1 to i8 ad…
    15   @F8 = global i8 addrspace(1)* getelementptr (i8, i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1…
    16   ; PLAIN: @F1 = global i1 addrspace(2)* getelementptr (i1, i1 addrspace(2)* inttoptr (i8 1 to i1 add…
    17   @F1 = global i1 addrspace(2)* getelementptr (i1, i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)…
    18   ; PLAIN: @H8 = global i8 addrspace(1)* getelementptr (i8, i8 addrspace(1)* null, i32 -1)
    19   @H8 = global i8 addrspace(1)* getelementptr (i8, i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1…
    [all …]

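The point of each PLAIN/definition pair: a getelementptr over an inttoptr constant has to fold using the pointer width of that address space, not the default one. A hedged one-line example (the global name is hypothetical, and it assumes a datalayout where addrspace(1) pointers are 32-bit):

    ; folds to inttoptr (i32 3 to i8 addrspace(1)*): base address 1 plus two i8 elements
    @sum = global i8 addrspace(1)* getelementptr (i8, i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 2)
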
/external/llvm/test/Transforms/RewriteStatepointsForGC/

D | rematerialize-derived-pointers.ll
     4   declare void @use_obj16(i16 addrspace(1)*) "gc-leaf-function"
     5   declare void @use_obj32(i32 addrspace(1)*) "gc-leaf-function"
     6   declare void @use_obj64(i64 addrspace(1)*) "gc-leaf-function"
    10   define void @test_gep_const(i32 addrspace(1)* %base) gc "statepoint-example" {
    13     %ptr = getelementptr i32, i32 addrspace(1)* %base, i32 15
    14   ; CHECK: getelementptr i32, i32 addrspace(1)* %base, i32 15
    16   ; CHECK: %base.relocated = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(token %…
    17   ; CHECK: bitcast i8 addrspace(1)* %base.relocated to i32 addrspace(1)*
    18   ; CHECK: getelementptr i32, i32 addrspace(1)* %base.relocated.casted, i32 15
    19     call void @use_obj32(i32 addrspace(1)* %base)
    [all …]

/external/llvm/test/Transforms/StraightLineStrengthReduce/AMDGPU/

D | reassociate-geps-and-slsr-addrspace.ll
     7   ; CHECK: [[b1:%[0-9]+]] = getelementptr float, float addrspace(1)* %arr, i64 [[bump:%[0-9]+]]
     8   ; CHECK: [[b2:%[0-9]+]] = getelementptr float, float addrspace(1)* [[b1]], i64 [[bump]]
     9   …@slsr_after_reassociate_global_geps_mubuf_max_offset(float addrspace(1)* %out, float addrspace(1)*…
    14     %p1 = getelementptr inbounds float, float addrspace(1)* %arr, i64 %tmp
    15     %tmp3 = bitcast float addrspace(1)* %p1 to i32 addrspace(1)*
    16     %v11 = load i32, i32 addrspace(1)* %tmp3, align 4
    17     %tmp4 = bitcast float addrspace(1)* %out to i32 addrspace(1)*
    18     store i32 %v11, i32 addrspace(1)* %tmp4, align 4
    22     %p2 = getelementptr inbounds float, float addrspace(1)* %arr, i64 %tmp5
    23     %tmp6 = bitcast float addrspace(1)* %p2 to i32 addrspace(1)*
    [all …]

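The CHECK lines expect the second GEP to be rewritten in terms of the first, so the shared stride is added once instead of the full index being recomputed. A sketch of such a candidate pair before the rewrite (names hypothetical; the comment shows the assumed rewritten form):

    define void @slsr_candidates(float addrspace(1)* %arr, i64 %i) {
      %i2 = shl i64 %i, 1
      %p1 = getelementptr inbounds float, float addrspace(1)* %arr, i64 %i
      %p2 = getelementptr inbounds float, float addrspace(1)* %arr, i64 %i2
      ; after SLSR (assumed): %p2 = getelementptr float, float addrspace(1)* %p1, i64 %i
      store float 0.0, float addrspace(1)* %p1
      store float 0.0, float addrspace(1)* %p2
      ret void
    }
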
/external/llvm/test/Analysis/CostModel/AMDGPU/

D | addrspacecast.ll
     4   ; CHECK: estimated cost of 0 for {{.*}} addrspacecast i8 addrspace(1)* %ptr to i8 addrspace(4)*
     5   define i8 addrspace(4)* @addrspacecast_global_to_flat(i8 addrspace(1)* %ptr) #0 {
     6     %cast = addrspacecast i8 addrspace(1)* %ptr to i8 addrspace(4)*
     7     ret i8 addrspace(4)* %cast
    11   …K: estimated cost of 0 for {{.*}} addrspacecast <2 x i8 addrspace(1)*> %ptr to <2 x i8 addrspace(4…
    12   define <2 x i8 addrspace(4)*> @addrspacecast_global_to_flat_v2(<2 x i8 addrspace(1)*> %ptr) #0 {
    13     %cast = addrspacecast <2 x i8 addrspace(1)*> %ptr to <2 x i8 addrspace(4)*>
    14     ret <2 x i8 addrspace(4)*> %cast
    18   …: estimated cost of 0 for {{.*}} addrspacecast <32 x i8 addrspace(1)*> %ptr to <32 x i8 addrspace(…
    19   define <32 x i8 addrspace(4)*> @addrspacecast_global_to_flat_v32(<32 x i8 addrspace(1)*> %ptr) #0 {
    [all …]