/external/llvm/test/CodeGen/X86/ |
D | large-gep-chain.ll |
    24  %tmp = getelementptr inbounds float, float* null, i64 1
    25  %tmp3 = getelementptr inbounds float, float* %tmp, i64 1
    26  %tmp4 = getelementptr inbounds float, float* %tmp3, i64 1
    27  %tmp5 = getelementptr inbounds float, float* %tmp4, i64 1
    28  %tmp6 = getelementptr inbounds float, float* %tmp5, i64 1
    29  %tmp7 = getelementptr inbounds float, float* %tmp6, i64 1
    30  %tmp8 = getelementptr inbounds float, float* %tmp7, i64 1
    31  %tmp9 = getelementptr inbounds float, float* %tmp8, i64 1
    32  %tmp10 = getelementptr inbounds float, float* %tmp9, i64 1
    33  %tmp11 = getelementptr inbounds float, float* %tmp10, i64 1
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | large-gep-chain.ll |
    24  %tmp = getelementptr inbounds float, float* null, i64 1
    25  %tmp3 = getelementptr inbounds float, float* %tmp, i64 1
    26  %tmp4 = getelementptr inbounds float, float* %tmp3, i64 1
    27  %tmp5 = getelementptr inbounds float, float* %tmp4, i64 1
    28  %tmp6 = getelementptr inbounds float, float* %tmp5, i64 1
    29  %tmp7 = getelementptr inbounds float, float* %tmp6, i64 1
    30  %tmp8 = getelementptr inbounds float, float* %tmp7, i64 1
    31  %tmp9 = getelementptr inbounds float, float* %tmp8, i64 1
    32  %tmp10 = getelementptr inbounds float, float* %tmp9, i64 1
    33  %tmp11 = getelementptr inbounds float, float* %tmp10, i64 1
    [all …]
|
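Both copies of large-gep-chain.ll above stress the same shape: a very long chain of inbounds GEPs, starting from a null base, where each step advances the pointer by a single float. A minimal sketch of that shape, with a hypothetical function name and only three links in the chain:

    ; Each link moves one element; the backend must fold the whole
    ; chain into a single constant byte offset (3 floats = 12 bytes here).
    define float* @gep_chain_sketch(float* %base) {
    entry:
      %p1 = getelementptr inbounds float, float* %base, i64 1
      %p2 = getelementptr inbounds float, float* %p1, i64 1
      %p3 = getelementptr inbounds float, float* %p2, i64 1
      ; Equivalent to: getelementptr inbounds float, float* %base, i64 3
      ret float* %p3
    }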
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/ARM/ |
D | gep.ll |
     8  ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
     9  %a0 = getelementptr inbounds i8, i8* undef, i32 0
    10  ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16, i16*
    11  %a1 = getelementptr inbounds i16, i16* undef, i32 0
    12  ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32, i32*
    13  %a2 = getelementptr inbounds i32, i32* undef, i32 0
    14  ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i64, i64*
    15  %a3 = getelementptr inbounds i64, i64* undef, i32 0
    16  ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds float, float*
    17  %a4 = getelementptr inbounds float, float* undef, i32 0
    [all …]
|
/external/llvm/test/Analysis/CostModel/ARM/ |
D | gep.ll |
     8  ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
     9  %a0 = getelementptr inbounds i8, i8* undef, i32 0
    10  ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16, i16*
    11  %a1 = getelementptr inbounds i16, i16* undef, i32 0
    12  ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32, i32*
    13  %a2 = getelementptr inbounds i32, i32* undef, i32 0
    14  ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i64, i64*
    15  %a3 = getelementptr inbounds i64, i64* undef, i32 0
    16  ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds float, float*
    17  %a4 = getelementptr inbounds float, float* undef, i32 0
    [all …]
|
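The ARM cost-model test above (and its AArch64 counterpart at the end of this listing) checks one property: a GEP whose index is constant zero yields the same pointer, so the cost model assigns it cost 0. A self-contained sketch with a hypothetical function name:

    ; The zero-index GEP below does not move the pointer, which is why
    ; the CHECK lines above expect "cost of 0" for it.
    define i32* @gep_zero_index_sketch(i32* %p) {
      %a = getelementptr inbounds i32, i32* %p, i32 0
      ret i32* %a
    }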
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Mips/ |
D | dagcombine-store-gep-chain-slow.ll |
    20  %arrayinit.begin = getelementptr inbounds [269 x i8], [269 x i8]* %code, i32 0, i32 0
    22  %arrayinit.element = getelementptr inbounds i8, i8* %arrayinit.begin, i32 1
    25  %arrayinit.element1 = getelementptr inbounds i8, i8* %arrayinit.element, i32 1
    27  %arrayinit.element2 = getelementptr inbounds i8, i8* %arrayinit.element1, i32 1
    29  %arrayinit.element3 = getelementptr inbounds i8, i8* %arrayinit.element2, i32 1
    31  %arrayinit.element4 = getelementptr inbounds i8, i8* %arrayinit.element3, i32 1
    34  %arrayinit.element5 = getelementptr inbounds i8, i8* %arrayinit.element4, i32 1
    36  %arrayinit.element6 = getelementptr inbounds i8, i8* %arrayinit.element5, i32 1
    39  %arrayinit.element7 = getelementptr inbounds i8, i8* %arrayinit.element6, i32 1
    41  %arrayinit.element8 = getelementptr inbounds i8, i8* %arrayinit.element7, i32 1
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/GlobalOpt/ |
D | crash-2.ll |
    14  …inbounds (%struct.S0.1.7.13, %struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (…
    17  …%tmp = load i8, i8* getelementptr inbounds (%struct.S0.1.7.13, %struct.S0.1.7.13* @g_71, i32 0, i3…
|
/external/llvm/test/Transforms/GlobalOpt/ |
D | crash-2.ll |
    14  …inbounds (%struct.S0.1.7.13, %struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (…
    17  …%tmp = load i8, i8* getelementptr inbounds (%struct.S0.1.7.13, %struct.S0.1.7.13* @g_71, i32 0, i3…
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/ |
D | shift-ashr.ll |
    24  ; SSE-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    25  ; SSE-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    26  ; SSE-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    27  ; SSE-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    28  ; SSE-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    29  ; SSE-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    30  ; SSE-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    31  ; SSE-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    32  ; SSE-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 …
    33  ; SSE-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 …
    [all …]
|
D | shift-shl.ll |
    25  ; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    26  ; SSE-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    27  ; SSE-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    29  ; SSE-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    30  ; SSE-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    31  ; SSE-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    37  ; SSE-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    38  ; SSE-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    39  ; SSE-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    44  ; AVX1-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 …
    [all …]
|
D | shift-lshr.ll |
    25  ; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    26  ; SSE-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    27  ; SSE-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    29  ; SSE-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    30  ; SSE-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    31  ; SSE-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    37  ; SSE-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    38  ; SSE-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    39  ; SSE-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    44  ; AVX1-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 …
    [all …]
|
D | arith-mul.ll |
    24  ; SSE-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    25  ; SSE-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    26  ; SSE-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    27  ; SSE-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    28  ; SSE-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    29  ; SSE-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    30  ; SSE-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    31  ; SSE-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    32  ; SSE-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 …
    33  ; SSE-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 …
    [all …]
|
D | arith-add.ll |
    25  ; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    26  ; SSE-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    27  ; SSE-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    29  ; SSE-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    30  ; SSE-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    31  ; SSE-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    37  ; SSE-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    38  ; SSE-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    39  ; SSE-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    44  ; SLM-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    [all …]
|
D | arith-sub.ll |
    25  ; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    26  ; SSE-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    27  ; SSE-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    29  ; SSE-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    30  ; SSE-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    31  ; SSE-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    37  ; SSE-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    38  ; SSE-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    39  ; SSE-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    44  ; SLM-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    [all …]
|
D | insert-after-bundle.ll |
    50  ; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, i8* [[C_ADDR_0352]], i64 1
    51  ; CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, i8* [[D_ADDR_0353]], i64 1
    52  ; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i8, i8* [[A_ADDR_0355]], i64 1
    53  ; CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds i8, i8* [[B_ADDR_0351]], i64 1
    54  ; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i8, i8* [[E_ADDR_0354]], i64 1
    55  ; CHECK-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds i8, i8* [[C_ADDR_0352]], i64 2
    56  ; CHECK-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds i8, i8* [[D_ADDR_0353]], i64 2
    57  ; CHECK-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i8, i8* [[A_ADDR_0355]], i64 2
    58  ; CHECK-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds i8, i8* [[B_ADDR_0351]], i64 2
    59  ; CHECK-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds i8, i8* [[E_ADDR_0354]], i64 2
    [all …]
|
D | ctlz.ll |
    29  ; CHECK-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    30  ; CHECK-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    33  ; CHECK-NEXT: store i64 [[CTLZ0]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i3…
    34  ; CHECK-NEXT: store i64 [[CTLZ1]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i3…
    37  %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
    38  %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
    41  …store i64 %ctlz0, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 0), align 8
    42  …store i64 %ctlz1, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 1), align 8
    48  ; CHECK-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    49  ; CHECK-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    [all …]
|
D | cttz.ll |
    29  ; CHECK-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    30  ; CHECK-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    33  ; CHECK-NEXT: store i64 [[CTTZ0]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i3…
    34  ; CHECK-NEXT: store i64 [[CTTZ1]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i3…
    37  %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
    38  %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
    41  …store i64 %cttz0, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 0), align 8
    42  …store i64 %cttz1, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 1), align 8
    48  ; CHECK-NEXT: [[LD0:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    49  ; CHECK-NEXT: [[LD1:%.*]] = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64,…
    [all …]
|
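ctlz.ll and cttz.ll above share one shape: scalar loads through constant inbounds GEPs into a global source array, one bit-count intrinsic per element, and stores through matching GEPs into a destination array, which SLP is expected to vectorize. A two-element reduction of that shape; the global and value names come from the excerpt, while the global alignments and the intrinsic declaration are assumed:

    @src64 = common global [4 x i64] zeroinitializer, align 32
    @dst64 = common global [4 x i64] zeroinitializer, align 32

    declare i64 @llvm.ctlz.i64(i64, i1)

    define void @ctlz_2i64_sketch() {
      %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
      %ld1 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 1), align 8
      ; i1 false: the intrinsic is defined (returns 64) for a zero input.
      %ctlz0 = call i64 @llvm.ctlz.i64(i64 %ld0, i1 false)
      %ctlz1 = call i64 @llvm.ctlz.i64(i64 %ld1, i1 false)
      store i64 %ctlz0, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 0), align 8
      store i64 %ctlz1, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @dst64, i32 0, i64 1), align 8
      ret void
    }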
/external/llvm/test/CodeGen/SPARC/ |
D | reserved-regs.ll |
    20  …%0 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 0), al…
    21  …%1 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 1), al…
    22  …%2 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 2), al…
    23  …%3 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 3), al…
    24  …%4 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 4), al…
    25  …%5 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 5), al…
    26  …%6 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 6), al…
    27  …%7 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 7), al…
    28  …%8 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 8), al…
    29  …%9 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 9), al…
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SPARC/ |
D | reserved-regs.ll |
    21  …%0 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 0), al…
    22  …%1 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 1), al…
    23  …%2 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 2), al…
    24  …%3 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 3), al…
    25  …%4 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 4), al…
    26  …%5 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 5), al…
    27  …%6 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 6), al…
    28  …%7 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 7), al…
    29  …%8 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 8), al…
    30  …%9 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 9), al…
    [all …]
|
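Both reserved-regs.ll bodies above are a burst of volatile loads from one global i32 array, each through a constant inbounds GEP; volatile keeps every load, so register pressure stays high while the allocator is checked against reserved registers. A two-element sketch of the shape, with the global definition and alignment assumed (the excerpt truncates them):

    @g = global [32 x i32] zeroinitializer, align 4

    define i32 @volatile_burst_sketch() {
      ; volatile: neither load may be folded away or reordered.
      %v0 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 0), align 4
      %v1 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 1), align 4
      %s = add i32 %v0, %v1
      ret i32 %s
    }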
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/ |
D | arm-storebytesmerge.ll |
   107  %v190 = getelementptr inbounds i8, i8* %v50, i32 394
   109  %v191 = getelementptr inbounds i8, i8* %v50, i32 395
   111  %v192 = getelementptr inbounds i8, i8* %v50, i32 396
   113  %v193 = getelementptr inbounds i8, i8* %v50, i32 397
   115  %v194 = getelementptr inbounds i8, i8* %v50, i32 398
   117  %v195 = getelementptr inbounds i8, i8* %v50, i32 399
   119  %v196 = getelementptr inbounds i8, i8* %v50, i32 400
   121  %v197 = getelementptr inbounds i8, i8* %v50, i32 401
   123  %v198 = getelementptr inbounds i8, i8* %v50, i32 402
   125  %v199 = getelementptr inbounds i8, i8* %v50, i32 403
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | schedule-ilp.ll |
     7  %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 1
     9  %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2
    11  %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 3
    14  %tmp8 = getelementptr inbounds float, float addrspace(3)* %arg, i32 5
    16  %tmp10 = getelementptr inbounds float, float addrspace(3)* %arg, i32 6
    18  %tmp12 = getelementptr inbounds float, float addrspace(3)* %arg, i32 7
    21  %tmp15 = getelementptr inbounds float, float addrspace(3)* %arg, i32 9
    23  %tmp17 = getelementptr inbounds float, float addrspace(3)* %arg, i32 10
    25  %tmp19 = getelementptr inbounds float, float addrspace(3)* %arg, i32 11
    28  %tmp22 = getelementptr inbounds float, float addrspace(3)* %arg, i32 13
    [all …]
|
D | schedule-regpressure-limit3.ll |
     9  %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 1
    11  %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2
    13  %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 3
    16  %tmp8 = getelementptr inbounds float, float addrspace(3)* %arg, i32 5
    18  %tmp10 = getelementptr inbounds float, float addrspace(3)* %arg, i32 6
    20  %tmp12 = getelementptr inbounds float, float addrspace(3)* %arg, i32 7
    23  %tmp15 = getelementptr inbounds float, float addrspace(3)* %arg, i32 9
    25  %tmp17 = getelementptr inbounds float, float addrspace(3)* %arg, i32 10
    27  %tmp19 = getelementptr inbounds float, float addrspace(3)* %arg, i32 11
    30  %tmp22 = getelementptr inbounds float, float addrspace(3)* %arg, i32 13
    [all …]
|
D | schedule-regpressure-limit.ll |
    10  %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 1
    12  %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2
    14  %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 3
    17  %tmp8 = getelementptr inbounds float, float addrspace(3)* %arg, i32 5
    19  %tmp10 = getelementptr inbounds float, float addrspace(3)* %arg, i32 6
    21  %tmp12 = getelementptr inbounds float, float addrspace(3)* %arg, i32 7
    24  %tmp15 = getelementptr inbounds float, float addrspace(3)* %arg, i32 9
    26  %tmp17 = getelementptr inbounds float, float addrspace(3)* %arg, i32 10
    28  %tmp19 = getelementptr inbounds float, float addrspace(3)* %arg, i32 11
    31  %tmp22 = getelementptr inbounds float, float addrspace(3)* %arg, i32 13
    [all …]
|
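The three AMDGPU scheduling tests above each derive many addresses from a single local-memory (addrspace(3)) base pointer, with gaps in the index sequence (4, 8, and 12 are skipped in the excerpts). A much-reduced sketch of the pattern, with the kernel name and the loads assumed:

    define amdgpu_kernel void @lds_pressure_sketch(float addrspace(3)* %arg) {
      ; Independent element addresses derived from one LDS base pointer.
      %p1 = getelementptr inbounds float, float addrspace(3)* %arg, i32 1
      %p2 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2
      %v1 = load float, float addrspace(3)* %p1, align 4
      %v2 = load float, float addrspace(3)* %p2, align 4
      %s = fadd float %v1, %v2
      store float %s, float addrspace(3)* %arg, align 4
      ret void
    }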
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | addsub.ll |
    24  %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 0), align 4
    25  %1 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 0), align 4
    27  %2 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 0), align 4
    28  %3 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 0), align 4
    31  store i32 %add2, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 0), align 4
    32  %4 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 1), align 4
    33  %5 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 1), align 4
    35  %6 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 1), align 4
    36  %7 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 1), align 4
    39  store i32 %sub, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 1), align 4
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/ |
D | store-imm-large-stack.ll |
    18  %v3 = load i8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 9), align 1
    19  %v4 = load i8, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 10), align 2
    21  store i8 %v3, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 16), align 8
    22  store i8 %v4, i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g2, i32 0, i32 10), align 2
    24  …store i16 0, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 4…
    25  …%v5 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i3…
    26  …store i16 %v5, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32…
    27  …%v6 = load i32, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i3…
    28  …store i32 %v6, i32* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32…
    29  …%v7 = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i3…
    [all …]
|
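Alongside plain i8 accesses, store-imm-large-stack.ll above addresses wider loads and stores through constant-expression bitcasts of inbounds GEPs into 1024-byte arrays; the exact offsets are cut off in the excerpt. A sketch of that addressing idiom, with the globals assumed and the byte offset (12) chosen arbitrarily:

    @g0 = global [1024 x i8] zeroinitializer, align 8
    @g3 = global [1024 x i8] zeroinitializer, align 8

    define void @bitcast_gep_sketch() {
      ; Read two bytes of @g0 as one i16 through a constant-expression
      ; bitcast of an inbounds GEP; offset 12 is an assumed example.
      %v = load i16, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g0, i32 0, i32 12) to i16*), align 2
      store i16 %v, i16* bitcast (i8* getelementptr inbounds ([1024 x i8], [1024 x i8]* @g3, i32 0, i32 12) to i16*), align 2
      ret void
    }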
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/AArch64/ |
D | gep.ll |
     8  ; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
     9  %a = getelementptr inbounds i8, i8* %p, i32 0
    16  ; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16, i16*
    17  %a = getelementptr inbounds i16, i16* %p, i32 0
    24  ; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32, i32*
    25  %a = getelementptr inbounds i32, i32* %p, i32 0
    32  ; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i64, i64*
    33  %a = getelementptr inbounds i64, i64* %p, i32 0
    40  ; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
    41  %a = getelementptr inbounds i8, i8* %p, i32 1024
    [all …]
|