/external/llvm/test/CodeGen/X86/ |
D | large-gep-chain.ll |
    24 %tmp = getelementptr inbounds float, float* null, i64 1
    25 %tmp3 = getelementptr inbounds float, float* %tmp, i64 1
    26 %tmp4 = getelementptr inbounds float, float* %tmp3, i64 1
    27 %tmp5 = getelementptr inbounds float, float* %tmp4, i64 1
    28 %tmp6 = getelementptr inbounds float, float* %tmp5, i64 1
    29 %tmp7 = getelementptr inbounds float, float* %tmp6, i64 1
    30 %tmp8 = getelementptr inbounds float, float* %tmp7, i64 1
    31 %tmp9 = getelementptr inbounds float, float* %tmp8, i64 1
    32 %tmp10 = getelementptr inbounds float, float* %tmp9, i64 1
    33 %tmp11 = getelementptr inbounds float, float* %tmp10, i64 1
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | large-gep-chain.ll |
    24 %tmp = getelementptr inbounds float, float* null, i64 1
    25 %tmp3 = getelementptr inbounds float, float* %tmp, i64 1
    26 %tmp4 = getelementptr inbounds float, float* %tmp3, i64 1
    27 %tmp5 = getelementptr inbounds float, float* %tmp4, i64 1
    28 %tmp6 = getelementptr inbounds float, float* %tmp5, i64 1
    29 %tmp7 = getelementptr inbounds float, float* %tmp6, i64 1
    30 %tmp8 = getelementptr inbounds float, float* %tmp7, i64 1
    31 %tmp9 = getelementptr inbounds float, float* %tmp8, i64 1
    32 %tmp10 = getelementptr inbounds float, float* %tmp9, i64 1
    33 %tmp11 = getelementptr inbounds float, float* %tmp10, i64 1
    [all …]
|
/external/llvm/test/Analysis/CostModel/ARM/ |
D | gep.ll |
    8 ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
    9 %a0 = getelementptr inbounds i8, i8* undef, i32 0
    10 ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16, i16*
    11 %a1 = getelementptr inbounds i16, i16* undef, i32 0
    12 ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32, i32*
    13 %a2 = getelementptr inbounds i32, i32* undef, i32 0
    14 ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i64, i64*
    15 %a3 = getelementptr inbounds i64, i64* undef, i32 0
    16 ;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds float, float*
    17 %a4 = getelementptr inbounds float, float* undef, i32 0
    [all …]
|
/external/llvm-project/llvm/test/Transforms/MemCpyOpt/ |
D | store-to-memset.ll |
    10 ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[TMP0:%.*]], i64 [[TMP2:%.*]]
    11 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[TMP3]], i64 -32
    13 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, i8* [[TMP4]], i64 1
    14 ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, i8* [[TMP4]], i64 2
    15 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[TMP4]], i64 3
    16 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, i8* [[TMP4]], i64 4
    17 ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, i8* [[TMP4]], i64 5
    18 ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, i8* [[TMP4]], i64 6
    19 ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, i8* [[TMP4]], i64 7
    20 ; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, i8* [[TMP4]], i64 8
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/Mips/ |
D | dagcombine-store-gep-chain-slow.ll |
    20 %arrayinit.begin = getelementptr inbounds [269 x i8], [269 x i8]* %code, i32 0, i32 0
    22 %arrayinit.element = getelementptr inbounds i8, i8* %arrayinit.begin, i32 1
    25 %arrayinit.element1 = getelementptr inbounds i8, i8* %arrayinit.element, i32 1
    27 %arrayinit.element2 = getelementptr inbounds i8, i8* %arrayinit.element1, i32 1
    29 %arrayinit.element3 = getelementptr inbounds i8, i8* %arrayinit.element2, i32 1
    31 %arrayinit.element4 = getelementptr inbounds i8, i8* %arrayinit.element3, i32 1
    34 %arrayinit.element5 = getelementptr inbounds i8, i8* %arrayinit.element4, i32 1
    36 %arrayinit.element6 = getelementptr inbounds i8, i8* %arrayinit.element5, i32 1
    39 %arrayinit.element7 = getelementptr inbounds i8, i8* %arrayinit.element6, i32 1
    41 %arrayinit.element8 = getelementptr inbounds i8, i8* %arrayinit.element7, i32 1
    [all …]
|
/external/llvm/test/Transforms/GlobalOpt/ |
D | crash-2.ll |
    14 …inbounds (%struct.S0.1.7.13, %struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (…
    17 …%tmp = load i8, i8* getelementptr inbounds (%struct.S0.1.7.13, %struct.S0.1.7.13* @g_71, i32 0, i3…
|
/external/llvm-project/llvm/test/Transforms/GlobalOpt/ |
D | crash-2.ll |
    14 …inbounds (%struct.S0.1.7.13, %struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (…
    17 …%tmp = load i8, i8* getelementptr inbounds (%struct.S0.1.7.13, %struct.S0.1.7.13* @g_71, i32 0, i3…
|
/external/llvm-project/llvm/test/Analysis/CostModel/RISCV/ |
D | gep.ll |
    9 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a0 = getelementptr inbounds…
    10 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a1 = getelementptr inbounds…
    11 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a2 = getelementptr inbounds…
    12 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a3 = getelementptr inbounds…
    13 ; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %a4 = getelementptr inbounds…
    14 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %a5 = getelementptr inbounds…
    15 ; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %a6 = getelementptr inbounds…
    16 ; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %ai = getelementptr inbounds…
    19 %a0 = getelementptr inbounds i8, i8* %a, i32 0
    20 %a1 = getelementptr inbounds i8, i8* %a, i32 1
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/SPARC/ |
D | reserved-regs.ll |
    22 …%0 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 0), al…
    23 …%1 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 1), al…
    24 …%2 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 2), al…
    25 …%3 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 3), al…
    26 …%4 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 4), al…
    27 …%5 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 5), al…
    28 …%6 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 6), al…
    29 …%7 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 7), al…
    30 …%8 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 8), al…
    31 …%9 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 9), al…
    [all …]
|
/external/llvm/test/CodeGen/SPARC/ |
D | reserved-regs.ll |
    20 …%0 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 0), al…
    21 …%1 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 1), al…
    22 …%2 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 2), al…
    23 …%3 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 3), al…
    24 …%4 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 4), al…
    25 …%5 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 5), al…
    26 …%6 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 6), al…
    27 …%7 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 7), al…
    28 …%8 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 8), al…
    29 …%9 = load volatile i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @g, i64 0, i64 9), al…
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | arm-storebytesmerge.ll |
    97 %v190 = getelementptr inbounds i8, i8* %v50, i32 394
    99 %v191 = getelementptr inbounds i8, i8* %v50, i32 395
    101 %v192 = getelementptr inbounds i8, i8* %v50, i32 396
    103 %v193 = getelementptr inbounds i8, i8* %v50, i32 397
    105 %v194 = getelementptr inbounds i8, i8* %v50, i32 398
    107 %v195 = getelementptr inbounds i8, i8* %v50, i32 399
    109 %v196 = getelementptr inbounds i8, i8* %v50, i32 400
    111 %v197 = getelementptr inbounds i8, i8* %v50, i32 401
    113 %v198 = getelementptr inbounds i8, i8* %v50, i32 402
    115 %v199 = getelementptr inbounds i8, i8* %v50, i32 403
    [all …]
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | shift-shl.ll |
    26 ; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    27 ; SSE-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    28 ; SSE-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    30 ; SSE-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    31 ; SSE-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    32 ; SSE-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    38 ; SSE-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    39 ; SSE-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    40 ; SSE-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    45 ; AVX1-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 …
    [all …]
|
D | horizontal-list.ll |
    23 …P6:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [2…
    24 …P7:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [2…
    50 …P6:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [2…
    51 …P7:%.*]] = load <2 x float>, <2 x float>* bitcast (float* getelementptr inbounds ([20 x float], [2…
    69 …%1 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 0), a…
    70 …%2 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 0), …
    73 …%3 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 1), a…
    74 …%4 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 1), …
    77 …%5 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr, i64 0, i64 2), a…
    78 …%6 = load float, float* getelementptr inbounds ([20 x float], [20 x float]* @arr1, i64 0, i64 2), …
    [all …]
|
D | shift-lshr.ll |
    26 ; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    27 ; SSE-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    28 ; SSE-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    30 ; SSE-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    31 ; SSE-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    32 ; SSE-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    38 ; SSE-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    39 ; SSE-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    40 ; SSE-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    45 ; AVX1-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 …
    [all …]
|
D | arith-mul.ll |
    27 ; SSE-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    28 ; SSE-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    29 ; SSE-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    30 ; SSE-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    31 ; SSE-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    32 ; SSE-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    33 ; SSE-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    34 ; SSE-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    35 ; SSE-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 …
    36 ; SSE-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 …
    [all …]
|
D | arith-add.ll |
    28 ; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    29 ; SSE-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    30 ; SSE-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    32 ; SSE-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    33 ; SSE-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    34 ; SSE-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    40 ; SSE-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    41 ; SSE-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    42 ; SSE-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    47 ; SLM-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    [all …]
|
D | arith-sub.ll |
    28 ; SSE-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    29 ; SSE-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    30 ; SSE-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    32 ; SSE-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    33 ; SSE-NEXT: [[TMP7:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    34 ; SSE-NEXT: [[TMP8:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    40 ; SSE-NEXT: store <2 x i64> [[TMP10]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    41 ; SSE-NEXT: store <2 x i64> [[TMP11]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    42 ; SSE-NEXT: store <2 x i64> [[TMP12]], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64…
    47 ; SLM-NEXT: [[TMP2:%.*]] = load <2 x i64>, <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x…
    [all …]
|
D | shift-ashr.ll |
    25 ; SSE-NEXT: [[A0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    26 ; SSE-NEXT: [[A1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    27 ; SSE-NEXT: [[A2:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    28 ; SSE-NEXT: [[A3:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    29 ; SSE-NEXT: [[A4:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    30 ; SSE-NEXT: [[A5:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    31 ; SSE-NEXT: [[A6:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    32 ; SSE-NEXT: [[A7:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @a64, i32 …
    33 ; SSE-NEXT: [[B0:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 …
    34 ; SSE-NEXT: [[B1:%.*]] = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @b64, i32 …
    [all …]
|
/external/llvm-project/polly/test/Simplify/ |
D | scalability2.ll |
    1 ; RUN: opt %loadPolly -polly-ignore-inbounds -polly-simplify -analyze < %s | FileCheck %s -match-fu…
    22 %A0 = getelementptr inbounds double, double* %A, i32 %p0
    23 %A1 = getelementptr inbounds double, double* %A, i32 %p1
    24 %A2 = getelementptr inbounds double, double* %A, i32 %p2
    25 %A3 = getelementptr inbounds double, double* %A, i32 %p3
    26 %A4 = getelementptr inbounds double, double* %A, i32 %p4
    27 %A5 = getelementptr inbounds double, double* %A, i32 %p5
    28 %A6 = getelementptr inbounds double, double* %A, i32 %p6
    29 %A7 = getelementptr inbounds double, double* %A, i32 %p7
    30 %A8 = getelementptr inbounds double, double* %A, i32 %p8
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | schedule-regpressure-limit3.ll |
    9 %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 1
    11 %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2
    13 %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 3
    16 %tmp8 = getelementptr inbounds float, float addrspace(3)* %arg, i32 5
    18 %tmp10 = getelementptr inbounds float, float addrspace(3)* %arg, i32 6
    20 %tmp12 = getelementptr inbounds float, float addrspace(3)* %arg, i32 7
    23 %tmp15 = getelementptr inbounds float, float addrspace(3)* %arg, i32 9
    25 %tmp17 = getelementptr inbounds float, float addrspace(3)* %arg, i32 10
    27 %tmp19 = getelementptr inbounds float, float addrspace(3)* %arg, i32 11
    30 %tmp22 = getelementptr inbounds float, float addrspace(3)* %arg, i32 13
    [all …]
|
D | schedule-regpressure-limit.ll |
    10 %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 1
    12 %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2
    14 %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 3
    17 %tmp8 = getelementptr inbounds float, float addrspace(3)* %arg, i32 5
    19 %tmp10 = getelementptr inbounds float, float addrspace(3)* %arg, i32 6
    21 %tmp12 = getelementptr inbounds float, float addrspace(3)* %arg, i32 7
    24 %tmp15 = getelementptr inbounds float, float addrspace(3)* %arg, i32 9
    26 %tmp17 = getelementptr inbounds float, float addrspace(3)* %arg, i32 10
    28 %tmp19 = getelementptr inbounds float, float addrspace(3)* %arg, i32 11
    31 %tmp22 = getelementptr inbounds float, float addrspace(3)* %arg, i32 13
    [all …]
|
D | schedule-ilp.ll |
    7 %tmp = getelementptr inbounds float, float addrspace(3)* %arg, i32 1
    9 %tmp3 = getelementptr inbounds float, float addrspace(3)* %arg, i32 2
    11 %tmp5 = getelementptr inbounds float, float addrspace(3)* %arg, i32 3
    14 %tmp8 = getelementptr inbounds float, float addrspace(3)* %arg, i32 5
    16 %tmp10 = getelementptr inbounds float, float addrspace(3)* %arg, i32 6
    18 %tmp12 = getelementptr inbounds float, float addrspace(3)* %arg, i32 7
    21 %tmp15 = getelementptr inbounds float, float addrspace(3)* %arg, i32 9
    23 %tmp17 = getelementptr inbounds float, float addrspace(3)* %arg, i32 10
    25 %tmp19 = getelementptr inbounds float, float addrspace(3)* %arg, i32 11
    28 %tmp22 = getelementptr inbounds float, float addrspace(3)* %arg, i32 13
    [all …]
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | addsub.ll |
    24 %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 0), align 4
    25 %1 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 0), align 4
    27 %2 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 0), align 4
    28 %3 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 0), align 4
    31 store i32 %add2, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 0), align 4
    32 %4 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 1), align 4
    33 %5 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 1), align 4
    35 %6 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @d, i32 0, i64 1), align 4
    36 %7 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @e, i32 0, i64 1), align 4
    39 store i32 %sub, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 1), align 4
    [all …]
|
/external/llvm-project/llvm/test/Analysis/CostModel/ARM/ |
D | gep.ll |
    12 …Model: Found an estimated cost of 0 for instruction: %a0 = getelementptr inbounds i8, i8* %a, i32 0
    13 …Model: Found an estimated cost of 0 for instruction: %a1 = getelementptr inbounds i8, i8* %a, i32 1
    14 …del: Found an estimated cost of 1 for instruction: %am4 = getelementptr inbounds i8, i8* %a, i32 -1
    15 …del: Found an estimated cost of 0 for instruction: %a31 = getelementptr inbounds i8, i8* %a, i32 31
    16 …del: Found an estimated cost of 1 for instruction: %a32 = getelementptr inbounds i8, i8* %a, i32 32
    17 …l: Found an estimated cost of 1 for instruction: %a4095 = getelementptr inbounds i8, i8* %a, i32 4…
    18 …l: Found an estimated cost of 1 for instruction: %a4096 = getelementptr inbounds i8, i8* %a, i32 4…
    19 …l: Found an estimated cost of 1 for instruction: %am255 = getelementptr inbounds i8, i8* %a, i32 -…
    20 …l: Found an estimated cost of 1 for instruction: %am256 = getelementptr inbounds i8, i8* %a, i32 -…
    21 …odel: Found an estimated cost of 0 for instruction: %ai = getelementptr inbounds i8, i8* %a, i32 %i
    [all …]
|
/external/llvm-project/llvm/test/Analysis/CostModel/AArch64/ |
D | gep.ll |
    8 ; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
    9 %a = getelementptr inbounds i8, i8* %p, i32 0
    16 ; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16, i16*
    17 %a = getelementptr inbounds i16, i16* %p, i32 0
    24 ; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32, i32*
    25 %a = getelementptr inbounds i32, i32* %p, i32 0
    32 ; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i64, i64*
    33 %a = getelementptr inbounds i64, i64* %p, i32 0
    40 ; CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
    41 %a = getelementptr inbounds i8, i8* %p, i32 1024
    [all …]
|