
Searched refs:tmp20 (Results 1 – 25 of 375) sorted by relevance


/external/llvm-project/llvm/test/Analysis/ScalarEvolution/
pr40420.ll
13 %tmp8 = phi i8 [ %tmp6, %bb ], [ %tmp20.lcssa, %outer_latch ]
18 %tmp20.lcssa = phi i8 [ %tmp20.6, %inner_loop ]
23 %tmp13 = phi i8 [ %tmp8, %outer_loop ], [ %tmp20.1, %inner_latch ]
30 %tmp20 = mul i8 %tmp19, %tmp17
34 %tmp18.1 = mul i8 %tmp20, 100
35 %tmp19.1 = mul i8 %tmp18.1, %tmp20
36 %tmp20.1 = mul i8 %tmp19.1, %tmp17.1
43 %tmp20.6 = mul i8 %tmp19.1, %tmp17.1
truncate.ll
44 %tmp20 = add i64 %tmp19, %tmp18
45 %tmp21 = trunc i64 %tmp20 to i32
58 %tmp28 = sub i64 %tmp27, %tmp20
83 ; CHECK: %tmp27 = add i64 %tmp20, -1
84 ; CHECK-NOT: (-1 + %tmp20)<nuw>
85 ; CHECK-NEXT: --> (-1 + %tmp20) U:
92 %tmp20 = phi i64 [ %shift, %bb ], [ 0, %bb36 ]
97 %tmp27 = add i64 %tmp20, -1
trivial-phis.ll
56 %tmp18 = phi i64 [ 0, %bb16 ], [ %tmp20, %bb19 ]
60 %tmp20 = add nuw nsw i64 %tmp18, 1
61 %tmp21 = icmp slt i64 %tmp20, %arg
123 ; CHECK: %tmp18 = phi i64 [ %tmp20, %bb17 ], [ 0, %bb13 ]
155 %tmp18 = phi i64 [ %tmp20, %bb17 ], [ 0, %bb13 ]
156 %tmp20 = add nuw nsw i64 %tmp18, 1
161 %tmp21 = icmp slt i64 %tmp20, %arg
/external/llvm/test/Transforms/StructurizeCFG/
nested-loop-order.ll
14 %temp4.0.ph = phi i32 [ 0, %main_body ], [ %tmp20, %ENDIF28 ]
20 %temp4.0 = phi i32 [ %temp4.0.ph, %LOOP.outer ], [ %tmp20, %IF29 ]
21 %tmp20 = add i32 %temp4.0, 1
22 %tmp22 = icmp sgt i32 %tmp20, 3
32 %tmp23 = icmp eq i32 %tmp20, 3
40 %tmp31 = icmp sgt i32 %tmp20, 1
49 %tmp32 = icmp sgt i32 %tmp20, 2
62 %tmp36 = icmp sgt i32 %tmp20, 2
/external/llvm-project/llvm/test/Transforms/StructurizeCFG/
nested-loop-order.ll
15 %temp4.0.ph = phi i32 [ 0, %main_body ], [ %tmp20, %ENDIF28 ]
21 %temp4.0 = phi i32 [ %temp4.0.ph, %LOOP.outer ], [ %tmp20, %IF29 ]
22 %tmp20 = add i32 %temp4.0, 1
23 %tmp22 = icmp sgt i32 %tmp20, 3
33 %tmp23 = icmp eq i32 %tmp20, 3
41 %tmp31 = icmp sgt i32 %tmp20, 1
50 %tmp32 = icmp sgt i32 %tmp20, 2
63 %tmp36 = icmp sgt i32 %tmp20, 2
/external/llvm-project/llvm/test/CodeGen/X86/
2008-02-27-DeadSlotElimBug.ll
39 %tmp20.i39 = fadd double %tmp17.i, %tmp17.i63 ; <double> [#uses=1]
40 %tmp20.i23 = fadd double %tmp20.i39, %tmp17.i76 ; <double> [#uses=1]
48 %tmp20.i7 = getelementptr %struct.CompAtom, %struct.CompAtom* %d, i32 0, i32 2 ; <i32*> [#uses=2]
50 %tmp74.i = load i32, i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
54 %tmp88.i = load i32, i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
58 %tmp6.i = fadd double 0.000000e+00, %tmp20.i23 ; <double> [#uses=0]
fp-stack-compare-cmov.ll
9 %tmp20 = fsub float -0.000000e+00, %tmp
10 %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
fp-stack-compare.ll
11 %tmp20 = fsub float -0.000000e+00, %tmp
12 %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
tailcc-fastisel.ll
7 %tmp20 = tail call tailcc i8* @"visit_array_aux<`Reference>"(%0 %arg, i32 undef) ; <i8*> [#uses=1]
9 ret i8* %tmp20
/external/llvm/test/CodeGen/X86/
2008-02-27-DeadSlotElimBug.ll
39 %tmp20.i39 = fadd double %tmp17.i, %tmp17.i63 ; <double> [#uses=1]
40 %tmp20.i23 = fadd double %tmp20.i39, %tmp17.i76 ; <double> [#uses=1]
48 %tmp20.i7 = getelementptr %struct.CompAtom, %struct.CompAtom* %d, i32 0, i32 2 ; <i32*> [#uses=2]
50 %tmp74.i = load i32, i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
54 %tmp88.i = load i32, i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
58 %tmp6.i = fadd double 0.000000e+00, %tmp20.i23 ; <double> [#uses=0]
fp-stack-compare-cmov.ll
9 %tmp20 = fsub float -0.000000e+00, %tmp
10 %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
fp-stack-compare.ll
11 %tmp20 = fsub float -0.000000e+00, %tmp
12 %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
tailcall-fastisel.ll
7 %tmp20 = tail call fastcc i8* @"visit_array_aux<`Reference>"(%0 %arg, i32 undef) ; <i8*> [#uses=1]
9 ret i8* %tmp20
/external/llvm-project/llvm/test/Analysis/IVUsers/
deep_recursion_in_scev.ll
27 %tmp20 = load i32, i32 addrspace(1)* %tmp19, align 8, !range !0
49 %tmp36 = icmp ult i32 %tmp35, %tmp20
80 %tmp55 = icmp ult i32 %tmp53, %tmp20
95 %tmp65 = icmp ult i32 %tmp63, %tmp20
107 %tmp73 = icmp ult i32 %tmp71, %tmp20
119 %tmp81 = icmp ult i32 %tmp79, %tmp20
131 %tmp89 = icmp ult i32 %tmp87, %tmp20
143 %tmp97 = icmp ult i32 %tmp95, %tmp20
155 %tmp105 = icmp ult i32 %tmp103, %tmp20
167 %tmp113 = icmp ult i32 %tmp111, %tmp20
/external/llvm/test/CodeGen/AMDGPU/
sgpr-copy.ll
17 %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
18 %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 0)
19 %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
20 %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 32)
41 %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
42 %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
43 %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 32)
44 %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 36)
45 %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 40)
46 %tmp25 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 48)
[all …]
/external/llvm-project/polly/test/ScopInfo/
multidim_fortran_2d_params.ll
20 …ded2, tmp20, tmp19] -> { Stmt_bb17[i0, i1] -> MemRef_a[-1 + tmp20 + i0, nj_loaded2 + tmp19 + i1] :…
57 %tmp20 = load i64, i64* %arg3, align 8
58 %tmp21 = add i64 %tmp20, %tmp13
schedule-incorrectly-contructed-in-case-of-infinite-loop.ll
38 %tmp20 = load i32, i32* %tmp9, align 4
39 %tmp21 = sext i32 %tmp20 to i64
83 %tmp20 = load i32, i32* %tmp9, align 4
84 %tmp21 = sext i32 %tmp20 to i64
/external/llvm-project/llvm/test/Transforms/GVNHoist/
pr35222-hoist-load.ll
22 %tmp20 = load i32, i32* @heap, align 4
23 ret i32 %tmp20
65 %tmp20 = load i32, i32* getelementptr inbounds ([573 x i32], [573 x i32]* @j,
67 ret i32 %tmp20
/external/libjpeg-turbo/
jidctint.c
1078 JLONG tmp20, tmp21, tmp22, tmp23, tmp24; in jpeg_idct_10x10() local
1117 tmp20 = tmp10 + tmp12; in jpeg_idct_10x10()
1151 wsptr[8 * 0] = (int)RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS - PASS1_BITS); in jpeg_idct_10x10()
1152 wsptr[8 * 9] = (int)RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS - PASS1_BITS); in jpeg_idct_10x10()
1189 tmp20 = tmp10 + tmp12; in jpeg_idct_10x10()
1223 outptr[0] = range_limit[(int)RIGHT_SHIFT(tmp20 + tmp10, in jpeg_idct_10x10()
1226 outptr[9] = range_limit[(int)RIGHT_SHIFT(tmp20 - tmp10, in jpeg_idct_10x10()
1273 JLONG tmp20, tmp21, tmp22, tmp23, tmp24, tmp25; in jpeg_idct_11x11() local
1301 tmp20 = MULTIPLY(z2 - z3, FIX(2.546640132)); /* c2+c4 */ in jpeg_idct_11x11()
1307 tmp21 = tmp20 + tmp23 + tmp25 - in jpeg_idct_11x11()
[all …]
/external/llvm/test/Transforms/InstCombine/
2007-05-10-icmp-or.ll
3 %tmp20 = icmp ugt i32 %tmp9, 255 ; <i1> [#uses=1]
5 %bothcond = or i1 %tmp20, %tmp11.not ; <i1> [#uses=1]
gepphigep.ll
21 %tmp20 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp19
22 %tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
27 %phi = phi %struct2* [ %tmp10, %bb1 ], [ %tmp20, %bb2 ]
47 %tmp20 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp19
48 %tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
62 define i32 @test3(%struct3* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19, i64 %tmp20, i64 %tmp21) personali…
74 %tmp2 = getelementptr inbounds %struct3, %struct3* %tmp, i64 %tmp20, i32 1
/external/llvm-project/llvm/test/Transforms/InstCombine/
2007-05-10-icmp-or.ll
3 %tmp20 = icmp ugt i32 %tmp9, 255 ; <i1> [#uses=1]
5 %bothcond = or i1 %tmp20, %tmp11.not ; <i1> [#uses=1]
gepphigep.ll
21 %tmp20 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp19
22 %tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
27 %phi = phi %struct2* [ %tmp10, %bb1 ], [ %tmp20, %bb2 ]
47 %tmp20 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp19
48 %tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
62 define i32 @test3(%struct3* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19, i64 %tmp20, i64 %tmp21) personali…
74 %tmp2 = getelementptr inbounds %struct3, %struct3* %tmp, i64 %tmp20, i32 1
/external/llvm-project/llvm/test/CodeGen/AMDGPU/
sgpr-copy.ll
11 %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !0
12 %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 0, i32 0)
13 %tmp22 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 16, i32 0)
14 %tmp23 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 32, i32 0)
35 %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !0
36 %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 16, i32 0)
37 %tmp22 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 32, i32 0)
38 %tmp23 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 36, i32 0)
39 %tmp24 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 40, i32 0)
40 %tmp25 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 48, i32 0)
[all …]
waitcnt-looptest.ll
47 %tmp20 = phi i32 [ 0, %bb ], [ %tmp100, %bb18 ]
55 %tmp28 = or i32 %tmp20, 1
63 %tmp36 = add nuw nsw i32 %tmp20, 2
71 %tmp44 = add nuw nsw i32 %tmp20, 3
79 %tmp52 = add nuw nsw i32 %tmp20, 4
87 %tmp60 = add nuw nsw i32 %tmp20, 5
95 %tmp68 = add nuw nsw i32 %tmp20, 6
103 %tmp76 = add nuw nsw i32 %tmp20, 7
111 %tmp84 = add nuw nsw i32 %tmp20, 8
119 %tmp92 = add nuw nsw i32 %tmp20, 9
[all …]
