/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
  demand_shrink_nsw.ll
    8: ; CHECK: add nuw i32 %v42, 1533579450
    21: %v42 = sub i32 %v35, %v41
    22: %v43 = add nsw i32 %v42, 1533579450

/external/llvm/test/Transforms/InstCombine/
  demand_shrink_nsw.ll
    8: ; CHECK: add nuw i32 %v42, 1533579450
    21: %v42 = sub i32 %v35, %v41
    22: %v43 = add nsw i32 %v42, 1533579450

/external/deqp-deps/glslang/Test/baseResults/
  spv.swizzleInversion.frag.out
    16: Name 17 "v42"
    47: 17(v42): 16(ptr) Variable Function
    58: Store 17(v42) 21

/external/eigen/test/
  geo_orthomethods.cpp
    67: v42 = Vector4::Random();  in orthomethods_3() local
    68: v40.w() = v41.w() = v42.w() = 0;  in orthomethods_3()
    69: v42.template head<3>() = v40.template head<3>().cross(v41.template head<3>());  in orthomethods_3()
    70: VERIFY_IS_APPROX(v40.cross3(v41), v42);  in orthomethods_3()

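The Eigen hit is ordinary C++ rather than test-suite IR. As a side note, here is a minimal standalone sketch, assuming float scalars (the test itself is templated over the scalar type), of the property geo_orthomethods.cpp verifies at these lines: for 4-vectors whose w components are zero, cross3() must agree with the 3D cross product of the xyz heads.

    // Minimal sketch of the cross3() property; variable names mirror the
    // test, and float scalars are an assumption made for concreteness.
    #include <Eigen/Dense>
    #include <cassert>

    int main() {
      Eigen::Vector4f v40 = Eigen::Vector4f::Random();
      Eigen::Vector4f v41 = Eigen::Vector4f::Random();
      Eigen::Vector4f v42 = Eigen::Vector4f::Random();
      v40.w() = v41.w() = v42.w() = 0.0f;

      // Expected result: 3D cross product of the xyz parts, with w = 0.
      v42.head<3>() = v40.head<3>().cross(v41.head<3>());

      // cross3() computes the cross product directly on 4-vectors (a
      // vectorization-friendly form) and must match the reference value.
      assert(v40.cross3(v41).isApprox(v42));
      return 0;
    }
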
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/
  jump-prob.ll
    116: %v42 = getelementptr inbounds [2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 1, i32 3, i32 %v2
    117: …0\0A if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* %v42, i32* %v42, i32 1, i32* %v42) #0, …
    118: %v44 = load i32, i32* %v42, align 4, !tbaa !3
    137: %v54 = load i32, i32* %v42, align 4, !tbaa !3

  early-if-merge-loop.ll
    20: %v8 = phi i32 [ 0, %b2 ], [ %v42, %should_merge ]
    54: %v42 = trunc i64 %v41 to i32
    73: %v57 = tail call i64 @llvm.hexagon.A2.combinew(i32 %v42, i32 %v23)

  bug-allocframe-size.ll
    81: %v42 = fmul float %v41, 5.000000e-01
    85: %v43 = fsub float -0.000000e+00, %v42
    89: %v44 = phi float [ %v43, %b9 ], [ %v42, %b8 ]

  swp-phi-def-use.ll
    65: %v26 = phi i32 [ undef, %b2 ], [ %v42, %b3 ], [ %v42, %b13 ]
    110: %v42 = load i32, i32* %v10, align 4

  bkfir.ll
    35: %v16 = phi i32 [ %v42, %b3 ], [ 0, %b2 ]
    61: %v42 = trunc i64 %v41 to i32
    73: %v51 = ashr i32 %v42, 18

  v60-haar-postinc.ll
    67: %v42 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v35, <16 x i32> %v37)
    71: %v46 = tail call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %v42, <16 x i32> %v44)
    72: %v47 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %v42, <16 x i32> %v44)

  swp-resmii-1.ll
    65: %v42 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v35, <16 x i32> %v37)
    69: %v46 = tail call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %v42, <16 x i32> %v44)
    70: %v47 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %v42, <16 x i32> %v44)

  assert-postinc-ptr-not-value.ll
    75: %v42 = load i32, i32* %v33, align 4
    76: %v43 = lshr i32 %v42, 2
    163: %v42 = and i32 %v14, -2
    164: store i32 %v42, i32* %v13, align 4

  upper-mpy.ll
    70: %v42 = lshr i64 %v41, 32
    71: %v43 = trunc i64 %v42 to i32

  regscavengerbug.ll
    88: %v42 = bitcast %3* %v2 to i8*
    102: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v25, i8* align 8 %v42, i32 24, i1 false)
    115: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v59, i8* align 8 %v42, i32 24, i1 false)
    128: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v67, i8* align 8 %v42, i32 24, i1 false)
    141: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v75, i8* align 8 %v42, i32 24, i1 false)
    154: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v83, i8* align 8 %v42, i32 24, i1 false)
    167: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v91, i8* align 8 %v42, i32 24, i1 false)
    180: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v99, i8* align 8 %v42, i32 24, i1 false)
    193: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v107, i8* align 8 %v42, i32 24, i1 false)

  swp-bad-sched.ll
    76: %v42 = load i32, i32* %v41, align 4, !tbaa !0
    77: %v43 = add nsw i32 %v42, %v40
    79: %v44 = sub nsw i32 %v40, %v42

  constext-replace.ll
    119: %v42 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v40
    120: %v43 = load i32, i32* %v42, align 4
    122: store i32 %v35, i32* %v42, align 4

  bug6757-endloop.ll
    99: %v42 = load volatile i32, i32* %v15, align 4
    101: %v44 = and i32 %v43, %v42

  swp-phi.ll
    56: %v42 = phi float [ undef, %b0 ], [ %v41, %b1 ]
    57: %v43 = phi float [ undef, %b0 ], [ %v42, %b1 ]
    97: %v83 = fmul float %v49, %v42

  constext-immstore.ll
    127: %v42 = tail call i32 @f4(i8* %v38, i8* %v41) #0
    128: %v43 = icmp sgt i32 %v42, 0
    137: %v49 = phi i32 [ %v42, %b12 ]

  vect_setcc_v2i16.ll
    75: %v42 = icmp slt i32 %v30, 4
    77: br i1 %v42, label %b6, label %b4

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
  pseudo_cmov_lower.ll
    134: double %v42, double %v43,
    179: %t43 = fadd double %v43, %v42
    191: %t41 = select i1 %cmp, double %v42, double %t43
    232: <64 x i1> %v42, <64 x i1> %v43,
    253: %t43 = xor <64 x i1> %v43, %v42
    259: %t41 = select i1 %cmp, <64 x i1> %v42, <64 x i1> %t43

  sink-local-value.ll
    83: %v42 = inttoptr i32 42 to i8*, !dbg !34
    84: call void @llvm.dbg.value(metadata i8* %v42, metadata !29, metadata !DIExpression()), !dbg !34
    93: %r.0 = phi i8* [ %v42, %if.then ], [ %v1, %if.else ], !dbg !38

/external/llvm/test/CodeGen/X86/
  pseudo_cmov_lower.ll
    134: double %v42, double %v43,
    179: %t43 = fadd double %v43, %v42
    191: %t41 = select i1 %cmp, double %v42, double %t43
    232: <64 x i1> %v42, <64 x i1> %v43,
    253: %t43 = xor <64 x i1> %v43, %v42
    259: %t41 = select i1 %cmp, <64 x i1> %v42, <64 x i1> %t43

/external/deqp-deps/glslang/Test/
  spv.swizzleInversion.frag
    9: vec2 v42 = interpolateAtSample(in4.zx, 1);

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/autohvx/
  float-cost.ll
    63: %v42 = getelementptr inbounds i8, i8* %a0, i32 %v41
    64: %v43 = load i8, i8* %v42, align 1, !tbaa !0