/external/clang/test/Sema/
D | conversion-64-32.c |
      13  int4 v127 = a; // no warning.  [in test1(), local]
      14  return v127;  [in test1()]
|
/external/llvm-project/clang/test/Sema/
D | conversion-64-32.c |
      13  int4 v127 = a; // no warning.  [in test1(), local]
      14  return v127;  [in test1()]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/
D | reserve-vgpr-for-sgpr-spill.ll |
      36   ,~{v120},~{v121},~{v122},~{v123},~{v124},~{v125},~{v126},~{v127},~{v128},~{v129}
      81   ,~{v120},~{v121},~{v122},~{v123},~{v124},~{v125},~{v126},~{v127},~{v128},~{v129}
      123  ,~{v120},~{v121},~{v122},~{v123},~{v124},~{v125},~{v126},~{v127},~{v128},~{v129}
      172  ,~{v120},~{v121},~{v122},~{v123},~{v124},~{v125},~{v126},~{v127},~{v128},~{v129}
|
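The AMDGPU matches above and below all pin VGPRs the same way: an inline-asm call whose clobber list names the registers, so the allocator must treat them as live at that point. A minimal sketch of the idiom, assuming an amdgcn target; the kernel name is hypothetical, while the "~{v127}" constraint string is the one the matches show:

    ; Empty inline asm that clobbers v127: no code is emitted, but the
    ; register allocator must keep v127 free across this point. The tests
    ; use long runs of such clobbers to create VGPR pressure or cap occupancy.
    define amdgpu_kernel void @clobber_v127() {
    entry:
      call void asm sideeffect "", "~{v127}"()
      ret void
    }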
D | occupancy-levels.ll |
      202  call void asm sideeffect "", "~{v127}" ()
|
D | attr-amdgpu-flat-work-group-size-vgpr-limit.ll |
      137  %v127 = call i32 asm sideeffect "; def $0", "=v"()
      393  call void asm sideeffect "; use $0", "v"(i32 %v127)
|
/external/llvm-project/llvm/test/CodeGen/Hexagon/
D | swp-epilog-phi7.ll |
      154  %v127 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v122, <16 x i32> %v104, i32 1)
      160  %v133 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v127, <16 x i32> %v115)
      164  %v137 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v126, <16 x i32> %v127)
      170  …%v143 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v141, <16 x i32> %v127, i32 …
|
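The swp-epilog-phi7.ll matches chain HVX intrinsics: valignbi byte-aligns a pair of single (16 x i32) vectors and vcombine glues two single vectors into a double (32 x i32) vector. A minimal sketch under those signatures; the function and value names are hypothetical, and the intrinsic declarations are taken from the matches above:

    declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32)
    declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>)

    ; Align %hi:%lo by one byte, then pair the result with %lo into a
    ; double vector, mirroring the valignbi -> vcombine chain in the test.
    define <32 x i32> @align_and_combine(<16 x i32> %hi, <16 x i32> %lo) {
      %al = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %hi, <16 x i32> %lo, i32 1)
      %dv = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %al, <16 x i32> %lo)
      ret <32 x i32> %dv
    }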
D | reg-scav-imp-use-dbl-vec.ll |
      228  %v127 = phi i32 [ 0, %b9 ], [ %v124, %b12 ]
      229  %v128 = add i32 %v127, %v33
      234  %v133 = add i32 %v127, %v30
      239  %v138 = add i32 %v127, %v26
|
D | regscavengerbug.ll |
      213  %v127 = load double, double* %v126, align 8, !tbaa !6
      214  %v128 = fcmp olt double %v127, %v113
      215  %v129 = select i1 %v128, double %v127, double %v113
      216  %v130 = fcmp ogt double %v127, %v113
      217  %v131 = select i1 %v130, double %v127, double %v113
|
D | aggr-licm.ll |
      147  %v127 = and i64 %v125, 1
      149  %v129 = shl i64 %v127, %v128
|
D | bug14859-split-const-block-addr.ll |
      296  %v127 = phi i8* [ %v6, %b37 ], [ %v126, %b38 ]
      297  %v128 = ptrtoint i8* %v127 to i32
      302  %v130 = phi i8* [ %v127, %b39 ], [ %v115, %b36 ]
|
D | registerscavenger-fail1.ll |
      238  %v127 = fadd double %v126, %v114
      239  %v128 = fadd double %v127, undef
|
D | late_instr.ll |
      167  %v127 = phi <16 x i32>* [ %v126, %b7 ], [ %v1, %b0 ]
      168  %v128 = getelementptr inbounds <16 x i32>, <16 x i32>* %v127, i32 -1
|
D | reg-scavengebug-5.ll |
      151  %v127 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v121, <16 x i32> %v110)
      153  …%v129 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v126, <32 x i32> %v127, …
      157  …%v133 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v130, <32 x i32> %v127, …
|
D | SUnit-boundary-prob.ll |
      39   %v7 = phi i32* [ %v1, %b1 ], [ %v127, %b2 ]
      167  %v127 = getelementptr i32, i32* %v7, i32 128
|
D | cext-ice.ll |
      261  %v127 = inttoptr i32 %v126 to i32*
      262  store volatile i32 %a1, i32* %v127, align 4, !tbaa !0
|
D | large-number-of-preds.ll |
      207  %v127 = getelementptr inbounds float, float* %a1, i32 7
      208  %v128 = load float, float* %v127, align 4, !tbaa !0
|
D | regscavenger_fail_hwloop.ll |
      153  %v127 = lshr i32 %v123, 31
      154  %v128 = add i32 %v127, 255
|
D | lsr-post-inc-cross-use-offsets.ll |
      190  %v127 = add nsw i32 %v126, -129
      191  %v128 = getelementptr inbounds i8, i8* %v1, i32 %v127
|
D | expand-vstorerw-undef2.ll |
      188  …%v127 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v126, <32 x i32> %v123, i32…
      189  %v128 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.128B(<32 x i32> undef, <32 x i32> %v127) #2
|
D | opt-glob-addrs-003.ll |
      232  %v127 = select i1 %v126, i16 %v121, i16 %v125
      243  %v138 = icmp slt i16 %v127, %v137
|
D | hrc-stack-coloring.ll |
      170  %v127 = load double, double* %v13, align 8
      172  %v129 = fmul double %v127, %v128
|
D | concat-vectors-legalize.ll |
      184  %v127 = mul nsw i32 %v65, %v7
      191  %v134 = sub i32 %v127, %v133
|
/external/llvm-project/llvm/test/Transforms/PGOProfile/
D | chr.ll |
      2148  %v127 = add i64 %v126, %v125
      2149  %v128 = add i64 %v127, %v126
      2150  %v129 = add i64 %v128, %v127
|