Searched refs: v82 (Results 1 – 25 of 40) sorted by relevance

/external/freetype/src/sfnt/
pngshim.c:82 typedef unsigned short v82 __attribute__(( vector_size( 16 ) )); in premultiply_data()
93 v82 s, s0, s1, a; in premultiply_data()
97 v82 n0x80 = { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 }; in premultiply_data()
98 v82 n0xFF = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; in premultiply_data()
99 v82 n8 = { 8, 8, 8, 8, 8, 8, 8, 8 }; in premultiply_data()
101 v82 ma = { 1, 1, 3, 3, 5, 5, 7, 7 }; in premultiply_data()
102 v82 o1 = { 0, 0xFF, 0, 0xFF, 0, 0xFF, 0, 0xFF }; in premultiply_data()
103 v82 m0 = { 1, 0, 3, 2, 5, 4, 7, 6 }; in premultiply_data()
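The pngshim.c hit above is FreeType's PNG alpha-premultiply routine; there v82 is not a value but a SIMD helper type, eight unsigned 16-bit lanes packed into 16 bytes via the GCC/Clang vector_size extension. Below is a minimal, standalone sketch of that extension only; it shows how such a vector type behaves (elementwise arithmetic, array-style lane access) and is not FreeType's premultiply_data() code. The names and constants are invented for the example.

/* Illustrative only: an 8 x u16 vector type of the same shape as FreeType's v82. */
#include <stdio.h>

typedef unsigned short v8u16 __attribute__(( vector_size( 16 ) ));

int main( void )
{
  v8u16  a = { 10, 20, 30, 40, 50, 60, 70, 80 };
  v8u16  b = {  1,  2,  3,  4,  5,  6,  7,  8 };
  v8u16  sum  = a + b;           /* elementwise add across all 8 lanes      */
  v8u16  half = sum >> 1;        /* elementwise shift; the scalar is broadcast */

  for ( int i = 0; i < 8; i++ )  /* lanes can be read like array elements   */
    printf( "%d ", half[i] );
  printf( "\n" );
  return 0;
}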
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/
swp-phi-dep1.ll:29 %v13 = phi i32 [ 1, %b1 ], [ %v82, %b5 ]
108 %v82 = add i32 %v13, 1
112 %v86 = icmp eq i32 %v82, %v0
swp-phi.ll:74 %v60 = phi float [ %v23, %b0 ], [ %v82, %b1 ]
96 %v82 = fadd float %v60, %v81
130 store float %v82, float* %v6, align 4
swp-disable-Os.ll:96 %v82 = load i8, i8* %v81, align 1, !tbaa !0
97 %v83 = zext i8 %v82 to i32
swp-conv3x3-nested.ll:102 %v52 = phi <16 x i32> [ %v14, %b3 ], [ %v82, %b4 ]
132 %v82 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v78, <16 x i32> %v81) #2
133 %v83 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v82, <16 x i32> %v52, i32 1) #2
registerscavenger-fail1.ll:154 %v64 = phi i32 [ %v82, %b23 ], [ 0, %b19 ], [ %v61, %b22 ]
172 %v82 = add i32 %v64, 4
173 %v83 = icmp eq i32 %v82, %v4
regscavenger_fail_hwloop.ll:107 %v82 = zext i8 %v81 to i32
128 %v103 = add nsw i32 %v85, %v82
142 %v116 = add nsw i32 %v91, %v82
163 %v136 = add i32 %v82, 16
registerscav-missing-spill-slot.ll:118 %v39 = phi double [ %v82, %b19 ], [ 1.000000e+11, %b16 ], [ %v34, %b18 ]
161 %v82 = select i1 undef, double %v76, double %v71
late_instr.ll:109 %v82 = phi <16 x i32>* [ %v62, %b3 ], [ %v92, %b4 ]
119 %v92 = getelementptr inbounds <16 x i32>, <16 x i32>* %v82, i32 1
120 %v93 = load <16 x i32>, <16 x i32>* %v82, align 64, !tbaa !0
v60-vecpred-spill.ll:37 %v17 = phi <16 x i32> [ %v3, %b2 ], [ %v82, %b3 ]
102 …%v82 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v70, <16 x i32> %v81, <16 x i32> …
108 %v85 = phi <16 x i32> [ %v82, %b3 ]
bug14859-iv-cleanup-lpad.ll:161 %v82 = load i32, i32* %v1, align 4, !tbaa !0
162 %v83 = icmp eq i32 %v82, 0
regscav-wrong-super-sub-regs.ll:157 %v82 = load i32*, i32** @g4, align 4, !tbaa !7
158 %v83 = getelementptr inbounds i32, i32* %v82, i32 %v58
aggr-licm.ll:102 %v82 = and i64 %v76, 1
105 %v85 = shl i64 %v82, %v84
bug19076.ll:181 %v82 = load %s.18*, %s.18** %v3, align 4
182 %v83 = getelementptr inbounds %s.18, %s.18* %v82, i32 0, i32 1
opt-glob-addrs-001.ll:173 %v82 = phi i32 [ 0, %b0 ], [ %v79, %b20 ]
181 %v90 = add i32 %v89, %v82
vect-downscale.ll:134 %v82 = load <16 x i32>, <16 x i32>* %v68, align 64, !tbaa !2
135 …v83 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v14, <16 x i32> %v81, <16 x i32> %v82)
reg-scavengebug.ll:115 %v82 = load <16 x i32>, <16 x i32>* %v81, align 64, !tbaa !0
116 %v83 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v82, <16 x i32> %v78, i32 4)
135 %v101 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v100, <16 x i32> %v82, i32 4)
v6vect-dh1.ll:112 %v82 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v80)
113 %v83 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %v81, <16 x i32> %v82, i32 %a2)
swp-epilog-phi5.ll:150 %v82 = phi i8* [ %v79, %b6 ], [ %v45, %b3 ]
160 store i8 %v90, i8* %v82, align 1
fltnvjump.ll:132 %v82 = fdiv float %v80, %v81
133 %v83 = call float @f7(float %v82, i32 0)
/external/llvm/test/CodeGen/X86/
machine-cp.ll:103 %v82 = and <16 x i32> undef, %v81
104 %v83 = xor <16 x i32> %v19, %v82
pseudo_cmov_lower.ll:138 <4 x double> %v82, <4 x double> %v83,
183 %t83 = fsub <4 x double> %v83, %v82
195 %t81 = select i1 %cmp, <4 x double> %v82, <4 x double> %t83
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
pseudo_cmov_lower.ll:138 <4 x double> %v82, <4 x double> %v83,
183 %t83 = fsub <4 x double> %v83, %v82
195 %t81 = select i1 %cmp, <4 x double> %v82, <4 x double> %t83
machine-cp.ll:179 …%v82 = and <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 …
180 %v83 = xor <16 x i32> %v19, %v82
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopVectorize/Hexagon/
minimum-vf.ll:109 %v82 = add nsw i32 %v77, %v81
110 %v83 = trunc i32 %v82 to i16
