
Searched refs:ld0 (Results 1 – 25 of 38) sorted by relevance
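In these results %ld0 is the conventional name for the first scalar load in an LLVM IR test sequence, usually followed by %ld1, %ld2, and so on. A minimal sketch of the idiom (function and pointer names are illustrative, not taken from any of the files below):

    define i64 @load_pair(i64* %p0, i64* %p1) {
      %ld0 = load i64, i64* %p0, align 8   ; first load in the sequence
      %ld1 = load i64, i64* %p1, align 8   ; second load
      %sum = add i64 %ld0, %ld1            ; combined use that the tests inspect
      ret i64 %sum
    }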


/external/llvm-project/llvm/test/Transforms/LoopVectorize/SystemZ/
mem-interleaving-costs-02.ll
14 ; CHECK: LV: Found an estimated cost of 6 for VF 4 For instruction: %ld0 = load i16
27 %ld0 = load i16, i16* %ptr0
34 %a1 = add i16 %ld0, %ld1
52 ; CHECK: LV: Found an estimated cost of 5 for VF 16 For instruction: %ld0 = load i8
62 %ld0 = load i8, i8* %ptr0
64 store i8 %ld0, i8* %dstptr
78 ; CHECK: LV: Found an estimated cost of 6 for VF 2 For instruction: %ld0 = load i8
91 %ld0 = load i8, i8* %ptr0
98 %a1 = add i8 %ld0, %ld1
118 ; CHECK: LV: Found an estimated cost of 7 for VF 2 For instruction: %ld0 = load i8
[all …]
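The "LV: Found an estimated cost ..." CHECK lines match the loop vectorizer's cost-model debug output. A sketch of the kind of RUN line that produces it, assuming a debug build of opt; the exact triple, CPU, and pass flags in the real test may differ:

    ; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z14 -loop-vectorize \
    ; RUN:   -debug-only=loop-vectorize -disable-output < %s 2>&1 | FileCheck %s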
/external/llvm/test/CodeGen/AMDGPU/
promote-alloca-array-allocation.ll
11 %ld0 = load i32, i32 addrspace(1)* %in, align 4
12 %arrayidx1 = getelementptr inbounds i32, i32* %stack, i32 %ld0
33 %ld0 = load i32, i32 addrspace(1)* %in, align 4
34 %arrayidx1 = getelementptr inbounds i32, i32* %stack, i32 %ld0
/external/llvm-project/llvm/test/CodeGen/AMDGPU/
promote-alloca-array-allocation.ll
11 %ld0 = load i32, i32 addrspace(1)* %in, align 4
12 %arrayidx1 = getelementptr inbounds i32, i32* %stack, i32 %ld0
33 %ld0 = load i32, i32 addrspace(1)* %in, align 4
34 %arrayidx1 = getelementptr inbounds i32, i32* %stack, i32 %ld0
flat-for-global-subtarget-feature.ll
44 %ld0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(5)* %out.addr, align 4
46 %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %ld0, i32 0
cluster_stores.ll
26 %ld0 = load i32, i32* %la0
40 store i32 %ld0, i32* %sa0
71 %ld0 = load i32, i32* %la0
86 store i32 %ld0, i32* %sa0
/external/clang/test/CXX/expr/
p9.cpp
6 __typeof__(0 + 0.0L) ld0; variable
7 long double &ldr = ld0;
/external/llvm-project/clang/test/CXX/expr/
p9.cpp
6 __typeof__(0 + 0.0L) ld0; variable
7 long double &ldr = ld0;
/external/llvm-project/llvm/test/CodeGen/X86/
merge_store_duplicated_loads.ll
18 %ld0 = load double, double* %ld, align 8, !tbaa !2
25 store double %ld0, double* %st, align 8, !tbaa !2
27 store double %ld0, double* %st_idx2, align 8, !tbaa !2
load-partial.ll
25 %ld0 = load float, float* %p0, align 4
28 %r0 = insertelement <4 x float> undef, float %ld0, i32 0
51 %ld0 = load float, float* %p0, align 4
54 %r0 = insertelement <4 x float> undef, float %ld0, i32 0
74 %ld0 = load float, float* %p0, align 4
77 %r0 = insertelement <8 x float> undef, float %ld0, i32 0
100 %ld0 = load float, float* %p0, align 4
103 %r0 = insertelement <8 x float> undef, float %ld0, i32 0
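The load-partial.ll hits all follow one idiom: scalar loads inserted lane by lane into an undef vector, which the X86 backend should combine into a single partial vector load. A minimal sketch of the two-lane case (names are illustrative):

    define <4 x float> @load_two_lanes(float* %p) {
      %p1 = getelementptr float, float* %p, i64 1
      %ld0 = load float, float* %p, align 4
      %ld1 = load float, float* %p1, align 4
      %r0 = insertelement <4 x float> undef, float %ld0, i32 0
      %r1 = insertelement <4 x float> %r0, float %ld1, i32 1
      ret <4 x float> %r1
    }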
dag-merge-fast-accesses.ll
72 %ld0 = load i64, i64* %idx0, align 4
78 store i64 %ld0, i64* %idx4, align 4
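dag-merge-fast-accesses.ll checks the DAG combiner's merging of adjacent memory accesses, which is done only when the target reports fast unaligned access. A minimal sketch of the adjacent-store shape being merged (names are illustrative):

    ; two adjacent i64 stores that a target with fast unaligned access
    ; may merge into a single 16-byte store
    define void @store_pair(i64* %p, i64 %a, i64 %b) {
      %p1 = getelementptr i64, i64* %p, i64 1
      store i64 %a, i64* %p, align 4
      store i64 %b, i64* %p1, align 4
      ret void
    }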
fshl.ll
459 %ld0 = load i8, i8 *%p
461 %res = call i8 @llvm.fshl.i8(i8 %ld1, i8 %ld0, i8 8)
478 %ld0 = load i16, i16 *%p0
480 %res = call i16 @llvm.fshl.i16(i16 %ld1, i16 %ld0, i16 8)
497 %ld0 = load i32, i32 *%p0
499 %res = call i32 @llvm.fshl.i32(i32 %ld1, i32 %ld0, i32 8)
517 %ld0 = load i64, i64 *%p0
519 %res = call i64 @llvm.fshl.i64(i64 %ld1, i64 %ld0, i64 24)
fshr.ll
458 %ld0 = load i8, i8 *%p
460 %res = call i8 @llvm.fshr.i8(i8 %ld1, i8 %ld0, i8 8)
477 %ld0 = load i16, i16 *%p0
479 %res = call i16 @llvm.fshr.i16(i16 %ld1, i16 %ld0, i16 8)
496 %ld0 = load i32, i32 *%p0
498 %res = call i32 @llvm.fshr.i32(i32 %ld1, i32 %ld0, i32 8)
516 %ld0 = load i64, i64 *%p0
518 %res = call i64 @llvm.fshr.i64(i64 %ld1, i64 %ld0, i64 24)
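The fshl.ll and fshr.ll matches test funnel shifts: llvm.fshl concatenates its first two operands, shifts the pair left by the amount modulo the bit width, and returns the high half, while llvm.fshr shifts right and returns the low half. A sketch with a byte-aligned shift like the ones above (function name is illustrative):

    declare i16 @llvm.fshl.i16(i16, i16, i16)

    define i16 @funnel_byte(i16 %hi, i16 %lo) {
      ; result is (%hi << 8) | (%lo >> 8): the low byte of %hi becomes
      ; the high byte, and the high byte of %lo becomes the low byte
      %r = call i16 @llvm.fshl.i16(i16 %hi, i16 %lo, i16 8)
      ret i16 %r
    }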
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/
bswap.ll
35 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
37 %bswap0 = call i64 @llvm.bswap.i64(i64 %ld0)
66 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
70 %bswap0 = call i64 @llvm.bswap.i64(i64 %ld0)
88 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
92 %bswap0 = call i32 @llvm.bswap.i32(i32 %ld0)
119 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
127 %bswap0 = call i32 @llvm.bswap.i32(i32 %ld0)
153 …%ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
161 %bswap0 = call i16 @llvm.bswap.i16(i16 %ld0)
[all …]
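These SLP tests, like the uitofp, bitreverse, sitofp, ctlz, cttz, ctpop, and fround ones below, share one shape: an operation applied lane by lane to loads from consecutive elements of a global array, which the SLP vectorizer should collapse into a single vector operation. A hedged sketch of the vector form the 4 x i64 bswap case expects (function and parameter names are illustrative):

    declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>)

    define void @bswap_4x64_vectorized([4 x i64]* %src, [4 x i64]* %dst) {
      %p = bitcast [4 x i64]* %src to <4 x i64>*
      %v = load <4 x i64>, <4 x i64>* %p, align 8
      %b = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %v)
      %q = bitcast [4 x i64]* %dst to <4 x i64>*
      store <4 x i64> %b, <4 x i64>* %q, align 8
      ret void
    }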
uitofp.ll
30 …%ld0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 0), align 64
32 %cvt0 = uitofp i64 %ld0 to double
55 …%ld0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 0), align 64
59 %cvt0 = uitofp i64 %ld0 to double
101 …%ld0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 0), align 64
109 %cvt0 = uitofp i64 %ld0 to double
159 …%ld0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 0), align…
161 %cvt0 = uitofp i32 %ld0 to double
190 …%ld0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 0), align…
194 %cvt0 = uitofp i32 %ld0 to double
[all …]
bitreverse.ll
31 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
33 %bitreverse0 = call i64 @llvm.bitreverse.i64(i64 %ld0)
62 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
66 %bitreverse0 = call i64 @llvm.bitreverse.i64(i64 %ld0)
84 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
88 %bitreverse0 = call i32 @llvm.bitreverse.i32(i32 %ld0)
121 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
129 %bitreverse0 = call i32 @llvm.bitreverse.i32(i32 %ld0)
155 …%ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
163 %bitreverse0 = call i16 @llvm.bitreverse.i16(i16 %ld0)
[all …]
sitofp.ll
54 …%ld0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 0), align 64
56 %cvt0 = sitofp i64 %ld0 to double
106 …%ld0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 0), align 64
110 %cvt0 = sitofp i64 %ld0 to double
191 …%ld0 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 0), align 64
199 %cvt0 = sitofp i64 %ld0 to double
228 …%ld0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 0), align…
230 %cvt0 = sitofp i32 %ld0 to double
259 …%ld0 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 0), align…
263 %cvt0 = sitofp i32 %ld0 to double
[all …]
ctlz.ll
37 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
39 %ctlz0 = call i64 @llvm.ctlz.i64(i64 %ld0, i1 0)
62 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
66 %ctlz0 = call i64 @llvm.ctlz.i64(i64 %ld0, i1 0)
120 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
124 %ctlz0 = call i32 @llvm.ctlz.i32(i32 %ld0, i1 0)
178 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
186 %ctlz0 = call i32 @llvm.ctlz.i32(i32 %ld0, i1 0)
212 …%ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
220 %ctlz0 = call i16 @llvm.ctlz.i16(i16 %ld0, i1 0)
[all …]
cttz.ll
37 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
39 %cttz0 = call i64 @llvm.cttz.i64(i64 %ld0, i1 0)
62 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
66 %cttz0 = call i64 @llvm.cttz.i64(i64 %ld0, i1 0)
120 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
124 %cttz0 = call i32 @llvm.cttz.i32(i32 %ld0, i1 0)
178 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
186 %cttz0 = call i32 @llvm.cttz.i32(i32 %ld0, i1 0)
212 …%ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
220 %cttz0 = call i16 @llvm.cttz.i16(i16 %ld0, i1 0)
[all …]
ctpop.ll
48 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
50 %ctpop0 = call i64 @llvm.ctpop.i64(i64 %ld0)
103 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
107 %ctpop0 = call i64 @llvm.ctpop.i64(i64 %ld0)
155 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
159 %ctpop0 = call i32 @llvm.ctpop.i32(i32 %ld0)
240 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
248 %ctpop0 = call i32 @llvm.ctpop.i32(i32 %ld0)
274 …%ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
282 %ctpop0 = call i16 @llvm.ctpop.i16(i16 %ld0)
[all …]
fround.ll
50 …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
52 %ceil0 = call double @llvm.ceil.f64(double %ld0)
90 …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
94 %ceil0 = call double @llvm.ceil.f64(double %ld0)
172 …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
180 %ceil0 = call double @llvm.ceil.f64(double %ld0)
221 …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
223 %floor0 = call double @llvm.floor.f64(double %ld0)
261 …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
265 %floor0 = call double @llvm.floor.f64(double %ld0)
[all …]
/external/llvm/test/Transforms/SLPVectorizer/X86/
bswap.ll
35 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
37 %bswap0 = call i64 @llvm.bswap.i64(i64 %ld0)
66 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
70 %bswap0 = call i64 @llvm.bswap.i64(i64 %ld0)
88 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
92 %bswap0 = call i32 @llvm.bswap.i32(i32 %ld0)
119 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
127 %bswap0 = call i32 @llvm.bswap.i32(i32 %ld0)
153 …%ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
161 %bswap0 = call i16 @llvm.bswap.i16(i16 %ld0)
[all …]
ctpop.ll
29 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
31 %ctpop0 = call i64 @llvm.ctpop.i64(i64 %ld0)
54 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
58 %ctpop0 = call i64 @llvm.ctpop.i64(i64 %ld0)
76 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
80 %ctpop0 = call i32 @llvm.ctpop.i32(i32 %ld0)
107 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
115 %ctpop0 = call i32 @llvm.ctpop.i32(i32 %ld0)
141 …%ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
149 %ctpop0 = call i16 @llvm.ctpop.i16(i16 %ld0)
[all …]
fround.ll
49 …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
51 %ceil0 = call double @llvm.ceil.f64(double %ld0)
89 …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
93 %ceil0 = call double @llvm.ceil.f64(double %ld0)
171 …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
179 %ceil0 = call double @llvm.ceil.f64(double %ld0)
220 …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
222 %floor0 = call double @llvm.floor.f64(double %ld0)
260 …%ld0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
264 %floor0 = call double @llvm.floor.f64(double %ld0)
[all …]
cttz.ll
36 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i32 0, i64 0), align 8
38 %cttz0 = call i64 @llvm.cttz.i64(i64 %ld0, i1 0)
61 %ld0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @src64, i64 0, i64 0), align 4
65 %cttz0 = call i64 @llvm.cttz.i64(i64 %ld0, i1 0)
92 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 4
96 %cttz0 = call i32 @llvm.cttz.i32(i32 %ld0, i1 0)
135 %ld0 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 0), align 2
143 %cttz0 = call i32 @llvm.cttz.i32(i32 %ld0, i1 0)
190 …%ld0 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 0), align…
198 %cttz0 = call i16 @llvm.cttz.i16(i16 %ld0, i1 0)
[all …]
/external/llvm/test/CodeGen/X86/
dag-merge-fast-accesses.ll
80 %ld0 = load i64, i64* %idx0, align 4
86 store i64 %ld0, i64* %idx4, align 4
