/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | spill-q.ll |
    34  %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    74  %tmp5 = fadd <4 x float> %tmp4, %ld7
|
/external/llvm-project/llvm/test/CodeGen/Thumb2/ |
D | thumb2-spill-q.ll |
    34  %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    74  %tmp5 = fadd <4 x float> %tmp4, %ld7
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | bswap.ll |
    126  %ld7 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 7), align 2
    134  %bswap7 = call i32 @llvm.bswap.i32(i32 %ld7)
    160  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
    168  %bswap7 = call i16 @llvm.bswap.i16(i16 %ld7)
    203  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), ali…
    219  %bswap7 = call i16 @llvm.bswap.i16(i16 %ld7)
|
D | bitreverse.ll |
    128  %ld7 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 7), align 2
    136  %bitreverse7 = call i32 @llvm.bitreverse.i32(i32 %ld7)
    162  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
    170  %bitreverse7 = call i16 @llvm.bitreverse.i16(i16 %ld7)
    211  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), ali…
    227  %bitreverse7 = call i16 @llvm.bitreverse.i16(i16 %ld7)
    269  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    285  %bitreverse7 = call i8 @llvm.bitreverse.i8(i8 %ld7)
    342  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    374  %bitreverse7 = call i8 @llvm.bitreverse.i8(i8 %ld7)
|
D | ctlz.ll |
    185  %ld7 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 7), align 2
    193  %ctlz7 = call i32 @llvm.ctlz.i32(i32 %ld7, i1 0)
    219  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
    227  %ctlz7 = call i16 @llvm.ctlz.i16(i16 %ld7, i1 0)
    262  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), ali…
    278  %ctlz7 = call i16 @llvm.ctlz.i16(i16 %ld7, i1 0)
    320  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    336  %ctlz7 = call i8 @llvm.ctlz.i8(i8 %ld7, i1 0)
    387  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    419  %ctlz7 = call i8 @llvm.ctlz.i8(i8 %ld7, i1 0)
    [all …]
|
D | cttz.ll |
    185  %ld7 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 7), align 2
    193  %cttz7 = call i32 @llvm.cttz.i32(i32 %ld7, i1 0)
    219  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
    227  %cttz7 = call i16 @llvm.cttz.i16(i16 %ld7, i1 0)
    262  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), ali…
    278  %cttz7 = call i16 @llvm.cttz.i16(i16 %ld7, i1 0)
    320  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    336  %cttz7 = call i8 @llvm.cttz.i8(i8 %ld7, i1 0)
    387  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    419  %cttz7 = call i8 @llvm.cttz.i8(i8 %ld7, i1 0)
    [all …]
|
D | uitofp.ll |
    108  %ld7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 7), align 8
    116  %cvt7 = uitofp i64 %ld7 to double
    255  …%ld7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 7), align…
    263  %cvt7 = uitofp i32 %ld7 to double
    381  …%ld7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16, i32 0, i64 7), align…
    389  %cvt7 = uitofp i16 %ld7 to double
    528  %ld7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 0, i64 7), align 1
    536  %cvt7 = uitofp i8 %ld7 to double
    703  %ld7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 7), align 8
    711  %cvt7 = uitofp i64 %ld7 to float
    [all …]
|
D | ctpop.ll |
    247  %ld7 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 7), align 2
    255  %ctpop7 = call i32 @llvm.ctpop.i32(i32 %ld7)
    281  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
    289  %ctpop7 = call i16 @llvm.ctpop.i16(i16 %ld7)
    324  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), ali…
    340  %ctpop7 = call i16 @llvm.ctpop.i16(i16 %ld7)
    382  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    398  %ctpop7 = call i8 @llvm.ctpop.i8(i8 %ld7)
    449  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    481  %ctpop7 = call i8 @llvm.ctpop.i8(i8 %ld7)
|
D | sitofp.ll |
    198  %ld7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 7), align 8
    206  %cvt7 = sitofp i64 %ld7 to double
    324  …%ld7 = load i32, i32* getelementptr inbounds ([16 x i32], [16 x i32]* @src32, i32 0, i64 7), align…
    332  %cvt7 = sitofp i32 %ld7 to double
    450  …%ld7 = load i16, i16* getelementptr inbounds ([32 x i16], [32 x i16]* @src16, i32 0, i64 7), align…
    458  %cvt7 = sitofp i16 %ld7 to double
    576  %ld7 = load i8, i8* getelementptr inbounds ([64 x i8], [64 x i8]* @src8, i32 0, i64 7), align 1
    584  %cvt7 = sitofp i8 %ld7 to double
    751  %ld7 = load i64, i64* getelementptr inbounds ([8 x i64], [8 x i64]* @src64, i32 0, i64 7), align 8
    759  %cvt7 = sitofp i64 %ld7 to float
    [all …]
|
D | fround.ll |
    179  …%ld7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
    187  %ceil7 = call double @llvm.ceil.f64(double %ld7)
    350  …%ld7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
    358  %floor7 = call double @llvm.floor.f64(double %ld7)
    521  …%ld7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
    529  %nearbyint7 = call double @llvm.nearbyint.f64(double %ld7)
    692  …%ld7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
    700  %rint7 = call double @llvm.rint.f64(double %ld7)
    863  …%ld7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
    871  %trunc7 = call double @llvm.trunc.f64(double %ld7)
    [all …]
|
D | bad-reduction.ll |
    243  %ld7 = load i8, i8* %g7, align 1
    252  %z7 = zext i8 %ld7 to i64
    330  %ld7 = load i8, i8* %g7, align 1
    339  %z7 = zext i8 %ld7 to i64
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | bswap.ll |
    126  %ld7 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 7), align 2
    134  %bswap7 = call i32 @llvm.bswap.i32(i32 %ld7)
    160  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
    168  %bswap7 = call i16 @llvm.bswap.i16(i16 %ld7)
    203  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), ali…
    219  %bswap7 = call i16 @llvm.bswap.i16(i16 %ld7)
|
D | ctpop.ll |
    114  %ld7 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 7), align 2
    122  %ctpop7 = call i32 @llvm.ctpop.i32(i32 %ld7)
    148  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
    156  %ctpop7 = call i16 @llvm.ctpop.i16(i16 %ld7)
    191  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), ali…
    207  %ctpop7 = call i16 @llvm.ctpop.i16(i16 %ld7)
    249  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    265  %ctpop7 = call i8 @llvm.ctpop.i8(i8 %ld7)
    310  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    342  %ctpop7 = call i8 @llvm.ctpop.i8(i8 %ld7)
|
D | cttz.ll |
    142  %ld7 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 7), align 2
    150  %cttz7 = call i32 @llvm.cttz.i32(i32 %ld7, i1 0)
    197  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
    205  %cttz7 = call i16 @llvm.cttz.i16(i16 %ld7, i1 0)
    276  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), ali…
    292  %cttz7 = call i16 @llvm.cttz.i16(i16 %ld7, i1 0)
    379  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    395  %cttz7 = call i8 @llvm.cttz.i8(i8 %ld7, i1 0)
    530  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    562  %cttz7 = call i8 @llvm.cttz.i8(i8 %ld7, i1 0)
    [all …]
|
D | ctlz.ll |
    142  %ld7 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 7), align 2
    150  %ctlz7 = call i32 @llvm.ctlz.i32(i32 %ld7, i1 0)
    197  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
    205  %ctlz7 = call i16 @llvm.ctlz.i16(i16 %ld7, i1 0)
    276  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), ali…
    292  %ctlz7 = call i16 @llvm.ctlz.i16(i16 %ld7, i1 0)
    379  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    395  %ctlz7 = call i8 @llvm.ctlz.i8(i8 %ld7, i1 0)
    530  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    562  %ctlz7 = call i8 @llvm.ctlz.i8(i8 %ld7, i1 0)
    [all …]
|
D | fround.ll |
    178  …%ld7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
    186  %ceil7 = call double @llvm.ceil.f64(double %ld7)
    349  …%ld7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
    357  %floor7 = call double @llvm.floor.f64(double %ld7)
    520  …%ld7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
    528  %nearbyint7 = call double @llvm.nearbyint.f64(double %ld7)
    691  …%ld7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
    699  %rint7 = call double @llvm.rint.f64(double %ld7)
    862  …%ld7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src64, i32 0, i64…
    870  %trunc7 = call double @llvm.trunc.f64(double %ld7)
    [all …]
|
D | bitreverse.ll |
    206  %ld7 = load i32, i32* getelementptr inbounds ([8 x i32], [8 x i32]* @src32, i32 0, i64 7), align 2
    214  %bitreverse7 = call i32 @llvm.bitreverse.i32(i32 %ld7)
    273  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), align…
    281  %bitreverse7 = call i16 @llvm.bitreverse.i16(i16 %ld7)
    364  …%ld7 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @src16, i16 0, i64 7), ali…
    380  %bitreverse7 = call i16 @llvm.bitreverse.i16(i16 %ld7)
    479  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    495  %bitreverse7 = call i8 @llvm.bitreverse.i8(i8 %ld7)
    648  %ld7 = load i8, i8* getelementptr inbounds ([32 x i8], [32 x i8]* @src8, i8 0, i64 7), align 1
    680  %bitreverse7 = call i8 @llvm.bitreverse.i8(i8 %ld7)
|
/external/llvm/test/CodeGen/ARM/ |
D | spill-q.ll |
    34  %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    74  %tmp5 = fadd <4 x float> %tmp4, %ld7
|
/external/llvm/test/CodeGen/Thumb2/ |
D | thumb2-spill-q.ll |
    34  %ld7 = call <4 x float> @llvm.arm.neon.vld1.v4f32.p0i8(i8* undef, i32 1) nounwind
    74  %tmp5 = fadd <4 x float> %tmp4, %ld7
|
/external/llvm-project/libcxx/test/std/numerics/numbers/ |
D | specialize.pass.cpp |
    65  [[maybe_unused]] long double ld7{std::numbers::ln10_v<long double>};  in tests() local
|
D | defined.pass.cpp |
    64  [[maybe_unused]] const long double* ld7{&std::numbers::ln10_v<long double>};  in tests() local
|
/external/hyphenation-patterns/nn/ |
D | hyph-nn.pat.txt |
    12532  ld7øy
|
/external/hyphenation-patterns/nb/ |
D | hyph-nb.pat.txt |
    12532  ld7øy
|