/external/swiftshader/third_party/llvm-7.0/llvm/test/Instrumentation/MemorySanitizer/ |
D | vector_shift.ll |
    13   declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>)
    14   declare <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16>, <8 x i16>)
    74   %0 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %x, <8 x i16> %y)
    84   ; CHECK: = call <8 x i16> @llvm.x86.sse2.psrl.w(
    86   ; CHECK: call <8 x i16> @llvm.x86.sse2.psrl.w(
    92   %0 = tail call <32 x i16> @llvm.x86.avx512.psrl.w.512(<32 x i16> %x, <8 x i16> %y)
    102  ; CHECK: = call <32 x i16> @llvm.x86.avx512.psrl.w.512(
    104  ; CHECK: call <32 x i16> @llvm.x86.avx512.psrl.w.512(
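Note: the SSE2/AVX-512 psrl intrinsics take the shift count from the low 64 bits of the second vector operand, which is why MemorySanitizer has to propagate shadow for both operands. A minimal sketch of the kind of IR this test exercises (the wrapper name and attribute placement are illustrative, not copied from the file):

    declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>)

    ; Logical right shift of each i16 lane of %x by the count held in the
    ; low 64 bits of %y; sanitize_memory requests MSan instrumentation.
    ; Hypothetical wrapper, not the test's own function.
    define <8 x i16> @example_psrlw(<8 x i16> %x, <8 x i16> %y) sanitize_memory {
      %r = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %x, <8 x i16> %y)
      ret <8 x i16> %r
    }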
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | vec_shuffle-11.ll |
    5   …%tmp131 = call <2 x i64> @llvm.x86.sse2.psrl.dq( <2 x i64> < i64 -1, i64 -1 >, i32 96 ) ; …
    11  declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32)
|
D | vec_shift2.ll |
    5   …%tmp2 = tail call <8 x i16> @llvm.x86.sse2.psrl.w( <8 x i16> %tmp1, <8 x i16> bitcast (<4 x i32> <…
    16  declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
|
D | 2007-05-17-ShuffleISelBug.ll |
    6   declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>)
    15  …%tmp838 = tail call <8 x i16> @llvm.x86.sse2.psrl.w( <8 x i16> %tmp832, <8 x i16> < i16 8, i16 und…
|
D | vec_shift.ll |
    28  …%tmp9 = tail call <2 x i64> @llvm.x86.sse2.psrl.q( <2 x i64> %b1, <2 x i64> %c ) nounwind readnone…
    32  declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
|
/external/llvm/test/Transforms/InstCombine/ |
D | x86-vector-shifts.ll |
    573  %1 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %v, <8 x i16> zeroinitializer)
    582  …%1 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %v, <8 x i16> <i16 15, i16 0, i16 0, i16 …
    590  …%1 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %v, <8 x i16> <i16 15, i16 15, i16 15, i1…
    598  …%1 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %v, <8 x i16> <i16 64, i16 0, i16 0, i16 …
    606  %1 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %v, <4 x i32> zeroinitializer)
    615  …%1 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %v, <4 x i32> <i32 15, i32 0, i32 9999, i…
    623  …%1 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %v, <4 x i32> <i32 15, i32 15, i32 15, i3…
    631  …%1 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %v, <4 x i32> <i32 64, i32 0, i32 9999, i…
    639  %1 = tail call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %v, <2 x i64> zeroinitializer)
    648  %1 = tail call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %v, <2 x i64> <i64 15, i64 9999>)
    [all …]
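The shift-count vectors above (zeroinitializer, in-range constants such as 15, and out-of-range values such as 64 or 9999) are the cases InstCombine knows how to simplify, since only the low 64 bits of the count operand matter. A hedged sketch of the pattern under test, with an illustrative function name (the real CHECK lines live in x86-vector-shifts.ll):

    ; With a constant in-range count InstCombine can rewrite the target
    ; intrinsic as a generic vector lshr; a count of 16 or more for psrl.w
    ; lets the call fold to zero.  Sketch only.
    define <8 x i16> @example_fold(<8 x i16> %v) {
      %r = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %v, <8 x i16> <i16 3, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>)
      ret <8 x i16> %r
    }

    declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>)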
|
/external/llvm/test/Instrumentation/MemorySanitizer/ |
D | vector_shift.ll |
    11  declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>)
    54  %0 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %x, <8 x i16> %y)
    64  ; CHECK: = call <8 x i16> @llvm.x86.sse2.psrl.w(
    66  ; CHECK: call <8 x i16> @llvm.x86.sse2.psrl.w(
|
/external/llvm/test/CodeGen/X86/ |
D | 2007-05-17-ShuffleISelBug.ll |
    7   declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>)
    16  …%tmp838 = tail call <8 x i16> @llvm.x86.sse2.psrl.w( <8 x i16> %tmp832, <8 x i16> < i16 8, i16 und…
|
D | mmx-fold-load.ll |
    245  %u = tail call x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx %t, x86_mmx %v)
    250  declare x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx, x86_mmx)
    261  %u = tail call x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx %t, x86_mmx %v)
    266  declare x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx, x86_mmx)
    277  %u = tail call x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx %t, x86_mmx %v)
    282  declare x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx, x86_mmx)
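The MMX forms operate on the opaque x86_mmx type for both the value and the count, so plain integers have to be moved in and out with bitcasts. A minimal sketch under that assumption (illustrative wrapper name; mmx-fold-load.ll additionally loads one operand from memory to check load folding):

    declare x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx, x86_mmx)

    ; Shift four packed i16 lanes right by the count in %amt.
    ; Hypothetical wrapper, not taken from the test.
    define i64 @example_mmx_psrlw(i64 %val, i64 %amt) {
      %t = bitcast i64 %val to x86_mmx
      %c = bitcast i64 %amt to x86_mmx
      %u = tail call x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx %t, x86_mmx %c)
      %r = bitcast x86_mmx %u to i64
      ret i64 %r
    }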
|
D | vec_shift2.ll |
    20  …%tmp2 = tail call <8 x i16> @llvm.x86.sse2.psrl.w( <8 x i16> %tmp1, <8 x i16> bitcast (<4 x i32> <…
    44  declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
|
D | vec_shift.ll |
    57  …%tmp9 = tail call <2 x i64> @llvm.x86.sse2.psrl.q( <2 x i64> %b1, <2 x i64> %c ) nounwind readnone…
    61  declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
|
D | sse2-intrinsics-x86-upgrade.ll |
    20  %res = call <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
    23  declare <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64>, i32) nounwind readnone
    41  %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
    44  declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone
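These "-upgrade" tests keep calls to intrinsics that no longer exist in current IR: llvm.x86.sse2.psrl.dq and its .bs variant are whole-register shifts that the IR reader auto-upgrades (judging from the operands above and the i32 96 in vec_shuffle-11.ll, the .dq form appears to take its count in bits and the .bs form in bytes), so the test presumably only checks the final psrldq lowering. A sketch of the legacy form, with an illustrative wrapper around the lines quoted above:

    ; Legacy whole-register right shift; on a current LLVM this call is
    ; rewritten by the auto-upgrader when the file is parsed, so only the
    ; generated psrldq instruction can be checked.  Sketch only.
    declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone

    define <2 x i64> @example_psrl_dq(<2 x i64> %a0) {
      %res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8)
      ret <2 x i64> %res
    }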
|
D | avx2-intrinsics-x86-upgrade.ll |
    64  %res = call <4 x i64> @llvm.x86.avx2.psrl.dq.bs(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
    67  declare <4 x i64> @llvm.x86.avx2.psrl.dq.bs(<4 x i64>, i32) nounwind readnone
    86  %res = call <4 x i64> @llvm.x86.avx2.psrl.dq(<4 x i64> %a0, i32 8) ; <<4 x i64>> [#uses=1]
    89  declare <4 x i64> @llvm.x86.avx2.psrl.dq(<4 x i64>, i32) nounwind readnone
|
D | stack-folding-mmx.ll |
    554  %2 = call x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx %a, x86_mmx %b) nounwind readnone
    557  declare x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx, x86_mmx) nounwind readnone
    563  %2 = call x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx %a, x86_mmx %b) nounwind readnone
    566  declare x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx, x86_mmx) nounwind readnone
    572  %2 = call x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx %a, x86_mmx %b) nounwind readnone
    575  declare x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx, x86_mmx) nounwind readnone
|
D | sse2-intrinsics-x86.ll |
    930  %res = call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %a0, <4 x i32> %a1) ; <<4 x i32>> [#uses=1]
    933  declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) nounwind readnone
    946  %res = call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %a0, <2 x i64> %a1) ; <<2 x i64>> [#uses=1]
    949  declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
    962  %res = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
    965  declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
|
D | avx512bw-intrinsics-upgrade.ll |
    147  declare <8 x i64> @llvm.x86.avx512.psrl.dq.512(<8 x i64>, i32)
    163  %res = call <8 x i64> @llvm.x86.avx512.psrl.dq.512(<8 x i64> %x0, i32 8)
    164  %res1 = call <8 x i64> @llvm.x86.avx512.psrl.dq.512(<8 x i64> %x0, i32 4)
    181  %res = call <8 x i64> @llvm.x86.avx512.psrl.dq.512(<8 x i64> %x0, i32 4)
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | 2007-05-17-ShuffleISelBug.ll |
    7   declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>)
    16  …%tmp838 = tail call <8 x i16> @llvm.x86.sse2.psrl.w( <8 x i16> %tmp832, <8 x i16> < i16 8, i16 und…
|
D | vec_shift2.ll |
    20  …%tmp2 = tail call <8 x i16> @llvm.x86.sse2.psrl.w( <8 x i16> %tmp1, <8 x i16> bitcast (<4 x i32> <…
    44  declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
|
D | vec_shift.ll |
    57  …%tmp9 = tail call <2 x i64> @llvm.x86.sse2.psrl.q( <2 x i64> %b1, <2 x i64> %c ) nounwind readnone…
    61  declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
|
D | mmx-fold-load.ll |
    511  %u = tail call x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx %t, x86_mmx %v)
    516  declare x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx, x86_mmx)
    543  %u = tail call x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx %t, x86_mmx %v)
    548  declare x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx, x86_mmx)
    575  %u = tail call x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx %t, x86_mmx %v)
    580  declare x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx, x86_mmx)
|
D | stack-folding-mmx.ll |
    554  %2 = call x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx %a, x86_mmx %b) nounwind readnone
    557  declare x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx, x86_mmx) nounwind readnone
    563  %2 = call x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx %a, x86_mmx %b) nounwind readnone
    566  declare x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx, x86_mmx) nounwind readnone
    572  %2 = call x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx %a, x86_mmx %b) nounwind readnone
    575  declare x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx, x86_mmx) nounwind readnone
|
D | avx2-intrinsics-x86-upgrade.ll |
    114  %res = call <4 x i64> @llvm.x86.avx2.psrl.dq.bs(<4 x i64> %a0, i32 7) ; <<4 x i64>> [#uses=1]
    117  declare <4 x i64> @llvm.x86.avx2.psrl.dq.bs(<4 x i64>, i32) nounwind readnone
    146  %res = call <4 x i64> @llvm.x86.avx2.psrl.dq(<4 x i64> %a0, i32 8) ; <<4 x i64>> [#uses=1]
    149  declare <4 x i64> @llvm.x86.avx2.psrl.dq(<4 x i64>, i32) nounwind readnone
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/X86/ |
D | x86-vector-shifts.ll |
    1001  %1 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %v, <8 x i16> zeroinitializer)
    1010  …%1 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %v, <8 x i16> <i16 15, i16 0, i16 0, i16 …
    1018  …%1 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %v, <8 x i16> <i16 15, i16 15, i16 15, i1…
    1026  …%1 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %v, <8 x i16> <i16 64, i16 0, i16 0, i16 …
    1034  %1 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %v, <4 x i32> zeroinitializer)
    1043  …%1 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %v, <4 x i32> <i32 15, i32 0, i32 9999, i…
    1051  …%1 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %v, <4 x i32> <i32 15, i32 15, i32 15, i3…
    1059  …%1 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %v, <4 x i32> <i32 64, i32 0, i32 9999, i…
    1067  %1 = tail call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %v, <2 x i64> zeroinitializer)
    1076  %1 = tail call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %v, <2 x i64> <i64 15, i64 9999>)
    [all …]
|
/external/swiftshader/third_party/subzero/src/ |
D | IceAssemblerX86Base.h |
    396  void psrl(Type Ty, XmmRegister dst, XmmRegister src);
    397  void psrl(Type Ty, XmmRegister dst, const Address &src);
    398  void psrl(Type Ty, XmmRegister dst, const Immediate &src);
|
/external/swiftshader/third_party/LLVM/include/llvm/ |
D | Intrinsics.gen |
    272  x86_mmx_psrl_d, // llvm.x86.mmx.psrl.d
    273  x86_mmx_psrl_q, // llvm.x86.mmx.psrl.q
    274  x86_mmx_psrl_w, // llvm.x86.mmx.psrl.w
    366  x86_sse2_psrl_d, // llvm.x86.sse2.psrl.d
    367  x86_sse2_psrl_dq, // llvm.x86.sse2.psrl.dq
    368  x86_sse2_psrl_dq_bs, // llvm.x86.sse2.psrl.dq.bs
    369  x86_sse2_psrl_q, // llvm.x86.sse2.psrl.q
    370  x86_sse2_psrl_w, // llvm.x86.sse2.psrl.w
    799  "llvm.x86.mmx.psrl.d",
    800  "llvm.x86.mmx.psrl.q",
    [all …]
|