/external/llvm/test/CodeGen/ARM/
  rotate.ll
      4: ;; select ROTL. Make sure if generates the basic VSHL/VSHR.
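The test above checks that a vector rotate, which NEON has no single instruction for, is expanded into a shift-left/shift-right pair. A minimal scalar sketch of the pattern being exercised (the rotl64 helper below is illustrative, not taken from rotate.ll):

    #include <cstdint>

    // Rotate-left written as the classic shift/or pattern; on NEON-style
    // targets this is expected to be selected as a VSHL plus a VSHR (plus an
    // OR), since there is no vector rotate instruction.
    static inline uint64_t rotl64(uint64_t x, unsigned n) {
      n &= 63;                                   // keep the count in range
      return (x << n) | (x >> ((64 - n) & 63));  // avoid an undefined shift when n == 0
    }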
/external/llvm/test/CodeGen/AMDGPU/
  trunc.ll
     25: ; SI: v_mov_b32_e32 [[VSHL:v[0-9]+]], [[SHL]]
     26: ; SI: buffer_store_dword [[VSHL]],
/external/llvm/include/llvm/IR/
  IntrinsicsARM.td
    293: // represented by intrinsics in LLVM, and even the basic VSHL variable shift
    294: // operation cannot be safely translated to LLVM's shift operators. VSHL can
    301: // shifts, where the constant is replicated. For consistency with VSHL (and
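The comment above refers to the register form of NEON VSHL, whose per-lane shift amount is a signed value: positive counts shift left, negative counts shift right. A plain LLVM shl cannot express that (and out-of-range LLVM shifts are undefined), which is why the operation stays an intrinsic. A hedged sketch at the C level, assuming a toolchain with arm_neon.h; the mixed_shift helper and its shift counts are illustrative:

    #include <arm_neon.h>

    // Register-form VSHL: each lane of 'amounts' is a *signed* shift count, so
    // one instruction can shift some lanes left and others right. This is the
    // behaviour that keeps VSHL as a target intrinsic rather than a plain shl.
    int32x4_t mixed_shift(int32x4_t values) {
      const int32_t counts[4] = { 3, -2, 0, -31 };  // negative counts shift right
      int32x4_t amounts = vld1q_s32(counts);
      return vshlq_s32(values, amounts);
    }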
/external/llvm/lib/Target/X86/
  X86IntrinsicsInfo.h
    292: X86_INTRINSIC_DATA(avx2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0),
    293: X86_INTRINSIC_DATA(avx2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0),
    294: X86_INTRINSIC_DATA(avx2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0),
   1152: X86_INTRINSIC_DATA(avx512_mask_psll_d, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
   1153: X86_INTRINSIC_DATA(avx512_mask_psll_q, INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
   1704: X86_INTRINSIC_DATA(sse2_psll_d, INTR_TYPE_2OP, X86ISD::VSHL, 0),
   1705: X86_INTRINSIC_DATA(sse2_psll_q, INTR_TYPE_2OP, X86ISD::VSHL, 0),
   1706: X86_INTRINSIC_DATA(sse2_psll_w, INTR_TYPE_2OP, X86ISD::VSHL, 0),
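The table above maps the whole psll family (SSE2, AVX2, masked AVX-512) onto the single X86ISD::VSHL node. For reference, the C-level entry point behind the sse2_psll_d row is _mm_sll_epi32, where the count comes from the low 64 bits of an XMM register; a small sketch, with shift_lanes_left as an illustrative name:

    #include <emmintrin.h>

    // _mm_sll_epi32 compiles to PSLLD: all four 32-bit lanes are shifted left
    // by the same count, taken from the low 64 bits of 'count'. This is the
    // form X86IntrinsicsInfo.h maps onto X86ISD::VSHL above.
    __m128i shift_lanes_left(__m128i v, unsigned amount) {
      __m128i count = _mm_cvtsi32_si128(static_cast<int>(amount));
      return _mm_sll_epi32(v, count);
    }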
  X86ISelLowering.h
    311: VSHL, VSRL, VSRA,   [enumerator]
  X86InstrFragmentsSIMD.td
    214: def X86vshl : SDNode<"X86ISD::VSHL",
  X86ISelLowering.cpp
  16063: case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;   [in getTargetVShiftNode()]
  18485: unsigned X86OpcV = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHL :   [in LowerScalarVariableShift()]
  18752: Opc = X86ISD::VSHL;   [in LowerShift()]
  20508: case X86ISD::VSHL: return "X86ISD::VSHL";   [in getTargetNodeName()]
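LowerShift/LowerScalarVariableShift above choose X86ISD::VSHL when every lane is shifted left by the same non-constant amount, which matches the count-in-register PSLL form. A hedged sketch of source that has that shape, using the Clang/GCC vector extension; the v4si typedef and shift_uniform name are illustrative:

    // Every lane is shifted by the same run-time amount, so the vector shift
    // can be lowered to a single count-in-register PSLLD (X86ISD::VSHL)
    // rather than per-lane variable shifts.
    typedef int v4si __attribute__((vector_size(16)));

    v4si shift_uniform(v4si v, int amount) {
      return v << amount;   // uniform count, but not a compile-time constant
    }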
/external/llvm/lib/Target/AArch64/
  AArch64ISelLowering.h
    113: VSHL,   [enumerator]
  AArch64ISelLowering.cpp
    888: case AArch64ISD::VSHL: return "AArch64ISD::VSHL";   [in getTargetNodeName()]
   5809: if ((ShiftOpc != AArch64ISD::VSHL && ShiftOpc != AArch64ISD::VLSHR))   [in tryLowerToSLI()]
   6548: return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0),   [in LowerVectorSRA_SRL_SHL()]
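tryLowerToSLI above looks for an OR that combines a left-shifted value with the untouched low bits of another value, which is what the AArch64 SLI (shift left and insert) instruction computes. A scalar sketch of that pattern, assuming this reading of the matched DAG is right; shift_left_insert is an illustrative helper:

    #include <cstdint>

    // Shift-left-and-insert: the upper bits come from 'src' shifted left by
    // 'c', while the low 'c' bits of 'dst' are kept. An OR of a VSHL with a
    // value masked in this shape can be folded into a single SLI.
    static inline uint32_t shift_left_insert(uint32_t dst, uint32_t src, unsigned c) {
      const uint32_t keep_low = (UINT32_C(1) << c) - 1;  // assumes 0 < c < 32
      return (dst & keep_low) | (src << c);
    }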
  AArch64InstrInfo.td
    221: def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
/external/llvm/lib/Target/ARM/
  ARMISelLowering.h
    108: VSHL, // ...left   [enumerator]
  ARMScheduleSwift.td
    545: "VSHL", "VSHR(s|u)", "VSHLL", "VQSHL", "VQSHLU", "VBIF",
  ARMISelLowering.cpp
   1167: case ARMISD::VSHL: return "ARMISD::VSHL";   [in getTargetNodeName()]
   3997: Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,   [in LowerFCOPYSIGN()]
   4005: Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,   [in LowerFCOPYSIGN()]
  10174: VShiftOpc = ARMISD::VSHL;   [in PerformIntrinsicCombine()]
  10317: return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0),   [in PerformShiftCombine()]
  10746: U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))   [in isVectorLoadExtDesirable()]
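LowerFCOPYSIGN above uses ARMISD::VSHL to build a sign-bit mask and then bit-selects between the two operands. A scalar sketch of the underlying bit trick, not the DAG code itself; copysign_bits is an illustrative name:

    #include <cstdint>
    #include <cstring>

    // copysign via integer bit operations: a mask with only the sign bit set
    // comes from a shift left by 31 (the vector version uses VSHL), then the
    // magnitude bits of 'mag' are merged with the sign bit of 'sgn'.
    float copysign_bits(float mag, float sgn) {
      uint32_t m, s;
      std::memcpy(&m, &mag, sizeof m);
      std::memcpy(&s, &sgn, sizeof s);
      const uint32_t sign_mask = UINT32_C(1) << 31;
      const uint32_t r = (m & ~sign_mask) | (s & sign_mask);
      float out;
      std::memcpy(&out, &r, sizeof out);
      return out;
    }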
  ARMInstrNEON.td
    517: def NEONvshl : SDNode<"ARMISD::VSHL", SDTARMVSH>;
   5413: // VSHL : Vector Shift
   5421: // VSHL : Vector Shift Left (Immediate)
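ARMInstrNEON.td above defines both forms of VSHL: the register form, where per-lane counts come from a second vector, and the immediate form, where a constant count is encoded in the instruction. At the arm_neon.h level these correspond to vshl_* and vshl_n_*; a brief sketch with illustrative helper names:

    #include <arm_neon.h>

    // Register form: per-lane signed shift counts come from a second vector.
    int32x2_t shift_by_vector(int32x2_t v, int32x2_t counts) {
      return vshl_s32(v, counts);     // VSHL (register)
    }

    // Immediate form: a single constant count is encoded in the instruction.
    int32x2_t shift_by_three(int32x2_t v) {
      return vshl_n_s32(v, 3);        // VSHL (immediate)
    }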
/external/clang/include/clang/Basic/
  arm_neon.td
    597: def VSHL : SInst<"vshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
/external/valgrind/none/tests/arm/
  neon128.stdout.exp
    429: ---- VSHL (register) ----
   2544: ---- VSHL (immediate) ----

  neon64.stdout.exp
    618: ---- VSHL (register) ----
   3824: ---- VSHL (immediate) ----