/external/llvm/lib/Target/AMDGPU/ |
D | SILoadStoreOptimizer.cpp |
      66  unsigned Offset1,
     131  unsigned Offset1, in offsetsCanBeCombined() argument
     135  if (Offset0 == Offset1) in offsetsCanBeCombined()
     139  if ((Offset0 % Size != 0) || (Offset1 % Size != 0)) in offsetsCanBeCombined()
     143  unsigned EltOffset1 = Offset1 / Size; in offsetsCanBeCombined()
     182  unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff; in findMatchingDSInst() local
     185  if (offsetsCanBeCombined(Offset0, Offset1, EltSize)) in findMatchingDSInst()
     207  unsigned Offset1 in mergeRead2Pair() local
     211  unsigned NewOffset1 = Offset1 / EltSize; in mergeRead2Pair()
     303  unsigned Offset1 in mergeWrite2Pair() local
     [all …]
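Taken together, the offsetsCanBeCombined() hits above show the precondition for merging two DS offsets: they must differ and both be exact multiples of the element size, so they convert to whole element indices. A minimal standalone C++ sketch of just that precondition (the function name is illustrative, and the encoding-range checks the real pass performs afterwards are elided):

    // Illustrative sketch, not the LLVM pass itself: two DS offsets can only be
    // combined when they differ and are both element-size aligned, giving whole
    // element indices; the real code then also checks the encodable ranges.
    static bool dsOffsetsLookCombinable(unsigned Offset0, unsigned Offset1,
                                        unsigned Size) {
      if (Offset0 == Offset1)
        return false;                                    // identical offsets
      if ((Offset0 % Size != 0) || (Offset1 % Size != 0))
        return false;                                    // not element-aligned
      unsigned EltOffset0 = Offset0 / Size;              // element-granular index
      unsigned EltOffset1 = Offset1 / Size;              // element-granular index
      return EltOffset0 != EltOffset1;
    }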
|
D | AMDGPUInstrInfo.cpp |
      51  int64_t Offset0, int64_t Offset1, in shouldScheduleLoadsNear() argument
      53  assert(Offset1 > Offset0 && in shouldScheduleLoadsNear()
      59  return (NumLoads <= 16 && (Offset1 - Offset0) < 64); in shouldScheduleLoadsNear()
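The shouldScheduleLoadsNear() hits above spell out the AMDGPU clustering heuristic: group at most 16 loads, and only when the two offsets are less than 64 bytes apart. A standalone C++ restatement of that check as a plain free function rather than the TargetInstrInfo override (the name is illustrative):

    #include <cstdint>

    // Illustrative restatement of the distance heuristic shown above: cluster
    // two loads only when the group is small and their offsets are close.
    static bool loadsWorthClustering(int64_t Offset0, int64_t Offset1,
                                     unsigned NumLoads) {
      if (Offset1 <= Offset0)      // the real code asserts Offset1 > Offset0
        return false;
      return NumLoads <= 16 && (Offset1 - Offset0) < 64;
    }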
|
D | AMDGPUInstrInfo.h | 50 int64_t Offset1, int64_t Offset2,
|
D | AMDGPUISelDAGToDAG.cpp |
      91  SDValue &Offset1) const;
     720  SDValue &Offset1) const { in SelectDS64Bit4ByteAligned()
     733  Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8); in SelectDS64Bit4ByteAligned()
     759  Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8); in SelectDS64Bit4ByteAligned()
     776  Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8); in SelectDS64Bit4ByteAligned()
     784  Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8); in SelectDS64Bit4ByteAligned()
|
D | SIInstrInfo.cpp |
      95  int64_t &Offset1) const { in areLoadsFromSameBasePtr()
     128  Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue(); in areLoadsFromSameBasePtr()
     152  Offset1 = Load1Offset->getZExtValue(); in areLoadsFromSameBasePtr()
     186  Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue(); in areLoadsFromSameBasePtr()
     232  uint8_t Offset1 = Offset1Imm->getImm(); in getMemOpBaseRegImmOfs() local
     234  if (Offset1 > Offset0 && Offset1 - Offset0 == 1) { in getMemOpBaseRegImmOfs()
    1344  int64_t Offset0, Offset1; in checkInstOffsetsDoNotOverlap() local
    1347  getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) { in checkInstOffsetsDoNotOverlap()
    1356  offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) { in checkInstOffsetsDoNotOverlap()
|
D | SIInstrInfo.h | 110 int64_t &Offset1,
|
/external/abi-compliance-checker/modules/Internals/ |
D | CallConv.pm |
     290  my $Offset1 = $Offsets[$Num];
     292  my $Class1 = $PreClasses->{$Offset1}{"Class"};
     321  $PostClasses{$Offset1}{"Class"} = $ResClass;
     322  foreach (keys(%{$PreClasses->{$Offset1}{"Elems"}})) {
     323  $PostClasses{$Offset1}{"Elems"}{$Offset1+$_} = $PreClasses->{$Offset1}{"Elems"}{$_};
     326  $PostClasses{$Offset1}{"Elems"}{$Offset2+$_} = $PreClasses->{$Offset2}{"Elems"}{$_};
     332  $PostClasses{$Offset1} = $PreClasses->{$Offset1};
    1228  my $Offset1 = $Offset+$Padding+$MSize{$Pos};
    1229  if($Offset1 % $MaxAlgn != 0)
    1231  $Offset1 += $MaxAlgn - $Offset1 % $MaxAlgn;
     [all …]
|
/external/llvm/lib/Fuzzer/test/ |
D | CustomCrossOverTest.cpp |
      43  size_t Offset1 = 0; in LLVMFuzzerCustomCrossOver() local
      44  size_t Len1 = R() % (Size1 - Offset1); in LLVMFuzzerCustomCrossOver()
      52  memcpy(Out, Data1 + Offset1, Len1); in LLVMFuzzerCustomCrossOver()
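The CustomCrossOverTest.cpp hits show the shape of a custom crossover: pick an offset and length inside the first input, then memcpy that slice to the front of the output. A self-contained C++ sketch of that copy step (Rand stands in for the test's RNG; the function name is illustrative, not libFuzzer's API):

    #include <cstddef>
    #include <cstring>

    // Illustrative sketch of the copy step visible above: take a random-length
    // slice of Data1 starting at Offset1 and place it at the start of Out.
    static size_t copyFirstSlice(const unsigned char *Data1, size_t Size1,
                                 unsigned char *Out, size_t MaxOutSize,
                                 size_t (*Rand)()) {
      size_t Offset1 = 0;
      size_t Len1 = Rand() % (Size1 - Offset1);  // assumes Size1 > 0, as in the test
      if (Len1 > MaxOutSize)
        Len1 = MaxOutSize;                       // keep the copy inside Out
      std::memcpy(Out, Data1 + Offset1, Len1);
      return Len1;
    }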
|
/external/mesa3d/src/gallium/drivers/radeon/ |
D | AMDGPUInstrInfo.cpp |
     188  int64_t Offset1, int64_t Offset2, in shouldScheduleLoadsNear() argument
     190  assert(Offset2 > Offset1 in shouldScheduleLoadsNear()
     195  return (NumLoads < 16 && (Offset2 - Offset1) < 16); in shouldScheduleLoadsNear()
|
D | AMDGPUInstrInfo.h | 110 int64_t Offset1, int64_t Offset2,
|
/external/swiftshader/third_party/LLVM/lib/CodeGen/SelectionDAG/ |
D | ScheduleDAGSDNodes.cpp |
     191  int64_t Offset1, Offset2; in ClusterNeighboringLoads() local
     192  if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) || in ClusterNeighboringLoads()
     193  Offset1 == Offset2) in ClusterNeighboringLoads()
     197  if (O2SMap.insert(std::make_pair(Offset1, Base)).second) in ClusterNeighboringLoads()
     198  Offsets.push_back(Offset1); in ClusterNeighboringLoads()
     201  if (Offset2 < Offset1) in ClusterNeighboringLoads()
|
/external/llvm/lib/CodeGen/SelectionDAG/ |
D | ScheduleDAGSDNodes.cpp |
     225  int64_t Offset1, Offset2; in ClusterNeighboringLoads() local
     226  if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) || in ClusterNeighboringLoads()
     227  Offset1 == Offset2) in ClusterNeighboringLoads()
     231  if (O2SMap.insert(std::make_pair(Offset1, Base)).second) in ClusterNeighboringLoads()
     232  Offsets.push_back(Offset1); in ClusterNeighboringLoads()
     235  if (Offset2 < Offset1) in ClusterNeighboringLoads()
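Both copies of ClusterNeighboringLoads() above do the same bookkeeping: ask the target whether two loads share a base pointer, key each load by its offset in a map (O2SMap), and track the smallest offset so clustering can start from the leading load. A simplified standalone C++ sketch of that grouping, with an int node id standing in for the SDNode* the real code stores (names are illustrative):

    #include <cstdint>
    #include <map>
    #include <vector>

    struct LoadInfo { int NodeId; int64_t Offset; };

    // Illustrative sketch of the offset bookkeeping above: loads that share a
    // base are keyed by offset, duplicate offsets are skipped, and iterating the
    // map yields the nodes in ascending offset order (lowest offset first).
    static std::vector<int> orderLoadsByOffset(const std::vector<LoadInfo> &Loads) {
      std::map<int64_t, int> OffsetToNode;          // O2SMap analogue
      for (const LoadInfo &L : Loads)
        OffsetToNode.emplace(L.Offset, L.NodeId);   // a repeated offset is a no-op
      std::vector<int> Ordered;
      for (const auto &Entry : OffsetToNode)
        Ordered.push_back(Entry.second);
      return Ordered;
    }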
|
/external/swiftshader/third_party/LLVM/lib/Target/X86/ |
D | X86InstrInfo.h |
     309  int64_t &Offset1, int64_t &Offset2) const;
     320  int64_t Offset1, int64_t Offset2,
|
/external/swiftshader/third_party/LLVM/lib/Target/ARM/ |
D | ARMBaseInstrInfo.h |
     152  int64_t &Offset1, int64_t &Offset2)const;
     163  int64_t Offset1, int64_t Offset2,
|
D | ARMBaseInstrInfo.cpp |
    1254  int64_t &Offset1, in areLoadsFromSameBasePtr() argument
    1312  Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue(); in areLoadsFromSameBasePtr()
    1329  int64_t Offset1, int64_t Offset2, in shouldScheduleLoadsNear() argument
    1334  assert(Offset2 > Offset1); in shouldScheduleLoadsNear()
    1336  if ((Offset2 - Offset1) / 8 > 64) in shouldScheduleLoadsNear()
|
/external/swiftshader/third_party/LLVM/include/llvm/Target/ |
D | TargetInstrInfo.h |
     492  int64_t &Offset1, int64_t &Offset2) const { in areLoadsFromSameBasePtr() argument
     505  int64_t Offset1, int64_t Offset2, in shouldScheduleLoadsNear() argument
|
/external/llvm/lib/Target/X86/ |
D | X86InstrInfo.h |
     406  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
     418  int64_t Offset1, int64_t Offset2,
|
/external/llvm/lib/Target/ARM/ |
D | ARMBaseInstrInfo.h |
     213  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
     225  int64_t Offset1, int64_t Offset2,
|
/external/llvm/test/CodeGen/SPARC/ |
D | 64abi.ll |
     505  ; HARD-DAG: std %f6, [%sp+[[Offset1:[0-9]+]]]
     507  ; HARD-DAG: ldx [%sp+[[Offset1]]], %o3
     523  ; HARD: st %f0, [%fp+[[Offset1:[0-9]+]]]
     528  ; HARD: ld [%fp+[[Offset1]]], %f1
|
/external/llvm/include/llvm/Target/ |
D | TargetInstrInfo.h |
    1001  int64_t &Offset1, int64_t &Offset2) const { in areLoadsFromSameBasePtr() argument
    1014  int64_t Offset1, int64_t Offset2, in shouldScheduleLoadsNear() argument
|
/external/swiftshader/third_party/LLVM/lib/Transforms/Scalar/ |
D | MemCpyOptimizer.cpp |
     109  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD); in IsPointerOffset() local
     113  Offset = Offset2-Offset1; in IsPointerOffset()
|
/external/swiftshader/third_party/LLVM/lib/Transforms/IPO/ |
D | MergeFunctions.cpp |
     352  uint64_t Offset1 = TD->getIndexedOffset(GEP1->getPointerOperandType(), in isEquivalentGEP() local
     356  return Offset1 == Offset2; in isEquivalentGEP()
|
/external/llvm/lib/Transforms/Scalar/ |
D | SeparateConstOffsetFromGEP.cpp |
    1246  Value *Offset1 = First->getOperand(1); in swapGEPOperand() local
    1249  Second->setOperand(1, Offset1); in swapGEPOperand()
|
D | MemCpyOptimizer.cpp |
     114  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL); in IsPointerOffset() local
     118  Offset = Offset2-Offset1; in IsPointerOffset()
|
/external/llvm/lib/Target/AArch64/ |
D | AArch64InstrInfo.cpp |
    1781  int64_t Offset1 = FirstLdSt.getOperand(2).getImm(); in shouldClusterMemOps() local
    1782  if (isUnscaledLdSt(FirstOpc) && !scaleOffset(FirstOpc, Offset1)) in shouldClusterMemOps()
    1790  if (Offset1 > 63 || Offset1 < -64) in shouldClusterMemOps()
    1794  assert(Offset1 <= Offset2 && "Caller should have ordered offsets."); in shouldClusterMemOps()
    1795  return Offset1 + 1 == Offset2; in shouldClusterMemOps()
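The shouldClusterMemOps() hits above show the AArch64 pairing rule: after scaling an unscaled offset, the first offset must fit the signed 7-bit pair-immediate range and the second access must sit exactly one scaled unit above it. A standalone C++ restatement of that final check (the name is illustrative; the opcode scaling step is elided):

    #include <cstdint>

    // Illustrative restatement of the check above: offsets are assumed already
    // scaled and ordered (Offset1 <= Offset2), mirroring the assert in the source.
    static bool offsetsFormLdStPair(int64_t Offset1, int64_t Offset2) {
      if (Offset1 > 63 || Offset1 < -64)   // signed 7-bit immediate range
        return false;
      return Offset1 + 1 == Offset2;       // must be consecutive scaled slots
    }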
|