/external/llvm-project/clang/test/Sema/ |
D | asm-goto.cpp | 18 asm goto("jmp %l0;" ::::Later); in test1()
                   22 Later: in test1()
|
/external/llvm/lib/Transforms/Scalar/ |
D | DeadStoreElimination.cpp | 301 static OverwriteResult isOverwrite(const MemoryLocation &Later, in isOverwrite() argument
                               309 if (Later.Size == MemoryLocation::UnknownSize || in isOverwrite()
                               314 const Value *P2 = Later.Ptr->stripPointerCasts(); in isOverwrite()
                               320 if (Later.Size >= Earlier.Size) in isOverwrite()
                               338 if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size) in isOverwrite()
                               369 Later.Size >= Earlier.Size && in isOverwrite()
                               370 uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size) in isOverwrite()
                               380 int64_t(LaterOff + Later.Size) >= EarlierOff) { in isOverwrite()
                               386 LaterOff << ", " << int64_t(LaterOff + Later.Size) << ")\n"); in isOverwrite()
                               392 int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + Later.Size; in isOverwrite()
                               [all …]
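The isOverwrite() matches above revolve around one interval-containment test: the later store makes the earlier store dead when the earlier byte range lies entirely inside the later one. A minimal standalone sketch of that check, assuming signed offsets from a common base pointer (illustrative helper, not the LLVM implementation):

    #include <cstdint>

    // A later store over [LaterOff, LaterOff + LaterSize) completely overwrites
    // an earlier store over [EarlierOff, EarlierOff + EarlierSize) when it starts
    // no later and ends no earlier -- the same containment test the snippet
    // expresses as "uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size".
    static bool completelyOverwrites(int64_t EarlierOff, uint64_t EarlierSize,
                                     int64_t LaterOff, uint64_t LaterSize) {
      return LaterOff <= EarlierOff &&
             uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize;
    }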
|
/external/llvm-project/llvm/lib/Transforms/Scalar/ |
D | EarlyCSE.cpp | 807 const ParseMemoryInst &Later);
                   840 const IntrinsicInst *Later) { in isNonTargetIntrinsicMatch() argument
                   890 if (PtrOp(Earlier) != PtrOp(Later)) in isNonTargetIntrinsicMatch()
                   894 Intrinsic::ID IDL = Later->getIntrinsicID(); in isNonTargetIntrinsicMatch()
                   903 if (MaskOp(Earlier) == MaskOp(Later) && ThruOp(Earlier) == ThruOp(Later)) in isNonTargetIntrinsicMatch()
                   905 if (!isa<UndefValue>(ThruOp(Later))) in isNonTargetIntrinsicMatch()
                   907 return IsSubmask(MaskOp(Later), MaskOp(Earlier)); in isNonTargetIntrinsicMatch()
                   914 if (!IsSubmask(MaskOp(Later), MaskOp(Earlier))) in isNonTargetIntrinsicMatch()
                   916 return isa<UndefValue>(ThruOp(Later)); in isNonTargetIntrinsicMatch()
                   922 return IsSubmask(MaskOp(Later), MaskOp(Earlier)); in isNonTargetIntrinsicMatch()
                   [all …]
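The isNonTargetIntrinsicMatch() matches hinge on a "submask" relation between masked loads and stores: the later access may only enable lanes that the earlier access already covered, and the lanes it skips must come back as undef pass-through. A toy sketch of that relation over fixed-width lane masks, assuming plain bitsets rather than LLVM's per-lane Constant vectors:

    #include <bitset>
    #include <cstddef>

    // LaterMask is a submask of EarlierMask when no lane is enabled only in the
    // later mask, i.e. every lane the later access touches was already read or
    // written by the earlier instruction.
    template <std::size_t Lanes>
    static bool isSubmask(const std::bitset<Lanes> &LaterMask,
                          const std::bitset<Lanes> &EarlierMask) {
      return (LaterMask & ~EarlierMask).none();
    }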
|
D | DeadStoreElimination.cpp | 418 static OverwriteResult isMaskedStoreOverwrite(const Instruction *Later, in isMaskedStoreOverwrite() argument
                               421 const auto *IIL = dyn_cast<IntrinsicInst>(Later); in isMaskedStoreOverwrite()
                               450 const MemoryLocation &Later, const MemoryLocation &Earlier, in isOverwrite() argument
                               456 if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise()) { in isOverwrite()
                               462 const uint64_t LaterSize = Later.Size.getValue(); in isOverwrite()
                               466 const Value *P2 = Later.Ptr->stripPointerCasts(); in isOverwrite()
                               550 static OverwriteResult isPartialOverwrite(const MemoryLocation &Later, in isPartialOverwrite() argument
                               555 const uint64_t LaterSize = Later.Size.getValue(); in isPartialOverwrite()
                               1260 StoreInst *Earlier, StoreInst *Later, int64_t InstWriteOffset, in tryToMergePartialOverlappingStores() argument
                               1265 Later && isa<ConstantInt>(Later->getValueOperand()) && in tryToMergePartialOverlappingStores()
                               [all …]
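tryToMergePartialOverlappingStores() builds on the same bookkeeping: when a later constant store partially overlaps an earlier constant store and nothing touches that memory in between, the pair can be folded into a single store whose value is the earlier constant with the overlapping bytes replaced by the later one. A hedged sketch of that splice over raw byte buffers, assuming the later range lies inside the earlier one (the real code works on APInt values and handles offsets and endianness more carefully):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Splice the later store's bytes into the earlier store's value at the
    // matching offset; the caller would then emit one store of the merged value
    // in place of the overlapping pair.
    static void spliceLaterIntoEarlier(uint8_t *EarlierBytes, int64_t EarlierOff,
                                       const uint8_t *LaterBytes, int64_t LaterOff,
                                       std::size_t LaterSize) {
      std::memcpy(EarlierBytes + (LaterOff - EarlierOff), LaterBytes, LaterSize);
    }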
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Scalar/ |
D | DeadStoreElimination.cpp | 355 static OverwriteResult isOverwrite(const MemoryLocation &Later, in isOverwrite() argument
                               366 if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise()) in isOverwrite()
                               369 const uint64_t LaterSize = Later.Size.getValue(); in isOverwrite()
                               373 const Value *P2 = Later.Ptr->stripPointerCasts(); in isOverwrite()
                               1236 auto *Later = dyn_cast<StoreInst>(Inst); in eliminateDeadStores() local
                               1240 Later && isa<ConstantInt>(Later->getValueOperand()) && in eliminateDeadStores()
                               1242 Later->getValueOperand()->getType()) && in eliminateDeadStores()
                               1243 memoryIsNotModifiedBetween(Earlier, Later, AA)) { in eliminateDeadStores()
                               1257 cast<ConstantInt>(Later->getValueOperand())->getValue(); in eliminateDeadStores()
|
/external/llvm-project/llvm/test/DebugInfo/PDB/ |
D | pdb-longname-truncation.test | 1 ; For now just verify that this doesn't cause an error. Later we pdbdump can
|
/external/clang/test/SemaCXX/ |
D | cxx98-compat.cpp | 271 void *Table[] = {&&DirectJump, &&Later}; in JumpDiagnostics()
                       275 Later: // expected-note {{possible target of indirect goto statement}} in JumpDiagnostics()
|
/external/llvm-project/clang/test/SemaCXX/ |
D | cxx98-compat.cpp | 279 void *Table[] = {&&DirectJump, &&Later}; in JumpDiagnostics()
                       283 Later: // expected-note {{possible target of indirect goto statement}} in JumpDiagnostics()
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | code_placement_no_header_change.ll | 7 ; Later backedge1 and backedge2 is rotated before loop header.
|
/external/python/pyopenssl/doc/ |
D | introduction.rst | 14 Later it was maintained by `Jean-Paul Calderone`_ who among other things managed to make pyOpenSSL …
|
/external/llvm-project/polly/test/Isl/CodeGen/ |
D | multiple-scops-in-a-row.ll | 7 ; We explicitly check here that the second scop is not code generated. Later
|
/external/squashfs-tools/RELEASE-READMEs/ |
D | README-4.0 | 11 Later releases will probably contain kernel patches supporting 4.0
|
/external/iptables/extensions/ |
D | libxt_DNAT.man | 25 Later Kernels (>= 2.6.11-rc1) don't have the ability to NAT to multiple ranges
|
D | libxt_SNAT.man | 26 Later Kernels (>= 2.6.11-rc1) don't have the ability to NAT to multiple ranges
|
/external/llvm-project/llvm/test/CodeGen/AVR/ |
D | rust-avr-bug-112.ll | 7 ; instruction. Later in the pipeline, the frame index
|
/external/llvm-project/llvm/test/CodeGen/PowerPC/ |
D | load-shift-combine.ll | 5 ; load. Later the pre-increment load is combined with a subsequent SRL to
|
/external/llvm/test/CodeGen/PowerPC/ |
D | load-shift-combine.ll | 5 ; load. Later the pre-increment load is combined with a subsequent SRL to
|
/external/perfetto/docs/concepts/ |
D | detached-mode.md | 115 ### Start in detached ring-buffer mode. Later stop and save the ring buffer
                       150 ### Start tracing with a time limit. Later re-attach and wait for the end
|
/external/llvm-project/llvm/test/Instrumentation/AddressSanitizer/ |
D | global_metadata_windows.ll | 5 ; FIXME: Later we can use this to instrument linkonce odr string literals.
|
/external/llvm-project/llvm/test/DebugInfo/X86/ |
D | live-debug-variables.ll | 7 ; scope of each. Later during register allocation the live interval is split
|
/external/llvm-project/llvm/test/Transforms/AddDiscriminators/ |
D | memcpy-discriminator.ll | 35 ; Later on, mem2reg successfully promotes the new alloca slices to registers,
|
/external/tensorflow/tensorflow/lite/micro/examples/image_recognition_experimental/ |
D | README.md | 54 recommended version is 6 and up. Later versions can be downloaded from
|
/external/llvm-project/lldb/cmake/modules/ |
D | LLDBFramework.cmake | 72 # into a staging directory. Later we can copy over the entire folder.
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Utils/ |
D | LoopUnrollAndJam.cpp | 613 SmallVector<Value *, 4> &Later, in checkDependencies() argument
                           619 for (Value *J : Later) { in checkDependencies()
|
/external/llvm-project/llvm/lib/Transforms/Utils/ |
D | LoopUnrollAndJam.cpp | 786 for (auto *Later : CurrentLoadsAndStores) { in checkDependencies() local
                           787 if (!checkDependency(Earlier, Later, LoopDepth, CommonLoopDepth, false, in checkDependencies()
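Both LoopUnrollAndJam hits come from the same legality sweep: before jamming the inner-loop bodies, every memory access from an earlier unrolled copy is checked pairwise against every access from a later copy, and the transform is abandoned on the first pair that cannot be proven safe. A schematic sketch of that sweep, where MemOp and the mayConflict predicate are stand-ins for llvm::Instruction and the DependenceInfo query used by the real pass:

    #include <functional>
    #include <vector>

    struct MemOp; // hypothetical stand-in for an llvm::Instruction

    // Return false as soon as any earlier/later pair may conflict, mirroring
    // the early-exit structure of checkDependencies() in the snippets above.
    static bool checkDependencies(
        const std::vector<MemOp *> &Earlier, const std::vector<MemOp *> &Later,
        const std::function<bool(MemOp *, MemOp *)> &mayConflict) {
      for (MemOp *I : Earlier)
        for (MemOp *J : Later)
          if (mayConflict(I, J))
            return false; // a possible dependence forbids unroll-and-jam
      return true;
    }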
|