/external/skia/include/private/ |
D | SkNx_neon.h |
      19  AI static float32x4_t emulate_vrndmq_f32(float32x4_t v) {  in emulate_vrndmq_f32()
      24  AI static float32x2_t emulate_vrndm_f32(float32x2_t v) {  in emulate_vrndm_f32()
      33  AI SkNx(float32x2_t vec) : fVec(vec) {}  in SkNx()
      35  AI SkNx() {}  in SkNx()
      36  AI SkNx(float val) : fVec(vdup_n_f32(val)) {}  in SkNx()
      37  AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }  in SkNx()
      39  AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }  in Load()
      40  AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }  in store()
      42  AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {  in Load2()
      48  AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {  in Store2()
      [all …]
|
D | SkNx_sse.h |
      31  AI static __m128 emulate_mm_floor_ps(__m128 v) {  in emulate_mm_floor_ps()
      40  AI SkNx(const __m128& vec) : fVec(vec) {}  in SkNx()
      42  AI SkNx() {}  in SkNx()
      43  AI SkNx(float val) : fVec(_mm_set1_ps(val)) {}  in SkNx()
      44  AI static SkNx Load(const void* ptr) {  in Load()
      47  AI SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}  in SkNx()
      49  AI void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }  in store()
      51  AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {  in Load2()
      57  AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {  in Store2()
      62  AI static void Store3(void* dst, const SkNx& a, const SkNx& b, const SkNx& c) {  in Store3()
      [all …]
|
D | SkNx.h |
      20  #define AI SK_ALWAYS_INLINE  macro
      31  AI SkNx() = default;
      32  AI SkNx(const Half& lo, const Half& hi) : fLo(lo), fHi(hi) {}  in SkNx()
      34  AI SkNx(T v) : fLo(v), fHi(v) {}  in SkNx()
      36  AI SkNx(T a, T b) : fLo(a) , fHi(b) { static_assert(N==2, ""); }  in SkNx()
      37  AI SkNx(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) { static_assert(N==4, ""); }  in SkNx()
      38  AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) {  in SkNx()
      41  AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h,  in SkNx()
      47  AI T operator[](int k) const {
      52  AI static SkNx Load(const void* vptr) {  in Load()
      [all …]
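Note: the SkNx.h matches above hint at the generic fallback design — an SkNx<N,T> is stored as two SkNx<N/2,T> halves (fLo/fHi), recursing down to a scalar base case, while the _neon/_sse headers specialize small N with intrinsics. A minimal, hedged sketch of that pair-of-halves pattern, using invented names (Vec, lo, hi) rather than Skia's actual class:

    #include <cstring>

    template <int N, typename T>
    struct Vec {                       // generic case: a pair of half-width vectors
        Vec<N/2, T> lo, hi;

        Vec() = default;
        Vec(T v) : lo(v), hi(v) {}                                   // splat
        Vec(T a, T b) : lo(a), hi(b) { static_assert(N == 2, ""); }

        T operator[](int k) const { return k < N/2 ? lo[k] : hi[k - N/2]; }

        static Vec Load(const void* ptr) {                           // like SkNx::Load
            Vec v;
            std::memcpy(&v, ptr, sizeof(v));
            return v;
        }
        void store(void* ptr) const { std::memcpy(ptr, this, sizeof(*this)); }
    };

    template <typename T>
    struct Vec<1, T> {                 // scalar base case ends the recursion
        T val;
        Vec() = default;
        Vec(T v) : val(v) {}
        T operator[](int) const { return val; }
    };

    int main() {
        float data[2] = {1.0f, 2.0f};
        auto v = Vec<2, float>::Load(data);
        return v[1] == 2.0f ? 0 : 1;   // sanity check of Load + indexing
    }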
|
/external/skqp/include/private/ |
D | SkNx_neon.h |
      19  AI static float32x4_t emulate_vrndmq_f32(float32x4_t v) {  in emulate_vrndmq_f32()
      24  AI static float32x2_t emulate_vrndm_f32(float32x2_t v) {  in emulate_vrndm_f32()
      33  AI SkNx(float32x2_t vec) : fVec(vec) {}  in SkNx()
      35  AI SkNx() {}  in SkNx()
      36  AI SkNx(float val) : fVec(vdup_n_f32(val)) {}  in SkNx()
      37  AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }  in SkNx()
      39  AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }  in Load()
      40  AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }  in store()
      42  AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {  in Load2()
      48  AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {  in Store2()
      [all …]
|
D | SkNx_sse.h |
      31  AI static __m128 emulate_mm_floor_ps(__m128 v) {  in emulate_mm_floor_ps()
      40  AI SkNx(const __m128& vec) : fVec(vec) {}  in SkNx()
      42  AI SkNx() {}  in SkNx()
      43  AI SkNx(float val) : fVec(_mm_set1_ps(val)) {}  in SkNx()
      44  AI static SkNx Load(const void* ptr) {  in Load()
      47  AI SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}  in SkNx()
      49  AI void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }  in store()
      51  AI static void Load2(const void* ptr, SkNx* x, SkNx* y) {  in Load2()
      57  AI static void Store2(void* dst, const SkNx& a, const SkNx& b) {  in Store2()
      62  AI static void Store3(void* dst, const SkNx& a, const SkNx& b, const SkNx& c) {  in Store3()
      [all …]
|
D | SkNx.h |
      20  #define AI SK_ALWAYS_INLINE  macro
      31  AI SkNx() = default;
      32  AI SkNx(const Half& lo, const Half& hi) : fLo(lo), fHi(hi) {}  in SkNx()
      34  AI SkNx(T v) : fLo(v), fHi(v) {}  in SkNx()
      36  AI SkNx(T a, T b) : fLo(a) , fHi(b) { static_assert(N==2, ""); }  in SkNx()
      37  AI SkNx(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) { static_assert(N==4, ""); }  in SkNx()
      38  AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) {  in SkNx()
      41  AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h,  in SkNx()
      47  AI T operator[](int k) const {
      52  AI static SkNx Load(const void* vptr) {  in Load()
      [all …]
|
/external/harfbuzz_ng/src/ |
D | hb-atomic.hh |
      58  #define hb_atomic_int_impl_add(AI, V) __atomic_fetch_add ((AI), (V), __ATOMIC_ACQ_REL)  argument
      59  #define hb_atomic_int_impl_set_relaxed(AI, V) __atomic_store_n ((AI), (V), __ATOMIC_RELAXED)  argument
      60  #define hb_atomic_int_impl_set(AI, V) __atomic_store_n ((AI), (V), __ATOMIC_RELEASE)  argument
      61  #define hb_atomic_int_impl_get_relaxed(AI) __atomic_load_n ((AI), __ATOMIC_RELAXED)  argument
      62  #define hb_atomic_int_impl_get(AI) __atomic_load_n ((AI), __ATOMIC_ACQUIRE)  argument
      85  #define hb_atomic_int_impl_add(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->fetch_add ((V), …  argument
      86  #define hb_atomic_int_impl_set_relaxed(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->store ((V…  argument
      87  #define hb_atomic_int_impl_set(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->store ((V), std:…  argument
      88  #define hb_atomic_int_impl_get_relaxed(AI) (reinterpret_cast<std::atomic<int> *> (AI)->load (std::m…  argument
      89  #define hb_atomic_int_impl_get(AI) (reinterpret_cast<std::atomic<int> *> (AI)->load (std::memory_o…  argument
      [all …]
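Note: the two groups of matches are alternative backends for the same operations — lines 58-62 use the GCC/Clang __atomic built-ins directly, while lines 85-89 fall back to std::atomic applied to the same int storage. A small standalone sketch of that equivalence (function names here are illustrative, not HarfBuzz's):

    #include <atomic>
    #include <cstdio>

    static int value;   // plain int storage, as the hb_atomic_int wrappers use

    // __atomic built-in flavour (lines 58/62 style)
    static int fetch_add_builtin(int *p, int v) { return __atomic_fetch_add(p, v, __ATOMIC_ACQ_REL); }
    static int get_builtin(int *p)              { return __atomic_load_n(p, __ATOMIC_ACQUIRE); }

    // std::atomic flavour (lines 85/89 style): the same storage, reinterpreted
    static int fetch_add_std(int *p, int v) {
        return reinterpret_cast<std::atomic<int> *>(p)->fetch_add(v, std::memory_order_acq_rel);
    }
    static int get_std(int *p) {
        return reinterpret_cast<std::atomic<int> *>(p)->load(std::memory_order_acquire);
    }

    int main() {
        fetch_add_builtin(&value, 2);
        fetch_add_std(&value, 3);
        std::printf("%d %d\n", get_builtin(&value), get_std(&value));   // prints "5 5"
    }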
|
/external/clang/include/clang/CodeGen/ |
D | CGFunctionInfo.h |
      124  auto AI = ABIArgInfo(Direct);  variable
      125  AI.setCoerceToType(T);
      126  AI.setPaddingType(Padding);
      127  AI.setDirectOffset(Offset);
      128  AI.setCanBeFlattened(CanBeFlattened);
      129  return AI;
      132  auto AI = getDirect(T);  variable
      133  AI.setInReg(true);
      134  return AI;
      137  auto AI = ABIArgInfo(Extend);  variable
      [all …]
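Note: these matches all follow the same static-factory shape — construct an ABIArgInfo value, set a few properties, return it by value (getDirectInReg is literally getDirect plus setInReg(true)). A generic sketch of that pattern with invented names, not Clang's actual class:

    struct ArgInfo {
        enum Kind { Direct, Extend };
        Kind kind = Direct;
        unsigned directOffset = 0;
        bool inReg = false;

        static ArgInfo getDirect(unsigned offset = 0) {
            ArgInfo ai;                   // build, tweak, return by value
            ai.kind = Direct;
            ai.directOffset = offset;
            return ai;
        }
        static ArgInfo getDirectInReg() {
            ArgInfo ai = getDirect();
            ai.inReg = true;              // mirrors "AI.setInReg(true); return AI;"
            return ai;
        }
    };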
|
/external/llvm/lib/CodeGen/ |
D | StackProtector.cpp |
      61  StackProtector::getSSPLayout(const AllocaInst *AI) const {  in getSSPLayout()
      62  return AI ? Layout.lookup(AI) : SSPLK_None;  in getSSPLayout()
      166  bool StackProtector::HasAddressTaken(const Instruction *AI) {  in HasAddressTaken()  argument
      167  for (const User *U : AI->users()) {  in HasAddressTaken()
      169  if (AI == SI->getValueOperand())  in HasAddressTaken()
      172  if (AI == SI->getOperand(0))  in HasAddressTaken()
      237  if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {  in RequiresStackProtector()  local
      238  if (AI->isArrayAllocation()) {  in RequiresStackProtector()
      244  if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {  in RequiresStackProtector()
      248  Layout.insert(std::make_pair(AI, SSPLK_LargeArray));  in RequiresStackProtector()
      [all …]
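Note: RequiresStackProtector() in the lines above scans for allocas like the one a local fixed-size buffer produces; arrays at or above the configured buffer-size threshold get recorded as SSPLK_LargeArray in the Layout map. A hedged, source-level illustration of the kind of code that trips it (assuming -fstack-protector and the default 8-byte threshold):

    #include <cstring>

    // The 64-byte buffer below lowers to an array alloca that the pass
    // classifies as a large-array slot and places next to the stack canary.
    void copy_name(char *dst, const char *src) {
        char buf[64];                            // array alloca => protector-worthy
        std::strncpy(buf, src, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';
        std::strcpy(dst, buf);
    }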
|
D | SafeStack.cpp |
      148  uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);
      213  uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {  in getStaticAllocaAllocationSize()  argument
      214  uint64_t Size = DL->getTypeAllocSize(AI->getAllocatedType());  in getStaticAllocaAllocationSize()
      215  if (AI->isArrayAllocation()) {  in getStaticAllocaAllocationSize()
      216  auto C = dyn_cast<ConstantInt>(AI->getArraySize());  in getStaticAllocaAllocationSize()
      412  if (auto AI = dyn_cast<AllocaInst>(&I)) {  in findInsts()  local
      415  uint64_t Size = getStaticAllocaAllocationSize(AI);  in findInsts()
      416  if (IsSafeStackAlloca(AI, Size))  in findInsts()
      419  if (AI->isStaticAlloca()) {  in findInsts()
      421  StaticAllocas.push_back(AI);  in findInsts()
      [all …]
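Note: getStaticAllocaAllocationSize in the matches above reduces to a simple rule — take the allocated type's size and, if the alloca is an array allocation, multiply by the array count when that count is a compile-time constant; a non-constant count means the size is not static. A standalone sketch of that rule, with invented names rather than LLVM's API:

    #include <cstdint>
    #include <optional>

    std::optional<uint64_t> staticAllocaSize(uint64_t typeAllocSize,
                                             bool isArrayAllocation,
                                             std::optional<uint64_t> constantArrayCount) {
        uint64_t size = typeAllocSize;
        if (isArrayAllocation) {
            if (!constantArrayCount)        // like dyn_cast<ConstantInt> failing
                return std::nullopt;        // size is not known statically
            size *= *constantArrayCount;
        }
        return size;
    }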
|
/external/swiftshader/third_party/LLVM/lib/Transforms/Instrumentation/ |
D | ProfilingUtils.cpp |
      67  Function::arg_iterator AI;  in InsertProfilingInitCall()  local
      71  AI = MainFn->arg_begin(); ++AI;  in InsertProfilingInitCall()
      72  if (AI->getType() != ArgVTy) {  in InsertProfilingInitCall()
      73  Instruction::CastOps opcode = CastInst::getCastOpcode(AI, false, ArgVTy,  in InsertProfilingInitCall()
      76  CastInst::Create(opcode, AI, ArgVTy, "argv.cast", InitCall));  in InsertProfilingInitCall()
      78  InitCall->setArgOperand(1, AI);  in InsertProfilingInitCall()
      83  AI = MainFn->arg_begin();  in InsertProfilingInitCall()
      86  if (!AI->getType()->isIntegerTy(32)) {  in InsertProfilingInitCall()
      88  if (!AI->use_empty()) {  in InsertProfilingInitCall()
      89  opcode = CastInst::getCastOpcode(InitCall, true, AI->getType(), true);  in InsertProfilingInitCall()
      [all …]
|
/external/llvm/lib/Transforms/Utils/ |
D | PromoteMemoryToRegister.cpp |
      51  bool llvm::isAllocaPromotable(const AllocaInst *AI) {  in isAllocaPromotable()  argument
      54  unsigned AS = AI->getType()->getAddressSpace();  in isAllocaPromotable()
      57  for (const User *U : AI->users()) {  in isAllocaPromotable()
      64  if (SI->getOperand(0) == AI)  in isAllocaPromotable()
      119  void AnalyzeAlloca(AllocaInst *AI) {  in AnalyzeAlloca()
      125  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {  in AnalyzeAlloca()
      149  DbgDeclare = FindAllocaDbgDeclare(AI);  in AnalyzeAlloca()
      293  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
      304  static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {  in removeLifetimeIntrinsicUsers()  argument
      308  for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {  in removeLifetimeIntrinsicUsers()
      [all …]
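Note: isAllocaPromotable in these matches encodes the mem2reg precondition — every use of the alloca must be a plain load from it or a store into it (never a store of the alloca's own address), with no address-taking escapes. A deliberately simplified sketch of that use-scan, using a toy Use record standing in for LLVM's user list (an assumption for illustration, not LLVM's API):

    #include <vector>

    struct Use { bool isLoad; bool isStore; bool storesTheAllocaItself; };

    // Promotion to an SSA value is allowed only if every user is a plain load,
    // or a store that writes into the slot rather than leaking its address.
    bool isPromotable(const std::vector<Use>& users) {
        for (const Use& u : users) {
            if (u.isLoad) continue;
            if (u.isStore && !u.storesTheAllocaItself) continue;
            return false;   // anything else (escaping store, call, etc.) blocks promotion
        }
        return true;
    }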
|
D | MetaRenamer.cpp |
      84  for (auto AI = M.alias_begin(), AE = M.alias_end(); AI != AE; ++AI) {  in runOnModule()  local
      85  StringRef Name = AI->getName();  in runOnModule()
      89  AI->setName("alias");  in runOnModule()
      125  for (auto AI = F.arg_begin(), AE = F.arg_end(); AI != AE; ++AI)  in runOnFunction()  local
      126  if (!AI->getType()->isVoidTy())  in runOnFunction()
      127  AI->setName("arg");  in runOnFunction()
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/CodeGen/ |
D | StackProtector.cpp |
      168  bool StackProtector::HasAddressTaken(const Instruction *AI) {  in HasAddressTaken()  argument
      169  for (const User *U : AI->users()) {  in HasAddressTaken()
      171  if (AI == SI->getValueOperand())  in HasAddressTaken()
      174  if (AI == SI->getOperand(0))  in HasAddressTaken()
      252  if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {  in RequiresStackProtector()  local
      253  if (AI->isArrayAllocation()) {  in RequiresStackProtector()
      262  if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {  in RequiresStackProtector()
      266  Layout.insert(std::make_pair(AI,  in RequiresStackProtector()
      272  Layout.insert(std::make_pair(AI,  in RequiresStackProtector()
      279  Layout.insert(std::make_pair(AI,  in RequiresStackProtector()
      [all …]
|
D | SafeStack.cpp |
      164  uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);
      220  uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {  in getStaticAllocaAllocationSize()  argument
      221  uint64_t Size = DL.getTypeAllocSize(AI->getAllocatedType());  in getStaticAllocaAllocationSize()
      222  if (AI->isArrayAllocation()) {  in getStaticAllocaAllocationSize()
      223  auto C = dyn_cast<ConstantInt>(AI->getArraySize());  in getStaticAllocaAllocationSize()
      382  if (auto AI = dyn_cast<AllocaInst>(&I)) {  in findInsts()  local
      385  uint64_t Size = getStaticAllocaAllocationSize(AI);  in findInsts()
      386  if (IsSafeStackAlloca(AI, Size))  in findInsts()
      389  if (AI->isStaticAlloca()) {  in findInsts()
      391  StaticAllocas.push_back(AI);  in findInsts()
      [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/Utils/ |
D | PromoteMemoryToRegister.cpp |
      65  bool llvm::isAllocaPromotable(const AllocaInst *AI) {  in isAllocaPromotable()  argument
      68  unsigned AS = AI->getType()->getAddressSpace();  in isAllocaPromotable()
      71  for (const User *U : AI->users()) {  in isAllocaPromotable()
      78  if (SI->getOperand(0) == AI)  in isAllocaPromotable()
      133  void AnalyzeAlloca(AllocaInst *AI) {  in AnalyzeAlloca()
      139  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {  in AnalyzeAlloca()
      163  DbgDeclares = FindDbgAddrUses(AI);  in AnalyzeAlloca()
      302  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
      326  static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {  in removeLifetimeIntrinsicUsers()  argument
      330  for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {  in removeLifetimeIntrinsicUsers()
      [all …]
|
D | MetaRenamer.cpp |
      96  for (auto AI = M.alias_begin(), AE = M.alias_end(); AI != AE; ++AI) {  in runOnModule()  local
      97  StringRef Name = AI->getName();  in runOnModule()
      101  AI->setName("alias");  in runOnModule()
      147  for (auto AI = F.arg_begin(), AE = F.arg_end(); AI != AE; ++AI)  in runOnFunction()  local
      148  if (!AI->getType()->isVoidTy())  in runOnFunction()
      149  AI->setName("arg");  in runOnFunction()
|
/external/swiftshader/third_party/LLVM/lib/Transforms/Utils/ |
D | PromoteMemoryToRegister.cpp |
      80  bool llvm::isAllocaPromotable(const AllocaInst *AI) {  in isAllocaPromotable()  argument
      85  for (Value::const_use_iterator UI = AI->use_begin(), UE = AI->use_end();  in isAllocaPromotable()
      94  if (SI->getOperand(0) == AI)  in isAllocaPromotable()
      275  void DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
      277  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
      281  void RewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
      283  void PromoteSingleBlockAlloca(AllocaInst *AI, AllocaInfo &Info,
      315  void AnalyzeAlloca(AllocaInst *AI) {  in AnalyzeAlloca()
      321  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();  in AnalyzeAlloca()
      346  DbgDeclare = FindAllocaDbgDeclare(AI);  in AnalyzeAlloca()
      [all …]
|
/external/swiftshader/third_party/LLVM/lib/Transforms/Scalar/ |
D | ScalarReplAggregates.cpp |
      88  AllocaInst *AI;  member
      114  : AI(ai), isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false),  in AllocaInfo()
      125  bool isSafeAllocaToScalarRepl(AllocaInst *AI);
      138  void DoScalarReplacement(AllocaInst *AI,
      142  void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
      144  void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
      146  void RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
      148  void RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
      152  AllocaInst *AI,
      154  void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
      [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/unittests/Transforms/Utils/ |
D | IntegerDivision.cpp |
      37  Function::arg_iterator AI = F->arg_begin();  in TEST()  local
      38  Value *A = &*AI++;  in TEST()
      39  Value *B = &*AI++;  in TEST()
      67  Function::arg_iterator AI = F->arg_begin();  in TEST()  local
      68  Value *A = &*AI++;  in TEST()
      69  Value *B = &*AI++;  in TEST()
      97  Function::arg_iterator AI = F->arg_begin();  in TEST()  local
      98  Value *A = &*AI++;  in TEST()
      99  Value *B = &*AI++;  in TEST()
      127  Function::arg_iterator AI = F->arg_begin();  in TEST()  local
      [all …]
|
/external/llvm/unittests/Transforms/Utils/ |
D | IntegerDivision.cpp |
      37  Function::arg_iterator AI = F->arg_begin();  in TEST()  local
      38  Value *A = &*AI++;  in TEST()
      39  Value *B = &*AI++;  in TEST()
      67  Function::arg_iterator AI = F->arg_begin();  in TEST()  local
      68  Value *A = &*AI++;  in TEST()
      69  Value *B = &*AI++;  in TEST()
      97  Function::arg_iterator AI = F->arg_begin();  in TEST()  local
      98  Value *A = &*AI++;  in TEST()
      99  Value *B = &*AI++;  in TEST()
      127  Function::arg_iterator AI = F->arg_begin();  in TEST()  local
      [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/Instrumentation/ |
D | HWAddressSanitizer.cpp |
      165  bool isInterestingAlloca(const AllocaInst &AI);
      166  bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag);
      173  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
      504  static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {  in getAllocaSizeInBytes()  argument
      506  if (AI.isArrayAllocation()) {  in getAllocaSizeInBytes()
      507  const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());  in getAllocaSizeInBytes()
      511  Type *Ty = AI.getAllocatedType();  in getAllocaSizeInBytes()
      512  uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);  in getAllocaSizeInBytes()
      516  bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,  in tagAlloca()  argument
      518  size_t Size = (getAllocaSizeInBytes(*AI) + Mapping.getAllocaAlignment() - 1) &  in tagAlloca()
      [all …]
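Note: tagAlloca (line 518 above) rounds the alloca size up to the allocation granule with the usual (x + align - 1) & ~(align - 1) trick before tagging. That arithmetic in isolation, with an illustrative 16-byte granule (the real constant comes from Mapping.getAllocaAlignment()):

    #include <cstdint>

    constexpr uint64_t roundUpTo(uint64_t size, uint64_t align) {
        return (size + align - 1) & ~(align - 1);   // align must be a power of two
    }

    static_assert(roundUpTo(17, 16) == 32, "a partially used granule rounds up");
    static_assert(roundUpTo(32, 16) == 32, "already-aligned sizes are unchanged");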
|
/external/llvm/lib/Transforms/InstCombine/ |
D | InstCombineLoadStoreAlloca.cpp |
      162  isOnlyCopiedFromConstantGlobal(AllocaInst *AI,  in isOnlyCopiedFromConstantGlobal()  argument
      165  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))  in isOnlyCopiedFromConstantGlobal()
      170  static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {  in simplifyAllocaArraySize()  argument
      172  if (!AI.isArrayAllocation()) {  in simplifyAllocaArraySize()
      174  if (AI.getArraySize()->getType()->isIntegerTy(32))  in simplifyAllocaArraySize()
      179  AI.setOperand(0, V);  in simplifyAllocaArraySize()
      180  return &AI;  in simplifyAllocaArraySize()
      184  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {  in simplifyAllocaArraySize()
      185  Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());  in simplifyAllocaArraySize()
      186  AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());  in simplifyAllocaArraySize()
      [all …]
|
/external/swiftshader/third_party/LLVM/lib/Transforms/InstCombine/ |
D | InstCombineLoadStoreAlloca.cpp |
      25  Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {  in visitAllocaInst()  argument
      29  Type *IntPtrTy = TD->getIntPtrType(AI.getContext());  in visitAllocaInst()
      30  if (AI.getArraySize()->getType() != IntPtrTy) {  in visitAllocaInst()
      31  Value *V = Builder->CreateIntCast(AI.getArraySize(),  in visitAllocaInst()
      33  AI.setOperand(0, V);  in visitAllocaInst()
      34  return &AI;  in visitAllocaInst()
      39  if (AI.isArrayAllocation()) {  // Check C != 1  in visitAllocaInst()
      40  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {  in visitAllocaInst()
      42  ArrayType::get(AI.getAllocatedType(), C->getZExtValue());  in visitAllocaInst()
      43  assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");  in visitAllocaInst()
      [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/InstCombine/ |
D | InstCombineLoadStoreAlloca.cpp |
      165  isOnlyCopiedFromConstantGlobal(AllocaInst *AI,  in isOnlyCopiedFromConstantGlobal()  argument
      168  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))  in isOnlyCopiedFromConstantGlobal()
      174  static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,  in isDereferenceableForAllocaSize()  argument
      176  if (AI->isArrayAllocation())  in isDereferenceableForAllocaSize()
      178  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());  in isDereferenceableForAllocaSize()
      181  return isDereferenceableAndAlignedPointer(V, AI->getAlignment(),  in isDereferenceableForAllocaSize()
      185  static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {  in simplifyAllocaArraySize()  argument
      187  if (!AI.isArrayAllocation()) {  in simplifyAllocaArraySize()
      189  if (AI.getArraySize()->getType()->isIntegerTy(32))  in simplifyAllocaArraySize()
      194  AI.setOperand(0, V);  in simplifyAllocaArraySize()
      [all …]
|