/art/test/407-arrays/src/ |
D | Main.java |
     33  int[] ints, Object[] objects, long[] longs, int index) {  [in $opt$testReads(), argument]
     35  assertEquals(false, bools[index]);  [in $opt$testReads()]
     38  assertEquals(0, bytes[index]);  [in $opt$testReads()]
     41  assertEquals(0, chars[index]);  [in $opt$testReads()]
     44  assertEquals(0, shorts[index]);  [in $opt$testReads()]
     47  assertEquals(0, ints[index]);  [in $opt$testReads()]
     50  assertNull(objects[index]);  [in $opt$testReads()]
     53  assertEquals(0, longs[index]);  [in $opt$testReads()]
     57  int[] ints, Object[] objects, long[] longs, int index) {  [in $opt$testWrites(), argument]
     61  assertEquals(true, bools[index]);  [in $opt$testWrites()]
    [all …]
|
/art/runtime/arch/x86_64/ |
D | asm_support_x86_64.S |
     31  #define RAW_VAR(name,index) $index  [argument]
     32  #define VAR(name,index) SYMBOL($index)  [argument]
     33  #define PLT_VAR(name, index) PLT_SYMBOL($index)  [argument]
     34  #define REG_VAR(name,index) %$index  [argument]
     35  #define CALL_MACRO(name,index) $index  [argument]
     54  #define RAW_VAR(name,index) name&  [argument]
     55  #define VAR(name,index) name&  [argument]
     56  #define PLT_VAR(name, index) name&@PLT  [argument]
     57  #define REG_VAR(name,index) %name  [argument]
     58  #define CALL_MACRO(name,index) name&  [argument]
    [all …]
|
/art/runtime/arch/x86/ |
D | asm_support_x86.S |
     31  #define RAW_VAR(name,index) $index  [argument]
     32  #define VAR(name,index) SYMBOL($index)  [argument]
     33  #define PLT_VAR(name, index) SYMBOL($index)  [argument]
     34  #define REG_VAR(name,index) %$index  [argument]
     35  #define CALL_MACRO(name,index) $index  [argument]
     54  #define RAW_VAR(name,index) name&  [argument]
     55  #define VAR(name,index) name&  [argument]
     56  #define PLT_VAR(name, index) name&@PLT  [argument]
     57  #define REG_VAR(name,index) %name  [argument]
     58  #define CALL_MACRO(name,index) name&  [argument]
    [all …]
|
/art/runtime/gc/accounting/ |
D | atomic_stack.h |
     73  int32_t index;  [in AtomicBumpBack(), local]
     76  index = back_index_.LoadRelaxed();  [in AtomicBumpBack()]
     77  new_index = index + num_slots;  [in AtomicBumpBack()]
     82  } while (!back_index_.CompareExchangeWeakRelaxed(index, new_index));  [in AtomicBumpBack()]
     83  *start_address = &begin_[index];  [in AtomicBumpBack()]
     87  for (int32_t i = index; i < new_index; ++i) {  [in AtomicBumpBack()]
     89  << "i=" << i << " index=" << index << " new_index=" << new_index;  [in AtomicBumpBack()]
    107  int32_t index = back_index_.LoadRelaxed();  [in PushBack(), local]
    108  DCHECK_LT(static_cast<size_t>(index), growth_limit_);  [in PushBack()]
    109  back_index_.StoreRelaxed(index + 1);  [in PushBack()]
    [all …]
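The AtomicBumpBack() lines are a lock-free bump allocation: load the back index, compute the new one, and retry the weak CAS until it sticks. A minimal sketch of the same pattern with std::atomic (the capacity check and the -1 failure return are assumptions of this sketch, not ART's interface):

    #include <atomic>
    #include <cstdint>

    // Reserve num_slots contiguous slots by bumping an atomic back index.
    // Returns the first reserved index, or -1 on overflow.
    int32_t AtomicBumpBack(std::atomic<int32_t>& back_index,
                           int32_t capacity, int32_t num_slots) {
      int32_t index;
      int32_t new_index;
      do {
        index = back_index.load(std::memory_order_relaxed);
        new_index = index + num_slots;
        if (new_index > capacity) {
          return -1;  // out of space; the real code bails out before the CAS too
        }
        // The weak CAS may fail spuriously or race with another thread, so it
        // sits in a retry loop, like CompareExchangeWeakRelaxed above.
      } while (!back_index.compare_exchange_weak(index, new_index,
                                                 std::memory_order_relaxed));
      return index;
    }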
|
D | space_bitmap-inl.h |
     37  const size_t index = OffsetToIndex(offset);  [in AtomicTestAndSet(), local]
     39  Atomic<uword>* atomic_entry = reinterpret_cast<Atomic<uword>*>(&bitmap_begin_[index]);  [in AtomicTestAndSet()]
     40  DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;  [in AtomicTestAndSet()]
    163  const size_t index = OffsetToIndex(offset);  [in Modify(), local]
    165  DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;  [in Modify()]
    166  uword* address = &bitmap_begin_[index];  [in Modify()]
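Both functions first map a heap offset to a bitmap word index. A sketch of that mapping and of the CAS-based test-and-set behind AtomicTestAndSet(), assuming 8-byte alignment and word-sized bitmap entries (both assumptions of this sketch):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    using uword = uintptr_t;
    constexpr size_t kAlignment = 8;                    // assumed object alignment
    constexpr size_t kBitsPerWord = sizeof(uword) * 8;

    // One bit per kAlignment bytes: first the word index, then the bit in it.
    inline size_t OffsetToIndex(uintptr_t offset) {
      return offset / kAlignment / kBitsPerWord;
    }

    // Atomically sets the bit for `offset`; returns its previous value.
    inline bool AtomicTestAndSet(std::atomic<uword>* bitmap_begin, uintptr_t offset) {
      const size_t index = OffsetToIndex(offset);
      const uword mask = static_cast<uword>(1) << ((offset / kAlignment) % kBitsPerWord);
      std::atomic<uword>* atomic_entry = bitmap_begin + index;
      uword old_word = atomic_entry->load(std::memory_order_relaxed);
      do {
        if ((old_word & mask) != 0) {
          return true;  // bit already set, nothing to do
        }
      } while (!atomic_entry->compare_exchange_weak(old_word, old_word | mask,
                                                    std::memory_order_relaxed));
      return false;
    }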
|
D | space_bitmap.h |
     66  static constexpr T IndexToOffset(T index) {  [in IndexToOffset(), argument]
     67  return static_cast<T>(index * kAlignment * kBitsPerWord);  [in IndexToOffset()]
     97  const size_t index = OffsetToIndex(offset);  [in HasAddress(), local]
     98  return index < bitmap_size_ / kWordSize;  [in HasAddress()]
    188  const size_t index = OffsetToIndex(offset);  [in GetObjectWordAddress(), local]
    189  return &bitmap_begin_[index];  [in GetObjectWordAddress()]
|
/art/runtime/base/ |
D | hash_set.h |
     66  Iterator(HashSet* hash_set, size_t index) : hash_set_(hash_set), index_(index) {  [in Iterator(), argument]
    107  size_t NextNonEmptySlot(size_t index) const {  [in NextNonEmptySlot(), argument]
    109  DCHECK_LT(index, num_buckets);  [in NextNonEmptySlot()]
    111  ++index;  [in NextNonEmptySlot()]
    112  } while (index < num_buckets && hash_set_->IsFreeSlot(index));  [in NextNonEmptySlot()]
    113  return index;  [in NextNonEmptySlot()]
    238  size_t index = IndexForHash(hash);  [in FindWithHash(), local]
    240  T& slot = ElementForIndex(index);  [in FindWithHash()]
    245  return Iterator(this, index);  [in FindWithHash()]
    247  index = NextIndex(index);  [in FindWithHash()]
    [all …]
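FindWithHash() is open addressing with linear probing: hash to a starting bucket, then walk NextIndex() until the element or a free slot turns up. A sketch over a plain std::vector of strings, with an empty string standing in for HashSet's EmptyFn free-slot marker (an assumption of this sketch), and the table assumed never completely full:

    #include <cstddef>
    #include <functional>
    #include <string>
    #include <vector>

    // Returns the bucket holding `key`, or buckets.size() if absent.
    size_t FindWithHash(const std::vector<std::string>& buckets, const std::string& key) {
      const size_t num_buckets = buckets.size();
      size_t index = std::hash<std::string>()(key) % num_buckets;  // IndexForHash()
      while (!buckets[index].empty()) {        // a free slot ends the probe chain
        if (buckets[index] == key) {
          return index;                        // found; ART returns Iterator(this, index)
        }
        index = (index + 1) % num_buckets;     // NextIndex()
      }
      return num_buckets;                      // not found; ART returns end()
    }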
|
/art/compiler/utils/ |
D | growable_array.h |
    123  void InsertAt(size_t index, T elem) {  [in InsertAt(), argument]
    124  DCHECK(index <= Size());  [in InsertAt()]
    126  for (size_t i = Size() - 1; i > index; --i) {  [in InsertAt()]
    129  elem_list_[index] = elem;  [in InsertAt()]
    136  T Get(size_t index) const {  [in Get(), argument]
    137  DCHECK_LT(index, num_used_);  [in Get()]
    138  return elem_list_[index];  [in Get()]
    142  void Put(size_t index, T elem) {  [in Put(), argument]
    143  DCHECK_LT(index, num_used_);  [in Put()]
    144  elem_list_[index] = elem;  [in Put()]
    [all …]
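InsertAt() makes room by shifting the tail one slot to the right before writing the new element at `index`. A vector-backed sketch of the same index arithmetic (ART grows an arena-allocated raw array instead):

    #include <cassert>
    #include <vector>

    // Insert `elem` at `index`, shifting everything at or after it right by one.
    void InsertAt(std::vector<int>& elems, size_t index, int elem) {
      assert(index <= elems.size());
      elems.push_back(0);                     // make room for one more element
      for (size_t i = elems.size() - 1; i > index; --i) {
        elems[i] = elems[i - 1];              // shift right
      }
      elems[index] = elem;
    }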
|
/art/runtime/ |
D | gc_map.h |
     40  const uint8_t* GetBitMap(size_t index) const {  [in GetBitMap(), argument]
     41  size_t entry_offset = index * EntryWidth();  [in GetBitMap()]
     46  uintptr_t GetNativePcOffset(size_t index) const {  [in GetNativePcOffset(), argument]
     47  size_t entry_offset = index * EntryWidth();  [in GetNativePcOffset()]
     68  size_t index = Hash(native_pc_offset) % num_entries;  [in FindBitMap(), local]
     70  while (GetNativePcOffset(index) != native_pc_offset) {  [in FindBitMap()]
     71  index = (index + 1) % num_entries;  [in FindBitMap()]
     75  return GetBitMap(index);  [in FindBitMap()]
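The map is a table of fixed-width entries, each a native PC offset key followed by a reference bitmap, so entry i starts at i * EntryWidth(); FindBitMap() then probes linearly from Hash(native_pc_offset). A sketch of the entry addressing (field widths and the little-endian key are assumptions of this sketch; the real map reads its widths from a header):

    #include <cstddef>
    #include <cstdint>

    struct GcMapSketch {
      const uint8_t* data;   // packed entry array
      size_t key_bytes;      // bytes of native PC offset per entry
      size_t bitmap_bytes;   // bytes of reference bitmap per entry

      size_t EntryWidth() const { return key_bytes + bitmap_bytes; }

      uintptr_t GetNativePcOffset(size_t index) const {
        size_t entry_offset = index * EntryWidth();
        uintptr_t pc = 0;
        for (size_t i = 0; i < key_bytes; ++i) {
          pc |= static_cast<uintptr_t>(data[entry_offset + i]) << (i * 8);
        }
        return pc;
      }

      const uint8_t* GetBitMap(size_t index) const {
        return data + index * EntryWidth() + key_bytes;  // bitmap follows the key
      }
    };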
|
D | monitor_pool_test.cc |
     80  size_t index = r.next() % monitors.size();  [in TEST_F(), local]
     81  Monitor* mon = monitors[index];  [in TEST_F()]
     82  monitors.erase(monitors.begin() + index);  [in TEST_F()]
    112  size_t index = r.next() % monitors.size();  [in TEST_F(), local]
    113  Monitor* mon = monitors[index];  [in TEST_F()]
    114  monitors.erase(monitors.begin() + index);  [in TEST_F()]
|
D | monitor_pool.h |
    119  size_t index = offset / kChunkSize;  [in LookupMonitor(), local]
    121  uintptr_t base = *(monitor_chunks_.LoadRelaxed()+index);  [in LookupMonitor()]
    133  for (size_t index = 0; index < num_chunks_; ++index) {  [in ComputeMonitorIdInPool(), local]
    134  uintptr_t chunk_addr = *(monitor_chunks_.LoadRelaxed() + index);  [in ComputeMonitorIdInPool()]
    136  … return OffsetToMonitorId(reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);  [in ComputeMonitorIdInPool()]
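Monitor ids encode an offset into a virtual array of fixed-size chunks: offset / kChunkSize selects the chunk and the remainder is the position inside it; ComputeMonitorIdInPool() inverts this by scanning the chunk table. A sketch of both directions (kChunkSize and the plain pointer table are stand-ins for ART's monitor_chunks_):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kChunkSize = 4096;  // assumed chunk size

    // Pool offset -> monitor address.
    inline void* LookupMonitor(const uintptr_t* chunk_bases, size_t offset) {
      size_t index = offset / kChunkSize;           // which chunk
      uintptr_t base = chunk_bases[index];          // chunk base address
      return reinterpret_cast<void*>(base + offset % kChunkSize);
    }

    // Monitor address -> pool offset, by finding the containing chunk.
    inline size_t ComputeOffsetInPool(const uintptr_t* chunk_bases,
                                      size_t num_chunks, const void* mon) {
      uintptr_t addr = reinterpret_cast<uintptr_t>(mon);
      for (size_t index = 0; index < num_chunks; ++index) {
        uintptr_t chunk_addr = chunk_bases[index];
        if (addr >= chunk_addr && addr < chunk_addr + kChunkSize) {
          return addr - chunk_addr + index * kChunkSize;
        }
      }
      return SIZE_MAX;  // not in the pool
    }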
|
D | transaction.cc |
     84  void Transaction::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) {  [in RecordWriteArray(), argument]
     90  array_log.LogValue(index, value);  [in RecordWriteArray()]
    370  void Transaction::ArrayLog::LogValue(size_t index, uint64_t value) {  [in LogValue(), argument]
    371  auto it = array_values_.find(index);  [in LogValue()]
    373  array_values_.insert(std::make_pair(index, value));  [in LogValue()]
    387  size_t index, uint64_t value) {  [in UndoArrayWrite(), argument]
    392  array->AsBooleanArray()->SetWithoutChecks<false>(index, static_cast<uint8_t>(value));  [in UndoArrayWrite()]
    395  array->AsByteArray()->SetWithoutChecks<false>(index, static_cast<int8_t>(value));  [in UndoArrayWrite()]
    398  array->AsCharArray()->SetWithoutChecks<false>(index, static_cast<uint16_t>(value));  [in UndoArrayWrite()]
    401  array->AsShortArray()->SetWithoutChecks<false>(index, static_cast<int16_t>(value));  [in UndoArrayWrite()]
    [all …]
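LogValue() records only the first write to each array index: find() followed by insert() leaves an existing entry alone, so the log always holds the pre-transaction value and UndoArrayWrite() can restore it on abort. A sketch of that first-write-wins undo log (the flat uint64_t array in Undo() stands in for the typed SetWithoutChecks<> calls above):

    #include <cstddef>
    #include <cstdint>
    #include <map>

    class ArrayLogSketch {
     public:
      void LogValue(size_t index, uint64_t old_value) {
        // insert() is a no-op when the index is already logged, matching the
        // find()-then-insert() behavior in the listing above.
        array_values_.insert(std::make_pair(index, old_value));
      }

      void Undo(uint64_t* array) const {
        for (const auto& entry : array_values_) {
          array[entry.first] = entry.second;  // restore first-logged value
        }
      }

     private:
      std::map<size_t, uint64_t> array_values_;
    };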
|
/art/compiler/dex/ |
D | vreg_analysis.cc |
     22  bool MIRGraph::SetFp(int index, bool is_fp) {  [in SetFp(), argument]
     24  if (is_fp && !reg_location_[index].fp) {  [in SetFp()]
     25  reg_location_[index].fp = true;  [in SetFp()]
     26  reg_location_[index].defined = true;  [in SetFp()]
     32  bool MIRGraph::SetFp(int index) {  [in SetFp(), argument]
     34  if (!reg_location_[index].fp) {  [in SetFp()]
     35  reg_location_[index].fp = true;  [in SetFp()]
     36  reg_location_[index].defined = true;  [in SetFp()]
     42  bool MIRGraph::SetCore(int index, bool is_core) {  [in SetCore(), argument]
     44  if (is_core && !reg_location_[index].defined) {  [in SetCore()]
    [all …]
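SetFp()/SetCore() are monotone updates for a fixed-point type-inference pass: flags only flip from false to true, and the return value tells the caller whether another iteration is needed. A sketch with a cut-down RegLocation (an assumption of this sketch; the real struct carries many more bits):

    #include <vector>

    struct RegLocation {
      bool fp = false;       // virtual register holds a floating-point value
      bool defined = false;  // a type has been established
    };

    // Returns true if anything changed, so the analysis loop knows to re-run.
    bool SetFp(std::vector<RegLocation>& reg_location, int index, bool is_fp) {
      bool change = false;
      if (is_fp && !reg_location[index].fp) {
        reg_location[index].fp = true;
        reg_location[index].defined = true;
        change = true;
      }
      return change;
    }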
|
/art/compiler/optimizing/ |
D | parallel_move_resolver.cc |
     65  void ParallelMoveResolver::PerformMove(size_t index) {  [in PerformMove(), argument]
     73  DCHECK(!moves_.Get(index)->IsPending());  [in PerformMove()]
     74  DCHECK(!moves_.Get(index)->IsRedundant());  [in PerformMove()]
     79  DCHECK(!moves_.Get(index)->GetSource().IsInvalid());  [in PerformMove()]
     80  Location destination = moves_.Get(index)->MarkPending();  [in PerformMove()]
    101  MoveOperands* move = moves_.Get(index);  [in PerformMove()]
    128  EmitSwap(index);  [in PerformMove()]
    145  EmitMove(index);  [in PerformMove()]
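PerformMove() resolves one move of a parallel move set: it marks the move pending, recursively performs any move that reads its destination first, and breaks a dependency cycle with EmitSwap() instead of EmitMove(). A sketch of that recursion, valid for chains and two-move cycles (the real resolver additionally rewrites the sources of remaining moves after a swap, which longer cycles require; the int locations and print-based emitters are assumptions of this sketch):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Move {
      int source;
      int destination;
      bool pending = false;     // MarkPending()/ClearPending() in the real code
      bool eliminated = false;  // already emitted
    };

    void EmitMove(const Move& m) { std::printf("move %d -> %d\n", m.source, m.destination); }
    void EmitSwap(const Move& m) { std::printf("swap %d <-> %d\n", m.source, m.destination); }

    void PerformMove(std::vector<Move>& moves, size_t index) {
      moves[index].pending = true;
      const int destination = moves[index].destination;
      for (size_t i = 0; i < moves.size(); ++i) {
        if (i == index || moves[i].eliminated || moves[i].source != destination) {
          continue;                      // moves[i] does not block us
        }
        if (moves[i].pending) {
          EmitSwap(moves[index]);        // cycle: one swap resolves both moves
          moves[index].eliminated = true;
          moves[i].eliminated = true;
        } else {
          PerformMove(moves, i);         // resolve the blocker first
        }
      }
      moves[index].pending = false;
      if (!moves[index].eliminated) {
        EmitMove(moves[index]);          // destination is now free
        moves[index].eliminated = true;
      }
    }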
|
D | parallel_move_resolver.h |
     64  virtual void EmitMove(size_t index) = 0;
     67  virtual void EmitSwap(size_t index) = 0;
     83  void PerformMove(size_t index);
|
D | code_generator_x86_64.cc |
     654  uint32_t index = gp_index_++;  [in GetNextLocation(), local]
     656  if (index < calling_convention.GetNumberOfRegisters()) {  [in GetNextLocation()]
     657  return X86_64CpuLocation(calling_convention.GetRegisterAt(index));  [in GetNextLocation()]
     664  uint32_t index = gp_index_;  [in GetNextLocation(), local]
     666  if (index < calling_convention.GetNumberOfRegisters()) {  [in GetNextLocation()]
     668  return X86_64CpuLocation(calling_convention.GetRegisterAt(index));  [in GetNextLocation()]
    1090  Location index = locations->InAt(1);  [in VisitArrayGet(), local]
    1096  if (index.IsConstant()) {  [in VisitArrayGet()]
    1098  (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));  [in VisitArrayGet()]
    1100  __ movzxb(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_1, data_offset));  [in VisitArrayGet()]
    [all …]
|
D | code_generator_x86.cc |
    310  uint32_t index = gp_index_++;  [in GetNextLocation(), local]
    311  if (index < calling_convention.GetNumberOfRegisters()) {  [in GetNextLocation()]
    312  return X86CpuLocation(calling_convention.GetRegisterAt(index));  [in GetNextLocation()]
    314  return Location::StackSlot(calling_convention.GetStackOffsetOf(index));  [in GetNextLocation()]
    319  uint32_t index = gp_index_;  [in GetNextLocation(), local]
    321  if (index + 1 < calling_convention.GetNumberOfRegisters()) {  [in GetNextLocation()]
    323  calling_convention.GetRegisterPairAt(index)));  [in GetNextLocation()]
    324  } else if (index + 1 == calling_convention.GetNumberOfRegisters()) {  [in GetNextLocation()]
    325  return Location::QuickParameter(index);  [in GetNextLocation()]
    327  return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(index));  [in GetNextLocation()]
    [all …]
|
D | code_generator_arm.cc |
    335  uint32_t index = gp_index_++;  [in GetNextLocation(), local]
    336  if (index < calling_convention.GetNumberOfRegisters()) {  [in GetNextLocation()]
    337  return ArmCoreLocation(calling_convention.GetRegisterAt(index));  [in GetNextLocation()]
    339  return Location::StackSlot(calling_convention.GetStackOffsetOf(index));  [in GetNextLocation()]
    344  uint32_t index = gp_index_;  [in GetNextLocation(), local]
    346  if (index + 1 < calling_convention.GetNumberOfRegisters()) {  [in GetNextLocation()]
    348  calling_convention.GetRegisterPairAt(index)));  [in GetNextLocation()]
    349  } else if (index + 1 == calling_convention.GetNumberOfRegisters()) {  [in GetNextLocation()]
    350  return Location::QuickParameter(index);  [in GetNextLocation()]
    352  return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(index));  [in GetNextLocation()]
    [all …]
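The x86, x86-64, and ARM generators above share one GetNextLocation() shape: the first few integer arguments travel in registers, the rest in stack slots, and a long needs either a register pair, a split register/stack QuickParameter, or a double-wide stack slot. A sketch of that dispatch (register count, 4-byte slot size, and the Location struct are illustrative assumptions, not any one backend's convention):

    #include <cstdint>

    enum class Kind { kRegister, kRegisterPair, kQuickParameter, kStackSlot, kDoubleStackSlot };

    struct Location {
      Kind kind;
      uint32_t payload;  // register number, pair id, or stack offset
    };

    constexpr uint32_t kNumArgRegisters = 3;  // assumed register budget

    Location GetNextIntLocation(uint32_t& gp_index) {
      uint32_t index = gp_index++;
      if (index < kNumArgRegisters) {
        return {Kind::kRegister, index};
      }
      return {Kind::kStackSlot, (index - kNumArgRegisters) * 4};
    }

    Location GetNextLongLocation(uint32_t& gp_index) {
      uint32_t index = gp_index;
      gp_index += 2;  // a long consumes two argument positions
      if (index + 1 < kNumArgRegisters) {
        return {Kind::kRegisterPair, index};    // both halves in registers
      } else if (index + 1 == kNumArgRegisters) {
        return {Kind::kQuickParameter, index};  // low half register, high half stack
      }
      return {Kind::kDoubleStackSlot, (index - kNumArgRegisters) * 4};
    }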
|
D | parallel_move_test.cc |
     29  virtual void EmitMove(size_t index) {  [in EmitMove(), argument]
     30  MoveOperands* move = moves_.Get(index);  [in EmitMove()]
     41  virtual void EmitSwap(size_t index) {  [in EmitSwap(), argument]
     42  MoveOperands* move = moves_.Get(index);  [in EmitSwap()]
|
D | nodes.h |
    463  HUseListNode(T* user, size_t index, HUseListNode* tail)
    464  : user_(user), index_(index), tail_(tail) {}
    512  virtual void SetRawInputAt(size_t index, HInstruction* input) = 0;
    517  void AddUseAt(HInstruction* user, size_t index) {  [in AddUseAt(), argument]
    518  uses_ = new (block_->GetGraph()->GetArena()) HUseListNode<HInstruction>(user, index, uses_);  [in AddUseAt()]
    521  void AddEnvUseAt(HEnvironment* user, size_t index) {  [in AddEnvUseAt(), argument]
    523  user, index, env_uses_);  [in AddEnvUseAt()]
    526  void RemoveUser(HInstruction* user, size_t index);
    658  void SetRawEnvAt(size_t index, HInstruction* instruction) {  [in SetRawEnvAt(), argument]
    659  vregs_.Put(index, instruction);  [in SetRawEnvAt()]
    [all …]
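HUseListNode threads a singly linked list through every use of an instruction: AddUseAt() allocates a node recording the user and which of its operands points back here, prepending in O(1). A sketch with plain new in place of the arena allocator (an assumption of this sketch):

    #include <cstddef>

    template <typename T>
    struct UseListNode {
      T* user;
      size_t index;        // which input of `user` refers to this definition
      UseListNode* tail;   // rest of the use list

      UseListNode(T* user_in, size_t index_in, UseListNode* tail_in)
          : user(user_in), index(index_in), tail(tail_in) {}
    };

    struct Instruction {
      UseListNode<Instruction>* uses = nullptr;

      // Record that input `index` of `user` is this instruction: O(1) prepend.
      void AddUseAt(Instruction* user, size_t index) {
        uses = new UseListNode<Instruction>(user, index, uses);
      }
    };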
|
/art/test/121-modifiers/src/ |
D | Main.java |
    146  int index = name.indexOf("Field");  [in getFieldMask(), local]
    147  if (index > 0) {  [in getFieldMask()]
    148  String shortS = name.substring(0, index);  [in getFieldMask()]
    175  int index = name.indexOf("Method");  [in getMethodMask(), local]
    176  if (index > 0) {  [in getMethodMask()]
    177  String shortS = name.substring(0, index);  [in getMethodMask()]
|
/art/runtime/verifier/ |
D | dex_gc_map.h |
     56  uint16_t GetDexPc(size_t index) const {  [in GetDexPc(), argument]
     57  size_t entry_offset = index * EntryWidth();  [in GetDexPc()]
     66  const uint8_t* GetBitMap(size_t index) const {  [in GetBitMap(), argument]
     67  size_t entry_offset = index * EntryWidth();  [in GetBitMap()]
|
/art/runtime/mirror/ |
D | string-inl.h |
     57  inline uint16_t String::CharAt(int32_t index) {  [in CharAt(), argument]
     61  if (UNLIKELY(static_cast<uint32_t>(index) >= static_cast<uint32_t>(count_))) {  [in CharAt()]
     65  "length=%i; index=%i", count_, index);  [in CharAt()]
     68  return GetCharArray()->Get(index + GetOffset());  [in CharAt()]
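The CharAt() check folds two bounds tests into one: casting the signed index to uint32_t maps negatives to huge values, so a single unsigned comparison rejects both index < 0 and index >= count. A minimal sketch of just that trick:

    #include <cstdint>

    // True when `index` is not a valid position in a sequence of `count` items.
    inline bool IsIndexOutOfBounds(int32_t index, int32_t count) {
      return static_cast<uint32_t>(index) >= static_cast<uint32_t>(count);
    }

    // e.g. IsIndexOutOfBounds(-1, 10) and IsIndexOutOfBounds(10, 10) both hold,
    // while IsIndexOutOfBounds(0, 10) does not.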
|
D | array.h |
     77  void* GetRawData(size_t component_size, int32_t index)  [in GetRawData(), argument]
     80  + (index * component_size);  [in GetRawData()]
     84  const void* GetRawData(size_t component_size, int32_t index) const {  [in GetRawData(), argument]
     86  + (index * component_size);  [in GetRawData()]
     93  ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
     99  void ThrowArrayIndexOutOfBoundsException(int32_t index)
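GetRawData() is plain pointer arithmetic: element i sits at a fixed data offset past the object header, plus i * component_size. A sketch with an assumed 12-byte header (ART derives the real DataOffset() from the object layout and the component size):

    #include <cstddef>
    #include <cstdint>

    // Address of element `index` in a primitive array starting at `array_base`.
    inline void* GetRawData(void* array_base, size_t component_size, int32_t index) {
      const size_t kDataOffset = 12;  // assumed header: class ptr, monitor, length
      uintptr_t data = reinterpret_cast<uintptr_t>(array_base) + kDataOffset;
      return reinterpret_cast<void*>(data + static_cast<size_t>(index) * component_size);
    }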
|
/art/runtime/native/ |
D | java_lang_VMClassLoader.cc |
     72  …jstring VMClassLoader_getBootClassPathResource(JNIEnv* env, jclass, jstring javaName, jint index) {  [in VMClassLoader_getBootClassPathResource(), argument]
     79  if (index < 0 || size_t(index) >= path.size()) {  [in VMClassLoader_getBootClassPathResource()]
     82  const DexFile* dex_file = path[index];  [in VMClassLoader_getBootClassPathResource()]
|