/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_DEX_CACHE_H_
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_

#include "array.h"
#include "base/array_ref.h"
#include "base/atomic_pair.h"
#include "base/bit_utils.h"
#include "base/locks.h"
#include "base/macros.h"
#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
#include "gc_root.h"  // Note: must not use -inl here to avoid circular dependency.
#include "linear_alloc.h"
#include "object.h"
#include "object_array.h"

namespace art HIDDEN {

namespace linker {
class ImageWriter;
}  // namespace linker

class ArtField;
class ArtMethod;
struct DexCacheOffsets;
class DexFile;
union JValue;
class ReflectiveValueVisitor;
class Thread;

namespace mirror {

class CallSite;
class Class;
class ClassLoader;
class DexCache;
class MethodType;
class String;

template <typename T> struct alignas(8) DexCachePair {
  GcRoot<T> object;
  uint32_t index;
  // The array is initially [ {0,0}, {0,0}, {0,0} ... ].
  // We maintain the invariant that once a dex cache entry is populated,
  // the pointer is always non-0.
  // Any given entry would thus be:
  //   {non-0, non-0} OR {0,0}
  //
  // It's generally sufficient, then, to check whether the lookup index
  // matches the stored index (for a >0 lookup index), because if they match,
  // the pointer is also non-null.
  //
  // The 0th entry is a special case: its value is either {0,0} (initial
  // state) or {non-0, 0}, which indicates that a valid object is stored at
  // that index for a dex section id of 0.
  //
  // As an optimization, we want to avoid branching on the object pointer since
  // it's always non-null if the id branch succeeds (except for the 0th id).
  // Set the initial state for the 0th entry to be {0,1}, which is guaranteed
  // to fail the lookup id == stored id branch.
  DexCachePair(ObjPtr<T> object, uint32_t index);
  DexCachePair() : index(0) {}
  DexCachePair(const DexCachePair<T>&) = default;
  DexCachePair& operator=(const DexCachePair<T>&) = default;

  static void Initialize(std::atomic<DexCachePair<T>>* dex_cache);

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_);
};
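
// Illustrative sketch of the invariant above (not part of the runtime API):
// with the {0,1} sentinel in slot 0, a lookup for id 0 on an empty cache
// fails the index comparison alone (0 != 1), so GetObjectForIndex() never
// needs a separate null check on the pointer. kSize is a placeholder:
//
//   std::atomic<DexCachePair<String>> cache[kSize];
//   DexCachePair<String>::Initialize(cache);   // slot 0 becomes {0, 1}
//   DexCachePair<String> pair = cache[0].load(std::memory_order_acquire);
//   String* s = pair.GetObjectForIndex(0u);    // 0u != 1u -> nullptr
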
template <typename T> struct alignas(2 * __SIZEOF_POINTER__) NativeDexCachePair {
  T* object;
  size_t index;
  // This is similar to DexCachePair except that we're storing a native pointer
  // instead of a GC root. See DexCachePair for the details.
  NativeDexCachePair(T* object, uint32_t index)
      : object(object),
        index(index) {}
  NativeDexCachePair() : object(nullptr), index(0u) { }
  NativeDexCachePair(const NativeDexCachePair<T>&) = default;
  NativeDexCachePair& operator=(const NativeDexCachePair<T>&) = default;

  static void Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache);

  static uint32_t InvalidIndexForSlot(uint32_t slot) {
    // Since the cache size is a power of two, 0 will always map to slot 0.
    // Use 1 for slot 0 and 0 for all other slots.
    return (slot == 0) ? 1u : 0u;
  }

  T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (idx != index) {
      return nullptr;
    }
    DCHECK(object != nullptr);
    return object;
  }
};

template <typename T, size_t size> class NativeDexCachePairArray {
 public:
  NativeDexCachePairArray() {}

  T* Get(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    auto pair = GetNativePair(entries_, SlotIndex(index));
    return pair.GetObjectForIndex(index);
  }

  void Set(uint32_t index, T* value) {
    NativeDexCachePair<T> pair(value, index);
    SetNativePair(entries_, SlotIndex(index), pair);
  }

  NativeDexCachePair<T> GetNativePair(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetNativePair(entries_, SlotIndex(index));
  }

  void SetNativePair(uint32_t index, NativeDexCachePair<T> value) {
    SetNativePair(entries_, SlotIndex(index), value);
  }

 private:
  NativeDexCachePair<T> GetNativePair(std::atomic<NativeDexCachePair<T>>* pair_array, size_t idx) {
    auto* array = reinterpret_cast<AtomicPair<uintptr_t>*>(pair_array);
    AtomicPair<uintptr_t> value = AtomicPairLoadAcquire(&array[idx]);
    return NativeDexCachePair<T>(reinterpret_cast<T*>(value.val), value.key);
  }

  void SetNativePair(std::atomic<NativeDexCachePair<T>>* pair_array,
                     size_t idx,
                     NativeDexCachePair<T> pair) {
    auto* array = reinterpret_cast<AtomicPair<uintptr_t>*>(pair_array);
    AtomicPair<uintptr_t> v(pair.index, reinterpret_cast<size_t>(pair.object));
    AtomicPairStoreRelease(&array[idx], v);
  }

  uint32_t SlotIndex(uint32_t index) {
    return index % size;
  }

  std::atomic<NativeDexCachePair<T>> entries_[0];

  NativeDexCachePairArray(const NativeDexCachePairArray<T, size>&) = delete;
  NativeDexCachePairArray& operator=(const NativeDexCachePairArray<T, size>&) = delete;
};
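
// Illustrative sketch (not part of the runtime API): both words of the pair
// go through a single AtomicPair load/store, so a reader can never observe
// the index from one Set() combined with the object pointer from another.
// The variables below are placeholders:
//
//   NativeDexCachePairArray<ArtField, 1024u>* fields = ...;
//   fields->Set(5u, field);          // release-stores {5, field} as one unit
//   ArtField* f = fields->Get(5u);   // acquire-loads the pair, returns the
//                                    // pointer only if the indices match
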
template <typename T, size_t size> class DexCachePairArray {
 public:
  DexCachePairArray() {}

  T* Get(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetPair(index).GetObjectForIndex(index);
  }

  void Set(uint32_t index, T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
    SetPair(index, DexCachePair<T>(value, index));
  }

  DexCachePair<T> GetPair(uint32_t index) {
    return entries_[SlotIndex(index)].load(std::memory_order_acquire);
  }

  void SetPair(uint32_t index, DexCachePair<T> value) {
    entries_[SlotIndex(index)].store(value, std::memory_order_release);
  }

  void Clear(uint32_t index) {
    uint32_t slot = SlotIndex(index);
    // This is racy but should only be called from the transactional interpreter.
    if (entries_[slot].load(std::memory_order_relaxed).index == index) {
      DexCachePair<T> cleared(nullptr, DexCachePair<T>::InvalidIndexForSlot(slot));
      entries_[slot].store(cleared, std::memory_order_relaxed);
    }
  }

 private:
  uint32_t SlotIndex(uint32_t index) {
    return index % size;
  }

  std::atomic<DexCachePair<T>> entries_[0];

  DexCachePairArray(const DexCachePairArray<T, size>&) = delete;
  DexCachePairArray& operator=(const DexCachePairArray<T, size>&) = delete;
};
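
// Illustrative sketch (not part of the runtime API): the cache is
// direct-mapped, and since `size` is a power of two, SlotIndex() reduces to
// a mask. Distinct ids that alias the same slot simply evict each other.
// The variables below are placeholders:
//
//   DexCachePairArray<String, 1024u>* strings = ...;
//   strings->Set(3u, str_a);      // slot 3 holds {str_a, 3}
//   strings->Set(1027u, str_b);   // 1027 % 1024 == 3, slot 3 now {str_b, 1027}
//   strings->Get(3u);             // index mismatch (3 != 1027) -> nullptr
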
template <typename T> class GcRootArray {
 public:
  GcRootArray() {}

  T* Get(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_);

  Atomic<GcRoot<T>>* GetGcRoot(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    return &entries_[index];
  }

  // Only to be used in locations that don't need the atomic or will later load
  // and read atomically.
  GcRoot<T>* GetGcRootAddress(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) {
    static_assert(sizeof(GcRoot<T>) == sizeof(Atomic<GcRoot<T>>));
    return reinterpret_cast<GcRoot<T>*>(&entries_[index]);
  }

  void Set(uint32_t index, T* value) REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Atomic<GcRoot<T>> entries_[0];
};

template <typename T> class NativeArray {
 public:
  NativeArray() {}

  T* Get(uint32_t index) {
    return entries_[index].load(std::memory_order_relaxed);
  }

  T** GetPtrEntryPtrSize(uint32_t index, PointerSize ptr_size) {
    if (ptr_size == PointerSize::k64) {
      return reinterpret_cast<T**>(reinterpret_cast<uint64_t*>(entries_) + index);
    } else {
      return reinterpret_cast<T**>(reinterpret_cast<uint32_t*>(entries_) + index);
    }
  }

  void Set(uint32_t index, T* value) {
    entries_[index].store(value, std::memory_order_relaxed);
  }

 private:
  Atomic<T*> entries_[0];
};

// C++ mirror of java.lang.DexCache.
class MANAGED DexCache final : public Object {
 public:
  MIRROR_CLASS("Ljava/lang/DexCache;");

  // Size of java.lang.DexCache.class.
  static uint32_t ClassSize(PointerSize pointer_size);

  // Note: update the image version in image.cc if changing any of these cache sizes.

  // Size of type dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheTypeCacheSize),
                "Type dex cache size is not a power of 2.");

  // Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheStringCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
                "String dex cache size is not a power of 2.");

  // Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheFieldCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
                "Field dex cache size is not a power of 2.");

  // Size of method dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
  static constexpr size_t kDexCacheMethodCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodCacheSize),
                "Method dex cache size is not a power of 2.");

  // Size of method type dex cache. Needs to be a power of 2 for entrypoint assumptions
  // to hold.
  static constexpr size_t kDexCacheMethodTypeCacheSize = 1024;
  static_assert(IsPowerOfTwo(kDexCacheMethodTypeCacheSize),
                "MethodType dex cache size is not a power of 2.");

  // Size of an instance of java.lang.DexCache not including referenced values.
  static constexpr uint32_t InstanceSize() {
    return sizeof(DexCache);
  }

  // Visit gc-roots in DexCachePair array in [pairs_begin, pairs_end) range.
  template <typename Visitor>
  static void VisitDexCachePairRoots(Visitor& visitor,
                                     DexCachePair<Object>* pairs_begin,
                                     DexCachePair<Object>* pairs_end)
      REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT void Initialize(const DexFile* dex_file, ObjPtr<ClassLoader> class_loader)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::dex_lock_);

  // Zero all array references.
  // WARNING: This does not free the memory since it is in LinearAlloc.
  EXPORT void ResetNativeArrays() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjPtr<String> GetLocation() REQUIRES_SHARED(Locks::mutator_lock_);

  String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear a string for a string_idx, used to undo string intern transactions to make sure
  // the string isn't kept live.
  void ClearString(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  Class* GetResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field)
      REQUIRES_SHARED(Locks::mutator_lock_);

  MethodType* GetResolvedMethodType(dex::ProtoIndex proto_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetResolvedMethodType(dex::ProtoIndex proto_idx, MethodType* resolved)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Clear a method type for proto_idx, used to undo method type resolution
  // in aborted transactions to make sure the method type isn't kept live.
  void ClearMethodType(dex::ProtoIndex proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  CallSite* GetResolvedCallSite(uint32_t call_site_idx) REQUIRES_SHARED(Locks::mutator_lock_);

  // Attempts to bind |call_site_idx| to the call site |resolved|. The
  // caller must use the return value in place of |resolved|. This is
  // because multiple threads can invoke the bootstrap method, each
  // producing a call site, but all method handle invocations on the
  // call site must use one commonly agreed value.
  ObjPtr<CallSite> SetResolvedCallSite(uint32_t call_site_idx, ObjPtr<CallSite> resolved)
      REQUIRES_SHARED(Locks::mutator_lock_) WARN_UNUSED;
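
  // Illustrative usage of the contract above (a sketch, not runtime code):
  // callers must continue with the returned call site, since another thread
  // may have won the race to publish one first:
  //
  //   ObjPtr<CallSite> candidate = /* result of the bootstrap method */;
  //   ObjPtr<CallSite> winner =
  //       dex_cache->SetResolvedCallSite(call_site_idx, candidate);
  //   // Invoke through `winner`; `candidate` may have lost the race.
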
  const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
  }

  void SetDexFile(const DexFile* dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
  }

  EXPORT void SetLocation(ObjPtr<String> location) REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) REQUIRES(Locks::mutator_lock_);

  void SetClassLoader(ObjPtr<ClassLoader> class_loader) REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT ObjPtr<ClassLoader> GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);

  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            typename Visitor>
  void VisitNativeRoots(const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);

  // Sets to null the dex cache array fields that were allocated with the
  // startup allocator.
  void UnlinkStartupCaches() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns whether we should allocate a full array given the number of elements.
  // Note: update the image version in image.cc if changing this method.
  static bool ShouldAllocateFullArray(size_t number_of_elements, size_t dex_cache_size) {
    return number_of_elements <= dex_cache_size;
  }
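
  // Worked example with the constants above (illustrative): a dex file with
  // 600 string ids satisfies ShouldAllocateFullArray(600, 1024), so it gets a
  // full 600-entry array up front. One with 40,000 string ids falls back to
  // the fixed 1024-entry pair cache, unless the runtime opts into full arrays
  // at startup (see ShouldAllocateFullArrayAtStartup()).
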
  // NOLINTBEGIN(bugprone-macro-parentheses)
#define DEFINE_ARRAY(name, array_kind, getter_setter, type, ids, alloc_kind) \
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
  array_kind* Get##getter_setter() \
      ALWAYS_INLINE \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    return GetFieldPtr<array_kind*, kVerifyFlags>(getter_setter##Offset()); \
  } \
  void Set##getter_setter(array_kind* value) \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    SetFieldPtr<false>(getter_setter##Offset(), value); \
  } \
  static constexpr MemberOffset getter_setter##Offset() { \
    return OFFSET_OF_OBJECT_MEMBER(DexCache, name); \
  } \
  array_kind* Allocate##getter_setter(bool startup = false) \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    return reinterpret_cast<array_kind*>(AllocArray<type>( \
        getter_setter##Offset(), GetDexFile()->ids(), alloc_kind, startup)); \
  } \
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
  size_t Num##getter_setter() REQUIRES_SHARED(Locks::mutator_lock_) { \
    return Get##getter_setter() == nullptr ? 0u : GetDexFile()->ids(); \
  }

#define DEFINE_PAIR_ARRAY(name, pair_kind, getter_setter, type, size, alloc_kind) \
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
  pair_kind##Array<type, size>* Get##getter_setter() \
      ALWAYS_INLINE \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    return GetFieldPtr<pair_kind##Array<type, size>*, kVerifyFlags>(getter_setter##Offset()); \
  } \
  void Set##getter_setter(pair_kind##Array<type, size>* value) \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    SetFieldPtr<false>(getter_setter##Offset(), value); \
  } \
  static constexpr MemberOffset getter_setter##Offset() { \
    return OFFSET_OF_OBJECT_MEMBER(DexCache, name); \
  } \
  pair_kind##Array<type, size>* Allocate##getter_setter() \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    return reinterpret_cast<pair_kind##Array<type, size>*>( \
        AllocArray<std::atomic<pair_kind<type>>>( \
            getter_setter##Offset(), size, alloc_kind)); \
  } \
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> \
  size_t Num##getter_setter() REQUIRES_SHARED(Locks::mutator_lock_) { \
    return Get##getter_setter() == nullptr ? 0u : size; \
  }

#define DEFINE_DUAL_CACHE( \
    name, pair_kind, getter_setter, type, pair_size, alloc_pair_kind, \
    array_kind, component_type, ids, alloc_array_kind) \
  DEFINE_PAIR_ARRAY( \
      name, pair_kind, getter_setter, type, pair_size, alloc_pair_kind) \
  DEFINE_ARRAY( \
      name##array_, array_kind, getter_setter##Array, component_type, ids, alloc_array_kind) \
  type* Get##getter_setter##Entry(uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_) { \
    DCHECK_LT(index, GetDexFile()->ids()); \
    auto* array = Get##getter_setter##Array(); \
    if (array != nullptr) { \
      return array->Get(index); \
    } \
    auto* pairs = Get##getter_setter(); \
    if (pairs != nullptr) { \
      return pairs->Get(index); \
    } \
    return nullptr; \
  } \
  void Set##getter_setter##Entry(uint32_t index, type* resolved) \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    DCHECK_LT(index, GetDexFile()->ids()); \
    auto* array = Get##getter_setter##Array(); \
    if (array != nullptr) { \
      array->Set(index, resolved); \
    } else { \
      auto* pairs = Get##getter_setter(); \
      if (pairs == nullptr) { \
        bool should_allocate_full_array = ShouldAllocateFullArray(GetDexFile()->ids(), pair_size); \
        if (ShouldAllocateFullArrayAtStartup() || should_allocate_full_array) { \
          array = Allocate##getter_setter##Array(!should_allocate_full_array); \
          array->Set(index, resolved); \
        } else { \
          pairs = Allocate##getter_setter(); \
          pairs->Set(index, resolved); \
        } \
      } else { \
        pairs->Set(index, resolved); \
      } \
    } \
  } \
  void Unlink##getter_setter##ArrayIfStartup() \
      REQUIRES_SHARED(Locks::mutator_lock_) { \
    if (!ShouldAllocateFullArray(GetDexFile()->ids(), pair_size)) { \
      Set##getter_setter##Array(nullptr); \
    } \
  }
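
  // Illustrative expansion (an approximation; the macro invocations below are
  // authoritative): for strings_, DEFINE_DUAL_CACHE yields accessors that
  // prefer the full array and fall back to the fixed-size pair cache:
  //
  //   mirror::String* s = dex_cache->GetStringsEntry(string_idx);
  //   // 1. If the full strings array is allocated, a direct array load.
  //   // 2. Else, if the pair cache is allocated, a probe that may miss.
  //   // 3. Else nullptr; SetStringsEntry() allocates one of the two lazily.
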
  DEFINE_ARRAY(resolved_call_sites_,
               GcRootArray<CallSite>,
               ResolvedCallSites,
               GcRoot<CallSite>,
               NumCallSiteIds,
               LinearAllocKind::kGCRootArray)

  DEFINE_DUAL_CACHE(resolved_fields_,
                    NativeDexCachePair,
                    ResolvedFields,
                    ArtField,
                    kDexCacheFieldCacheSize,
                    LinearAllocKind::kNoGCRoots,
                    NativeArray<ArtField>,
                    ArtField,
                    NumFieldIds,
                    LinearAllocKind::kNoGCRoots)

  DEFINE_DUAL_CACHE(resolved_method_types_,
                    DexCachePair,
                    ResolvedMethodTypes,
                    mirror::MethodType,
                    kDexCacheMethodTypeCacheSize,
                    LinearAllocKind::kDexCacheArray,
                    GcRootArray<mirror::MethodType>,
                    GcRoot<mirror::MethodType>,
                    NumProtoIds,
                    LinearAllocKind::kGCRootArray)

  DEFINE_DUAL_CACHE(resolved_methods_,
                    NativeDexCachePair,
                    ResolvedMethods,
                    ArtMethod,
                    kDexCacheMethodCacheSize,
                    LinearAllocKind::kNoGCRoots,
                    NativeArray<ArtMethod>,
                    ArtMethod,
                    NumMethodIds,
                    LinearAllocKind::kNoGCRoots)

  DEFINE_DUAL_CACHE(resolved_types_,
                    DexCachePair,
                    ResolvedTypes,
                    mirror::Class,
                    kDexCacheTypeCacheSize,
                    LinearAllocKind::kDexCacheArray,
                    GcRootArray<mirror::Class>,
                    GcRoot<mirror::Class>,
                    NumTypeIds,
                    LinearAllocKind::kGCRootArray)

  DEFINE_DUAL_CACHE(strings_,
                    DexCachePair,
                    Strings,
                    mirror::String,
                    kDexCacheStringCacheSize,
                    LinearAllocKind::kDexCacheArray,
                    GcRootArray<mirror::String>,
                    GcRoot<mirror::String>,
                    NumStringIds,
                    LinearAllocKind::kGCRootArray)

  // NOLINTEND(bugprone-macro-parentheses)

 private:
  // Allocate new array in linear alloc and save it in the given fields.
  template<typename T>
  T* AllocArray(MemberOffset obj_offset, size_t num, LinearAllocKind kind, bool startup = false)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit instance fields of the dex cache as well as its associated arrays.
  template <bool kVisitNativeRoots,
            VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
            typename Visitor>
  void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);

  // Returns whether we should allocate a full array given the current state of
  // the runtime and oat files.
  bool ShouldAllocateFullArrayAtStartup() REQUIRES_SHARED(Locks::mutator_lock_);

  HeapReference<ClassLoader> class_loader_;
  HeapReference<String> location_;

  uint64_t dex_file_;                     // const DexFile*

  uint64_t resolved_call_sites_;          // Array of call sites
  uint64_t resolved_fields_;              // NativeDexCachePairArray holding ArtField's
  uint64_t resolved_fields_array_;        // Array of ArtField's.
  uint64_t resolved_method_types_;        // DexCachePairArray holding mirror::MethodType's
  uint64_t resolved_method_types_array_;  // Array of mirror::MethodType's
  uint64_t resolved_methods_;             // NativeDexCachePairArray holding ArtMethod's
  uint64_t resolved_methods_array_;       // Array of ArtMethod's
  uint64_t resolved_types_;               // DexCachePairArray holding mirror::Class's
  uint64_t resolved_types_array_;         // Array of resolved types.
  uint64_t strings_;                      // DexCachePairArray holding mirror::String's
  uint64_t strings_array_;                // Array of String's.

  friend struct art::DexCacheOffsets;  // for verifying offset information
  friend class linker::ImageWriter;
  friend class Object;  // For VisitReferences
  DISALLOW_IMPLICIT_CONSTRUCTORS(DexCache);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_DEX_CACHE_H_