/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include <iosfwd>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "base/arena_containers.h"
#include "base/array_ref.h"
#include "base/atomic.h"
#include "base/histogram.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "base/mutex.h"
#include "base/safe_map.h"
#include "jit_memory_region.h"

namespace art {

class ArtMethod;
template<class T> class Handle;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
class Thread;

namespace gc {
namespace accounting {
template<size_t kAlignment> class MemoryRangeBitmap;
}  // namespace accounting
}  // namespace gc

namespace mirror {
class Class;
class Object;
template<class T> class ObjectArray;
}  // namespace mirror

namespace jit {

class MarkCodeClosure;

// Type of bitmap used for tracking live functions in the JIT code cache for the purposes
// of garbage collecting code.
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAccountingBytes>;

// The state of profile-based compilation in the zygote.
// - kInProgress:      JIT compilation is happening.
// - kDone:            JIT compilation is finished, and the zygote is preparing to notify
//                     the other processes.
// - kNotifiedOk:      the zygote has notified the other processes, which can start
//                     sharing the boot image method mappings.
// - kNotifiedFailure: the zygote has notified the other processes, but they
//                     cannot share the boot image method mappings due to
//                     unexpected errors.
enum class ZygoteCompilationState : uint8_t {
  kInProgress = 0,
  kDone = 1,
  kNotifiedOk = 2,
  kNotifiedFailure = 3,
};
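
// Illustrative lifecycle of this state (a sketch of the intended transitions, not
// something enforced by this header): kInProgress -> kDone -> kNotifiedOk or
// kNotifiedFailure. The zygote advances the state through
// ZygoteMap::SetCompilationState() below, and forked processes observe it through
// ZygoteMap::IsCompilationNotified() and ZygoteMap::CanMapBootImageMethods().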

// Class abstraction over a map of ArtMethod -> compiled code, where the
// ArtMethods are compiled by the zygote, and the map acts as a communication
// channel between the zygote and the other processes.
// For the zygote process, this map is the only map in which it places compiled
// code; JitCodeCache::method_code_map_ stays empty.
//
// This map is writable only by the zygote, and readable by all children.
class ZygoteMap {
 public:
  struct Entry {
    ArtMethod* method;
    // Note we currently only allocate code in the low 4GB, so we could just reserve 4 bytes
    // for the code pointer. For simplicity, and in case we move to 64-bit
    // addresses for code, just keep it void* for now.
    const void* code_ptr;
  };

  explicit ZygoteMap(JitMemoryRegion* region)
      : map_(), region_(region), compilation_state_(nullptr) {}

  // Initialize the data structure so it can hold `number_of_methods` mappings.
  // Note that the map is fixed size and never grows.
  void Initialize(uint32_t number_of_methods) REQUIRES(!Locks::jit_lock_);

  // Add the mapping method -> code.
  void Put(const void* code, ArtMethod* method) REQUIRES(Locks::jit_lock_);

  // Return the code pointer for the given method. If `pc` is not zero, check that
  // `pc` falls into that code range. Return null otherwise.
  const void* GetCodeFor(ArtMethod* method, uintptr_t pc = 0) const;

  // Return whether the map has associated code for the given method.
  bool ContainsMethod(ArtMethod* method) const {
    return GetCodeFor(method) != nullptr;
  }

  void SetCompilationState(ZygoteCompilationState state) {
    region_->WriteData(compilation_state_, state);
  }

  bool IsCompilationDoneButNotNotified() const {
    return compilation_state_ != nullptr && *compilation_state_ == ZygoteCompilationState::kDone;
  }

  bool IsCompilationNotified() const {
    return compilation_state_ != nullptr && *compilation_state_ > ZygoteCompilationState::kDone;
  }

  bool CanMapBootImageMethods() const {
    return compilation_state_ != nullptr &&
           *compilation_state_ == ZygoteCompilationState::kNotifiedOk;
  }

  ArrayRef<const Entry>::const_iterator cbegin() const {
    return map_.cbegin();
  }
  ArrayRef<const Entry>::iterator begin() {
    return map_.begin();
  }
  ArrayRef<const Entry>::const_iterator cend() const {
    return map_.cend();
  }
  ArrayRef<const Entry>::iterator end() {
    return map_.end();
  }

 private:
  // The map allocated with `region_`.
  ArrayRef<const Entry> map_;

  // The region in which the map is allocated.
  JitMemoryRegion* const region_;

  // The current state of compilation in the zygote. Starts with kInProgress,
  // and should end with kNotifiedOk or kNotifiedFailure.
  const ZygoteCompilationState* compilation_state_;

  DISALLOW_COPY_AND_ASSIGN(ZygoteMap);
};
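
// Illustrative read-side use of ZygoteMap from a forked process (a sketch only, not
// part of the API contract; `zygote_map` stands for JitCodeCache::GetZygoteMap()):
//
//   if (zygote_map->CanMapBootImageMethods()) {
//     // Null when the zygote recorded no code for `method`.
//     const void* code = zygote_map->GetCodeFor(method);
//   }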

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Set the default to a very low amount for debug builds to stress the code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB.
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache. On failure, an error message is passed in the out arg
  // `error_msg`.
  static JitCodeCache* Create(bool used_only_for_profile_data,
                              bool rwx_memory_allowed,
                              bool is_zygote,
                              std::string* error_msg);
  ~JitCodeCache();

  bool NotifyCompilationOf(ArtMethod* method,
                           Thread* self,
                           bool osr,
                           bool prejit,
                           bool baseline,
                           JitMemoryRegion* region)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Notify the code cache that the compiler wants to use the
  // profiling info of `method` to drive optimizations,
  // and therefore ensure the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Return true if the code cache contains this pc in the private region (i.e. not from zygote).
  bool PrivateRegionContainsPc(const void* pc) const;

  // Returns true if either the method's entrypoint is JIT compiled code or it is the
  // instrumentation entrypoint and we can jump to jit code for this method. For testing use only.
  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  // Allocate a region for both code and data in the JIT code cache.
  // The reserved memory is left completely uninitialized.
  bool Reserve(Thread* self,
               JitMemoryRegion* region,
               size_t code_size,
               size_t stack_map_size,
               size_t number_of_roots,
               ArtMethod* method,
               /*out*/ArrayRef<const uint8_t>* reserved_code,
               /*out*/ArrayRef<const uint8_t>* reserved_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Initialize code and data of previously allocated memory.
  //
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  bool Commit(Thread* self,
              JitMemoryRegion* region,
              ArtMethod* method,
              ArrayRef<const uint8_t> reserved_code,  // Uninitialized destination.
              ArrayRef<const uint8_t> code,           // Compiler output (source).
              ArrayRef<const uint8_t> reserved_data,  // Uninitialized destination.
              const std::vector<Handle<mirror::Object>>& roots,
              ArrayRef<const uint8_t> stack_map,      // Compiler output (source).
              bool osr,
              bool has_should_deoptimize_flag,
              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);

  // Free the previously allocated memory regions.
  void Free(Thread* self, JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::jit_lock_);
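
  // Illustrative allocation flow (a sketch only, with placeholder local names, not
  // the authoritative caller): compiled output is installed in two phases, Reserve()
  // then Commit(), and the reservation is released with Free() if the commit cannot
  // be performed:
  //
  //   ArrayRef<const uint8_t> reserved_code;
  //   ArrayRef<const uint8_t> reserved_data;
  //   if (Reserve(self, region, code.size(), stack_map.size(), roots.size(), method,
  //               &reserved_code, &reserved_data) &&
  //       !Commit(self, region, method, reserved_code, code, reserved_data, roots,
  //               stack_map, osr, has_should_deoptimize_flag, cha_single_impl_list)) {
  //     Free(self, region, reserved_code.data(), reserved_data.data());
  //   }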

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT compiled code associated with it.
  // Return null if 'pc' is not in the code cache. 'method' is passed as a
  // sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes the method from the cache for testing purposes. The caller
  // must ensure that all threads are suspended and that the method is
  // not on any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
  // collect and retry if the first allocation is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return private_region_.OwnsSpace(mspace) || shared_region_.OwnsSpace(mspace);
  }

  void* MoreCore(const void* mspace, intptr_t increment);

  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void InvalidateAllCompiledCode()
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!Locks::jit_lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!Locks::jit_lock_);

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
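
  // Illustrative lookup sketch (placeholder variables, not the runtime's actual
  // stack walk code): given a return pc observed on a thread's stack, the owning
  // JIT code can be located with ContainsPc() and LookupMethodHeader():
  //
  //   if (ContainsPc(reinterpret_cast<const void*>(pc))) {
  //     OatQuickMethodHeader* header = LookupMethodHeader(pc, method);
  //     // `header` is null if `pc` does not belong to code compiled for `method`.
  //   }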

  // The GC needs to disallow the reading of inline caches when it processes them,
  // to avoid having a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
  void DisallowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!Locks::jit_lock_);

  // Notify the code cache that the method at the pointer 'old_method' is being moved to the
  // pointer 'new_method' since it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code.
  void SetGarbageCollectCode(bool value) REQUIRES(!Locks::jit_lock_);

  bool GetGarbageCollectCode() REQUIRES(!Locks::jit_lock_);

  // Unsafe variant for debug checks.
  bool GetGarbageCollectCodeUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
    return garbage_collect_code_;
  }
  ZygoteMap* GetZygoteMap() {
    return &zygote_map_;
  }

  // If JIT GC has been disabled (and instrumentation has been enabled) this will return the
  // jit-compiled entrypoint for this method. Otherwise it will return null.
  const void* FindCompiledCodeForInstrumentation(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Fetch the code of a method that was JIT compiled, but whose entrypoint the JIT
  // could not update due to the resolution trampoline.
  const void* GetSavedEntryPointOfPreCompiledMethod(ArtMethod* method)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void PostForkChildAction(bool is_system_server, bool is_zygote);

  // Clear the entrypoints of JIT compiled methods that belong in the zygote space.
  // This is used for removing non-debuggable JIT code at the point we realize the runtime
  // is debuggable. Also clear the Precompiled flag from all methods so the non-debuggable code
  // doesn't come back.
  void TransitionToDebuggable() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);

  JitMemoryRegion* GetCurrentRegion();
  bool IsSharedRegion(const JitMemoryRegion& region) const { return &region == &shared_region_; }
  bool CanAllocateProfilingInfo() {
    // If we don't have a private region, we cannot allocate a profiling info.
    // A shared region does not, in general, support GC objects, which a profiling
    // info can reference.
    JitMemoryRegion* region = GetCurrentRegion();
    return region->IsValid() && !IsSharedRegion(*region);
  }

  // Return whether the given `ptr` is in the zygote executable memory space.
  bool IsInZygoteExecSpace(const void* ptr) const {
    return shared_region_.IsInExecSpace(ptr);
  }

 private:
  JitCodeCache();

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
  // The non-mutator lock version should be used if possible. This method will release then
  // re-acquire the mutator lock.
  void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
      REQUIRES(Locks::jit_lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish.
  // Return whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(Locks::jit_lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES(!Locks::cha_lock_);

  // Removes the method from the cache. The caller must ensure that all threads
  // are suspended and that the method is not on any thread's stack.
  bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
      REQUIRES(Locks::jit_lock_)
      REQUIRES(Locks::mutator_lock_);

  // Free code and data allocations for `code_ptr`.
  void FreeCodeAndData(const void* code_ptr, bool free_debug_info = true)
      REQUIRES(Locks::jit_lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!Locks::jit_lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!Locks::jit_lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(Locks::jit_lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(Locks::jit_lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  bool IsInZygoteDataSpace(const void* ptr) const {
    return shared_region_.IsInDataSpace(ptr);
  }

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!Locks::jit_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  class JniStubKey;
  class JniStubData;

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(Locks::jit_lock_);

  // -------------- JIT memory regions ------------------------------------- //

  // Shared region, inherited from the zygote.
  JitMemoryRegion shared_region_;

  // Process's own region.
  JitMemoryRegion private_region_;

  // -------------- Global JIT maps --------------------------------------- //

  // Holds compiled code associated with the shorty for a JNI stub.
  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(Locks::jit_lock_);

  // Holds compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(Locks::jit_lock_);

  // Holds compiled code associated with the ArtMethod. Used when pre-jitting
  // methods whose entrypoints have the resolution stub.
  SafeMap<ArtMethod*, const void*> saved_compiled_methods_map_ GUARDED_BY(Locks::jit_lock_);

  // Holds OSR compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(Locks::jit_lock_);

  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);

  // Methods that the zygote has compiled and can be shared across processes
  // forked from the zygote.
  ZygoteMap zygote_map_;

  // -------------- JIT GC related data structures ----------------------- //

  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(Locks::jit_lock_);

  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);

  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(Locks::jit_lock_);

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_ GUARDED_BY(Locks::jit_lock_);

  // ---------------- JIT statistics -------------------------------------- //

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(Locks::jit_lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(Locks::jit_lock_);

  // Histograms for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(Locks::jit_lock_);

  // Histograms for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(Locks::jit_lock_);

  // Histograms for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(Locks::jit_lock_);

  friend class art::JitJniStubTestHelper;
  friend class ScopedCodeCacheWrite;
  friend class MarkCodeClosure;

  DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_