/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_PROFILING_INFO_H_
#define ART_RUNTIME_JIT_PROFILING_INFO_H_

#include <cstdint>
#include <limits>
#include <vector>

#include "base/macros.h"
#include "base/value_object.h"
#include "gc_root.h"
#include "interpreter/mterp/nterp.h"
#include "offsets.h"

namespace art HIDDEN {

class ArtMethod;
class CompilerOptions;
class ProfilingInfo;

namespace jit {
class Jit;
class JitCodeCache;
}  // namespace jit

namespace mirror {
class Class;
}  // namespace mirror

// Structure to store the classes seen at runtime for a specific instruction.
// Once the classes_ array is full, we consider the INVOKE to be megamorphic.
class InlineCache {
 public:
  // This is hard coded in the assembly stub art_quick_update_inline_cache.
  static constexpr uint8_t kIndividualCacheSize = 5;

  static constexpr MemberOffset ClassesOffset() {
    return MemberOffset(OFFSETOF_MEMBER(InlineCache, classes_));
  }

  // Encode the list of `dex_pcs` to fit into a uint32_t.
  static uint32_t EncodeDexPc(ArtMethod* method,
                              const std::vector<uint32_t>& dex_pcs,
                              uint32_t inline_max_code_units)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return the maximum inlining depth that we support to encode a list of dex pcs.
  static uint32_t MaxDexPcEncodingDepth(ArtMethod* method,
                                        uint32_t inline_max_code_units)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // The dex pc of the INVOKE instruction being profiled.
  uint32_t dex_pc_;
  // Classes seen at this call site; unused slots hold null roots.
  GcRoot<mirror::Class> classes_[kIndividualCacheSize];

  friend class jit::JitCodeCache;
  friend class ProfilingInfo;

  DISALLOW_COPY_AND_ASSIGN(InlineCache);
};

class BranchCache {
 public:
  static constexpr MemberOffset FalseOffset() {
    return MemberOffset(OFFSETOF_MEMBER(BranchCache, false_));
  }

  static constexpr MemberOffset TrueOffset() {
    return MemberOffset(OFFSETOF_MEMBER(BranchCache, true_));
  }

  uint32_t GetExecutionCount() const {
    return true_ + false_;
  }

  uint16_t GetTrue() const {
    return true_;
  }

  uint16_t GetFalse() const {
    return false_;
  }

 private:
  // The dex pc of the branch instruction being profiled.
  uint32_t dex_pc_;
  // Number of times the branch was not taken / taken.
  uint16_t false_;
  uint16_t true_;

  friend class ProfilingInfo;

  DISALLOW_COPY_AND_ASSIGN(BranchCache);
};
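
// Illustrative sketch (not part of the runtime API): estimating the bias of a
// profiled branch from its BranchCache, using only the public accessors above.
// The helper name `EstimateTakenFraction` is hypothetical.
//
//   double EstimateTakenFraction(const BranchCache& cache) {
//     uint32_t total = cache.GetExecutionCount();
//     return total == 0u ? 0.0 : static_cast<double>(cache.GetTrue()) / total;
//   }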
/**
 * Profiling info for a method, created and filled by the interpreter once the
 * method is warm, and used by the compiler to drive optimizations.
 */
class ProfilingInfo {
 public:
  // Create a ProfilingInfo for 'method'.
  EXPORT static ProfilingInfo* Create(Thread* self,
                                      ArtMethod* method,
                                      const std::vector<uint32_t>& inline_cache_entries)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Add information from an executed INVOKE instruction to the profile.
  void AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls)
      // Method should not be interruptible, as it manipulates the ProfilingInfo
      // which can be concurrently collected.
      REQUIRES(Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetMethod() const {
    return method_;
  }

  InlineCache* GetInlineCache(uint32_t dex_pc);
  BranchCache* GetBranchCache(uint32_t dex_pc);

  // The inline cache array is laid out directly after this object in memory.
  InlineCache* GetInlineCaches() {
    return reinterpret_cast<InlineCache*>(
        reinterpret_cast<uintptr_t>(this) + sizeof(ProfilingInfo));
  }

  // The branch cache array follows the inline cache array.
  BranchCache* GetBranchCaches() {
    return reinterpret_cast<BranchCache*>(
        reinterpret_cast<uintptr_t>(this) + sizeof(ProfilingInfo) +
        number_of_inline_caches_ * sizeof(InlineCache));
  }

  static size_t ComputeSize(uint32_t number_of_inline_caches, uint32_t number_of_branch_caches) {
    return sizeof(ProfilingInfo) +
        number_of_inline_caches * sizeof(InlineCache) +
        number_of_branch_caches * sizeof(BranchCache);
  }

  // Increments the number of times this method is currently being inlined.
  // Returns whether it was successful, that is whether it could increment
  // without overflowing.
  bool IncrementInlineUse() {
    if (current_inline_uses_ == std::numeric_limits<uint16_t>::max()) {
      return false;
    }
    current_inline_uses_++;
    return true;
  }

  void DecrementInlineUse() {
    DCHECK_GT(current_inline_uses_, 0);
    current_inline_uses_--;
  }

  bool IsInUseByCompiler() const {
    return current_inline_uses_ > 0;
  }

  static constexpr MemberOffset BaselineHotnessCountOffset() {
    return MemberOffset(OFFSETOF_MEMBER(ProfilingInfo, baseline_hotness_count_));
  }

  uint16_t GetBaselineHotnessCount() const {
    return baseline_hotness_count_;
  }

  static uint16_t GetOptimizeThreshold();

 private:
  ProfilingInfo(ArtMethod* method,
                const std::vector<uint32_t>& inline_cache_entries,
                const std::vector<uint32_t>& branch_cache_entries);

  // Hotness count for methods compiled with the JIT baseline compiler. Once
  // a threshold is hit (currently the maximum value of uint16_t), we will
  // JIT compile the method with the optimizing compiler.
  uint16_t baseline_hotness_count_;

  // Method this profiling info is for.
  // Not 'const' as JVMTI introduces obsolete methods that we implement by creating new ArtMethods.
  // See JitCodeCache::MoveObsoleteMethod.
  ArtMethod* method_;

  // Number of invokes we are profiling in the ArtMethod.
  const uint32_t number_of_inline_caches_;

  // Number of branches we are profiling in the ArtMethod.
  const uint32_t number_of_branch_caches_;

  // When the compiler inlines the method associated with this ProfilingInfo,
  // it updates this counter so that the GC does not try to clear the inline caches.
  uint16_t current_inline_uses_;

  // Memory following the object:
  // - Dynamically allocated array of `InlineCache` of size `number_of_inline_caches_`.
  // - Dynamically allocated array of `BranchCache` of size `number_of_branch_caches_`.

  friend class jit::JitCodeCache;

  DISALLOW_COPY_AND_ASSIGN(ProfilingInfo);
};
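
// Memory layout sketch for the variable-sized allocation described above, for
// a hypothetical method profiled with 2 invokes and 1 branch:
//
//   ComputeSize(2, 1) == sizeof(ProfilingInfo)
//                      + 2 * sizeof(InlineCache)
//                      + 1 * sizeof(BranchCache)
//
//   +------------------------+ <- ProfilingInfo* returned by Create()
//   | ProfilingInfo          |
//   +------------------------+ <- GetInlineCaches()
//   | InlineCache[0]         |
//   | InlineCache[1]         |
//   +------------------------+ <- GetBranchCaches()
//   | BranchCache[0]         |
//   +------------------------+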
// Scoped object to access a method's ProfilingInfo while keeping it marked as
// in use by the compiler, so that it is not concurrently destroyed.
class ScopedProfilingInfoUse : public ValueObject {
 public:
  ScopedProfilingInfoUse(jit::Jit* jit, ArtMethod* method, Thread* self);
  ~ScopedProfilingInfoUse();

  ProfilingInfo* GetProfilingInfo() const { return profiling_info_; }

 private:
  jit::Jit* const jit_;
  ArtMethod* const method_;
  Thread* const self_;
  ProfilingInfo* const profiling_info_;

  DISALLOW_COPY_AND_ASSIGN(ScopedProfilingInfoUse);
};

}  // namespace art

#endif  // ART_RUNTIME_JIT_PROFILING_INFO_H_