/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MONITOR_POOL_H_
#define ART_RUNTIME_MONITOR_POOL_H_

#include "monitor.h"

#include "base/allocator.h"
#include "base/macros.h"
#ifdef __LP64__
#include <stdint.h>
#include "base/atomic.h"
#include "runtime.h"
#else
#include "base/stl_util.h"  // STLDeleteElements
#endif

namespace art HIDDEN {

// Abstraction to keep monitors small enough to fit in a lock word (32 bits). On 32-bit systems
// the monitor id loses the alignment bits of the Monitor*.
class MonitorPool {
 public:
  static MonitorPool* Create() {
#ifndef __LP64__
    return nullptr;
#else
    return new MonitorPool();
#endif
  }

  static Monitor* CreateMonitor(Thread* self,
                                Thread* owner,
                                ObjPtr<mirror::Object> obj,
                                int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_) {
#ifndef __LP64__
    Monitor* mon = new Monitor(self, owner, obj, hash_code);
    DCHECK_ALIGNED(mon, LockWord::kMonitorIdAlignment);
    return mon;
#else
    return GetMonitorPool()->CreateMonitorInPool(self, owner, obj, hash_code);
#endif
  }

  static void ReleaseMonitor(Thread* self, Monitor* monitor) {
#ifndef __LP64__
    UNUSED(self);
    delete monitor;
#else
    GetMonitorPool()->ReleaseMonitorToPool(self, monitor);
#endif
  }

  static void ReleaseMonitors(Thread* self, MonitorList::Monitors* monitors) {
#ifndef __LP64__
    UNUSED(self);
    STLDeleteElements(monitors);
#else
    GetMonitorPool()->ReleaseMonitorsToPool(self, monitors);
#endif
  }

  static Monitor* MonitorFromMonitorId(MonitorId mon_id) {
#ifndef __LP64__
    return reinterpret_cast<Monitor*>(mon_id << LockWord::kMonitorIdAlignmentShift);
#else
    return GetMonitorPool()->LookupMonitor(mon_id);
#endif
  }

  static MonitorId MonitorIdFromMonitor(Monitor* mon) {
#ifndef __LP64__
    return reinterpret_cast<MonitorId>(mon) >> LockWord::kMonitorIdAlignmentShift;
#else
    return mon->GetMonitorId();
#endif
  }

  static MonitorId ComputeMonitorId(Monitor* mon, Thread* self) {
#ifndef __LP64__
    UNUSED(self);
    return MonitorIdFromMonitor(mon);
#else
    return GetMonitorPool()->ComputeMonitorIdInPool(mon, self);
#endif
  }

  static MonitorPool* GetMonitorPool() {
#ifndef __LP64__
    return nullptr;
#else
    return Runtime::Current()->GetMonitorPool();
#endif
  }

  ~MonitorPool() {
#ifdef __LP64__
    FreeInternal();
#endif
  }
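
  // Illustrative usage sketch (not part of the original header): how monitor inflation code
  // might drive this API; `self`, `owner`, `obj`, and `hash_code` are assumed to be supplied
  // by the caller.
  //
  //   Monitor* mon = MonitorPool::CreateMonitor(self, owner, obj, hash_code);
  //   MonitorId id = MonitorPool::ComputeMonitorId(mon, self);
  //   DCHECK_EQ(MonitorPool::MonitorFromMonitorId(id), mon);  // Id <-> pointer round-trips.
  //   MonitorPool::ReleaseMonitor(self, mon);  // Deletes on 32-bit, returns to the pool on 64-bit.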

 private:
#ifdef __LP64__
  // When we create a monitor pool, threads have not been initialized yet, so ignore thread-safety
  // analysis.
  MonitorPool() NO_THREAD_SAFETY_ANALYSIS;

  void AllocateChunk() REQUIRES(Locks::allocated_monitor_ids_lock_);

  // Release all chunks and metadata. This is done on shutdown, when threads have already been
  // destroyed, so ignore thread-safety analysis.
  void FreeInternal() NO_THREAD_SAFETY_ANALYSIS;

  Monitor* CreateMonitorInPool(Thread* self,
                               Thread* owner,
                               ObjPtr<mirror::Object> obj,
                               int32_t hash_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ReleaseMonitorToPool(Thread* self, Monitor* monitor);
  void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors);

  // Note: This is safe because we never move chunks. All entries in the monitor_chunks_ data
  // structure that are needed here are read-only once we get here. Updates happen-before this
  // call because the lock word was stored with release semantics and we read it with acquire
  // semantics to retrieve the id.
  Monitor* LookupMonitor(MonitorId mon_id) {
    size_t offset = MonitorIdToOffset(mon_id);
    size_t index = offset / kChunkSize;
    size_t top_index = index / kMaxListSize;
    size_t list_index = index % kMaxListSize;
    size_t offset_in_chunk = offset % kChunkSize;
    uintptr_t base = monitor_chunks_[top_index][list_index];
    return reinterpret_cast<Monitor*>(base + offset_in_chunk);
  }
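
  // Worked example (illustrative; assumes the default kChunkSize = 4096 and the release-build
  // kMaxListSize = 32768 defined below): for mon_id 0x1234, offset = 0x1234 << 3 = 0x91A0;
  // index = 0x91A0 / 4096 = 9; top_index = 9 / 32768 = 0; list_index = 9 % 32768 = 9;
  // offset_in_chunk = 0x91A0 % 4096 = 0x1A0. The monitor therefore lives at
  // monitor_chunks_[0][9] + 0x1A0.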

  static bool IsInChunk(uintptr_t base_addr, Monitor* mon) {
    uintptr_t mon_ptr = reinterpret_cast<uintptr_t>(mon);
    return base_addr <= mon_ptr && (mon_ptr - base_addr < kChunkSize);
  }

  MonitorId ComputeMonitorIdInPool(Monitor* mon, Thread* self) {
    MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
    for (size_t i = 0; i <= current_chunk_list_index_; ++i) {
      for (size_t j = 0; j < ChunkListCapacity(i); ++j) {
        if (j >= num_chunks_ && i == current_chunk_list_index_) {
          break;
        }
        uintptr_t chunk_addr = monitor_chunks_[i][j];
        if (IsInChunk(chunk_addr, mon)) {
          return OffsetToMonitorId(
              reinterpret_cast<uintptr_t>(mon) - chunk_addr
              + i * (kMaxListSize * kChunkSize) + j * kChunkSize);
        }
      }
    }
    LOG(FATAL) << "Did not find chunk that contains monitor.";
    return 0;
  }

  static constexpr size_t MonitorIdToOffset(MonitorId id) {
    return id << 3;
  }

  static constexpr MonitorId OffsetToMonitorId(size_t offset) {
    return static_cast<MonitorId>(offset >> 3);
  }

  static constexpr size_t ChunkListCapacity(size_t index) {
    return kInitialChunkStorage << index;
  }

  // TODO: There are assumptions in the code that monitor addresses are 8B aligned (>>3).
  static constexpr size_t kMonitorAlignment = 8;
  // Size of a monitor, rounded up to a multiple of alignment.
  static constexpr size_t kAlignedMonitorSize = (sizeof(Monitor) + kMonitorAlignment - 1) &
                                                -kMonitorAlignment;
  // Size of the chunks holding the actual monitors. The bottom bits of the monitor id are the
  // index into such a chunk. We could shrink this to the storage actually used in a chunk, i.e.,
  // kChunkCapacity * kAlignedMonitorSize, but that would require real divisions instead of shifts.
  static constexpr size_t kChunkSize = 4096;
  static_assert(IsPowerOfTwo(kChunkSize), "kChunkSize must be power of 2");
  static constexpr size_t kChunkCapacity = kChunkSize / kAlignedMonitorSize;
  // The number of chunks of storage that can be referenced by the initial chunk list.
  // The total number of usable monitor chunks is typically 255 times this number, so it
  // should be large enough that we don't run out. We run out of address bits if it's > 512.
  // Currently we set it a bit smaller, to save half a page per process. We make it tiny in
  // debug builds to catch growth errors. This is the only value we really expect to tune.
  static constexpr size_t kInitialChunkStorage = kIsDebugBuild ? 8U : 256U;
  static_assert(IsPowerOfTwo(kInitialChunkStorage), "kInitialChunkStorage must be power of 2");
  // The number of lists, each containing pointers to storage chunks.
  static constexpr size_t kMaxChunkLists = 8;  // Dictated by 3-bit index. Don't increase above 8.
  static_assert(IsPowerOfTwo(kMaxChunkLists), "kMaxChunkLists must be power of 2");
  static constexpr size_t kMaxListSize = kInitialChunkStorage << (kMaxChunkLists - 1);
  // We lose 3 bits of monitor id to the 3-bit monitor_chunks_ index, and gain them back from
  // the 3-bit alignment constraint on monitors:
  static_assert(kMaxListSize * kChunkSize < (1 << LockWord::kMonitorIdSize),
                "Monitor id bits don't fit");
  static_assert(IsPowerOfTwo(kMaxListSize), "kMaxListSize must be power of 2");
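
  // Illustrative consistency checks (a sketch added here; not in the original source). They
  // restate invariants the id encoding relies on: a chunk must be able to hold at least one
  // monitor, and in-chunk offsets must be expressible in units of kMonitorAlignment.
  static_assert(kChunkSize >= kAlignedMonitorSize, "A chunk must hold at least one monitor");
  static_assert(kChunkSize % kMonitorAlignment == 0,
                "In-chunk offsets must be multiples of kMonitorAlignment");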

  // Array of pointers to lists (again arrays) of pointers to chunks containing monitors.
  // Zeroth entry points to a list (array) of kInitialChunkStorage pointers to chunks.
  // Each subsequent list is twice as large as the preceding one.
  // Monitor ids are effectively interpreted as follows:
  //   Top 3 bits (of 28): index into monitor_chunks_.
  //   Next 16 bits: index into the chunk list, i.e. monitor_chunks_[i].
  //   Last 9 bits: offset within chunk, expressed as a multiple of kMonitorAlignment.
  // If we set kInitialChunkStorage to 512, this would allow us to use roughly 128K chunks of
  // monitors, which is 0.5GB of monitors. With this maximum setting, the largest chunk list
  // contains 64K entries, and we make full use of the available index space. With a
  // kInitialChunkStorage value of 256, this is proportionately reduced to 0.25GB of monitors.
  // Updates to monitor_chunks_ are guarded by allocated_monitor_ids_lock_.
  // No field in this entire data structure is ever updated once a monitor id whose lookup
  // requires it has been made visible to another thread. Thus readers never race with
  // updates, in spite of the fact that they acquire no locks.
  uintptr_t* monitor_chunks_[kMaxChunkLists];  // uintptr_t is really a Monitor*.
  // Highest currently used index in monitor_chunks_. Used for newly allocated chunks.
  size_t current_chunk_list_index_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
  // Number of chunk pointers stored in monitor_chunks_[current_chunk_list_index_] so far.
  size_t num_chunks_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
  // After the initial allocation, this is always equal to
  // ChunkListCapacity(current_chunk_list_index_).
  size_t current_chunk_list_capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);

  using Allocator = TrackingAllocator<uint8_t, kAllocatorTagMonitorPool>;
  Allocator allocator_;

  // Start of free list of monitors.
  // Note: these point to the right memory regions, but do *not* denote initialized objects.
  Monitor* first_free_ GUARDED_BY(Locks::allocated_monitor_ids_lock_);
#endif
};

}  // namespace art

#endif  // ART_RUNTIME_MONITOR_POOL_H_