/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "monitor_pool.h"

#include "base/logging.h"  // For VLOG.
#include "base/mutex-inl.h"
#include "monitor.h"
#include "thread-current-inl.h"

namespace art {

namespace mirror {
class Object;
}  // namespace mirror
29
MonitorPool()30 MonitorPool::MonitorPool()
31 : current_chunk_list_index_(0), num_chunks_(0), current_chunk_list_capacity_(0),
32 first_free_(nullptr) {
33 for (size_t i = 0; i < kMaxChunkLists; ++i) {
34 monitor_chunks_[i] = nullptr; // Not absolutely required, but ...
35 }
36 AllocateChunk(); // Get our first chunk.
37 }
38
// Allocates one more chunk of monitor storage and rebuilds the free list from it,
// growing the chunk-list array first if the current list is full.
// Assumes locks are held appropriately when necessary.
// We do not need a lock in the constructor, but we need one when in CreateMonitorInPool.
void MonitorPool::AllocateChunk() {
  // Only called when the free list is exhausted (or once from the constructor).
  DCHECK(first_free_ == nullptr);

  // Do we need to allocate another chunk list?
  if (num_chunks_ == current_chunk_list_capacity_) {
    if (current_chunk_list_capacity_ != 0U) {
      // Advance to the next (larger-capacity) chunk list; running out of lists
      // is a hard failure, as monitor ids could no longer be assigned.
      ++current_chunk_list_index_;
      CHECK_LT(current_chunk_list_index_, kMaxChunkLists) << "Out of space for inflated monitors";
      VLOG(monitor) << "Expanding to capacity "
          << 2 * ChunkListCapacity(current_chunk_list_index_) - kInitialChunkStorage;
    }  // else we're initializing
    current_chunk_list_capacity_ = ChunkListCapacity(current_chunk_list_index_);
    // The trailing "()" value-initializes: unused slots stay zero, which
    // FreeInternal later relies on to tell live chunks from empty slots.
    uintptr_t* new_list = new uintptr_t[current_chunk_list_capacity_]();
    DCHECK(monitor_chunks_[current_chunk_list_index_] == nullptr);
    monitor_chunks_[current_chunk_list_index_] = new_list;
    num_chunks_ = 0;
  }

  // Allocate the chunk.
  void* chunk = allocator_.allocate(kChunkSize);
  // Check we allocated memory.
  CHECK_NE(reinterpret_cast<uintptr_t>(nullptr), reinterpret_cast<uintptr_t>(chunk));
  // Check it is aligned as we need it.
  CHECK_EQ(0U, reinterpret_cast<uintptr_t>(chunk) % kMonitorAlignment);

  // Add the chunk.
  monitor_chunks_[current_chunk_list_index_][num_chunks_] = reinterpret_cast<uintptr_t>(chunk);
  num_chunks_++;

  // Set up the free list: thread the chunk's monitor slots back-to-front so
  // that the slot at the start of the chunk ends up at the head of the list.
  Monitor* last = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(chunk) +
                                             (kChunkCapacity - 1) * kAlignedMonitorSize);
  last->next_free_ = nullptr;
  // Eagerly compute id: the monitor id encodes (chunk list, chunk, slot) as a
  // byte offset into the pool's logical address space.
  last->monitor_id_ = OffsetToMonitorId(current_chunk_list_index_* (kMaxListSize * kChunkSize)
      + (num_chunks_ - 1) * kChunkSize + (kChunkCapacity - 1) * kAlignedMonitorSize);
  for (size_t i = 0; i < kChunkCapacity - 1; ++i) {
    Monitor* before = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(last) -
                                                 kAlignedMonitorSize);
    before->next_free_ = last;
    // Derive monitor_id from last: each slot is exactly kAlignedMonitorSize
    // below its successor, so the offset shrinks by the same amount.
    before->monitor_id_ = OffsetToMonitorId(MonitorIdToOffset(last->monitor_id_) -
                                            kAlignedMonitorSize);
    last = before;
  }
  // After walking back over the whole chunk, we must be at its first slot.
  DCHECK(last == reinterpret_cast<Monitor*>(chunk));
  first_free_ = last;
}
90
FreeInternal()91 void MonitorPool::FreeInternal() {
92 // This is on shutdown with NO_THREAD_SAFETY_ANALYSIS, can't/don't need to lock.
93 DCHECK_NE(current_chunk_list_capacity_, 0UL);
94 for (size_t i = 0; i <= current_chunk_list_index_; ++i) {
95 DCHECK_NE(monitor_chunks_[i], static_cast<uintptr_t*>(nullptr));
96 for (size_t j = 0; j < ChunkListCapacity(i); ++j) {
97 if (i < current_chunk_list_index_ || j < num_chunks_) {
98 DCHECK_NE(monitor_chunks_[i][j], 0U);
99 allocator_.deallocate(reinterpret_cast<uint8_t*>(monitor_chunks_[i][j]), kChunkSize);
100 } else {
101 DCHECK_EQ(monitor_chunks_[i][j], 0U);
102 }
103 }
104 delete[] monitor_chunks_[i];
105 }
106 }
107
CreateMonitorInPool(Thread * self,Thread * owner,ObjPtr<mirror::Object> obj,int32_t hash_code)108 Monitor* MonitorPool::CreateMonitorInPool(Thread* self,
109 Thread* owner,
110 ObjPtr<mirror::Object> obj,
111 int32_t hash_code)
112 REQUIRES_SHARED(Locks::mutator_lock_) {
113 // We are gonna allocate, so acquire the writer lock.
114 MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
115
116 // Enough space, or need to resize?
117 if (first_free_ == nullptr) {
118 VLOG(monitor) << "Allocating a new chunk.";
119 AllocateChunk();
120 }
121
122 Monitor* mon_uninitialized = first_free_;
123 first_free_ = first_free_->next_free_;
124
125 // Pull out the id which was preinitialized.
126 MonitorId id = mon_uninitialized->monitor_id_;
127
128 // Initialize it.
129 Monitor* monitor = new(mon_uninitialized) Monitor(self, owner, obj, hash_code, id);
130
131 return monitor;
132 }
133
ReleaseMonitorToPool(Thread * self,Monitor * monitor)134 void MonitorPool::ReleaseMonitorToPool(Thread* self, Monitor* monitor) {
135 // Might be racy with allocation, so acquire lock.
136 MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
137
138 // Keep the monitor id. Don't trust it's not cleared.
139 MonitorId id = monitor->monitor_id_;
140
141 // Call the destructor.
142 // TODO: Exception safety?
143 monitor->~Monitor();
144
145 // Add to the head of the free list.
146 monitor->next_free_ = first_free_;
147 first_free_ = monitor;
148
149 // Rewrite monitor id.
150 monitor->monitor_id_ = id;
151 }
152
ReleaseMonitorsToPool(Thread * self,MonitorList::Monitors * monitors)153 void MonitorPool::ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors) {
154 for (Monitor* mon : *monitors) {
155 ReleaseMonitorToPool(self, mon);
156 }
157 }

}  // namespace art