1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "intern_table.h"
18
19 #include <memory>
20
21 #include "gc_root-inl.h"
22 #include "gc/space/image_space.h"
23 #include "mirror/dex_cache.h"
24 #include "mirror/object_array-inl.h"
25 #include "mirror/object-inl.h"
26 #include "mirror/string-inl.h"
27 #include "thread.h"
28 #include "utf.h"
29
30 namespace art {
31
// Initial state: boot-image strings not yet merged in, new-root logging off,
// and interning allowed. The condition variable (guarded by the intern table
// lock) is what Insert() waits on while interning is disallowed.
InternTable::InternTable()
    : image_added_to_intern_table_(false), log_new_roots_(false),
      allow_new_interns_(true),
      new_intern_condition_("New intern condition", *Locks::intern_table_lock_) {
}
37
Size() const38 size_t InternTable::Size() const {
39 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
40 return strong_interns_.Size() + weak_interns_.Size();
41 }
42
StrongSize() const43 size_t InternTable::StrongSize() const {
44 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
45 return strong_interns_.Size();
46 }
47
WeakSize() const48 size_t InternTable::WeakSize() const {
49 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
50 return weak_interns_.Size();
51 }
52
DumpForSigQuit(std::ostream & os) const53 void InternTable::DumpForSigQuit(std::ostream& os) const {
54 os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
55 }
56
// Visits strong intern roots for the GC. With kVisitRootFlagAllRoots the whole
// strong table is walked; with kVisitRootFlagNewRoots only the roots logged
// since logging was enabled are visited, and entries relocated by a moving GC
// are re-hashed into the table. Weak interns are deliberately not visited.
void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if ((flags & kVisitRootFlagAllRoots) != 0) {
    strong_interns_.VisitRoots(visitor);
  } else if ((flags & kVisitRootFlagNewRoots) != 0) {
    for (auto& root : new_strong_intern_roots_) {
      // Read without a read barrier before and after the visit so we can tell
      // whether the visitor moved the referent.
      mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
      root.VisitRoot(visitor, RootInfo(kRootInternedString));
      mirror::String* new_ref = root.Read<kWithoutReadBarrier>();
      if (new_ref != old_ref) {
        // The GC moved a root in the log. Need to search the strong interns and update the
        // corresponding object. This is slow, but luckily for us, this may only happen with a
        // concurrent moving GC.
        strong_interns_.Remove(old_ref);
        strong_interns_.Insert(new_ref);
      }
    }
  }
  if ((flags & kVisitRootFlagClearRootLog) != 0) {
    new_strong_intern_roots_.clear();
  }
  if ((flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
    log_new_roots_ = true;
  } else if ((flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
    log_new_roots_ = false;
  }
  // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
}
85
// Returns the strong-interned string equal to |s|, or nullptr if absent.
// The intern table lock must be held (asserted in Table::Find).
mirror::String* InternTable::LookupStrong(mirror::String* s) {
  return strong_interns_.Find(s);
}
89
// Returns the weak-interned string equal to |s|, or nullptr if absent.
// The intern table lock must be held (asserted in Table::Find).
mirror::String* InternTable::LookupWeak(mirror::String* s) {
  return weak_interns_.Find(s);
}
93
SwapPostZygoteWithPreZygote()94 void InternTable::SwapPostZygoteWithPreZygote() {
95 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
96 weak_interns_.SwapPostZygoteWithPreZygote();
97 strong_interns_.SwapPostZygoteWithPreZygote();
98 }
99
InsertStrong(mirror::String * s)100 mirror::String* InternTable::InsertStrong(mirror::String* s) {
101 Runtime* runtime = Runtime::Current();
102 if (runtime->IsActiveTransaction()) {
103 runtime->RecordStrongStringInsertion(s);
104 }
105 if (log_new_roots_) {
106 new_strong_intern_roots_.push_back(GcRoot<mirror::String>(s));
107 }
108 strong_interns_.Insert(s);
109 return s;
110 }
111
InsertWeak(mirror::String * s)112 mirror::String* InternTable::InsertWeak(mirror::String* s) {
113 Runtime* runtime = Runtime::Current();
114 if (runtime->IsActiveTransaction()) {
115 runtime->RecordWeakStringInsertion(s);
116 }
117 weak_interns_.Insert(s);
118 return s;
119 }
120
// Removes |s| from the strong table. Note: unlike RemoveWeak(), no
// transaction record is made here.
void InternTable::RemoveStrong(mirror::String* s) {
  strong_interns_.Remove(s);
}
124
RemoveWeak(mirror::String * s)125 void InternTable::RemoveWeak(mirror::String* s) {
126 Runtime* runtime = Runtime::Current();
127 if (runtime->IsActiveTransaction()) {
128 runtime->RecordWeakStringRemoval(s);
129 }
130 weak_interns_.Remove(s);
131 }
132
133 // Insert/remove methods used to undo changes made during an aborted transaction.
// Re-inserts a strong intern while undoing an aborted transaction; by then
// the transaction must no longer be active.
mirror::String* InternTable::InsertStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertStrong(s);
}
// Re-inserts a weak intern while undoing an aborted transaction; by then
// the transaction must no longer be active.
mirror::String* InternTable::InsertWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  return InsertWeak(s);
}
// Removes a strong intern while undoing an aborted transaction; by then
// the transaction must no longer be active.
void InternTable::RemoveStrongFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveStrong(s);
}
// Removes a weak intern while undoing an aborted transaction; by then
// the transaction must no longer be active.
void InternTable::RemoveWeakFromTransaction(mirror::String* s) {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  RemoveWeak(s);
}
150
// Merges the boot image's interned strings into this table (at most once).
// Prefers the serialized interned-strings section of the image; if the image
// predates that section, falls back to scanning every dex cache's resolved
// strings.
void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
  CHECK(image_space != nullptr);
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  if (!image_added_to_intern_table_) {
    const ImageHeader* const header = &image_space->GetImageHeader();
    // Check if we have the interned strings section.
    const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
    if (section.Size() > 0) {
      // Fast path: deserialize the table directly from the image section.
      ReadFromMemoryLocked(image_space->Begin() + section.Offset());
    } else {
      // TODO: Delete this logic?
      mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
      mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
      for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
        mirror::DexCache* dex_cache = dex_caches->Get(i);
        const DexFile* dex_file = dex_cache->GetDexFile();
        const size_t num_strings = dex_file->NumStringIds();
        for (size_t j = 0; j < num_strings; ++j) {
          mirror::String* image_string = dex_cache->GetResolvedString(j);
          if (image_string != nullptr) {
            mirror::String* found = LookupStrong(image_string);
            if (found == nullptr) {
              InsertStrong(image_string);
            } else {
              // Already present; it must be the very same object.
              DCHECK_EQ(found, image_string);
            }
          }
        }
      }
    }
    image_added_to_intern_table_ = true;
  }
}
184
// Searches the boot image's dex caches for a resolved string equal to |s|.
// Returns nullptr if the image strings were already merged into this table
// (no point searching twice), if there is no image, or if no match exists.
mirror::String* InternTable::LookupStringFromImage(mirror::String* s)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (image_added_to_intern_table_) {
    return nullptr;
  }
  gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
  if (image == nullptr) {
    return nullptr;  // No image present.
  }
  mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
  mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
  // Compare via modified UTF-8 since that is what the dex files store.
  const std::string utf8 = s->ToModifiedUtf8();
  for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
    mirror::DexCache* dex_cache = dex_caches->Get(i);
    const DexFile* dex_file = dex_cache->GetDexFile();
    // Binary search the dex file for the string index.
    const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
    if (string_id != nullptr) {
      uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
      // GetResolvedString() contains a RB.
      mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
      if (image_string != nullptr) {
        return image_string;
      }
    }
  }
  return nullptr;
}
213
AllowNewInterns()214 void InternTable::AllowNewInterns() {
215 Thread* self = Thread::Current();
216 MutexLock mu(self, *Locks::intern_table_lock_);
217 allow_new_interns_ = true;
218 new_intern_condition_.Broadcast(self);
219 }
220
DisallowNewInterns()221 void InternTable::DisallowNewInterns() {
222 Thread* self = Thread::Current();
223 MutexLock mu(self, *Locks::intern_table_lock_);
224 allow_new_interns_ = false;
225 }
226
// Checks that DisallowNewInterns() has taken effect.
void InternTable::EnsureNewInternsDisallowed() {
  // Lock and unlock once to ensure that no threads are still in the
  // middle of adding new interns.
  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  CHECK(!allow_new_interns_);
}
233
// Interns |s| with the requested strength and returns the canonical instance,
// which may be an existing strong entry, a (possibly promoted) weak entry, a
// boot image string, or |s| itself. Blocks while interning is disallowed
// (see Allow/DisallowNewInterns). Returns nullptr for a nullptr input.
mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
  if (s == nullptr) {
    return nullptr;
  }
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::intern_table_lock_);
  // Wait until interning is re-allowed; the condition is signalled by
  // AllowNewInterns().
  while (UNLIKELY(!allow_new_interns_)) {
    new_intern_condition_.WaitHoldingLocks(self);
  }
  // Check the strong table for a match.
  mirror::String* strong = LookupStrong(s);
  if (strong != nullptr) {
    return strong;
  }
  // There is no match in the strong table, check the weak table.
  mirror::String* weak = LookupWeak(s);
  if (weak != nullptr) {
    if (is_strong) {
      // A match was found in the weak table. Promote to the strong table.
      RemoveWeak(weak);
      return InsertStrong(weak);
    }
    return weak;
  }
  // Check the image for a match.
  mirror::String* image = LookupStringFromImage(s);
  if (image != nullptr) {
    return is_strong ? InsertStrong(image) : InsertWeak(image);
  }
  // No match in the strong table or the weak table. Insert into the strong / weak table.
  return is_strong ? InsertStrong(s) : InsertWeak(s);
}
266
InternStrong(int32_t utf16_length,const char * utf8_data)267 mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
268 DCHECK(utf8_data != nullptr);
269 return InternStrong(mirror::String::AllocFromModifiedUtf8(
270 Thread::Current(), utf16_length, utf8_data));
271 }
272
InternStrong(const char * utf8_data)273 mirror::String* InternTable::InternStrong(const char* utf8_data) {
274 DCHECK(utf8_data != nullptr);
275 return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
276 }
277
// Strongly interns |s|; the returned canonical string may differ from |s|.
mirror::String* InternTable::InternStrong(mirror::String* s) {
  return Insert(s, true);
}
281
// Weakly interns |s|; the returned canonical string may differ from |s|.
mirror::String* InternTable::InternWeak(mirror::String* s) {
  return Insert(s, false);
}
285
ContainsWeak(mirror::String * s)286 bool InternTable::ContainsWeak(mirror::String* s) {
287 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
288 return LookupWeak(s) == s;
289 }
290
SweepInternTableWeaks(IsMarkedCallback * callback,void * arg)291 void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
292 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
293 weak_interns_.SweepWeaks(callback, arg);
294 }
295
AddImageInternTable(gc::space::ImageSpace * image_space)296 void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
297 const ImageSection& intern_section = image_space->GetImageHeader().GetImageSection(
298 ImageHeader::kSectionInternedStrings);
299 // Read the string tables from the image.
300 const uint8_t* ptr = image_space->Begin() + intern_section.Offset();
301 const size_t offset = ReadFromMemory(ptr);
302 CHECK_LE(offset, intern_section.Size());
303 }
304
ReadFromMemory(const uint8_t * ptr)305 size_t InternTable::ReadFromMemory(const uint8_t* ptr) {
306 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
307 return ReadFromMemoryLocked(ptr);
308 }
309
// Deserializes strings from |ptr| into the strong pre-zygote table; the
// intern table lock must be held. Returns how much input was consumed
// (callers check it against the image section size).
size_t InternTable::ReadFromMemoryLocked(const uint8_t* ptr) {
  return strong_interns_.ReadIntoPreZygoteTable(ptr);
}
313
WriteToMemory(uint8_t * ptr)314 size_t InternTable::WriteToMemory(uint8_t* ptr) {
315 MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
316 return strong_interns_.WriteFromPostZygoteTable(ptr);
317 }
318
operator ()(const GcRoot<mirror::String> & root) const319 std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
320 if (kIsDebugBuild) {
321 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
322 }
323 return static_cast<size_t>(root.Read()->GetHashCode());
324 }
325
operator ()(const GcRoot<mirror::String> & a,const GcRoot<mirror::String> & b) const326 bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
327 const GcRoot<mirror::String>& b) const {
328 if (kIsDebugBuild) {
329 Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
330 }
331 return a.Read()->Equals(b.Read());
332 }
333
ReadIntoPreZygoteTable(const uint8_t * ptr)334 size_t InternTable::Table::ReadIntoPreZygoteTable(const uint8_t* ptr) {
335 CHECK_EQ(pre_zygote_table_.Size(), 0u);
336 size_t read_count = 0;
337 pre_zygote_table_ = UnorderedSet(ptr, false /* make copy */, &read_count);
338 return read_count;
339 }
340
// Serializes the post-zygote table to |ptr|, forwarding the size returned by
// the underlying set's WriteToMemory.
size_t InternTable::Table::WriteFromPostZygoteTable(uint8_t* ptr) {
  return post_zygote_table_.WriteToMemory(ptr);
}
344
Remove(mirror::String * s)345 void InternTable::Table::Remove(mirror::String* s) {
346 auto it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
347 if (it != post_zygote_table_.end()) {
348 post_zygote_table_.Erase(it);
349 } else {
350 it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
351 DCHECK(it != pre_zygote_table_.end());
352 pre_zygote_table_.Erase(it);
353 }
354 }
355
Find(mirror::String * s)356 mirror::String* InternTable::Table::Find(mirror::String* s) {
357 Locks::intern_table_lock_->AssertHeld(Thread::Current());
358 auto it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
359 if (it != pre_zygote_table_.end()) {
360 return it->Read();
361 }
362 it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
363 if (it != post_zygote_table_.end()) {
364 return it->Read();
365 }
366 return nullptr;
367 }
368
// Moves the current (post-zygote) entries into the pre-zygote table, leaving
// the post-zygote table empty for entries created after the zygote fork.
void InternTable::Table::SwapPostZygoteWithPreZygote() {
  if (pre_zygote_table_.Empty()) {
    std::swap(pre_zygote_table_, post_zygote_table_);
    // After the swap, pre_zygote_table_ holds the entries being frozen.
    VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
  } else {
    // This case happens if we read the intern table from the image.
    VLOG(heap) << "Not swapping due to non-empty pre_zygote_table_";
  }
}
378
void InternTable::Table::Insert(mirror::String* s) {
  // Always insert the post zygote table, this gets swapped when we create the zygote to be the
  // pre zygote table (see SwapPostZygoteWithPreZygote).
  post_zygote_table_.Insert(GcRoot<mirror::String>(s));
}
384
VisitRoots(RootVisitor * visitor)385 void InternTable::Table::VisitRoots(RootVisitor* visitor) {
386 BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
387 visitor, RootInfo(kRootInternedString));
388 for (auto& intern : pre_zygote_table_) {
389 buffered_visitor.VisitRoot(intern);
390 }
391 for (auto& intern : post_zygote_table_) {
392 buffered_visitor.VisitRoot(intern);
393 }
394 }
395
SweepWeaks(IsMarkedCallback * callback,void * arg)396 void InternTable::Table::SweepWeaks(IsMarkedCallback* callback, void* arg) {
397 SweepWeaks(&pre_zygote_table_, callback, arg);
398 SweepWeaks(&post_zygote_table_, callback, arg);
399 }
400
// Walks |set|, erasing entries whose string the GC did not mark and updating
// roots for strings a moving GC relocated.
void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg) {
  for (auto it = set->begin(), end = set->end(); it != end;) {
    // This does not need a read barrier because this is called by GC.
    mirror::Object* object = it->Read<kWithoutReadBarrier>();
    mirror::Object* new_object = callback(object, arg);
    if (new_object == nullptr) {
      // Dead referent: Erase returns the next valid iterator.
      it = set->Erase(it);
    } else {
      // Live (possibly moved) referent: update the root in place.
      *it = GcRoot<mirror::String>(new_object->AsString());
      ++it;
    }
  }
}
414
// Total entry count across the pre- and post-zygote tables.
size_t InternTable::Table::Size() const {
  return pre_zygote_table_.Size() + post_zygote_table_.Size();
}
418
419 } // namespace art
420