/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INTERN_TABLE_H_
#define ART_RUNTIME_INTERN_TABLE_H_

#include <unordered_set>

#include "base/allocator.h"
#include "base/hash_set.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "object_callbacks.h"

namespace art {

namespace gc {
namespace space {
class ImageSpace;
}  // namespace space
}  // namespace gc

enum VisitRootFlags : uint8_t;

namespace mirror {
class String;
}  // namespace mirror
class Transaction;

/**
 * Used to intern strings.
 *
 * There are actually two tables: one that holds strong references to its strings, and one that
 * holds weak references. The former is used for string literals, for which there is an effective
 * reference from the constant pool. The latter is used for strings interned at runtime via
 * String.intern. Some code (XML parsers being a prime example) relies on being able to intern
 * arbitrarily many strings for the duration of a parse without permanently increasing the memory
 * footprint.
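 *
 * A minimal usage sketch (illustrative only; it assumes Runtime::Current()->GetInternTable() is
 * the usual way to reach the runtime's table, and "s" is some runtime-created mirror::String*):
 *
 *   InternTable* table = Runtime::Current()->GetInternTable();
 *   // A literal backed by the constant pool belongs in the strong table.
 *   mirror::String* lit = table->InternStrong("example");
 *   // A string interned at runtime (String.intern) belongs in the weak table.
 *   mirror::String* interned = table->InternWeak(s);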
 */
class InternTable {
 public:
  InternTable();

  // Interns a potentially new string in the 'strong' table. (See above.)
  mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Interns a potentially new string in the 'strong' table. (See above.)
  mirror::String* InternStrong(const char* utf8_data)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Interns a potentially new string in the 'strong' table. (See above.)
  mirror::String* InternStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Interns a potentially new string in the 'weak' table. (See above.)
  mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SweepInternTableWeaks(IsMarkedCallback* callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Total number of interned strings.
  size_t Size() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
  // Total number of strongly live interned strings.
  size_t StrongSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
  // Total number of weakly live interned strings.
  size_t WeakSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);

  void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os) const;

  void DisallowNewInterns() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Adds all of the resolved image strings from the image space into the intern table. The
  // advantage of doing this is preventing expensive DexFile::FindStringId calls.
  void AddImageStringsToTable(gc::space::ImageSpace* image_space)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
  // Copy the post zygote tables to pre zygote to save memory by preventing dirty pages.
  void SwapPostZygoteWithPreZygote()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);

 private:
  class StringHashEquals {
   public:
    std::size_t operator()(const GcRoot<mirror::String>& root) const NO_THREAD_SAFETY_ANALYSIS;
    bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b) const
        NO_THREAD_SAFETY_ANALYSIS;
  };
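  // Empty-slot functor for HashSet: a default-constructed (null) GcRoot marks an unused bucket.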
  class GcRootEmptyFn {
   public:
    void MakeEmpty(GcRoot<mirror::String>& item) const {
      item = GcRoot<mirror::String>();
    }
    bool IsEmpty(const GcRoot<mirror::String>& item) const {
      return item.IsNull();
    }
  };

  // Table which holds pre zygote and post zygote interned strings. There is one instance for
  // weak interns and strong interns.
  class Table {
   public:
    mirror::String* Find(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
    void Insert(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
    void Remove(mirror::String* s)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
    void VisitRoots(RootCallback* callback, void* arg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
    void SweepWeaks(IsMarkedCallback* callback, void* arg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
    void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
    size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);

   private:
    typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
        TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;

    void SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);

    // We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty pages
    // caused by modifying the zygote intern table hash table. The pre zygote table holds the
    // strings which were interned before we created the zygote space; the post zygote table holds
    // those interned afterwards.
    UnorderedSet pre_zygote_table_;
    UnorderedSet post_zygote_table_;
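
    // A minimal sketch of the swap (an assumption about the intended behaviour, not a copy of
    // the actual implementation): at the zygote fork point the strings interned so far move into
    // pre_zygote_table_, leaving post_zygote_table_ empty for strings interned afterwards.
    //
    //   void Table::SwapPostZygoteWithPreZygote() {
    //     std::swap(pre_zygote_table_, post_zygote_table_);  // assumes UnorderedSet is swappable
    //   }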
  };

  // Inserts the string if it is non-null, otherwise returns nullptr.
  mirror::String* Insert(mirror::String* s, bool is_strong)
      LOCKS_EXCLUDED(Locks::intern_table_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
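  // Rough shape of Insert (an illustrative assumption, not a verbatim copy of the
  // implementation):
  //
  //   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  //   if (mirror::String* hit = LookupStrong(s)) return hit;    // already strongly interned
  //   if (mirror::String* hit = LookupWeak(s)) {
  //     if (!is_strong) return hit;                              // already weakly interned
  //     RemoveWeak(hit);                                         // promote weak -> strong
  //     return InsertStrong(hit);
  //   }
  //   return is_strong ? InsertStrong(s) : InsertWeak(s);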

  mirror::String* LookupStrong(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  mirror::String* LookupWeak(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  mirror::String* InsertStrong(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  mirror::String* InsertWeak(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RemoveStrong(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RemoveWeak(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);

  // Transaction rollback access.
  mirror::String* LookupStringFromImage(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  mirror::String* InsertStrongFromTransaction(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  mirror::String* InsertWeakFromTransaction(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RemoveStrongFromTransaction(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  void RemoveWeakFromTransaction(mirror::String* s)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
  friend class Transaction;

  bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
  bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
  bool allow_new_interns_ GUARDED_BY(Locks::intern_table_lock_);
  ConditionVariable new_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
  // Since this contains (strong) roots, they need a read barrier to
  // enable concurrent intern table (strong) root scan. Do not
  // directly access the strings in it. Use functions that contain
  // read barriers.
  Table strong_interns_ GUARDED_BY(Locks::intern_table_lock_);
  std::vector<GcRoot<mirror::String>> new_strong_intern_roots_
      GUARDED_BY(Locks::intern_table_lock_);
  // Since this contains (weak) roots, they need a read barrier. Do
  // not directly access the strings in it. Use functions that contain
  // read barriers.
  Table weak_interns_ GUARDED_BY(Locks::intern_table_lock_);
};

}  // namespace art

#endif  // ART_RUNTIME_INTERN_TABLE_H_