/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "indirect_reference_table-inl.h"

#include "base/mutator_locked_dumpable.h"
#include "base/systrace.h"
#include "base/utils.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

#include <cstdlib>

namespace art {

static constexpr bool kDumpStackOnNonLocalReference = false;
static constexpr bool kDebugIRT = false;

// Maximum table size we allow.
static constexpr size_t kMaxTableSizeInBytes = 128 * MB;

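// Returns a human-readable name for the given IndirectRefKind.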
const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
  switch (kind) {
    case kHandleScopeOrInvalid:
      return "HandleScopeOrInvalid";
    case kLocal:
      return "Local";
    case kGlobal:
      return "Global";
    case kWeakGlobal:
      return "WeakGlobal";
  }
  return "IndirectRefKind Error";
}

void IndirectReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
  // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  if (!vm->IsCheckJniEnabled()) {
    // Otherwise, we want to abort rather than hand back a bad reference.
    LOG(FATAL) << msg;
  } else {
    LOG(ERROR) << msg;
  }
}

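// Reserves the backing storage for up to max_count entries as an anonymous mapping. On failure
// the table is left in an invalid state and *error_msg describes the problem.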
IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
                                               IndirectRefKind desired_kind,
                                               ResizableCapacity resizable,
                                               std::string* error_msg)
    : segment_state_(kIRTFirstSegment),
      kind_(desired_kind),
      max_entries_(max_count),
      current_num_holes_(0),
      resizable_(resizable) {
  CHECK(error_msg != nullptr);
  CHECK_NE(desired_kind, kHandleScopeOrInvalid);

  // Overflow and maximum check.
  CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));

  const size_t table_bytes = max_count * sizeof(IrtEntry);
  table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
                                        table_bytes,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ false,
                                        error_msg);
  if (!table_mem_map_.IsValid() && error_msg->empty()) {
    *error_msg = "Unable to map memory for indirect ref table";
  }

  if (table_mem_map_.IsValid()) {
    table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
  } else {
    table_ = nullptr;
  }
  segment_state_ = kIRTFirstSegment;
  last_known_previous_state_ = kIRTFirstSegment;
}

IndirectReferenceTable::~IndirectReferenceTable() {
}

void IndirectReferenceTable::ConstexprChecks() {
  // Use this for some assertions. They can't be put into the header as C++ wants the class
  // to be complete.

  // Check kind.
  static_assert((EncodeIndirectRefKind(kLocal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kWeakGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kLocal)) == kLocal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kGlobal)) == kGlobal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kWeakGlobal)) == kWeakGlobal,
                "Kind encoding error");

  // Check serial.
  static_assert(DecodeSerial(EncodeSerial(0u)) == 0u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(1u)) == 1u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(2u)) == 2u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(3u)) == 3u, "Serial encoding error");

  // Table index.
  static_assert(DecodeIndex(EncodeIndex(0u)) == 0u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
}

bool IndirectReferenceTable::IsValid() const {
  return table_mem_map_.IsValid();
}

// Holes:
//
// To keep the IRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
// similar. Instead, we scan for holes, with the expectation that we will find holes fast as they
// are usually near the end of the table (see the header, TODO: verify this assumption). To avoid
// scans when there are no holes, the number of known holes should be tracked.
//
// A previous implementation stored the top index and the number of holes as the segment state.
// This constrains the maximum number of references to 16 bits. We want to relax this, as it
// is easy to require more references (e.g., to list all classes in large applications). Thus,
// the implicitly stack-stored state, the IRTSegmentState, is only the top index.
//
// Thus, the hole count is a local property of the current segment, and needs to be recovered when
// (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable), we
// cannot do work when the segment changes. Thus, Add and Remove need to ensure the current
// hole count is correct.
//
// To be able to detect segment changes, we require an additional local field that can describe
// the known segment. This is last_known_previous_state_. The requirement becomes clear with
// the following (partly non-trivial) cases that have to be supported:
//
// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference
// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference
// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
//    reference
// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference
// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
//    reference
//
// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
// segment changes above. The condition is simply that the last known state is greater than or
// equal to the current previous state, and smaller than the current state (top index). The
// condition is conservative as it adds O(1) overhead to operations on an empty segment.
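//
// For illustration only (the indices below are hypothetical): suppose the base segment occupies
// entries [0, 4) and contains a hole at index 2, so current_num_holes_ == 1 and
// last_known_previous_state_ == 0. Pushing a JNI frame makes the previous state 4. The next Add
// then sees last_known_previous_state_ (0) < previous state (4), rescans [4, top) for holes,
// finds none, and resets current_num_holes_ to 0 for the new segment (case 1 above).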

static size_t CountNullEntries(const IrtEntry* table, size_t from, size_t to) {
  size_t count = 0;
  for (size_t index = from; index != to; ++index) {
    if (table[index].GetReference()->IsNull()) {
      count++;
    }
  }
  return count;
}

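// Recomputes the hole count for the current segment when a segment change is detected (see the
// discussion of holes above); otherwise the cached count is kept.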
void IndirectReferenceTable::RecoverHoles(IRTSegmentState prev_state) {
  if (last_known_previous_state_.top_index >= segment_state_.top_index ||
      last_known_previous_state_.top_index < prev_state.top_index) {
    const size_t top_index = segment_state_.top_index;
    size_t count = CountNullEntries(table_, prev_state.top_index, top_index);

    if (kDebugIRT) {
      LOG(INFO) << "+++ Recovered holes: "
                << " Current prev=" << prev_state.top_index
                << " Current top_index=" << top_index
                << " Old num_holes=" << current_num_holes_
                << " New num_holes=" << count;
    }

    current_num_holes_ = count;
    last_known_previous_state_ = prev_state;
  } else if (kDebugIRT) {
    LOG(INFO) << "No need to recover holes";
  }
}

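// Debug-build consistency check: the tracked hole count must match the number of null entries
// between the previous and current top indices.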
ALWAYS_INLINE
static inline void CheckHoleCount(IrtEntry* table,
                                  size_t exp_num_holes,
                                  IRTSegmentState prev_state,
                                  IRTSegmentState cur_state) {
  if (kIsDebugBuild) {
    size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
    CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
                                   << " topIndex=" << cur_state.top_index;
  }
}

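// Grows the table to new_size entries: maps a new anonymous region, copies the existing entries
// over, and replaces the old mapping. Only growing is supported.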
bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
  CHECK_GT(new_size, max_entries_);

  constexpr size_t kMaxEntries = kMaxTableSizeInBytes / sizeof(IrtEntry);
  if (new_size > kMaxEntries) {
    *error_msg = android::base::StringPrintf("Requested size exceeds maximum: %zu", new_size);
    return false;
  }
  // Note: the above check also ensures that there is no overflow below.

  const size_t table_bytes = new_size * sizeof(IrtEntry);
  MemMap new_map = MemMap::MapAnonymous("indirect ref table",
                                        table_bytes,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ false,
                                        error_msg);
  if (!new_map.IsValid()) {
    return false;
  }

  memcpy(new_map.Begin(), table_mem_map_.Begin(), table_mem_map_.Size());
  table_mem_map_ = std::move(new_map);
  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
  max_entries_ = new_size;

  return true;
}

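// Adds obj to the current segment: fills a hole if one exists (scanning down from the top),
// otherwise appends at the top index. If the table is full, it is doubled when resizable;
// otherwise the overflow is reported via error_msg.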
IndirectRef IndirectReferenceTable::Add(IRTSegmentState previous_state,
                                        ObjPtr<mirror::Object> obj,
                                        std::string* error_msg) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  size_t top_index = segment_state_.top_index;

  CHECK(obj != nullptr);
  VerifyObject(obj);
  DCHECK(table_ != nullptr);

  if (top_index == max_entries_) {
    if (resizable_ == ResizableCapacity::kNo) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")"
          << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      *error_msg = oss.str();
      return nullptr;
    }

    // Try to double space.
    if (std::numeric_limits<size_t>::max() / 2 < max_entries_) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: exceeds size_t";
      *error_msg = oss.str();
      return nullptr;
    }

    std::string inner_error_msg;
    if (!Resize(max_entries_ * 2, &inner_error_msg)) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: " << inner_error_msg;
      *error_msg = oss.str();
      return nullptr;
    }
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  // We know there's enough room in the table. Now we just need to find
  // the right spot. If there's a hole, find it and fill it; otherwise,
  // add to the end of the list.
  IndirectRef result;
  size_t index;
  if (current_num_holes_ > 0) {
    DCHECK_GT(top_index, 1U);
    // Find the first hole; likely to be near the end of the list.
    IrtEntry* p_scan = &table_[top_index - 1];
    DCHECK(!p_scan->GetReference()->IsNull());
    --p_scan;
    while (!p_scan->GetReference()->IsNull()) {
      DCHECK_GE(p_scan, table_ + previous_state.top_index);
      --p_scan;
    }
    index = p_scan - table_;
    current_num_holes_--;
  } else {
    // Add to the end.
    index = top_index++;
    segment_state_.top_index = top_index;
  }
  table_[index].Add(obj);
  result = ToIndirectRef(index);
  if (kDebugIRT) {
    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
              << " holes=" << current_num_holes_;
  }

  DCHECK(result != nullptr);
  return result;
}

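// Aborts (dumping the table) if any entry is still live; used to detect leaked references when
// the table is expected to be empty.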
void IndirectReferenceTable::AssertEmpty() {
  for (size_t i = 0; i < Capacity(); ++i) {
    if (!table_[i].GetReference()->IsNull()) {
      LOG(FATAL) << "Internal Error: non-empty local reference table\n"
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      UNREACHABLE();
    }
  }
}

// Removes an object. We extract the table offset bits from "iref"
// and zap the corresponding entry, leaving a hole if it's not at the top.
// If the entry is not between the current top index and the bottom index
// specified by the cookie, we don't remove anything. This is the behavior
// required by JNI's DeleteLocalRef function.
// This method is not called when a local frame is popped; this is only used
// for explicit single removals.
// Returns "false" if nothing was removed.
bool IndirectReferenceTable::Remove(IRTSegmentState previous_state, IndirectRef iref) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  const uint32_t top_index = segment_state_.top_index;
  const uint32_t bottom_index = previous_state.top_index;

  DCHECK(table_ != nullptr);

  if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
    auto* self = Thread::Current();
    if (self->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
      auto* env = self->GetJniEnv();
      DCHECK(env != nullptr);
      if (env->IsCheckJniEnabled()) {
        ScopedObjectAccess soa(self);
        LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
        if (kDumpStackOnNonLocalReference) {
          self->Dump(LOG_STREAM(WARNING));
        }
      }
      return true;
    }
  }
  const uint32_t idx = ExtractIndex(iref);
  if (idx < bottom_index) {
    // Wrong segment.
    LOG(WARNING) << "Attempt to remove index outside index area (" << idx
                 << " vs " << bottom_index << "-" << top_index << ")";
    return false;
  }
  if (idx >= top_index) {
    // Bad --- stale reference?
    LOG(WARNING) << "Attempt to remove invalid index " << idx
                 << " (bottom=" << bottom_index << " top=" << top_index << ")";
    return false;
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  if (idx == top_index - 1) {
    // Top-most entry. Scan up and consume holes.

    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    if (current_num_holes_ != 0) {
      uint32_t collapse_top_index = top_index;
      while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
        if (kDebugIRT) {
          ScopedObjectAccess soa(Thread::Current());
          LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
                    << " (previous_state=" << bottom_index << ") val="
                    << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
        }
        if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
          break;
        }
        if (kDebugIRT) {
          LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
        }
        current_num_holes_--;
      }
      segment_state_.top_index = collapse_top_index;

      CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    } else {
      segment_state_.top_index = top_index - 1;
      if (kDebugIRT) {
        LOG(INFO) << "+++ ate last entry " << top_index - 1;
      }
    }
  } else {
    // Not the top-most entry. This creates a hole. We null out the entry to prevent somebody
    // from deleting it twice and screwing up the hole count.
    if (table_[idx].GetReference()->IsNull()) {
      LOG(INFO) << "--- WEIRD: removing null entry " << idx;
      return false;
    }
    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    current_num_holes_++;
    CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    if (kDebugIRT) {
      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
    }
  }

  return true;
}

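// Releases the physical pages backing the unused part of the table back to the kernel via
// madvise(MADV_DONTNEED); the mapping itself stays intact.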
void IndirectReferenceTable::Trim() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  const size_t top_index = Capacity();
  auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
  uint8_t* release_end = table_mem_map_.End();
  madvise(release_start, release_end - release_start, MADV_DONTNEED);
}

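// Reports every live entry to the visitor so the GC can trace (and possibly update) the
// referenced objects.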
void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
  for (auto ref : *this) {
    if (!ref->IsNull()) {
      root_visitor.VisitRoot(*ref);
      DCHECK(!ref->IsNull());
    }
  }
}

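// Collects the live entries (reading them through the read barrier) and delegates formatting to
// ReferenceTable::Dump.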
void IndirectReferenceTable::Dump(std::ostream& os) const {
  os << kind_ << " table dump:\n";
  ReferenceTable::Table entries;
  for (size_t i = 0; i < Capacity(); ++i) {
    ObjPtr<mirror::Object> obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
    if (obj != nullptr) {
      obj = table_[i].GetReference()->Read();
      entries.push_back(GcRoot<mirror::Object>(obj));
    }
  }
  ReferenceTable::Dump(os, entries);
}

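// Restores a previously saved segment state, e.g. when a JNI local reference frame is popped.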
void IndirectReferenceTable::SetSegmentState(IRTSegmentState new_state) {
  if (kDebugIRT) {
    LOG(INFO) << "Setting segment state: "
              << segment_state_.top_index
              << " -> "
              << new_state.top_index;
  }
  segment_state_ = new_state;
}

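// Best-effort: ensures at least free_capacity entries are available above the current top index,
// growing the table if it is resizable.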
bool IndirectReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
  size_t top_index = segment_state_.top_index;
  if (top_index < max_entries_ && top_index + free_capacity <= max_entries_) {
    return true;
  }

  // We're only gonna do a simple best-effort here, ensuring the asked-for capacity at the end.
  if (resizable_ == ResizableCapacity::kNo) {
    *error_msg = "Table is not resizable";
    return false;
  }

  // Try to increase the table size.

  // Would this overflow?
  if (std::numeric_limits<size_t>::max() - free_capacity < top_index) {
    *error_msg = "Cannot resize table, overflow.";
    return false;
  }

  if (!Resize(top_index + free_capacity, error_msg)) {
    LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
                 << "): " << std::endl
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this)
                 << " Resizing failed: " << *error_msg;
    return false;
  }
  return true;
}

size_t IndirectReferenceTable::FreeCapacity() const {
  return max_entries_ - segment_state_.top_index;
}

}  // namespace art