/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "indirect_reference_table-inl.h"

#include "base/bit_utils.h"
#include "base/globals.h"
#include "base/mutator_locked_dumpable.h"
#include "base/systrace.h"
#include "base/utils.h"
#include "indirect_reference_table.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

#include <cstdlib>

namespace art {

static constexpr bool kDumpStackOnNonLocalReference = false;
static constexpr bool kDebugIRT = false;

// Maximum table size we allow.
static constexpr size_t kMaxTableSizeInBytes = 128 * MB;
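// For scale (illustrative arithmetic; the 8-byte entry size is an assumption,
// see IrtEntry in the header): 128 MiB / 8 bytes = 2^24 = 16,777,216 entries.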

const char* GetIndirectRefKindString(const IndirectRefKind& kind) {
  switch (kind) {
    case kJniTransitionOrInvalid:
      return "JniTransitionOrInvalid";
    case kLocal:
      return "Local";
    case kGlobal:
      return "Global";
    case kWeakGlobal:
      return "WeakGlobal";
  }
  return "IndirectRefKind Error";
}

void IndirectReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
  // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  if (!vm->IsCheckJniEnabled()) {
    // Otherwise, we want to abort rather than hand back a bad reference.
    LOG(FATAL) << msg;
  } else {
    LOG(ERROR) << msg;
  }
}

IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
                                               IndirectRefKind desired_kind,
                                               ResizableCapacity resizable,
                                               std::string* error_msg)
    : segment_state_(kIRTFirstSegment),
      kind_(desired_kind),
      max_entries_(max_count),
      current_num_holes_(0),
      resizable_(resizable) {
  CHECK(error_msg != nullptr);
  CHECK_NE(desired_kind, kJniTransitionOrInvalid);

  // Overflow and maximum check.
  CHECK_LE(max_count, kMaxTableSizeInBytes / sizeof(IrtEntry));

  const size_t table_bytes = RoundUp(max_count * sizeof(IrtEntry), kPageSize);
  table_mem_map_ = MemMap::MapAnonymous("indirect ref table",
                                        table_bytes,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ false,
                                        error_msg);
  if (!table_mem_map_.IsValid() && error_msg->empty()) {
    *error_msg = "Unable to map memory for indirect ref table";
  }

  if (table_mem_map_.IsValid()) {
    table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
  } else {
    table_ = nullptr;
  }
  segment_state_ = kIRTFirstSegment;
  last_known_previous_state_ = kIRTFirstSegment;
  // Take into account the actual length.
  max_entries_ = table_bytes / sizeof(IrtEntry);
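  // E.g. (values assumed for illustration, not guaranteed): with 4 KiB pages
  // and an 8-byte IrtEntry, a request for 500 entries rounds up to one
  // 4096-byte page, so max_entries_ becomes 512.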
}

IndirectReferenceTable::~IndirectReferenceTable() {
}

void IndirectReferenceTable::ConstexprChecks() {
  // Use this for some assertions. They can't be put into the header as C++ wants the class
  // to be complete.

  // Check kind.
  static_assert((EncodeIndirectRefKind(kLocal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert((EncodeIndirectRefKind(kWeakGlobal) & (~kKindMask)) == 0, "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kLocal)) == kLocal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kGlobal)) == kGlobal,
                "Kind encoding error");
  static_assert(DecodeIndirectRefKind(EncodeIndirectRefKind(kWeakGlobal)) == kWeakGlobal,
                "Kind encoding error");

  // Check serial.
  static_assert(DecodeSerial(EncodeSerial(0u)) == 0u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(1u)) == 1u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(2u)) == 2u, "Serial encoding error");
  static_assert(DecodeSerial(EncodeSerial(3u)) == 3u, "Serial encoding error");

  // Table index.
  static_assert(DecodeIndex(EncodeIndex(0u)) == 0u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(1u)) == 1u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(2u)) == 2u, "Index encoding error");
  static_assert(DecodeIndex(EncodeIndex(3u)) == 3u, "Index encoding error");
}

bool IndirectReferenceTable::IsValid() const {
  return table_mem_map_.IsValid();
}

// Holes:
//
// To keep the IRT compact, we want to fill "holes" created by non-stack-discipline Add & Remove
// operation sequences. For simplicity and lower memory overhead, we do not use a free list or
// similar. Instead, we scan for holes, with the expectation that we will find holes quickly, as
// they are usually near the end of the table (see the header; TODO: verify this assumption). To
// avoid scans when there are no holes, the number of known holes is tracked.
//
// A previous implementation stored the top index and the number of holes as the segment state.
// This constrains the maximum number of references to 16 bits. We want to relax this, as it
// is easy to require more references (e.g., to list all classes in large applications). Thus,
// the implicitly stack-stored state, the IRTSegmentState, is only the top index.
//
// The hole count is therefore a local property of the current segment and needs to be recovered
// when (or after) a frame is pushed or popped. To keep JNI transitions simple (and inlineable),
// we cannot do work when the segment changes. Thus, Add and Remove need to ensure that the
// current hole count is correct.
//
// To be able to detect segment changes, we require an additional local field that describes
// the known segment. This is last_known_previous_state_. The requirement becomes clear with
// the following (partly non-trivial) cases that have to be supported:
//
// 1) Segment with holes (current_num_holes_ > 0), push new segment, add/remove reference.
// 2) Segment with holes (current_num_holes_ > 0), pop segment, add/remove reference.
// 3) Segment with holes (current_num_holes_ > 0), push new segment, pop segment, add/remove
//    reference.
// 4) Empty segment, push new segment, create a hole, pop a segment, add/remove a reference.
// 5) Base segment, push new segment, create a hole, pop a segment, push new segment, add/remove
//    reference.
//
// Storing the last known *previous* state (bottom index) allows conservatively detecting all the
// segment changes above. The condition is simply that the last known state is greater than or
// equal to the current previous state, and smaller than the current state (top index). The
// condition is conservative, as it adds O(1) overhead to operations on an empty segment.
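//
// Worked example (case 4 above; concrete numbers for illustration only):
//   - Empty base segment: top_index == 0, last_known_previous_state_ == 0.
//   - Push a segment (the cookie records previous top == 0); Add three refs
//     (indices 0..2, top_index == 3), then Remove index 1: one hole,
//     current_num_holes_ == 1.
//   - Pop the segment: top_index drops back to 0, but current_num_holes_
//     still says 1, which is stale for the now-current segment.
//   - The next Add/Remove calls RecoverHoles(): last_known_previous_state_
//     .top_index (0) >= segment_state_.top_index (0) flags the segment
//     change, the (empty) range [0, 0) is rescanned, and the hole count is
//     reset to 0 before the operation proceeds.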

static size_t CountNullEntries(const IrtEntry* table, size_t from, size_t to) {
  size_t count = 0;
  for (size_t index = from; index != to; ++index) {
    if (table[index].GetReference()->IsNull()) {
      count++;
    }
  }
  return count;
}

void IndirectReferenceTable::RecoverHoles(IRTSegmentState prev_state) {
  if (last_known_previous_state_.top_index >= segment_state_.top_index ||
      last_known_previous_state_.top_index < prev_state.top_index) {
    const size_t top_index = segment_state_.top_index;
    size_t count = CountNullEntries(table_, prev_state.top_index, top_index);

    if (kDebugIRT) {
      LOG(INFO) << "+++ Recovered holes: "
                << " Current prev=" << prev_state.top_index
                << " Current top_index=" << top_index
                << " Old num_holes=" << current_num_holes_
                << " New num_holes=" << count;
    }

    current_num_holes_ = count;
    last_known_previous_state_ = prev_state;
  } else if (kDebugIRT) {
    LOG(INFO) << "No need to recover holes";
  }
}

ALWAYS_INLINE
static inline void CheckHoleCount(IrtEntry* table,
                                  size_t exp_num_holes,
                                  IRTSegmentState prev_state,
                                  IRTSegmentState cur_state) {
  if (kIsDebugBuild) {
    size_t count = CountNullEntries(table, prev_state.top_index, cur_state.top_index);
    CHECK_EQ(exp_num_holes, count) << "prevState=" << prev_state.top_index
                                   << " topIndex=" << cur_state.top_index;
  }
}

bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) {
  CHECK_GT(new_size, max_entries_);

  constexpr size_t kMaxEntries = kMaxTableSizeInBytes / sizeof(IrtEntry);
  if (new_size > kMaxEntries) {
    *error_msg = android::base::StringPrintf("Requested size exceeds maximum: %zu", new_size);
    return false;
  }
  // Note: the above check also ensures that there is no overflow below.

  const size_t table_bytes = RoundUp(new_size * sizeof(IrtEntry), kPageSize);
  MemMap new_map = MemMap::MapAnonymous("indirect ref table",
                                        table_bytes,
                                        PROT_READ | PROT_WRITE,
                                        /*low_4gb=*/ false,
                                        error_msg);
  if (!new_map.IsValid()) {
    return false;
  }

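  // The old entries are plain data (serial numbers and compressed references),
  // so a raw byte copy of the old table contents is sufficient.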
  memcpy(new_map.Begin(), table_mem_map_.Begin(), table_mem_map_.Size());
  table_mem_map_ = std::move(new_map);
  table_ = reinterpret_cast<IrtEntry*>(table_mem_map_.Begin());
  const size_t real_new_size = table_bytes / sizeof(IrtEntry);
  DCHECK_GE(real_new_size, new_size);
  max_entries_ = real_new_size;

  return true;
}

IndirectRef IndirectReferenceTable::Add(IRTSegmentState previous_state,
                                        ObjPtr<mirror::Object> obj,
                                        std::string* error_msg) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Add: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  size_t top_index = segment_state_.top_index;

  CHECK(obj != nullptr);
  VerifyObject(obj);
  DCHECK(table_ != nullptr);

  if (top_index == max_entries_) {
    if (resizable_ == ResizableCapacity::kNo) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")"
          << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      *error_msg = oss.str();
      return nullptr;
    }

    // Try to double the space.
    if (std::numeric_limits<size_t>::max() / 2 < max_entries_) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: exceeds size_t";
      *error_msg = oss.str();
      return nullptr;
    }

    std::string inner_error_msg;
    if (!Resize(max_entries_ * 2, &inner_error_msg)) {
      std::ostringstream oss;
      oss << "JNI ERROR (app bug): " << kind_ << " table overflow "
          << "(max=" << max_entries_ << ")" << std::endl
          << MutatorLockedDumpable<IndirectReferenceTable>(*this)
          << " Resizing failed: " << inner_error_msg;
      *error_msg = oss.str();
      return nullptr;
    }
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  // We know there's enough room in the table. Now we just need to find
  // the right spot. If there's a hole, find it and fill it; otherwise,
  // add to the end of the list.
  IndirectRef result;
  size_t index;
  if (current_num_holes_ > 0) {
    DCHECK_GT(top_index, 1U);
    // Find the first hole; likely to be near the end of the list.
    IrtEntry* p_scan = &table_[top_index - 1];
    DCHECK(!p_scan->GetReference()->IsNull());
    --p_scan;
    while (!p_scan->GetReference()->IsNull()) {
      DCHECK_GE(p_scan, table_ + previous_state.top_index);
      --p_scan;
    }
    index = p_scan - table_;
    current_num_holes_--;
  } else {
    // Add to the end.
    index = top_index++;
    segment_state_.top_index = top_index;
  }
  table_[index].Add(obj);
  result = ToIndirectRef(index);
  if (kDebugIRT) {
    LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.top_index
              << " holes=" << current_num_holes_;
  }

  DCHECK(result != nullptr);
  return result;
}

void IndirectReferenceTable::AssertEmpty() {
  for (size_t i = 0; i < Capacity(); ++i) {
    if (!table_[i].GetReference()->IsNull()) {
      LOG(FATAL) << "Internal Error: non-empty local reference table\n"
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this);
      UNREACHABLE();
    }
  }
}

// Removes an object. We extract the table offset bits from "iref"
// and zap the corresponding entry, leaving a hole if it's not at the top.
// If the entry is not between the current top index and the bottom index
// specified by the cookie, we don't remove anything. This is the behavior
// required by JNI's DeleteLocalRef function.
// This method is not called when a local frame is popped; this is only used
// for explicit single removals.
// Returns "false" if nothing was removed.
bool IndirectReferenceTable::Remove(IRTSegmentState previous_state, IndirectRef iref) {
  if (kDebugIRT) {
    LOG(INFO) << "+++ Remove: previous_state=" << previous_state.top_index
              << " top_index=" << segment_state_.top_index
              << " last_known_prev_top_index=" << last_known_previous_state_.top_index
              << " holes=" << current_num_holes_;
  }

  const uint32_t top_index = segment_state_.top_index;
  const uint32_t bottom_index = previous_state.top_index;

  DCHECK(table_ != nullptr);

  // TODO: We should eagerly check the ref kind against the `kind_` instead of
  // relying on this weak check and postponing the rest until `CheckEntry()` below.
  // Passing the wrong kind currently results in misleading warnings.
  if (GetIndirectRefKind(iref) == kJniTransitionOrInvalid) {
    auto* self = Thread::Current();
    ScopedObjectAccess soa(self);
    if (self->IsJniTransitionReference(reinterpret_cast<jobject>(iref))) {
      auto* env = self->GetJniEnv();
      DCHECK(env != nullptr);
      if (env->IsCheckJniEnabled()) {
        LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
        if (kDumpStackOnNonLocalReference) {
          self->Dump(LOG_STREAM(WARNING));
        }
      }
      return true;
    }
  }

  const uint32_t idx = ExtractIndex(iref);
  if (idx < bottom_index) {
    // Wrong segment.
    LOG(WARNING) << "Attempt to remove index outside index area (" << idx
                 << " vs " << bottom_index << "-" << top_index << ")";
    return false;
  }
  if (idx >= top_index) {
    // Bad --- stale reference?
    LOG(WARNING) << "Attempt to remove invalid index " << idx
                 << " (bottom=" << bottom_index << " top=" << top_index << ")";
    return false;
  }

  RecoverHoles(previous_state);
  CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);

  if (idx == top_index - 1) {
    // Top-most entry. Scan up and consume holes.

    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    if (current_num_holes_ != 0) {
      uint32_t collapse_top_index = top_index;
      while (--collapse_top_index > bottom_index && current_num_holes_ != 0) {
        if (kDebugIRT) {
          ScopedObjectAccess soa(Thread::Current());
          LOG(INFO) << "+++ checking for hole at " << collapse_top_index - 1
                    << " (previous_state=" << bottom_index << ") val="
                    << table_[collapse_top_index - 1].GetReference()->Read<kWithoutReadBarrier>();
        }
        if (!table_[collapse_top_index - 1].GetReference()->IsNull()) {
          break;
        }
        if (kDebugIRT) {
          LOG(INFO) << "+++ ate hole at " << (collapse_top_index - 1);
        }
        current_num_holes_--;
      }
      segment_state_.top_index = collapse_top_index;

      CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    } else {
      segment_state_.top_index = top_index - 1;
      if (kDebugIRT) {
        LOG(INFO) << "+++ ate last entry " << top_index - 1;
      }
    }
  } else {
    // Not the top-most entry. This creates a hole. We null out the entry to prevent somebody
    // from deleting it twice and screwing up the hole count.
    if (table_[idx].GetReference()->IsNull()) {
      LOG(INFO) << "--- WEIRD: removing null entry " << idx;
      return false;
    }
    if (!CheckEntry("remove", iref, idx)) {
      return false;
    }

    *table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
    current_num_holes_++;
    CheckHoleCount(table_, current_num_holes_, previous_state, segment_state_);
    if (kDebugIRT) {
      LOG(INFO) << "+++ left hole at " << idx << ", holes=" << current_num_holes_;
    }
  }

  return true;
}
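
// Minimal usage sketch (illustrative only; assumes the segment-state accessor
// declared in the header):
//
//   IRTSegmentState cookie = irt.GetSegmentState();     // remember the frame
//   IndirectRef ref = irt.Add(cookie, obj, &error_msg);
//   ...
//   irt.Remove(cookie, ref);       // explicit single removal (this method)
//   irt.SetSegmentState(cookie);   // or pop the whole segment at once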

void IndirectReferenceTable::Trim() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  const size_t top_index = Capacity();
  uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);
  uint8_t* release_end = static_cast<uint8_t*>(table_mem_map_.BaseEnd());
  DCHECK_GE(reinterpret_cast<uintptr_t>(release_end), reinterpret_cast<uintptr_t>(release_start));
  DCHECK_ALIGNED(release_end, kPageSize);
  DCHECK_ALIGNED(release_end - release_start, kPageSize);
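  // MADV_DONTNEED tells the kernel it may reclaim the physical pages backing
  // the unused tail of the mapping; the virtual range stays mapped and reads
  // back as zero-filled pages if touched again.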
  madvise(release_start, release_end - release_start, MADV_DONTNEED);
}

void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
  for (auto ref : *this) {
    if (!ref->IsNull()) {
      root_visitor.VisitRoot(*ref);
      DCHECK(!ref->IsNull());
    }
  }
}

void IndirectReferenceTable::Dump(std::ostream& os) const {
  os << kind_ << " table dump:\n";
  ReferenceTable::Table entries;
  for (size_t i = 0; i < Capacity(); ++i) {
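    // First read without a read barrier to cheaply skip null entries; re-read
    // through the read barrier below only for entries that are actually dumped.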
    ObjPtr<mirror::Object> obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
    if (obj != nullptr) {
      obj = table_[i].GetReference()->Read();
      entries.push_back(GcRoot<mirror::Object>(obj));
    }
  }
  ReferenceTable::Dump(os, entries);
}

void IndirectReferenceTable::SetSegmentState(IRTSegmentState new_state) {
  if (kDebugIRT) {
    LOG(INFO) << "Setting segment state: "
              << segment_state_.top_index
              << " -> "
              << new_state.top_index;
  }
  segment_state_ = new_state;
}

bool IndirectReferenceTable::EnsureFreeCapacity(size_t free_capacity, std::string* error_msg) {
  size_t top_index = segment_state_.top_index;
  if (top_index < max_entries_ && top_index + free_capacity <= max_entries_) {
    return true;
  }

  // We only make a simple best-effort attempt here, ensuring the asked-for capacity at the end.
  if (resizable_ == ResizableCapacity::kNo) {
    *error_msg = "Table is not resizable";
    return false;
  }

  // Try to increase the table size.

  // Would this overflow?
  if (std::numeric_limits<size_t>::max() - free_capacity < top_index) {
    *error_msg = "Cannot resize table, overflow.";
    return false;
  }

  if (!Resize(top_index + free_capacity, error_msg)) {
    LOG(WARNING) << "JNI ERROR: Unable to reserve space in EnsureFreeCapacity (" << free_capacity
                 << "): " << std::endl
                 << MutatorLockedDumpable<IndirectReferenceTable>(*this)
                 << " Resizing failed: " << *error_msg;
    return false;
  }
  return true;
}

size_t IndirectReferenceTable::FreeCapacity() const {
  return max_entries_ - segment_state_.top_index;
}

}  // namespace art