/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_id_manager.h"

#include <algorithm>
#include <cstdint>
#include <type_traits>

#include "android-base/macros.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/globals.h"
#include "base/locks.h"
#include "base/mutex.h"
#include "gc/allocation_listener.h"
#include "gc/heap.h"
#include "jni/jni_internal.h"
#include "jni_id_type.h"
#include "mirror/array-inl.h"
#include "mirror/array.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class.h"
#include "mirror/class_ext-inl.h"
#include "mirror/object-inl.h"
#include "obj_ptr-inl.h"
#include "reflective_handle_scope-inl.h"
#include "reflective_handle_scope.h"
#include "reflective_value_visitor.h"
#include "thread-inl.h"
#include "thread.h"

namespace art {
namespace jni {

constexpr bool kTraceIds = false;

// TODO This whole thing could be done lock- and wait-free (since we never remove anything from
// the ids list). It's not clear this would be worthwhile, though.

namespace {

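// Index-style ids are encoded with the low bit set: index 0 <-> id 1, index 1 <-> id 3, and so
// on. The set low bit is what distinguishes these ids from raw ArtField*/ArtMethod* pointers,
// which are always at least 2-byte aligned.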
static constexpr size_t IdToIndex(uintptr_t id) {
  return id >> 1;
}

static constexpr uintptr_t IndexToId(size_t index) {
  return (index << 1) + 1;
}

template <typename ArtType>
ObjPtr<mirror::PointerArray> GetIds(ObjPtr<mirror::Class> k, ArtType* t)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ObjPtr<mirror::Object> ret;
  if constexpr (std::is_same_v<ArtType, ArtField>) {
    ret = t->IsStatic() ? k->GetStaticFieldIds() : k->GetInstanceFieldIds();
  } else {
    ret = t->IsObsolete() ? nullptr : k->GetMethodIds();
  }
  DCHECK(ret.IsNull() || ret->IsArrayInstance()) << "Should have bailed out early!";
  if (kIsDebugBuild && !ret.IsNull()) {
    if (kRuntimePointerSize == PointerSize::k32) {
      CHECK(ret->IsIntArray());
    } else {
      CHECK(ret->IsLongArray());
    }
  }
  return down_cast<mirror::PointerArray*>(ret.Ptr());
}

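// Returns true if a raw pointer should be handed out as the id for this field/method, i.e. when
// the declaring class has no ClassExt or its id slot holds the pointer-id marker object rather
// than an array.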
template <typename ArtType>
bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, ArtType* t)
    REQUIRES_SHARED(Locks::mutator_lock_);

template <>
bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, ArtMethod* t ATTRIBUTE_UNUSED) {
  ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
  if (ext.IsNull()) {
    return true;
  }
  ObjPtr<mirror::Object> arr = ext->GetJMethodIDs();
  return arr.IsNull() || !arr->IsArrayInstance();
}

template <>
bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, ArtField* t) {
  ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
  if (ext.IsNull()) {
    return true;
  }
  ObjPtr<mirror::Object> arr = t->IsStatic() ? ext->GetStaticJFieldIDs()
                                             : ext->GetInstanceJFieldIDs();
  return arr.IsNull() || !arr->IsArrayInstance();
}

// Forces the appropriate id array to be present if possible. Returns true if allocation was
// attempted but failed.
template <typename ArtType>
bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtType* t)
    REQUIRES_SHARED(Locks::mutator_lock_);

template <>
bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtField* field) {
  ScopedExceptionStorage ses(self);
  StackHandleScope<1> hs(self);
  Handle<mirror::Class> h_k(hs.NewHandle(k));
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    return false;
  } else {
    // NB This modifies the class to allocate the ClassExt and the ids array.
    field->IsStatic() ? mirror::Class::EnsureStaticFieldIds(h_k)
                      : mirror::Class::EnsureInstanceFieldIds(h_k);
  }
  if (self->IsExceptionPending()) {
    self->AssertPendingOOMException();
    ses.SuppressOldException("Failed to allocate maps for jfieldIDs. ");
    return true;
  }
  return false;
}

template <>
bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtMethod* method) {
  if (method->IsObsolete()) {
    if (kTraceIds) {
      LOG(INFO) << "jmethodID for Obsolete method " << method->PrettyMethod() << " requested!";
    }
    // No ids array for obsolete methods. Just do a linear scan.
    return false;
  }
  StackHandleScope<1> hs(self);
  Handle<mirror::Class> h_k(hs.NewHandle(k));
  if (Locks::mutator_lock_->IsExclusiveHeld(self) || !Locks::mutator_lock_->IsSharedHeld(self)) {
    return false;
  } else {
    // NB This modifies the class to allocate the ClassExt and the ids array.
    mirror::Class::EnsureMethodIds(h_k);
  }
  if (self->IsExceptionPending()) {
    self->AssertPendingOOMException();
    return true;
  }
  return false;
}

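// Returns the slot of the field/method within its declaring class's id array, or -1 for obsolete
// methods (which have no id array).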
template <typename ArtType>
size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtType* t, PointerSize pointer_size)
    REQUIRES_SHARED(Locks::mutator_lock_);
template <>
size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtField* f, PointerSize ptr_size ATTRIBUTE_UNUSED) {
  return f->IsStatic() ? k->GetStaticFieldIdOffset(f) : k->GetInstanceFieldIdOffset(f);
}
template <>
size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtMethod* method, PointerSize pointer_size) {
  return method->IsObsolete() ? -1 : k->GetMethodIdOffset(method, pointer_size);
}

// Calls the relevant PrettyMethod/PrettyField on the input.
template <typename ArtType>
std::string PrettyGeneric(ArtType t) REQUIRES_SHARED(Locks::mutator_lock_);
template <>
std::string PrettyGeneric(ArtMethod* f) {
  return f->PrettyMethod();
}
template <>
std::string PrettyGeneric(ReflectiveHandle<ArtMethod> f) {
  return f->PrettyMethod();
}
template <>
std::string PrettyGeneric(ArtField* f) {
  return f->PrettyField();
}
template <>
std::string PrettyGeneric(ReflectiveHandle<ArtField> f) {
  return f->PrettyField();
}

// Checks if the field or method is obsolete.
template <typename ArtType>
bool IsObsolete(ReflectiveHandle<ArtType> t) REQUIRES_SHARED(Locks::mutator_lock_);
template <>
bool IsObsolete(ReflectiveHandle<ArtField> t ATTRIBUTE_UNUSED) {
  return false;
}
template <>
bool IsObsolete(ReflectiveHandle<ArtMethod> t) {
  return t->IsObsolete();
}

// Get the canonical (non-copied) version of the field or method. Only relevant for methods.
template <typename ArtType>
ArtType* Canonicalize(ReflectiveHandle<ArtType> t) REQUIRES_SHARED(Locks::mutator_lock_);
template <>
ArtField* Canonicalize(ReflectiveHandle<ArtField> t) {
  return t.Get();
}
template <>
ArtMethod* Canonicalize(ReflectiveHandle<ArtMethod> t) {
  if (UNLIKELY(t->IsCopied())) {
    return t->GetCanonicalMethod();
  }
  return t.Get();
}

};  // namespace

// We increment the id by 2 each time to allow us to use the LSB as a flag marking the ID as an
// index rather than a pointer. This gives us 2**31 unique methods that can be addressed on
// 32-bit ART, which should be more than enough.
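// Ids are therefore always odd (1, 3, 5, ...); IdToIndex maps them back to indices 0, 1, 2, ...
// in the corresponding id map.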
template <>
uintptr_t JniIdManager::GetNextId<ArtField>(JniIdType type) {
  DCHECK_EQ(type, JniIdType::kIndices);
  uintptr_t res = next_field_id_;
  next_field_id_ += 2;
  CHECK_GT(next_field_id_, res) << "jfieldID Overflow";
  return res;
}

template <>
uintptr_t JniIdManager::GetNextId<ArtMethod>(JniIdType type) {
  DCHECK_EQ(type, JniIdType::kIndices);
  uintptr_t res = next_method_id_;
  next_method_id_ += 2;
  CHECK_GT(next_method_id_, res) << "jmethodID Overflow";
  return res;
}

template <>
std::vector<ArtField*>& JniIdManager::GetGenericMap<ArtField>() {
  return field_id_map_;
}

template <>
std::vector<ArtMethod*>& JniIdManager::GetGenericMap<ArtMethod>() {
  return method_id_map_;
}

template <>
size_t JniIdManager::GetLinearSearchStartId<ArtField>(
    ReflectiveHandle<ArtField> t ATTRIBUTE_UNUSED) {
  return deferred_allocation_field_id_start_;
}

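// Obsolete methods never have entries in the class-ext id arrays, so a lookup for one has to
// scan the whole map rather than just the deferred range.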
template <>
size_t JniIdManager::GetLinearSearchStartId<ArtMethod>(ReflectiveHandle<ArtMethod> m) {
  if (m->IsObsolete()) {
    return 1;
  } else {
    return deferred_allocation_method_id_start_;
  }
}

// TODO Need to fix races in here with visitors.
template <typename ArtType>
uintptr_t JniIdManager::EncodeGenericId(ReflectiveHandle<ArtType> t) {
  static_assert(std::is_same_v<ArtType, ArtField> || std::is_same_v<ArtType, ArtMethod>,
                "Expected ArtField or ArtMethod");
  Runtime* runtime = Runtime::Current();
  JniIdType id_type = runtime->GetJniIdType();
  if (id_type == JniIdType::kPointer || t == nullptr) {
    return reinterpret_cast<uintptr_t>(t.Get());
  }
  Thread* self = Thread::Current();
  ScopedExceptionStorage ses(self);
  DCHECK(!t->GetDeclaringClass().IsNull()) << "Null declaring class " << PrettyGeneric(t);
  size_t off = GetIdOffset(t->GetDeclaringClass(), Canonicalize(t), kRuntimePointerSize);
  // Here is the earliest point we can suspend.
  bool allocation_failure = EnsureIdsArray(self, t->GetDeclaringClass(), t.Get());
  if (allocation_failure) {
    self->AssertPendingOOMException();
    ses.SuppressOldException("OOM exception while trying to allocate JNI ids.");
    return 0u;
  } else if (ShouldReturnPointer(t->GetDeclaringClass(), t.Get())) {
    return reinterpret_cast<uintptr_t>(t.Get());
  }
  ObjPtr<mirror::Class> klass = t->GetDeclaringClass();
  ObjPtr<mirror::PointerArray> ids(GetIds(klass, t.Get()));
  uintptr_t cur_id = 0;
  if (!ids.IsNull()) {
    DCHECK_GT(ids->GetLength(), static_cast<int32_t>(off)) << " is " << PrettyGeneric(t);
    DCHECK_LE(0, static_cast<int32_t>(off)) << " is " << PrettyGeneric(t);
    cur_id = ids->GetElementPtrSize<uintptr_t>(off, kRuntimePointerSize);
  }
  if (cur_id != 0) {
    return cur_id;
  }
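  // Slow path: no id has been published for this field/method yet. Take the lock and either find
  // an id another thread raced us to, or allocate a fresh one.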
  WriterMutexLock mu(self, *Locks::jni_id_lock_);
  ScopedAssertNoThreadSuspension sants("EncodeJniId critical section.");
  // Check the ids array for a racing id.
  constexpr std::pair<size_t, size_t> counts {
      std::is_same_v<ArtType, ArtField> ? 1 : 0,
      std::is_same_v<ArtType, ArtField> ? 0 : 1,
  };
  StackReflectiveHandleScope<counts.first, counts.second> hs(self);
  t = hs.NewHandle(Canonicalize(t));
  if (!ids.IsNull()) {
    // It's possible we got suspended and structurally redefined during the EnsureIdsArray. We
    // need to get the information again.
    ids = GetIds(klass, t.Get());
    off = GetIdOffset(klass, Canonicalize(t), kRuntimePointerSize);
    CHECK(!ids.IsNull());
    cur_id = ids->GetElementPtrSize<uintptr_t>(off, kRuntimePointerSize);
    if (cur_id != 0) {
      // We were racing some other thread and lost.
      return cur_id;
    }
  } else {
    // We either cannot allocate anything here or don't have an ids array (we might be an
    // obsolete method).
    DCHECK(IsObsolete(t) || deferred_allocation_refcount_ > 0u)
        << "deferred_allocation_refcount_: " << deferred_allocation_refcount_
        << " t: " << PrettyGeneric(t);
    // Check to see if we raced and lost to another thread.
    const std::vector<ArtType*>& vec = GetGenericMap<ArtType>();
    bool found = false;
    // Simple count-while: count the entries before t (if it is present) to get its index.
    size_t search_start_index = IdToIndex(GetLinearSearchStartId(t));
    size_t index = std::count_if(vec.cbegin() + search_start_index,
                                 vec.cend(),
                                 [&found, &self, t](const ArtType* candidate) {
                                   Locks::mutator_lock_->AssertSharedHeld(self);
                                   found = found || candidate == t.Get();
                                   return !found;
                                 }) +
                   search_start_index;
    if (found) {
      // We were either racing some other thread and lost or this thread was asked to encode the
      // same method multiple times while holding the mutator lock.
      DCHECK_EQ(vec[index], t.Get())
          << "Expected: " << PrettyGeneric(vec[index]) << " got " << PrettyGeneric(t)
          << " at index " << index << " (id: " << IndexToId(index) << ").";
      return IndexToId(index);
    }
  }
  cur_id = GetNextId<ArtType>(id_type);
  DCHECK_EQ(cur_id % 2, 1u);
  size_t cur_index = IdToIndex(cur_id);
  std::vector<ArtType*>& vec = GetGenericMap<ArtType>();
  vec.reserve(cur_index + 1);
  vec.resize(std::max(vec.size(), cur_index + 1), nullptr);
  vec[cur_index] = t.Get();
  if (ids.IsNull()) {
    if (kIsDebugBuild && !IsObsolete(t)) {
      CHECK_NE(deferred_allocation_refcount_, 0u)
          << "Failed to allocate ids array despite not being forbidden from doing so!";
      Locks::mutator_lock_->AssertExclusiveHeld(self);
    }
  } else {
    ids->SetElementPtrSize(off, reinterpret_cast<void*>(cur_id), kRuntimePointerSize);
  }
  return cur_id;
}

jfieldID JniIdManager::EncodeFieldId(ArtField* field) {
  StackArtFieldHandleScope<1> rhs(Thread::Current());
  return EncodeFieldId(rhs.NewHandle(field));
}

jfieldID JniIdManager::EncodeFieldId(ReflectiveHandle<ArtField> field) {
  auto* res = reinterpret_cast<jfieldID>(EncodeGenericId(field));
  if (kTraceIds && field != nullptr) {
    LOG(INFO) << "Returning " << res << " for field " << field->PrettyField();
  }
  return res;
}

jmethodID JniIdManager::EncodeMethodId(ArtMethod* method) {
  StackArtMethodHandleScope<1> rhs(Thread::Current());
  return EncodeMethodId(rhs.NewHandle(method));
}

jmethodID JniIdManager::EncodeMethodId(ReflectiveHandle<ArtMethod> method) {
  auto* res = reinterpret_cast<jmethodID>(EncodeGenericId(method));
  if (kTraceIds && method != nullptr) {
    LOG(INFO) << "Returning " << res << " for method " << method->PrettyMethod();
  }
  return res;
}

void JniIdManager::VisitRoots(RootVisitor* visitor) {
  pointer_marker_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
}

void JniIdManager::Init(Thread* self) {
  // When compiling we don't want anything to do with any of this, which is fine since JNI ids
  // won't be created during AOT compilation. This also means we don't need to do anything
  // complicated with the image-writer.
  if (!Runtime::Current()->IsAotCompiler()) {
    // Allocate the marker object.
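    // The marker is a plain (non-array) object; storing it in a ClassExt id slot makes
    // ShouldReturnPointer treat that class as using pointer-style ids.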
    StackHandleScope<3> hs(self);
    Handle<mirror::Object> marker_obj(
        hs.NewHandle(GetClassRoot<mirror::Object>()->AllocObject(self)));
    CHECK(!marker_obj.IsNull());
    pointer_marker_ = GcRoot<mirror::Object>(marker_obj.Get());
    // Manually mark class-ext as having all pointer-ids to avoid any annoying loops.
    Handle<mirror::Class> class_ext_class(hs.NewHandle(GetClassRoot<mirror::ClassExt>()));
    mirror::Class::EnsureExtDataPresent(class_ext_class, self);
    Handle<mirror::ClassExt> class_ext_ext(hs.NewHandle(class_ext_class->GetExtData()));
    class_ext_ext->SetIdsArraysForClassExtExtData(marker_obj.Get());
  }
}

void JniIdManager::VisitReflectiveTargets(ReflectiveValueVisitor* rvv) {
  art::WriterMutexLock mu(Thread::Current(), *Locks::jni_id_lock_);
  for (auto it = field_id_map_.begin(); it != field_id_map_.end(); ++it) {
    ArtField* old_field = *it;
    uintptr_t id = IndexToId(std::distance(field_id_map_.begin(), it));
    ArtField* new_field =
        rvv->VisitField(old_field, JniIdReflectiveSourceInfo(reinterpret_cast<jfieldID>(id)));
    if (old_field != new_field) {
      *it = new_field;
      ObjPtr<mirror::Class> old_class(old_field->GetDeclaringClass());
      ObjPtr<mirror::Class> new_class(new_field->GetDeclaringClass());
      ObjPtr<mirror::ClassExt> old_ext_data(old_class->GetExtData());
      ObjPtr<mirror::ClassExt> new_ext_data(new_class->GetExtData());
      if (!old_ext_data.IsNull()) {
        CHECK(!old_ext_data->HasInstanceFieldPointerIdMarker() &&
              !old_ext_data->HasStaticFieldPointerIdMarker())
            << old_class->PrettyClass();
        // Clear the old field mapping.
        if (old_field->IsStatic()) {
          size_t old_off = ArraySlice<ArtField>(old_class->GetSFieldsPtr()).OffsetOf(old_field);
          ObjPtr<mirror::PointerArray> old_statics(old_ext_data->GetStaticJFieldIDsPointerArray());
          if (!old_statics.IsNull()) {
            old_statics->SetElementPtrSize(old_off, 0, kRuntimePointerSize);
          }
        } else {
          size_t old_off = ArraySlice<ArtField>(old_class->GetIFieldsPtr()).OffsetOf(old_field);
          ObjPtr<mirror::PointerArray> old_instances(
              old_ext_data->GetInstanceJFieldIDsPointerArray());
          if (!old_instances.IsNull()) {
            old_instances->SetElementPtrSize(old_off, 0, kRuntimePointerSize);
          }
        }
      }
      if (!new_ext_data.IsNull()) {
        CHECK(!new_ext_data->HasInstanceFieldPointerIdMarker() &&
              !new_ext_data->HasStaticFieldPointerIdMarker())
            << new_class->PrettyClass();
        // Set the new field mapping.
        if (new_field->IsStatic()) {
          size_t new_off = ArraySlice<ArtField>(new_class->GetSFieldsPtr()).OffsetOf(new_field);
          ObjPtr<mirror::PointerArray> new_statics(new_ext_data->GetStaticJFieldIDsPointerArray());
          if (!new_statics.IsNull()) {
            new_statics->SetElementPtrSize(new_off, id, kRuntimePointerSize);
          }
        } else {
          size_t new_off = ArraySlice<ArtField>(new_class->GetIFieldsPtr()).OffsetOf(new_field);
          ObjPtr<mirror::PointerArray> new_instances(
              new_ext_data->GetInstanceJFieldIDsPointerArray());
          if (!new_instances.IsNull()) {
            new_instances->SetElementPtrSize(new_off, id, kRuntimePointerSize);
          }
        }
      }
    }
  }
  for (auto it = method_id_map_.begin(); it != method_id_map_.end(); ++it) {
    ArtMethod* old_method = *it;
    uintptr_t id = IndexToId(std::distance(method_id_map_.begin(), it));
    ArtMethod* new_method =
        rvv->VisitMethod(old_method, JniIdReflectiveSourceInfo(reinterpret_cast<jmethodID>(id)));
    if (old_method != new_method) {
      *it = new_method;
      ObjPtr<mirror::Class> old_class(old_method->GetDeclaringClass());
      ObjPtr<mirror::Class> new_class(new_method->GetDeclaringClass());
      ObjPtr<mirror::ClassExt> old_ext_data(old_class->GetExtData());
      ObjPtr<mirror::ClassExt> new_ext_data(new_class->GetExtData());
      if (!old_ext_data.IsNull()) {
        CHECK(!old_ext_data->HasMethodPointerIdMarker()) << old_class->PrettyClass();
        // Clear the old method mapping.
        size_t old_off = ArraySlice<ArtMethod>(old_class->GetMethodsPtr()).OffsetOf(old_method);
        ObjPtr<mirror::PointerArray> old_methods(old_ext_data->GetJMethodIDsPointerArray());
        if (!old_methods.IsNull()) {
          old_methods->SetElementPtrSize(old_off, 0, kRuntimePointerSize);
        }
      }
      if (!new_ext_data.IsNull()) {
        CHECK(!new_ext_data->HasMethodPointerIdMarker()) << new_class->PrettyClass();
        // Set the new method mapping.
        size_t new_off = ArraySlice<ArtMethod>(new_class->GetMethodsPtr()).OffsetOf(new_method);
        ObjPtr<mirror::PointerArray> new_methods(new_ext_data->GetJMethodIDsPointerArray());
        if (!new_methods.IsNull()) {
          new_methods->SetElementPtrSize(new_off, id, kRuntimePointerSize);
        }
      }
    }
  }
}

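// Ids with the low bit set are indices into the id maps; anything else is just a raw
// ArtField*/ArtMethod* pointer.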
template <typename ArtType>
ArtType* JniIdManager::DecodeGenericId(uintptr_t t) {
  if (Runtime::Current()->GetJniIdType() == JniIdType::kIndices && (t % 2) == 1) {
    ReaderMutexLock mu(Thread::Current(), *Locks::jni_id_lock_);
    size_t index = IdToIndex(t);
    DCHECK_GT(GetGenericMap<ArtType>().size(), index);
    return GetGenericMap<ArtType>().at(index);
  } else {
    DCHECK_EQ((t % 2), 0u) << "id: " << t;
    return reinterpret_cast<ArtType*>(t);
  }
}

ArtMethod* JniIdManager::DecodeMethodId(jmethodID method) {
  return DecodeGenericId<ArtMethod>(reinterpret_cast<uintptr_t>(method));
}

ArtField* JniIdManager::DecodeFieldId(jfieldID field) {
  return DecodeGenericId<ArtField>(reinterpret_cast<uintptr_t>(field));
}

ObjPtr<mirror::Object> JniIdManager::GetPointerMarker() {
  return pointer_marker_.Read();
}

// This whole defer system is an annoying requirement to allow us to generate IDs during
// heap-walks such as those required for instrumentation tooling.
//
// The defer system works with the normal id-assignment routine to ensure that all the class-ext
// data structures are eventually created and filled in. The id-assignment function checks
// whether the thread holds the mutator lock exclusively. If it does not, it will try to allocate
// the class-ext data structures normally and fail if it is unable to do so. If the mutator lock
// is held exclusively, no allocation attempt is made and the thread will CHECK that allocations
// are being deferred (or that the method is obsolete, in which case there is no class-ext to
// store the method->id map in).
//
// Once the thread is done holding the exclusive mutator-lock it will go back and fill in the
// class-ext data of all the methods that were added. We do this without the exclusive
// mutator-lock on a copy of the maps before we decrement the deferred refcount. This ensures
// that any other threads running at the same time know they need to perform a linear scan of the
// id-map. Since we don't have the mutator-lock anymore other threads can allocate the class-ext
// data, meaning our copy is fine. The only way additional methods could end up on the id-maps
// after our copy without having class-ext data is if another thread picked up the exclusive
// mutator-lock and added another defer, in which case that thread would fix up the remaining
// ids. In this way we maintain eventual consistency between the class-ext method/field->id maps
// and the JniIdManager id->method/field maps.
//
// TODO It is possible for another thread to gain the mutator-lock and allocate new ids without
// calling StartDefer. This is basically a race that we should try to catch, but doing so is
// rather difficult and, since this defer system is only used in very rare circumstances, it is
// unlikely to be worth the trouble.
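//
// Typical use is via ScopedEnableSuspendAllJniIdQueries (defined at the end of this file):
//
//   ScopedEnableSuspendAllJniIdQueries scope;  // Calls StartDefer().
//   ... suspend everything and encode jmethodIDs/jfieldIDs while holding the mutator lock ...
//   // Destructor calls EndDefer(), which fills in the deferred class-ext id arrays.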
void JniIdManager::StartDefer() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::jni_id_lock_);
  if (deferred_allocation_refcount_++ == 0) {
    deferred_allocation_field_id_start_ = next_field_id_;
    deferred_allocation_method_id_start_ = next_method_id_;
  }
}

class JniIdDeferStackReflectiveScope : public BaseReflectiveHandleScope {
 public:
  JniIdDeferStackReflectiveScope() REQUIRES_SHARED(art::Locks::mutator_lock_)
      : BaseReflectiveHandleScope(), methods_(), fields_() {
    PushScope(Thread::Current());
  }

  void Initialize(const std::vector<ArtMethod*>& methods, const std::vector<ArtField*>& fields)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Roles::uninterruptible_) {
    methods_ = methods;
    fields_ = fields;
  }

  ~JniIdDeferStackReflectiveScope() REQUIRES_SHARED(Locks::mutator_lock_) {
    PopScope();
  }

  void VisitTargets(ReflectiveValueVisitor* visitor) override
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (auto it = methods_.begin(); it != methods_.end(); ++it) {
      if (*it == nullptr) {
        continue;
      }
      *it = visitor->VisitMethod(*it, ReflectiveHandleScopeSourceInfo(this));
    }
    for (auto it = fields_.begin(); it != fields_.end(); ++it) {
      if (*it == nullptr) {
        continue;
      }
      *it = visitor->VisitField(*it, ReflectiveHandleScopeSourceInfo(this));
    }
  }

  ArtField** GetFieldPtr(size_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    return &fields_[idx];
  }

  ArtMethod** GetMethodPtr(size_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
    return &methods_[idx];
  }

  size_t NumFields() const {
    return fields_.size();
  }
  size_t NumMethods() const {
    return methods_.size();
  }

 private:
  std::vector<ArtMethod*> methods_;
  std::vector<ArtField*> fields_;
};

void JniIdManager::EndDefer() {
  // Fixup the method->id map.
  Thread* self = Thread::Current();
  auto set_id = [&](auto** t, uintptr_t id) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (t == nullptr) {
      return;
    }
    bool alloc_failure = EnsureIdsArray(self, (*t)->GetDeclaringClass(), *t);
    ObjPtr<mirror::Class> klass((*t)->GetDeclaringClass());
    size_t off = GetIdOffset(klass, (*t), kRuntimePointerSize);
    ObjPtr<mirror::PointerArray> ids = GetIds(klass, (*t));
    CHECK(!alloc_failure) << "Could not allocate jni ids array!";
    if (ids.IsNull()) {
      return;
    }
    if (kIsDebugBuild) {
      uintptr_t old_id = ids->GetElementPtrSize<uintptr_t, kRuntimePointerSize>(off);
      if (old_id != 0) {
        DCHECK_EQ(old_id, id);
      }
    }
    ids->SetElementPtrSize(off, reinterpret_cast<void*>(id), kRuntimePointerSize);
  };
  // To ensure eventual consistency this depends on the fact that the method_id_map_ and
  // field_id_map_ are the ultimate source of truth and no id is ever reused to be valid. It also
  // relies on all threads always calling StartDefer if they are going to be allocating jni ids
  // while suspended. If a thread tries to do so while it doesn't have a scope we could miss ids.
  // TODO We should use roles or something to verify that this requirement is not broken.
  //
  // If another thread comes along and adds more methods to the list after copying, either
  // (1) the id-maps are already present for the method and everything is fine, (2) the thread is
  // not suspended and so can create the ext-data and id lists or, (3) the thread also suspended
  // everything and incremented the deferred_allocation_refcount_ so it will fix up the new ids
  // when it finishes.
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::mutator_lock_->AssertSharedHeld(self);
  JniIdDeferStackReflectiveScope jidsrs;
  uintptr_t method_start_id;
  uintptr_t field_start_id;
  {
    ReaderMutexLock mu(self, *Locks::jni_id_lock_);
    ScopedAssertNoThreadSuspension sants(__FUNCTION__);
    jidsrs.Initialize(method_id_map_, field_id_map_);
    method_start_id = deferred_allocation_method_id_start_;
    field_start_id = deferred_allocation_field_id_start_;
  }

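  // In debug builds walk the entire maps so set_id's DCHECK can verify already-published ids;
  // otherwise only the ids handed out while allocation was deferred need fixing up.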
  for (size_t index = kIsDebugBuild ? 0 : IdToIndex(method_start_id); index < jidsrs.NumMethods();
       ++index) {
    set_id(jidsrs.GetMethodPtr(index), IndexToId(index));
  }
  for (size_t index = kIsDebugBuild ? 0 : IdToIndex(field_start_id); index < jidsrs.NumFields();
       ++index) {
    set_id(jidsrs.GetFieldPtr(index), IndexToId(index));
  }
  WriterMutexLock mu(self, *Locks::jni_id_lock_);
  DCHECK_GE(deferred_allocation_refcount_, 1u);
  if (--deferred_allocation_refcount_ == 0) {
    deferred_allocation_field_id_start_ = 0;
    deferred_allocation_method_id_start_ = 0;
  }
}

ScopedEnableSuspendAllJniIdQueries::ScopedEnableSuspendAllJniIdQueries()
    : manager_(Runtime::Current()->GetJniIdManager()) {
  manager_->StartDefer();
}

ScopedEnableSuspendAllJniIdQueries::~ScopedEnableSuspendAllJniIdQueries() {
  manager_->EndDefer();
}

};  // namespace jni
};  // namespace art