/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

17 #include "debugger_interface.h"
18
19 #include <android-base/logging.h>
20
21 #include "base/array_ref.h"
22 #include "base/bit_utils.h"
23 #include "base/logging.h"
24 #include "base/mutex.h"
25 #include "base/time_utils.h"
26 #include "base/utils.h"
27 #include "dex/dex_file.h"
28 #include "elf/elf_debug_reader.h"
29 #include "jit/jit.h"
30 #include "jit/jit_code_cache.h"
31 #include "jit/jit_memory_region.h"
32 #include "runtime.h"
33 #include "thread-current-inl.h"
34 #include "thread.h"
35
36 #include <atomic>
37 #include <cstddef>
38
//
// Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
//
// See http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
//
// There are three ways for native tools to access the debug data safely:
//
// 1) Synchronously, by setting a breakpoint in the __*_debug_register_code
//    method, which is called after every modification of the linked list.
//    GDB does this, but it is complex to set up and it stops the process.
//
// 2) Asynchronously, using the entry seqlocks.
//   * The seqlock is a monotonically increasing counter, which
//     is even if the entry is valid and odd if it is invalid.
//     It is set to an even value after all other fields are set,
//     and it is set to an odd value before the entry is deleted.
//   * This makes it possible to safely read the symfile data:
//     * The reader should read the value of the seqlock both
//       before and after reading the symfile. If the seqlock
//       values match and are even, the copy is consistent.
//   * Entries are recycled, but never freed, which guarantees
//     that the seqlock is not overwritten by a random value.
//   * The linked list is one level higher. The next-pointer
//     must always point to an entry with an even seqlock, which
//     ensures that entries of a crashed process can be read.
//     This means the entry must be added after it is created
//     and it must be removed before it is invalidated (odd).
//   * When iterating over the linked list, the reader can use
//     the timestamps to ensure that the current and next entry
//     were not deleted, using the following steps:
//       1) Read the next pointer and the next entry's seqlock.
//       2) Read the symfile and re-read the next pointer.
//       3) Re-read both the current and next seqlock.
//       4) Go to step 1 using the new entry and seqlock.
//
// 3) Asynchronously, using the global seqlock.
//   * The seqlock is a monotonically increasing counter, which is incremented
//     before and after every modification of the linked list. An odd value of
//     the counter means the linked list is being modified (it is locked).
//   * The tool should read the value of the seqlock both before and after
//     copying the linked list. If the seqlock values match and are even,
//     the copy is consistent. Otherwise, the reader should try again.
//   * Note that using the data directly while it is being modified
//     might crash the tool. Therefore, the only safe way is to make
//     a copy and use the copy only after the seqlock has been checked.
//   * Note that the process might even free and munmap the data while
//     it is being copied, therefore the reader should either handle
//     SEGV or use OS calls to read the memory (e.g. process_vm_readv).
//   * The timestamps on the entry record the time when the entry was
//     created, which is relevant if the unwinding is not live and is
//     postponed until much later. All timestamps must be unique.
//   * For full conformance with the C++ memory model, all seqlock-protected
//     accesses should be atomic. We currently do this in the more critical
//     cases. The rest will have to be fixed before attempting to run TSAN on
//     this code.
//
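//
// For illustration, a minimal reader-side sketch of the global seqlock
// protocol (option 3 above). This is only a sketch: ReadRemote() and
// ReadRemoteU32() stand in for whatever out-of-process read mechanism the
// tool uses (e.g. process_vm_readv) and are not part of this interface:
//
//   JITDescriptorPublic copy;
//   for (;;) {
//     uint32_t seq = ReadRemoteU32(&descriptor->seqlock_);
//     if ((seq & 1) != 0) continue;                 // Odd: list is locked.
//     ReadRemote(descriptor, &copy, sizeof(copy));  // Copy out the data.
//     if (ReadRemoteU32(&descriptor->seqlock_) == seq) break;  // Consistent.
//   }
//   // 'copy' (and any entries copied the same way) is now safe to use.
//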

namespace art {

static Mutex g_jit_debug_lock("JIT native debug entries", kNativeDebugInterfaceLock);
static Mutex g_dex_debug_lock("DEX native debug entries", kNativeDebugInterfaceLock);

// Most loads and stores need no synchronization since all memory is protected by the global locks.
// Some writes are synchronized so libunwindstack can read the memory safely from another process.
constexpr std::memory_order kNonRacingRelaxed = std::memory_order_relaxed;

// Size of JIT code range covered by each packed JITCodeEntry.
constexpr uint32_t kJitRepackGroupSize = 64 * KB;
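// For example, a method whose code begins at 0x12345678 falls in the group
// starting at AlignDown(0x12345678, 64 * KB) == 0x12340000 (see RepackEntries).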

// Automatically call the repack method every 'n' new entries.
constexpr uint32_t kJitRepackFrequency = 64;

// Public binary interface between ART and native tools (gdb, libunwind, etc).
// The fields below need to be exported and have special names as per the gdb api.
extern "C" {
  enum JITAction {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
  };

  // Public/stable binary interface.
  struct JITCodeEntryPublic {
    std::atomic<const JITCodeEntry*> next_;  // Atomic to guarantee consistency after crash.
    const JITCodeEntry* prev_ = nullptr;     // For linked list deletion. Unused in readers.
    const uint8_t* symfile_addr_ = nullptr;  // Address of the in-memory ELF file.
    uint64_t symfile_size_ = 0;              // NB: The offset is 12 on x86 but 16 on ARM32.

    // Android-specific fields:
    uint64_t timestamp_;               // CLOCK_MONOTONIC time of entry registration.
    std::atomic_uint32_t seqlock_{1};  // Synchronization. Even value if entry is valid.
  };

  // Implementation-specific fields (which can be used only in this file).
  struct JITCodeEntry : public JITCodeEntryPublic {
    // Unpacked entries: Code address of the symbol in the ELF file.
    // Packed entries: The start address of the covered memory range.
    const void* addr_ = nullptr;
    // Allow merging of ELF files to save space.
    // Packing drops advanced DWARF data, so it is not always desirable.
    bool allow_packing_ = false;
    // Whether this entry has been LZMA compressed.
    // Compression is expensive, so we don't always do it.
    bool is_compressed_ = false;
  };

  // Public/stable binary interface.
  struct JITDescriptorPublic {
    uint32_t version_ = 1;                            // NB: GDB supports only version 1.
    uint32_t action_flag_ = JIT_NOACTION;             // One of the JITAction enum values.
    const JITCodeEntry* relevant_entry_ = nullptr;    // The entry affected by the action.
    std::atomic<const JITCodeEntry*> head_{nullptr};  // Head of the linked list of all entries.

    // Android-specific fields:
    uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '2'};
    uint32_t flags_ = 0;  // Reserved for future use. Must be 0.
    uint32_t sizeof_descriptor = sizeof(JITDescriptorPublic);
    uint32_t sizeof_entry = sizeof(JITCodeEntryPublic);
    std::atomic_uint32_t seqlock_{0};  // Incremented before and after any modification.
    uint64_t timestamp_ = 1;           // CLOCK_MONOTONIC time of last action.
  };

  // Implementation-specific fields (which can be used only in this file).
  struct JITDescriptor : public JITDescriptorPublic {
    const JITCodeEntry* tail_ = nullptr;          // Tail of the linked list of all live entries.
    const JITCodeEntry* free_entries_ = nullptr;  // List of deleted entries ready for reuse.

    // Used for memory sharing with zygote. See NativeDebugInfoPreFork().
    const JITCodeEntry* zygote_head_entry_ = nullptr;
    JITCodeEntry application_tail_entry_{};
  };

  // Public interface: Can be used by a reader to check that the structs have the expected size.
  uint32_t g_art_sizeof_jit_code_entry = sizeof(JITCodeEntryPublic);
  uint32_t g_art_sizeof_jit_descriptor = sizeof(JITDescriptorPublic);
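  // For example, a reader may verify layout compatibility before parsing the
  // list (a sketch; ReadRemoteU32() is a hypothetical out-of-process read):
  //   if (ReadRemoteU32(&g_art_sizeof_jit_code_entry) != sizeof(JITCodeEntryPublic)) {
  //     return false;  // Unexpected layout; bail out.
  //   }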

  // Check that std::atomic has the expected layout.
  static_assert(alignof(std::atomic_uint32_t) == alignof(uint32_t), "Weird alignment");
  static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t), "Weird size");
  static_assert(std::atomic_uint32_t::is_always_lock_free, "Expected to be lock free");
  static_assert(alignof(std::atomic<void*>) == alignof(void*), "Weird alignment");
  static_assert(sizeof(std::atomic<void*>) == sizeof(void*), "Weird size");
  static_assert(std::atomic<void*>::is_always_lock_free, "Expected to be lock free");

  // GDB may set breakpoint here. We must ensure it is not removed or deduplicated.
  void __attribute__((noinline)) __jit_debug_register_code() {
    __asm__("");
  }

  // Alternatively, native tools may overwrite this field to execute a custom handler.
  void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;

  // The root data structure describing all JITed methods.
  JITDescriptor __jit_debug_descriptor GUARDED_BY(g_jit_debug_lock) {};

  // The following globals mirror the ones above, but are used to register dex files.
  void __attribute__((noinline)) __dex_debug_register_code() {
    __asm__("");
  }
  void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
  JITDescriptor __dex_debug_descriptor GUARDED_BY(g_dex_debug_lock) {};
}  // extern "C"

// The fields below are internal, but we keep them here anyway for consistency.
// Their state is related to the static state above and it must be kept in sync.

// Used only in debug builds to check that we are not adding duplicate entries.
static std::unordered_set<const void*> g_dcheck_all_jit_functions GUARDED_BY(g_jit_debug_lock);

// Methods that have been marked for deletion on the next repack pass.
static std::vector<const void*> g_removed_jit_functions GUARDED_BY(g_jit_debug_lock);

// Number of small (single symbol) ELF files. Used to trigger repacking.
static uint32_t g_jit_num_unpacked_entries = 0;

struct DexNativeInfo {
  static constexpr bool kCopySymfileData = false;  // Just reference DEX files.
  static JITDescriptor& Descriptor() { return __dex_debug_descriptor; }
  static void NotifyNativeDebugger() { __dex_debug_register_code_ptr(); }
  static const void* Alloc(size_t size) { return malloc(size); }
  static void Free(const void* ptr) { free(const_cast<void*>(ptr)); }
  template<class T> static T* Writable(const T* v) { return const_cast<T*>(v); }
};

struct JitNativeInfo {
  static constexpr bool kCopySymfileData = true;  // Copy debug info to JIT memory.
  static JITDescriptor& Descriptor() { return __jit_debug_descriptor; }
  static void NotifyNativeDebugger() { __jit_debug_register_code_ptr(); }
  static const void* Alloc(size_t size) { return Memory()->AllocateData(size); }
  static void Free(const void* ptr) { Memory()->FreeData(reinterpret_cast<const uint8_t*>(ptr)); }
  static void Free(void* ptr) = delete;

  template<class T> static T* Writable(const T* v) {
    // Special case: This entry is in static memory and not allocated in JIT memory.
    if (v == reinterpret_cast<const void*>(&Descriptor().application_tail_entry_)) {
      return const_cast<T*>(v);
    }
    return const_cast<T*>(Memory()->GetWritableDataAddress(v));
  }

  static jit::JitMemoryRegion* Memory() ASSERT_CAPABILITY(Locks::jit_lock_) {
    Locks::jit_lock_->AssertHeld(Thread::Current());
    jit::JitCodeCache* jit_code_cache = Runtime::Current()->GetJitCodeCache();
    CHECK(jit_code_cache != nullptr);
    jit::JitMemoryRegion* memory = jit_code_cache->GetCurrentRegion();
    CHECK(memory->IsValid());
    return memory;
  }
};


ArrayRef<const uint8_t> GetJITCodeEntrySymFile(const JITCodeEntry* entry) {
  return ArrayRef<const uint8_t>(entry->symfile_addr_, entry->symfile_size_);
}

// Ensure the timestamp is monotonically increasing even in the presence of a
// low-granularity system timer. This ensures that each entry has a unique timestamp.
static uint64_t GetNextTimestamp(JITDescriptor& descriptor) {
  return std::max(descriptor.timestamp_ + 1, NanoTime());
}

// Mark the descriptor as "locked", so native tools know the data is being modified.
static void Seqlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.seqlock_.load(kNonRacingRelaxed) & 1, 0u) << "Already locked";
  descriptor.seqlock_.fetch_add(1, std::memory_order_relaxed);
  // Ensure that any writes within the locked section cannot be reordered before the increment.
  std::atomic_thread_fence(std::memory_order_release);
}

// Mark the descriptor as "unlocked", so native tools know the data is safe to read.
static void Sequnlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.seqlock_.load(kNonRacingRelaxed) & 1, 1u) << "Already unlocked";
  // Ensure that any writes within the locked section cannot be reordered after the increment.
  std::atomic_thread_fence(std::memory_order_release);
  descriptor.seqlock_.fetch_add(1, std::memory_order_relaxed);
}


// Insert 'entry' in the linked list before 'next' and mark it as valid (append if 'next' is null).
// This method must be called under global lock (g_jit_debug_lock or g_dex_debug_lock).
template<class NativeInfo>
static void InsertNewEntry(const JITCodeEntry* entry, const JITCodeEntry* next) {
  CHECK_EQ(entry->seqlock_.load(kNonRacingRelaxed) & 1, 1u) << "Expected invalid entry";
  JITDescriptor& descriptor = NativeInfo::Descriptor();
  const JITCodeEntry* prev = (next != nullptr ? next->prev_ : descriptor.tail_);
  JITCodeEntry* writable = NativeInfo::Writable(entry);
  writable->next_ = next;
  writable->prev_ = prev;
  writable->seqlock_.fetch_add(1, std::memory_order_release);  // Mark as valid.
  // Backward pointers should not be used by readers, so they are non-atomic.
  if (next != nullptr) {
    NativeInfo::Writable(next)->prev_ = entry;
  } else {
    descriptor.tail_ = entry;
  }
  // Forward pointers must be atomic and they must point to a valid entry at all times.
  if (prev != nullptr) {
    NativeInfo::Writable(prev)->next_.store(entry, std::memory_order_release);
  } else {
    descriptor.head_.store(entry, std::memory_order_release);
  }
}


// This must be called with the appropriate lock taken (g_{jit,dex}_debug_lock).
template<class NativeInfo>
static const JITCodeEntry* CreateJITCodeEntryInternal(
    ArrayRef<const uint8_t> symfile = ArrayRef<const uint8_t>(),
    const void* addr = nullptr,
    bool allow_packing = false,
    bool is_compressed = false) {
  JITDescriptor& descriptor = NativeInfo::Descriptor();

  // Allocate a JITCodeEntry if needed.
  if (descriptor.free_entries_ == nullptr) {
    const void* memory = NativeInfo::Alloc(sizeof(JITCodeEntry));
    if (memory == nullptr) {
      LOG(ERROR) << "Failed to allocate memory for native debug info";
      return nullptr;
    }
    new (NativeInfo::Writable(memory)) JITCodeEntry();
    descriptor.free_entries_ = reinterpret_cast<const JITCodeEntry*>(memory);
  }

  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
  if (NativeInfo::kCopySymfileData && !symfile.empty()) {
    const uint8_t* copy = reinterpret_cast<const uint8_t*>(NativeInfo::Alloc(symfile.size()));
    if (copy == nullptr) {
      LOG(ERROR) << "Failed to allocate memory for native debug info";
      return nullptr;
    }
    memcpy(NativeInfo::Writable(copy), symfile.data(), symfile.size());
    symfile = ArrayRef<const uint8_t>(copy, symfile.size());
  }

  uint64_t timestamp = GetNextTimestamp(descriptor);

  // We must insert entries at a specific place. See NativeDebugInfoPreFork().
  const JITCodeEntry* next = descriptor.head_.load(kNonRacingRelaxed);  // Insert at the head.
  if (descriptor.zygote_head_entry_ != nullptr && Runtime::Current()->IsZygote()) {
    next = nullptr;  // Insert zygote entries at the tail.
  }

  // Pop an entry from the free list.
  const JITCodeEntry* entry = descriptor.free_entries_;
  descriptor.free_entries_ = descriptor.free_entries_->next_.load(kNonRacingRelaxed);

  // Create the entry and set all its fields.
  JITCodeEntry* writable_entry = NativeInfo::Writable(entry);
  writable_entry->symfile_addr_ = symfile.data();
  writable_entry->symfile_size_ = symfile.size();
  writable_entry->addr_ = addr;
  writable_entry->allow_packing_ = allow_packing;
  writable_entry->is_compressed_ = is_compressed;
  writable_entry->timestamp_ = timestamp;

  // Add the entry to the main linked list.
  Seqlock(descriptor);
  InsertNewEntry<NativeInfo>(entry, next);
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_REGISTER_FN;
  descriptor.timestamp_ = timestamp;
  Sequnlock(descriptor);

  NativeInfo::NotifyNativeDebugger();

  return entry;
}


template<class NativeInfo>
static void DeleteJITCodeEntryInternal(const JITCodeEntry* entry) {
  CHECK(entry != nullptr);
  JITDescriptor& descriptor = NativeInfo::Descriptor();

  // Remove the entry from the main linked list.
  Seqlock(descriptor);
  const JITCodeEntry* next = entry->next_.load(kNonRacingRelaxed);
  const JITCodeEntry* prev = entry->prev_;
  if (next != nullptr) {
    NativeInfo::Writable(next)->prev_ = prev;
  } else {
    descriptor.tail_ = prev;
  }
  if (prev != nullptr) {
    NativeInfo::Writable(prev)->next_.store(next, std::memory_order_relaxed);
  } else {
    descriptor.head_.store(next, std::memory_order_relaxed);
  }
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_UNREGISTER_FN;
  descriptor.timestamp_ = GetNextTimestamp(descriptor);
  Sequnlock(descriptor);

  NativeInfo::NotifyNativeDebugger();

  // Delete the entry.
  JITCodeEntry* writable_entry = NativeInfo::Writable(entry);
  CHECK_EQ(writable_entry->seqlock_.load(kNonRacingRelaxed) & 1, 0u) << "Expected valid entry";
  // Release: Ensures that "next_" points to a valid entry at any time in the reader.
  writable_entry->seqlock_.fetch_add(1, std::memory_order_release);  // Mark as invalid.
  // Release: Ensures that the entry is seen as invalid before its data is freed.
  std::atomic_thread_fence(std::memory_order_release);
  const uint8_t* symfile = entry->symfile_addr_;
  writable_entry->symfile_addr_ = nullptr;
  if (NativeInfo::kCopySymfileData && symfile != nullptr) {
    NativeInfo::Free(symfile);
  }

  // Push the entry to the free list.
  writable_entry->next_.store(descriptor.free_entries_, kNonRacingRelaxed);
  writable_entry->prev_ = nullptr;
  descriptor.free_entries_ = entry;
}


void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  // Compact dex files may store data past the size defined in the header.
  const DexFile::Header& header = dexfile->GetHeader();
  uint32_t size = std::max(header.file_size_, header.data_off_ + header.data_size_);
  const ArrayRef<const uint8_t> symfile(dexfile->Begin(), size);
  CreateJITCodeEntryInternal<DexNativeInfo>(symfile);
}

void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  // We register dex files in the class linker and free them in DexFile_closeDexFile, but
  // there might be cases where we load the dex file without using it in the class linker.
  // On the other hand, a single dex file might also be used with different class-loaders.
  for (const JITCodeEntry* entry = __dex_debug_descriptor.head_; entry != nullptr; ) {
    const JITCodeEntry* next = entry->next_;  // Save the next pointer before we free the memory.
    if (entry->symfile_addr_ == dexfile->Begin()) {
      DeleteJITCodeEntryInternal<DexNativeInfo>(entry);
    }
    entry = next;
  }
}


// Splits the linked list into two parts:
// The first part (including the static head pointer) is owned by the application.
// The second part is owned by the zygote and might be concurrently modified by it.
//
// We add two empty entries at the boundary which are never removed (app_tail, zygote_head).
// These entries are needed to preserve the next/prev pointers in the linked list,
// since the zygote cannot modify the application's data and vice versa.
//
//          <--- owned by the application memory ---> <--- owned by zygote memory --->
//         |----------------------|------------------|-------------|-----------------|
// head -> | application_entries* | application_tail | zygote_head | zygote_entries* |
//         |+---------------------|------------------|-------------|----------------+|
//          |                                                                        |
//          \-(new application entries)                        (new zygote entries)-/
//
// Zygote entries are inserted at the end, which means that repacked zygote entries
// will still be seen by a single forward iteration of the linked list (avoiding a race).
//
// Application entries are inserted at the start, which introduces a repacking race,
// but that is ok, since it is easy to read the new entries from the head in a further pass.
// The benefit is that this makes it fast to read only the new entries.
//
void NativeDebugInfoPreFork() {
  CHECK(Runtime::Current()->IsZygote());
  JITDescriptor& descriptor = JitNativeInfo::Descriptor();
  if (descriptor.zygote_head_entry_ != nullptr) {
    return;  // Already done - we need to do this only on the first fork.
  }

  // Create the zygote-owned head entry (with no ELF file).
  // The data will be allocated from the current JIT memory (owned by zygote).
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);  // Needed to alloc entry.
  const JITCodeEntry* zygote_head =
      reinterpret_cast<const JITCodeEntry*>(JitNativeInfo::Alloc(sizeof(JITCodeEntry)));
  CHECK(zygote_head != nullptr);
  new (JitNativeInfo::Writable(zygote_head)) JITCodeEntry();  // Initialize.
  InsertNewEntry<JitNativeInfo>(zygote_head, descriptor.head_);
  descriptor.zygote_head_entry_ = zygote_head;

  // Create the child-owned tail entry (with no ELF file).
  // The data is statically allocated since it must be owned by the forked process.
  InsertNewEntry<JitNativeInfo>(&descriptor.application_tail_entry_, descriptor.head_);
}

void NativeDebugInfoPostFork() {
  CHECK(!Runtime::Current()->IsZygote());
  JITDescriptor& descriptor = JitNativeInfo::Descriptor();
  descriptor.free_entries_ = nullptr;  // Don't reuse zygote's entries.
}


// Split the JIT code cache into groups of fixed size and create a single JITCodeEntry for each group.
// The start address of a method's code determines which group it belongs to. The end is irrelevant.
// New mini debug infos will be merged if possible, and entries for GCed functions will be removed.
static void RepackEntries(bool compress_entries, ArrayRef<const void*> removed)
    REQUIRES(g_jit_debug_lock) {
  DCHECK(std::is_sorted(removed.begin(), removed.end()));
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return;
  }
  JITDescriptor& descriptor = __jit_debug_descriptor;
  bool is_zygote = Runtime::Current()->IsZygote();

  // Collect entries that we want to pack.
  std::vector<const JITCodeEntry*> entries;
  entries.reserve(2 * kJitRepackFrequency);
  for (const JITCodeEntry* it = descriptor.head_; it != nullptr; it = it->next_) {
    if (it == descriptor.zygote_head_entry_ && !is_zygote) {
      break;  // Memory owned by the zygote process (read-only for an app).
    }
    if (it->allow_packing_) {
      if (!compress_entries && it->is_compressed_ && removed.empty()) {
        continue;  // If we are not compressing, also avoid decompressing.
      }
      entries.push_back(it);
    }
  }
  auto cmp = [](const JITCodeEntry* l, const JITCodeEntry* r) { return l->addr_ < r->addr_; };
  std::sort(entries.begin(), entries.end(), cmp);  // Sort by address.

  // Process the entries in groups (each spanning a memory range of size kJitRepackGroupSize).
  for (auto group_it = entries.begin(); group_it != entries.end();) {
    const void* group_ptr = AlignDown((*group_it)->addr_, kJitRepackGroupSize);
    const void* group_end = reinterpret_cast<const uint8_t*>(group_ptr) + kJitRepackGroupSize;

    // Find all entries in this group (each entry is an in-memory ELF file).
    auto begin = group_it;
    auto end = std::find_if(begin, entries.end(), [=](auto* e) { return e->addr_ >= group_end; });
    CHECK(end > begin);
    ArrayRef<const JITCodeEntry*> elfs(&*begin, end - begin);

    // Find all symbols that have been removed in this memory range.
    auto removed_begin = std::lower_bound(removed.begin(), removed.end(), group_ptr);
    auto removed_end = std::lower_bound(removed.begin(), removed.end(), group_end);
    CHECK(removed_end >= removed_begin);
    ArrayRef<const void*> removed_subset(&*removed_begin, removed_end - removed_begin);

    // Optimization: Don't compress the last group since it will likely change again soon.
    bool compress = compress_entries && end != entries.end();

    // Bail out early if there is nothing to do for this group.
    if (elfs.size() == 1 && removed_subset.empty() && (*begin)->is_compressed_ == compress) {
      group_it = end;  // Go to next group.
      continue;
    }

    // Create a new single JITCodeEntry that covers this memory range.
    uint64_t start_time = MicroTime();
    size_t live_symbols;
    std::vector<uint8_t> packed = jit->GetJitCompiler()->PackElfFileForJIT(
        elfs, removed_subset, compress, &live_symbols);
    VLOG(jit)
        << "JIT mini-debug-info repacked"
        << " for " << group_ptr
        << " in " << MicroTime() - start_time << "us"
        << " elfs=" << elfs.size()
        << " dead=" << removed_subset.size()
        << " live=" << live_symbols
        << " size=" << packed.size() << (compress ? "(lzma)" : "");

    // Replace the old entries with the new one (with their lifetimes temporally overlapping).
    CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(packed),
                                              /*addr_=*/ group_ptr,
                                              /*allow_packing_=*/ true,
                                              /*is_compressed_=*/ compress);
    for (auto it : elfs) {
      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
    }
    group_it = end;  // Go to next group.
  }
  g_jit_num_unpacked_entries = 0;
}


void RepackNativeDebugInfoForJitLocked() REQUIRES(g_jit_debug_lock);

void AddNativeDebugInfoForJit(const void* code_ptr,
                              const std::vector<uint8_t>& symfile,
                              bool allow_packing) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  DCHECK_NE(symfile.size(), 0u);
  if (kIsDebugBuild && code_ptr != nullptr) {
    DCHECK(g_dcheck_all_jit_functions.insert(code_ptr).second) << code_ptr << " already added";
  }

  // Remove all methods which have been marked for removal. The JIT GC should
  // force a repack, so this should happen only rarely for various corner cases.
  // Must be done before the addition in case the added code_ptr is in the removed set.
  if (!g_removed_jit_functions.empty()) {
    RepackNativeDebugInfoForJitLocked();
  }

  CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(symfile),
                                            /*addr=*/ code_ptr,
                                            /*allow_packing=*/ allow_packing,
                                            /*is_compressed=*/ false);

  VLOG(jit)
      << "JIT mini-debug-info added"
      << " for " << code_ptr
      << " size=" << PrettySize(symfile.size());

  // Automatically repack entries on a regular basis to save space.
  // Pack (but don't compress) recent entries - this is cheap and reduces memory use by ~4x.
  // We delay compression until after GC since it is more expensive (and saves a further ~4x).
  // Always compress in the zygote, since it does not GC and we want to keep the high-water mark low.
  if (++g_jit_num_unpacked_entries >= kJitRepackFrequency) {
    bool is_zygote = Runtime::Current()->IsZygote();
    RepackEntries(/*compress_entries=*/ is_zygote, /*removed=*/ ArrayRef<const void*>());
  }
}


void RemoveNativeDebugInfoForJit(const void* code_ptr) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  g_dcheck_all_jit_functions.erase(code_ptr);

  // Method removal is very expensive since we need to decompress and read ELF files.
  // Collect the methods to be removed and do the removal in bulk later.
  g_removed_jit_functions.push_back(code_ptr);

  VLOG(jit) << "JIT mini-debug-info removed for " << code_ptr;
}

void RepackNativeDebugInfoForJitLocked() {
  // Remove entries which are inside packed and compressed ELF files.
  std::vector<const void*>& removed = g_removed_jit_functions;
  std::sort(removed.begin(), removed.end());
  RepackEntries(/*compress_entries=*/ true, ArrayRef<const void*>(removed));

  // Remove entries which are not allowed to be packed (each containing a single method).
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr;) {
    const JITCodeEntry* next = it->next_;
    if (!it->allow_packing_ && std::binary_search(removed.begin(), removed.end(), it->addr_)) {
      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
    }
    it = next;
  }

  removed.clear();
  removed.shrink_to_fit();
}


void RepackNativeDebugInfoForJit() {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  RepackNativeDebugInfoForJitLocked();
}

size_t GetJitMiniDebugInfoMemUsage() {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  size_t size = 0;
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr; it = it->next_) {
    size += sizeof(JITCodeEntry) + it->symfile_size_;
  }
  return size;
}

Mutex* GetNativeDebugInfoLock() {
  return &g_jit_debug_lock;
}

void ForEachNativeDebugSymbol(std::function<void(const void*, size_t, const char*)> cb) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  using ElfRuntimeTypes = std::conditional<sizeof(void*) == 4, ElfTypes32, ElfTypes64>::type;
  const JITCodeEntry* end = __jit_debug_descriptor.zygote_head_entry_;
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != end; it = it->next_) {
    ArrayRef<const uint8_t> buffer(it->symfile_addr_, it->symfile_size_);
    if (!buffer.empty()) {
      ElfDebugReader<ElfRuntimeTypes> reader(buffer);
      reader.VisitFunctionSymbols([&](ElfRuntimeTypes::Sym sym, const char* name) {
        cb(reinterpret_cast<const void*>(sym.st_value), sym.st_size, name);
      });
    }
  }
}

}  // namespace art