/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger_interface.h"

#include <android-base/logging.h>

#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "dex/dex_file.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_memory_region.h"
#include "runtime.h"
#include "thread-current-inl.h"
#include "thread.h"

#include <atomic>
#include <cstddef>

//
// Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
//
// See http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
//
// There are three ways for native tools to access the debug data safely:
//
// 1) Synchronously, by setting a breakpoint in the __*_debug_register_code
//    method, which is called after every modification of the linked list.
//    GDB does this, but it is complex to set up and it stops the process.
//
// 2) Asynchronously, using the entry seqlocks.
//   * The seqlock is a monotonically increasing counter, which
//     is even if the entry is valid and odd if it is invalid.
//     It is set to an even value after all other fields are set,
//     and it is set to an odd value before the entry is deleted.
//   * This makes it possible to safely read the symfile data:
//     * The reader should read the value of the seqlock both
//       before and after reading the symfile. If the seqlock
//       values match and are even, the copy is consistent.
//   * Entries are recycled, but never freed, which guarantees
//     that the seqlock is not overwritten by a random value.
//   * The same scheme applies one level higher, to the linked
//     list: the next-pointer must always point to an entry with
//     an even seqlock, which ensures that entries of a crashed
//     process can be read. This means the entry must be added
//     after it is created and it must be removed before it is
//     invalidated (odd).
//   * When iterating over the linked list the reader can use
//     the timestamps to ensure that the current and next entries
//     were not deleted, using the following steps:
//       1) Read the next pointer and the next entry's seqlock.
//       2) Read the symfile and re-read the next pointer.
//       3) Re-read both the current and next seqlock.
//       4) Go to step 1, using the new entry and seqlock.
//
// 3) Asynchronously, using the global seqlock.
//   * The seqlock is a monotonically increasing counter which is incremented
//     before and after every modification of the linked list. An odd value of
//     the counter means the linked list is being modified (it is locked).
//   * The tool should read the value of the seqlock both before and after
//     copying the linked list.  If the seqlock values match and are even,
//     the copy is consistent.  Otherwise, the reader should try again.
//     * Note that using the data directly while it is being modified
//       might crash the tool.  Therefore, the only safe way is to make
//       a copy and use the copy only after the seqlock has been checked.
//     * Note that the process might even free and munmap the data while
//       it is being copied, therefore the reader should either handle
//       SEGV or use OS calls to read the memory (e.g. process_vm_readv).
//   * The timestamps on the entry record the time when the entry was
//     created, which is relevant if the unwinding is not live and is
//     postponed until much later.  All timestamps must be unique.
//   * For full conformance with the C++ memory model, all seqlock-
//     protected accesses should be atomic. We currently do this in the
//     more critical cases. The rest will have to be fixed before
//     attempting to run TSAN on this code.
//
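// As a concrete illustration, a minimal in-process reader for option 2 might
// look as follows (a sketch only, not part of this file's API; real tools
// read the memory remotely and must handle torn reads, e.g. via
// process_vm_readv as noted above):
//
//   // Copies the symfile of 'entry' into 'out'. Returns false if the entry
//   // was invalid or was concurrently modified (the caller should retry).
//   bool ReadSymfile(const JITCodeEntryPublic* entry, std::vector<uint8_t>* out) {
//     uint32_t seqlock = entry->seqlock_.load(std::memory_order_acquire);
//     if ((seqlock & 1) != 0) {
//       return false;  // Odd seqlock: entry is deleted or not yet initialized.
//     }
//     out->assign(entry->symfile_addr_, entry->symfile_addr_ + entry->symfile_size_);
//     std::atomic_thread_fence(std::memory_order_acquire);
//     // Even and unchanged seqlock: the copy is consistent.
//     return entry->seqlock_.load(std::memory_order_relaxed) == seqlock;
//   }
//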

namespace art {

static Mutex g_jit_debug_lock("JIT native debug entries", kNativeDebugInterfaceLock);
static Mutex g_dex_debug_lock("DEX native debug entries", kNativeDebugInterfaceLock);

// Most loads and stores need no synchronization since all memory is protected by the global locks.
// Some writes are synchronized so libunwindstack can read the memory safely from another process.
constexpr std::memory_order kNonRacingRelaxed = std::memory_order_relaxed;

// Public binary interface between ART and native tools (gdb, libunwind, etc.).
extern "C" {
  enum JITAction {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
  };

  // Public/stable binary interface.
  struct JITCodeEntryPublic {
    std::atomic<const JITCodeEntry*> next_;  // Atomic to guarantee consistency after crash.
    const JITCodeEntry* prev_ = nullptr;     // For linked list deletion. Unused in readers.
    const uint8_t* symfile_addr_ = nullptr;  // Address of the in-memory ELF file.
    uint64_t symfile_size_ = 0;              // NB: The offset is 12 on x86 but 16 on ARM32.

    // Android-specific fields:
    uint64_t timestamp_;                     // CLOCK_MONOTONIC time of entry registration.
    std::atomic_uint32_t seqlock_{1};        // Synchronization. Even value if entry is valid.
  };

  // Implementation-specific fields (which can be used only in this file).
  struct JITCodeEntry : public JITCodeEntryPublic {
    // Unpacked entries: Code address of the symbol in the ELF file.
    // Packed entries: The start address of the covered memory range.
    const void* addr_ = nullptr;
    // Allow merging of ELF files to save space.
    // Packing drops advanced DWARF data, so it is not always desirable.
    bool allow_packing_ = false;
    // Whether this entry has been LZMA compressed.
    // Compression is expensive, so we don't always do it.
    bool is_compressed_ = false;
  };

  // Public/stable binary interface.
  struct JITDescriptorPublic {
    uint32_t version_ = 1;                            // NB: GDB supports only version 1.
    uint32_t action_flag_ = JIT_NOACTION;             // One of the JITAction enum values.
    const JITCodeEntry* relevant_entry_ = nullptr;    // The entry affected by the action.
    std::atomic<const JITCodeEntry*> head_{nullptr};  // Head of the linked list of all entries.

    // Android-specific fields:
    uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '2'};
    uint32_t flags_ = 0;  // Reserved for future use. Must be 0.
    uint32_t sizeof_descriptor = sizeof(JITDescriptorPublic);
    uint32_t sizeof_entry = sizeof(JITCodeEntryPublic);
    std::atomic_uint32_t seqlock_{0};  // Incremented before and after any modification.
    uint64_t timestamp_ = 1;           // CLOCK_MONOTONIC time of last action.
  };

  // Implementation-specific fields (which can be used only in this file).
  struct JITDescriptor : public JITDescriptorPublic {
    const JITCodeEntry* tail_ = nullptr;          // Tail of the linked list of all live entries.
    const JITCodeEntry* free_entries_ = nullptr;  // List of deleted entries ready for reuse.

    // Used for memory sharing with zygote. See NativeDebugInfoPreFork().
    const JITCodeEntry* zygote_head_entry_ = nullptr;
    JITCodeEntry application_tail_entry_{};
  };

  // Public interface: Can be used by readers to check that the structs have the expected size.
  uint32_t g_art_sizeof_jit_code_entry = sizeof(JITCodeEntryPublic);
  uint32_t g_art_sizeof_jit_descriptor = sizeof(JITDescriptorPublic);

  // Check that std::atomic has the expected layout.
  static_assert(alignof(std::atomic_uint32_t) == alignof(uint32_t), "Weird alignment");
  static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t), "Weird size");
  static_assert(std::atomic_uint32_t::is_always_lock_free, "Expected to be lock free");
  static_assert(alignof(std::atomic<void*>) == alignof(void*), "Weird alignment");
  static_assert(sizeof(std::atomic<void*>) == sizeof(void*), "Weird size");
  static_assert(std::atomic<void*>::is_always_lock_free, "Expected to be lock free");

  // GDB may set a breakpoint here. We must ensure it is not removed or deduplicated.
  void __attribute__((noinline)) __jit_debug_register_code() {
    __asm__("");
  }

  // Alternatively, native tools may overwrite this field to execute a custom handler.
  void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;

  // The root data structure describing all JITed methods.
  JITDescriptor __jit_debug_descriptor GUARDED_BY(g_jit_debug_lock) {};

  // The following globals mirror the ones above, but are used to register dex files.
  void __attribute__((noinline)) __dex_debug_register_code() {
    __asm__("");
  }
  void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
  JITDescriptor __dex_debug_descriptor GUARDED_BY(g_dex_debug_lock) {};
}

struct DexNativeInfo {
  static constexpr bool kCopySymfileData = false;  // Just reference DEX files.
  static JITDescriptor& Descriptor() { return __dex_debug_descriptor; }
  static void NotifyNativeDebugger() { __dex_debug_register_code_ptr(); }
  static const void* Alloc(size_t size) { return malloc(size); }
  static void Free(const void* ptr) { free(const_cast<void*>(ptr)); }
  template<class T> static T* Writable(const T* v) { return const_cast<T*>(v); }
};

struct JitNativeInfo {
  static constexpr bool kCopySymfileData = true;  // Copy debug info to JIT memory.
  static JITDescriptor& Descriptor() { return __jit_debug_descriptor; }
  static void NotifyNativeDebugger() { __jit_debug_register_code_ptr(); }
  static const void* Alloc(size_t size) { return Memory()->AllocateData(size); }
  static void Free(const void* ptr) { Memory()->FreeData(reinterpret_cast<const uint8_t*>(ptr)); }
  static void Free(void* ptr) = delete;

  template<class T> static T* Writable(const T* v) {
    // Special case: This entry is in static memory and not allocated in JIT memory.
    if (v == reinterpret_cast<const void*>(&Descriptor().application_tail_entry_)) {
      return const_cast<T*>(v);
    }
    return const_cast<T*>(Memory()->GetWritableDataAddress(v));
  }

  static jit::JitMemoryRegion* Memory() ASSERT_CAPABILITY(Locks::jit_lock_) {
    Locks::jit_lock_->AssertHeld(Thread::Current());
    jit::JitCodeCache* jit_code_cache = Runtime::Current()->GetJitCodeCache();
    CHECK(jit_code_cache != nullptr);
    jit::JitMemoryRegion* memory = jit_code_cache->GetCurrentRegion();
    CHECK(memory->IsValid());
    return memory;
  }
};

ArrayRef<const uint8_t> GetJITCodeEntrySymFile(const JITCodeEntry* entry) {
  return ArrayRef<const uint8_t>(entry->symfile_addr_, entry->symfile_size_);
}

// Ensure the timestamp is monotonically increasing even in the presence of a
// low-granularity system timer.  This ensures each entry has a unique timestamp.
static uint64_t GetNextTimestamp(JITDescriptor& descriptor) {
  return std::max(descriptor.timestamp_ + 1, NanoTime());
}

// Mark the descriptor as "locked", so native tools know the data is being modified.
static void Seqlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.seqlock_.load(kNonRacingRelaxed) & 1, 0u) << "Already locked";
  descriptor.seqlock_.fetch_add(1, std::memory_order_relaxed);
  // Ensure that any writes within the locked section cannot be reordered before the increment.
  std::atomic_thread_fence(std::memory_order_release);
}

// Mark the descriptor as "unlocked", so native tools know the data is safe to read.
static void Sequnlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.seqlock_.load(kNonRacingRelaxed) & 1, 1u) << "Already unlocked";
  // Ensure that any writes within the locked section cannot be reordered after the increment.
  std::atomic_thread_fence(std::memory_order_release);
  descriptor.seqlock_.fetch_add(1, std::memory_order_relaxed);
}
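
// For reference, a reader-side counterpart of the two functions above (a
// sketch only, assuming an in-process reader; external tools must instead
// copy the memory with OS calls and re-check the seqlock, as described in
// the file comment):
//
//   // Snapshots the head pointer once the descriptor is observed in a
//   // quiescent state (even seqlock, unchanged across the read).
//   const JITCodeEntry* ReadHead(const JITDescriptorPublic& descriptor) {
//     while (true) {
//       uint32_t seqlock = descriptor.seqlock_.load(std::memory_order_acquire);
//       if ((seqlock & 1) != 0) {
//         continue;  // Odd value: the linked list is currently being modified.
//       }
//       const JITCodeEntry* head = descriptor.head_.load(std::memory_order_relaxed);
//       std::atomic_thread_fence(std::memory_order_acquire);
//       if (descriptor.seqlock_.load(std::memory_order_relaxed) == seqlock) {
//         return head;  // Even and unchanged: consistent snapshot.
//       }
//     }
//   }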

// Insert 'entry' in the linked list before 'next' and mark it as valid (append if 'next' is null).
// This method must be called under the global lock (g_jit_debug_lock or g_dex_debug_lock).
template<class NativeInfo>
static void InsertNewEntry(const JITCodeEntry* entry, const JITCodeEntry* next) {
  CHECK_EQ(entry->seqlock_.load(kNonRacingRelaxed) & 1, 1u) << "Expected invalid entry";
  JITDescriptor& descriptor = NativeInfo::Descriptor();
  const JITCodeEntry* prev = (next != nullptr ? next->prev_ : descriptor.tail_);
  JITCodeEntry* writable = NativeInfo::Writable(entry);
  writable->next_ = next;
  writable->prev_ = prev;
  writable->seqlock_.fetch_add(1, std::memory_order_release);  // Mark as valid.
  // Backward pointers should not be used by readers, so they are non-atomic.
  if (next != nullptr) {
    NativeInfo::Writable(next)->prev_ = entry;
  } else {
    descriptor.tail_ = entry;
  }
  // Forward pointers must be atomic and they must point to a valid entry at all times.
  if (prev != nullptr) {
    NativeInfo::Writable(prev)->next_.store(entry, std::memory_order_release);
  } else {
    descriptor.head_.store(entry, std::memory_order_release);
  }
}

// This must be called with the appropriate lock taken (g_{jit,dex}_debug_lock).
template<class NativeInfo>
static const JITCodeEntry* CreateJITCodeEntryInternal(
    ArrayRef<const uint8_t> symfile = ArrayRef<const uint8_t>(),
    const void* addr = nullptr,
    bool allow_packing = false,
    bool is_compressed = false) {
  JITDescriptor& descriptor = NativeInfo::Descriptor();

  // Allocate a JITCodeEntry if needed.
  if (descriptor.free_entries_ == nullptr) {
    const void* memory = NativeInfo::Alloc(sizeof(JITCodeEntry));
    if (memory == nullptr) {
      LOG(ERROR) << "Failed to allocate memory for native debug info";
      return nullptr;
    }
    new (NativeInfo::Writable(memory)) JITCodeEntry();
    descriptor.free_entries_ = reinterpret_cast<const JITCodeEntry*>(memory);
  }

  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
  if (NativeInfo::kCopySymfileData && !symfile.empty()) {
    const uint8_t* copy = reinterpret_cast<const uint8_t*>(NativeInfo::Alloc(symfile.size()));
    if (copy == nullptr) {
      LOG(ERROR) << "Failed to allocate memory for native debug info";
      return nullptr;
    }
    memcpy(NativeInfo::Writable(copy), symfile.data(), symfile.size());
    symfile = ArrayRef<const uint8_t>(copy, symfile.size());
  }

  uint64_t timestamp = GetNextTimestamp(descriptor);

  // We must insert entries at a specific place.  See NativeDebugInfoPreFork().
  const JITCodeEntry* next = descriptor.head_.load(kNonRacingRelaxed);  // Insert at the head.
  if (descriptor.zygote_head_entry_ != nullptr && Runtime::Current()->IsZygote()) {
    next = nullptr;  // Insert zygote entries at the tail.
  }

  // Pop an entry from the free list.
  const JITCodeEntry* entry = descriptor.free_entries_;
  descriptor.free_entries_ = descriptor.free_entries_->next_.load(kNonRacingRelaxed);

  // Create the entry and set all its fields.
  JITCodeEntry* writable_entry = NativeInfo::Writable(entry);
  writable_entry->symfile_addr_ = symfile.data();
  writable_entry->symfile_size_ = symfile.size();
  writable_entry->addr_ = addr;
  writable_entry->allow_packing_ = allow_packing;
  writable_entry->is_compressed_ = is_compressed;
  writable_entry->timestamp_ = timestamp;

  // Add the entry to the main linked list.
  Seqlock(descriptor);
  InsertNewEntry<NativeInfo>(entry, next);
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_REGISTER_FN;
  descriptor.timestamp_ = timestamp;
  Sequnlock(descriptor);

  NativeInfo::NotifyNativeDebugger();

  return entry;
}

template<class NativeInfo>
static void DeleteJITCodeEntryInternal(const JITCodeEntry* entry) {
  CHECK(entry != nullptr);
  JITDescriptor& descriptor = NativeInfo::Descriptor();

  // Remove the entry from the main linked list.
  Seqlock(descriptor);
  const JITCodeEntry* next = entry->next_.load(kNonRacingRelaxed);
  const JITCodeEntry* prev = entry->prev_;
  if (next != nullptr) {
    NativeInfo::Writable(next)->prev_ = prev;
  } else {
    descriptor.tail_ = prev;
  }
  if (prev != nullptr) {
    NativeInfo::Writable(prev)->next_.store(next, std::memory_order_relaxed);
  } else {
    descriptor.head_.store(next, std::memory_order_relaxed);
  }
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_UNREGISTER_FN;
  descriptor.timestamp_ = GetNextTimestamp(descriptor);
  Sequnlock(descriptor);

  NativeInfo::NotifyNativeDebugger();

  // Delete the entry.
  JITCodeEntry* writable_entry = NativeInfo::Writable(entry);
  CHECK_EQ(writable_entry->seqlock_.load(kNonRacingRelaxed) & 1, 0u) << "Expected valid entry";
  // Release: Ensures that "next_" points to a valid entry at any time in the reader.
  writable_entry->seqlock_.fetch_add(1, std::memory_order_release);  // Mark as invalid.
  // Release: Ensures that the entry is seen as invalid before its data is freed.
  std::atomic_thread_fence(std::memory_order_release);
  const uint8_t* symfile = entry->symfile_addr_;
  writable_entry->symfile_addr_ = nullptr;
  if (NativeInfo::kCopySymfileData && symfile != nullptr) {
    NativeInfo::Free(symfile);
  }

  // Push the entry to the free list.
  writable_entry->next_.store(descriptor.free_entries_, kNonRacingRelaxed);
  writable_entry->prev_ = nullptr;
  descriptor.free_entries_ = entry;
}

void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  const ArrayRef<const uint8_t> symfile(dexfile->Begin(), dexfile->Size());
  CreateJITCodeEntryInternal<DexNativeInfo>(symfile);
}

void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
  MutexLock mu(self, g_dex_debug_lock);
  DCHECK(dexfile != nullptr);
  // We register dex files in the class linker and free them in DexFile_closeDexFile, but
  // there might be cases where we load the dex file without using it in the class linker.
  // On the other hand, a single dex file might also be used with different class loaders.
  for (const JITCodeEntry* entry = __dex_debug_descriptor.head_; entry != nullptr; ) {
    const JITCodeEntry* next = entry->next_;  // Save the next pointer before we free the memory.
    if (entry->symfile_addr_ == dexfile->Begin()) {
      DeleteJITCodeEntryInternal<DexNativeInfo>(entry);
    }
    entry = next;
  }
}

// Splits the linked list into two parts:
// The first part (including the static head pointer) is owned by the application.
// The second part is owned by zygote and might be concurrently modified by it.
//
// We add two empty entries at the boundary which are never removed (app_tail, zygote_head).
// These entries are needed to preserve the next/prev pointers in the linked list,
// since zygote cannot modify the application's data and vice versa.
//
// <------- owned by the application memory --------> <--- owned by zygote memory --->
//         |----------------------|------------------|-------------|-----------------|
// head -> | application_entries* | application_tail | zygote_head | zygote_entries* |
//         |+---------------------|------------------|-------------|----------------+|
//          |                                                                       |
//          \-(new application entries)                        (new zygote entries)-/
//
// Zygote entries are inserted at the end, which means that repacked zygote entries
// will still be seen by a single forward iteration of the linked list (avoiding races).
//
// Application entries are inserted at the start, which introduces a repacking race,
// but that is ok, since it is easy to read the new entries from the head in a further pass.
// The benefit is that this makes it fast to read only the new entries.
//
void NativeDebugInfoPreFork() {
  CHECK(Runtime::Current()->IsZygote());
  JITDescriptor& descriptor = JitNativeInfo::Descriptor();
  if (descriptor.zygote_head_entry_ != nullptr) {
    return;  // Already done - we need to do this only on the first fork.
  }

  // Create the zygote-owned head entry (with no ELF file).
  // The data will be allocated from the current JIT memory (owned by zygote).
  MutexLock mu(Thread::Current(), *Locks::jit_lock_);  // Needed to alloc entry.
  const JITCodeEntry* zygote_head =
    reinterpret_cast<const JITCodeEntry*>(JitNativeInfo::Alloc(sizeof(JITCodeEntry)));
  CHECK(zygote_head != nullptr);
  new (JitNativeInfo::Writable(zygote_head)) JITCodeEntry();  // Initialize.
  InsertNewEntry<JitNativeInfo>(zygote_head, descriptor.head_);
  descriptor.zygote_head_entry_ = zygote_head;

  // Create the child-owned tail entry (with no ELF file).
  // The data is statically allocated since it must be owned by the forked process.
  InsertNewEntry<JitNativeInfo>(&descriptor.application_tail_entry_, descriptor.head_);
}

void NativeDebugInfoPostFork() {
  CHECK(!Runtime::Current()->IsZygote());
  JITDescriptor& descriptor = JitNativeInfo::Descriptor();
  descriptor.free_entries_ = nullptr;  // Don't reuse zygote's entries.
}

// Size of the JIT code range covered by each packed JITCodeEntry.
static constexpr uint32_t kJitRepackGroupSize = 64 * KB;

// Automatically call the repack method every 'n' new entries.
static constexpr uint32_t kJitRepackFrequency = 64;
static uint32_t g_jit_num_unpacked_entries = 0;

// Split the JIT code cache into groups of fixed size and create a single JITCodeEntry per group.
// The start address of a method's code determines which group it belongs to.  The end is irrelevant.
// New mini debug infos will be merged if possible, and entries for GCed functions will be removed.
static void RepackEntries(bool compress_entries, ArrayRef<const void*> removed)
    REQUIRES(g_jit_debug_lock) {
  DCHECK(std::is_sorted(removed.begin(), removed.end()));
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit == nullptr) {
    return;
  }
  JITDescriptor& descriptor = __jit_debug_descriptor;
  bool is_zygote = Runtime::Current()->IsZygote();

  // Collect entries that we want to pack.
  std::vector<const JITCodeEntry*> entries;
  entries.reserve(2 * kJitRepackFrequency);
  for (const JITCodeEntry* it = descriptor.head_; it != nullptr; it = it->next_) {
    if (it == descriptor.zygote_head_entry_ && !is_zygote) {
      break;  // Memory owned by the zygote process (read-only for an app).
    }
    if (it->allow_packing_) {
      if (!compress_entries && it->is_compressed_ && removed.empty()) {
        continue;  // If we are not compressing, also avoid decompressing.
      }
      entries.push_back(it);
    }
  }
  auto cmp = [](const JITCodeEntry* l, const JITCodeEntry* r) { return l->addr_ < r->addr_; };
  std::sort(entries.begin(), entries.end(), cmp);  // Sort by address.

  // Process the entries in groups (each spanning a memory range of size kJitRepackGroupSize).
  for (auto group_it = entries.begin(); group_it != entries.end();) {
    const void* group_ptr = AlignDown((*group_it)->addr_, kJitRepackGroupSize);
    const void* group_end = reinterpret_cast<const uint8_t*>(group_ptr) + kJitRepackGroupSize;

    // Find all entries in this group (each entry is an in-memory ELF file).
    auto begin = group_it;
    auto end = std::find_if(begin, entries.end(), [=](auto* e) { return e->addr_ >= group_end; });
    CHECK(end > begin);
    ArrayRef<const JITCodeEntry*> elfs(&*begin, end - begin);

    // Find all symbols that have been removed in this memory range.
    auto removed_begin = std::lower_bound(removed.begin(), removed.end(), group_ptr);
    auto removed_end = std::lower_bound(removed.begin(), removed.end(), group_end);
    CHECK(removed_end >= removed_begin);
    ArrayRef<const void*> removed_subset(&*removed_begin, removed_end - removed_begin);

    // Optimization: Don't compress the last group since it will likely change again soon.
    bool compress = compress_entries && end != entries.end();

    // Bail out early if there is nothing to do for this group.
    if (elfs.size() == 1 && removed_subset.empty() && (*begin)->is_compressed_ == compress) {
      group_it = end;  // Go to the next group.
      continue;
    }

    // Create a new single JITCodeEntry that covers this memory range.
    uint64_t start_time = MicroTime();
    size_t live_symbols;
    std::vector<uint8_t> packed = jit->GetJitCompiler()->PackElfFileForJIT(
        elfs, removed_subset, compress, &live_symbols);
    VLOG(jit)
        << "JIT mini-debug-info repacked"
        << " for " << group_ptr
        << " in " << MicroTime() - start_time << "us"
        << " elfs=" << elfs.size()
        << " dead=" << removed_subset.size()
        << " live=" << live_symbols
        << " size=" << packed.size() << (compress ? "(lzma)" : "");

    // Replace the old entries with the new one (with their lifetimes temporarily overlapping).
    CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(packed),
                                              /*addr_=*/ group_ptr,
                                              /*allow_packing_=*/ true,
                                              /*is_compressed_=*/ compress);
    for (auto it : elfs) {
      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
    }
    group_it = end;  // Go to the next group.
  }
  g_jit_num_unpacked_entries = 0;
}

void AddNativeDebugInfoForJit(const void* code_ptr,
                              const std::vector<uint8_t>& symfile,
                              bool allow_packing) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  DCHECK_NE(symfile.size(), 0u);

  CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(symfile),
                                            /*addr=*/ code_ptr,
                                            /*allow_packing=*/ allow_packing,
                                            /*is_compressed=*/ false);

  VLOG(jit)
      << "JIT mini-debug-info added"
      << " for " << code_ptr
      << " size=" << PrettySize(symfile.size());

  // Automatically repack entries on a regular basis to save space.
  // Pack (but don't compress) recent entries - this is cheap and reduces memory use by ~4x.
  // We delay compression until after GC since it is more expensive (and saves a further ~4x).
  // Always compress in the zygote, since it does not GC and we want a low high-water mark.
  if (++g_jit_num_unpacked_entries >= kJitRepackFrequency) {
    bool is_zygote = Runtime::Current()->IsZygote();
    RepackEntries(/*compress_entries=*/ is_zygote, /*removed=*/ ArrayRef<const void*>());
  }
}

void RemoveNativeDebugInfoForJit(ArrayRef<const void*> removed) {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  RepackEntries(/*compress_entries=*/ true, removed);

  // Remove entries which are not allowed to be packed (each contains a single method).
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr;) {
    const JITCodeEntry* next = it->next_;
    if (!it->allow_packing_ && std::binary_search(removed.begin(), removed.end(), it->addr_)) {
      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
    }
    it = next;
  }
}

size_t GetJitMiniDebugInfoMemUsage() {
  MutexLock mu(Thread::Current(), g_jit_debug_lock);
  size_t size = 0;
  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr; it = it->next_) {
    size += sizeof(JITCodeEntry) + it->symfile_size_;
  }
  return size;
}

Mutex* GetNativeDebugInfoLock() {
  return &g_jit_debug_lock;
}

}  // namespace art