/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger_interface.h"

#include <android-base/logging.h>

#include "base/array_ref.h"
#include "base/mutex.h"
#include "base/time_utils.h"
#include "thread-current-inl.h"
#include "thread.h"

#include <atomic>
#include <cstddef>
#include <unordered_map>

//
// Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
//
// See http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
//
// There are two ways for native tools to access the debug data safely:
//
// 1) Synchronously, by setting a breakpoint in the __*_debug_register_code
//    method, which is called after every modification of the linked list.
//    GDB does this, but it is complex to set up and it stops the process.
//
// 2) Asynchronously, by monitoring the action_seqlock_.
//   * The seqlock is a monotonically increasing counter which is incremented
//     before and after every modification of the linked list. An odd value of
//     the counter means the linked list is being modified (it is locked).
//   * The tool should read the value of the seqlock both before and after
//     copying the linked list. If the seqlock values match and are even,
//     the copy is consistent. Otherwise, the reader should try again.
//   * Note that using the data directly while it is being modified
//     might crash the tool. Therefore, the only safe way is to make
//     a copy and use the copy only after the seqlock has been checked.
//   * Note that the process might even free and munmap the data while
//     it is being copied, therefore the reader should either handle
//     SEGV or use OS calls to read the memory (e.g. process_vm_readv).
//   * The seqlock can be used to determine the number of modifications of
//     the linked list, which can be used to intelligently cache the data.
//     Note the possible overflow of the seqlock. It is intentionally
//     32-bit, since 64-bit atomics can be tricky on some architectures.
//   * The timestamps on the entry record the time when the entry was
//     created, which is relevant if the unwinding is not live and is
//     postponed until much later. All timestamps must be unique.
//   * Memory barriers are used to make it possible to reason about
//     the data even when it is being modified (e.g. the process crashed
//     while that data was locked, and thus it will never be unlocked).
//   * In particular, it should be possible to:
//     1) read the seqlock and then the linked list head pointer.
//     2) copy the entry and check that the seqlock has not changed.
//     3) copy the symfile and check that the seqlock has not changed.
//     4) go back to step 2 using the next pointer (if non-null).
//     This safely creates a copy of all symfiles (see the reader sketch at
//     the end of this comment), although other data might be
//     inconsistent/unusable (e.g. prev_, action_timestamp_).
//   * For full conformance with the C++ memory model, all seqlock-protected
//     accesses should be atomic. We currently do this in the more critical
//     cases. The rest will have to be fixed before attempting to run TSAN
//     on this code.
//
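// * A minimal reader-loop sketch for approach 2), illustrative only and not
//   part of this file. ReadRemote() is a hypothetical helper that copies
//   memory from the target process (e.g. via process_vm_readv) and returns
//   false on failure; remote pointers are used directly as addresses:
//
//     bool CopyAllSymfiles(const JITDescriptor* desc,
//                          std::vector<std::vector<uint8_t>>* out) {
//       uint32_t seq;
//       if (!ReadRemote(&desc->action_seqlock_, &seq, sizeof(seq))) return false;
//       if ((seq & 1) != 0) return false;  // Odd: list is locked; retry later.
//       JITCodeEntry* entry;
//       if (!ReadRemote(&desc->head_, &entry, sizeof(entry))) return false;
//       while (entry != nullptr) {
//         JITCodeEntry copy;
//         if (!ReadRemote(entry, &copy, sizeof(copy))) return false;
//         std::vector<uint8_t> symfile(copy.symfile_size_);
//         if (!ReadRemote(copy.symfile_addr_, symfile.data(), symfile.size())) {
//           return false;
//         }
//         uint32_t check;
//         if (!ReadRemote(&desc->action_seqlock_, &check, sizeof(check))) return false;
//         if (check != seq) return false;  // List changed mid-copy; retry.
//         out->push_back(std::move(symfile));
//         entry = copy.next_.load(std::memory_order_relaxed);
//       }
//       return true;
//     }
//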

namespace art {
extern "C" {
  typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
  } JITAction;

  struct JITCodeEntry {
    // Atomic to ensure the reader can always iterate over the linked list
    // (e.g. the process could crash in the middle of writing this field).
    std::atomic<JITCodeEntry*> next_;
    // Non-atomic. The reader should not use it. It is only used for deletion.
    JITCodeEntry* prev_;
    const uint8_t* symfile_addr_;
    uint64_t symfile_size_;  // Beware of the offset (12 on x86, but 16 on ARM32).

    // Android-specific fields:
    uint64_t register_timestamp_;  // CLOCK_MONOTONIC time of entry registration.
  };

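  // The offset of symfile_size_ differs between 32-bit ABIs because
  // alignof(uint64_t) is 4 on x86-32 but 8 on ARM32 (see the note above).
  // A compile-time check could make this explicit; a sketch, assuming those
  // alignments (not something this file currently enforces):
  //
  //   #if defined(__i386__)
  //   static_assert(offsetof(JITCodeEntry, symfile_size_) == 12, "Layout");
  //   #elif defined(__arm__)
  //   static_assert(offsetof(JITCodeEntry, symfile_size_) == 16, "Layout");
  //   #endif
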
  struct JITDescriptor {
    uint32_t version_ = 1;                      // NB: GDB supports only version 1.
    uint32_t action_flag_ = JIT_NOACTION;       // One of the JITAction enum values.
    JITCodeEntry* relevant_entry_ = nullptr;    // The entry affected by the action.
    std::atomic<JITCodeEntry*> head_{nullptr};  // Head of linked list of all entries.

    // Android-specific fields:
    uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '1'};
    uint32_t flags_ = 0;  // Reserved for future use. Must be 0.
    uint32_t sizeof_descriptor = sizeof(JITDescriptor);
    uint32_t sizeof_entry = sizeof(JITCodeEntry);
    std::atomic_uint32_t action_seqlock_{0};  // Incremented before and after any modification.
    uint64_t action_timestamp_ = 1;           // CLOCK_MONOTONIC time of last action.
  };

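  // A reader can recognize the Android-specific extension before trusting the
  // extra fields. A minimal reader-side sketch (illustrative only, assuming the
  // descriptor bytes have already been copied out of the target process):
  //
  //   bool HasAndroidExtension(const JITDescriptor& desc) {
  //     static constexpr uint8_t kMagic[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '1'};
  //     return desc.version_ == 1 &&
  //            memcmp(desc.magic_, kMagic, sizeof(kMagic)) == 0 &&
  //            desc.sizeof_descriptor >= sizeof(JITDescriptor) &&
  //            desc.sizeof_entry >= sizeof(JITCodeEntry);
  //   }
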
  // Check that std::atomic has the expected layout.
  static_assert(alignof(std::atomic_uint32_t) == alignof(uint32_t), "Weird alignment");
  static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t), "Weird size");
  static_assert(alignof(std::atomic<void*>) == alignof(void*), "Weird alignment");
  static_assert(sizeof(std::atomic<void*>) == sizeof(void*), "Weird size");

  // GDB may set a breakpoint here. We must ensure it is not removed or deduplicated.
  void __attribute__((noinline)) __jit_debug_register_code() {
    __asm__("");
  }

  // Alternatively, native tools may overwrite this field to execute a custom handler.
  void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;

  // The root data structure describing all JITed methods.
  JITDescriptor __jit_debug_descriptor {};

  // The following globals mirror the ones above, but are used to register dex files.
  void __attribute__((noinline)) __dex_debug_register_code() {
    __asm__("");
  }
  void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
  JITDescriptor __dex_debug_descriptor {};
}

// Mark the descriptor as "locked", so native tools know the data is being modified.
static void ActionSeqlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 0u) << "Already locked";
  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
  // Ensure that any writes within the locked section cannot be reordered before the increment.
  std::atomic_thread_fence(std::memory_order_release);
}

// Mark the descriptor as "unlocked", so native tools know the data is safe to read.
static void ActionSequnlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 1u) << "Already unlocked";
  // Ensure that any writes within the locked section cannot be reordered after the increment.
  std::atomic_thread_fence(std::memory_order_release);
  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
}

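// The release fences above pair with acquire ordering on the reader side: a
// reader should load the seqlock with acquire semantics before touching the
// protected data, and fence again before re-validating its copy. A minimal
// in-process sketch (illustrative only; out-of-process tools copy remote
// memory instead, see the file comment):
//
//   uint32_t seq = descriptor.action_seqlock_.load(std::memory_order_acquire);
//   if ((seq & 1) == 0) {
//     // ... copy the protected data ...
//     std::atomic_thread_fence(std::memory_order_acquire);
//     bool valid = (descriptor.action_seqlock_.load(std::memory_order_relaxed) == seq);
//   }
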
static JITCodeEntry* CreateJITCodeEntryInternal(
    JITDescriptor& descriptor,
    void (*register_code_ptr)(),
    const ArrayRef<const uint8_t>& symfile)
    REQUIRES(Locks::native_debug_interface_lock_) {
  // Ensure the timestamp is monotonically increasing even in the presence of a
  // low-granularity system timer. This ensures each entry has a unique timestamp.
  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());

  JITCodeEntry* head = descriptor.head_.load(std::memory_order_relaxed);
  JITCodeEntry* entry = new JITCodeEntry;
  CHECK(entry != nullptr);
  entry->symfile_addr_ = symfile.data();
  entry->symfile_size_ = symfile.size();
  entry->prev_ = nullptr;
  entry->next_.store(head, std::memory_order_relaxed);
  entry->register_timestamp_ = timestamp;

  // We are going to modify the linked list, so take the seqlock.
  ActionSeqlock(descriptor);
  if (head != nullptr) {
    head->prev_ = entry;
  }
  descriptor.head_.store(entry, std::memory_order_relaxed);
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_REGISTER_FN;
  descriptor.action_timestamp_ = timestamp;
  ActionSequnlock(descriptor);

  (*register_code_ptr)();
  return entry;
}

static void DeleteJITCodeEntryInternal(
    JITDescriptor& descriptor,
    void (*register_code_ptr)(),
    JITCodeEntry* entry)
    REQUIRES(Locks::native_debug_interface_lock_) {
  CHECK(entry != nullptr);

  // Ensure the timestamp is monotonically increasing even in the presence of a
  // low-granularity system timer. This ensures each entry has a unique timestamp.
  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());

  // We are going to modify the linked list, so take the seqlock.
  ActionSeqlock(descriptor);
  JITCodeEntry* next = entry->next_.load(std::memory_order_relaxed);
  if (entry->prev_ != nullptr) {
    entry->prev_->next_.store(next, std::memory_order_relaxed);
  } else {
    descriptor.head_.store(next, std::memory_order_relaxed);
  }
  if (next != nullptr) {
    next->prev_ = entry->prev_;
  }
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_UNREGISTER_FN;
  descriptor.action_timestamp_ = timestamp;
  ActionSequnlock(descriptor);

  (*register_code_ptr)();

  // Ensure that the memset below cannot be reordered above the unlock above.
  std::atomic_thread_fence(std::memory_order_release);

  // Aggressively clear the entry as an extra check of the synchronisation.
  memset(entry, 0, sizeof(*entry));

  delete entry;
}

static std::unordered_map<const void*, JITCodeEntry*> __dex_debug_entries
    GUARDED_BY(Locks::native_debug_interface_lock_);

void AddNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile) {
  MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
  DCHECK(dexfile.data() != nullptr);
  // This is just a defensive check. The class linker should not register the dex file twice.
  if (__dex_debug_entries.count(dexfile.data()) == 0) {
    JITCodeEntry* entry = CreateJITCodeEntryInternal(__dex_debug_descriptor,
                                                     __dex_debug_register_code_ptr,
                                                     dexfile);
    __dex_debug_entries.emplace(dexfile.data(), entry);
  }
}

void RemoveNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile) {
  MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
  auto it = __dex_debug_entries.find(dexfile.data());
  // We register dex files in the class linker and free them in DexFile_closeDexFile, but
  // there might be cases where we load the dex file without using it in the class linker.
  if (it != __dex_debug_entries.end()) {
    DeleteJITCodeEntryInternal(__dex_debug_descriptor,
                               __dex_debug_register_code_ptr,
                               it->second);
    __dex_debug_entries.erase(it);
  }
}

static size_t __jit_debug_mem_usage
    GUARDED_BY(Locks::native_debug_interface_lock_) = 0;

// Mapping from handle to entry. Used to manage the lifetime of the entries.
static std::unordered_map<const void*, JITCodeEntry*> __jit_debug_entries
    GUARDED_BY(Locks::native_debug_interface_lock_);

void AddNativeDebugInfoForJit(const void* handle, const std::vector<uint8_t>& symfile) {
  DCHECK_NE(symfile.size(), 0u);

  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
  uint8_t* copy = new uint8_t[symfile.size()];
  CHECK(copy != nullptr);
  memcpy(copy, symfile.data(), symfile.size());

  JITCodeEntry* entry = CreateJITCodeEntryInternal(
      __jit_debug_descriptor,
      __jit_debug_register_code_ptr,
      ArrayRef<const uint8_t>(copy, symfile.size()));
  __jit_debug_mem_usage += sizeof(JITCodeEntry) + entry->symfile_size_;

  // We don't provide a handle for type debug info, which means we cannot free it later.
  // (This only happens when the --generate-debug-info flag is enabled for the purpose
  // of being debugged with gdb; it does not happen for debuggable apps by default.)
  bool ok = handle == nullptr || __jit_debug_entries.emplace(handle, entry).second;
  DCHECK(ok) << "Native debug entry already exists for " << std::hex << handle;
}

void RemoveNativeDebugInfoForJit(const void* handle) {
  auto it = __jit_debug_entries.find(handle);
  // We generate JIT native debug info only if the right runtime flags are enabled,
  // but we try to remove it unconditionally whenever code is freed from the JIT cache.
  if (it != __jit_debug_entries.end()) {
    JITCodeEntry* entry = it->second;
    const uint8_t* symfile_addr = entry->symfile_addr_;
    uint64_t symfile_size = entry->symfile_size_;
    DeleteJITCodeEntryInternal(__jit_debug_descriptor,
                               __jit_debug_register_code_ptr,
                               entry);
    __jit_debug_entries.erase(it);
    __jit_debug_mem_usage -= sizeof(JITCodeEntry) + symfile_size;
    delete[] symfile_addr;
  }
}

size_t GetJitNativeDebugInfoMemUsage() {
  return __jit_debug_mem_usage + __jit_debug_entries.size() * 2 * sizeof(void*);
}

}  // namespace art