/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>
#include <string.h>

#include <functional>
#include <iomanip>
#include <mutex>
#include <sstream>
#include <string>
#include <unordered_map>

#include <android-base/macros.h>
#include <android-base/strings.h>
#include <backtrace.h>

#include "Allocator.h"
#include "Binder.h"
#include "HeapWalker.h"
#include "Leak.h"
#include "LeakFolding.h"
#include "LeakPipe.h"
#include "ProcessMappings.h"
#include "PtracerThread.h"
#include "ScopedDisableMalloc.h"
#include "Semaphore.h"
#include "ThreadCapture.h"

#include "bionic.h"
#include "log.h"
#include "memunreachable/memunreachable.h"

using namespace std::chrono_literals;

namespace android {

const size_t Leak::contents_length;

class MemUnreachable {
 public:
  MemUnreachable(pid_t pid, Allocator<void> allocator)
      : pid_(pid), allocator_(allocator), heap_walker_(allocator_) {}
  bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
                          const allocator::vector<Mapping>& mappings,
                          const allocator::vector<uintptr_t>& refs);
  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit, size_t* num_leaks,
                            size_t* leak_bytes);
  size_t Allocations() { return heap_walker_.Allocations(); }
  size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }

 private:
  bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
                        allocator::vector<Mapping>& heap_mappings,
                        allocator::vector<Mapping>& anon_mappings,
                        allocator::vector<Mapping>& globals_mappings,
                        allocator::vector<Mapping>& stack_mappings);
  DISALLOW_COPY_AND_ASSIGN(MemUnreachable);
  pid_t pid_;
  Allocator<void> allocator_;
  HeapWalker heap_walker_;
};

static void HeapIterate(const Mapping& heap_mapping,
                        const std::function<void(uintptr_t, size_t)>& func) {
  malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
                 [](uintptr_t base, size_t size, void* arg) {
                   auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
                   (*f)(base, size);
                 },
                 const_cast<void*>(reinterpret_cast<const void*>(&func)));
}

bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
                                        const allocator::vector<Mapping>& mappings,
                                        const allocator::vector<uintptr_t>& refs) {
  MEM_ALOGI("searching process %d for allocations", pid_);

  for (auto it = mappings.begin(); it != mappings.end(); it++) {
    heap_walker_.Mapping(it->begin, it->end);
  }

  allocator::vector<Mapping> heap_mappings{mappings};
  allocator::vector<Mapping> anon_mappings{mappings};
  allocator::vector<Mapping> globals_mappings{mappings};
  allocator::vector<Mapping> stack_mappings{mappings};
  if (!ClassifyMappings(mappings, heap_mappings, anon_mappings, globals_mappings, stack_mappings)) {
    return false;
  }

  for (auto it = heap_mappings.begin(); it != heap_mappings.end(); it++) {
    MEM_ALOGV("Heap mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    HeapIterate(*it,
                [&](uintptr_t base, size_t size) { heap_walker_.Allocation(base, base + size); });
  }

  for (auto it = anon_mappings.begin(); it != anon_mappings.end(); it++) {
    MEM_ALOGV("Anon mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Allocation(it->begin, it->end);
  }

  for (auto it = globals_mappings.begin(); it != globals_mappings.end(); it++) {
    MEM_ALOGV("Globals mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Root(it->begin, it->end);
  }

  for (auto thread_it = threads.begin(); thread_it != threads.end(); thread_it++) {
    for (auto it = stack_mappings.begin(); it != stack_mappings.end(); it++) {
      if (thread_it->stack.first >= it->begin && thread_it->stack.first <= it->end) {
        MEM_ALOGV("Stack %" PRIxPTR "-%" PRIxPTR " %s", thread_it->stack.first, it->end, it->name);
        heap_walker_.Root(thread_it->stack.first, it->end);
      }
    }
    heap_walker_.Root(thread_it->regs);
  }

  heap_walker_.Root(refs);

  MEM_ALOGI("searching done");

  return true;
}

bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
                                          size_t* num_leaks, size_t* leak_bytes) {
  MEM_ALOGI("sweeping process %d for unreachable memory", pid_);
  leaks.clear();

  if (!heap_walker_.DetectLeaks()) {
    return false;
  }

  allocator::vector<Range> leaked1{allocator_};
  heap_walker_.Leaked(leaked1, 0, num_leaks, leak_bytes);

  MEM_ALOGI("sweeping done");

  MEM_ALOGI("folding related leaks");

  LeakFolding folding(allocator_, heap_walker_);
  if (!folding.FoldLeaks()) {
    return false;
  }

  allocator::vector<LeakFolding::Leak> leaked{allocator_};

  if (!folding.Leaked(leaked, num_leaks, leak_bytes)) {
    return false;
  }

  allocator::unordered_map<Leak::Backtrace, Leak*> backtrace_map{allocator_};

  // Prevent reallocations of backing memory so we can store pointers into it
  // in backtrace_map.
  leaks.reserve(leaked.size());

  for (auto& it : leaked) {
    leaks.emplace_back();
    Leak* leak = &leaks.back();

    ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it.range.begin),
                                                    leak->backtrace.frames,
                                                    leak->backtrace.max_frames);
    if (num_backtrace_frames > 0) {
      leak->backtrace.num_frames = num_backtrace_frames;

      auto inserted = backtrace_map.emplace(leak->backtrace, leak);
      if (!inserted.second) {
        // Leak with same backtrace already exists, drop this one and
        // increment similar counts on the existing one.
        leaks.pop_back();
        Leak* similar_leak = inserted.first->second;
        similar_leak->similar_count++;
        similar_leak->similar_size += it.range.size();
        similar_leak->similar_referenced_count += it.referenced_count;
        similar_leak->similar_referenced_size += it.referenced_size;
        similar_leak->total_size += it.range.size();
        similar_leak->total_size += it.referenced_size;
        continue;
      }
    }

    leak->begin = it.range.begin;
    leak->size = it.range.size();
    leak->referenced_count = it.referenced_count;
    leak->referenced_size = it.referenced_size;
    leak->total_size = leak->size + leak->referenced_size;
    memcpy(leak->contents, reinterpret_cast<void*>(it.range.begin),
           std::min(leak->size, Leak::contents_length));
  }

  MEM_ALOGI("folding done");

  std::sort(leaks.begin(), leaks.end(),
            [](const Leak& a, const Leak& b) { return a.total_size > b.total_size; });

  if (leaks.size() > limit) {
    leaks.resize(limit);
  }

  return true;
}

static bool has_prefix(const allocator::string& s, const char* prefix) {
  int ret = s.compare(0, strlen(prefix), prefix);
  return ret == 0;
}

static bool is_sanitizer_mapping(const allocator::string& s) {
  return s == "[anon:low shadow]" || s == "[anon:high shadow]" || has_prefix(s, "[anon:hwasan");
}

bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
                                      allocator::vector<Mapping>& heap_mappings,
                                      allocator::vector<Mapping>& anon_mappings,
                                      allocator::vector<Mapping>& globals_mappings,
                                      allocator::vector<Mapping>& stack_mappings) {
  heap_mappings.clear();
  anon_mappings.clear();
  globals_mappings.clear();
  stack_mappings.clear();

  allocator::string current_lib{allocator_};

  for (auto it = mappings.begin(); it != mappings.end(); it++) {
    if (it->execute) {
      current_lib = it->name;
      continue;
    }

    if (!it->read) {
      continue;
    }

    const allocator::string mapping_name{it->name, allocator_};
    if (mapping_name == "[anon:.bss]") {
      // named .bss section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == current_lib) {
      // .rodata or .data section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == "[anon:libc_malloc]" ||
               android::base::StartsWith(mapping_name, "[anon:scudo:") ||
               android::base::StartsWith(mapping_name, "[anon:GWP-ASan")) {
      // named malloc mapping
      heap_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[anon:dalvik-")) {
      // named dalvik heap mapping
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[stack")) {
      // named stack mapping
      stack_mappings.emplace_back(*it);
    } else if (mapping_name.size() == 0) {
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[anon:") &&
               mapping_name != "[anon:leak_detector_malloc]" &&
               !is_sanitizer_mapping(mapping_name)) {
      // TODO(ccross): it would be nice to treat named anonymous mappings as
      // possible leaks, but naming something in a .bss or .data section makes
      // it impossible to distinguish them from mmaped and then named mappings.
      globals_mappings.emplace_back(*it);
    }
  }

  return true;
}

template <typename T>
static inline const char* plural(T val) {
  return (val == 1) ? "" : "s";
}

bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
  if (info.version > 0) {
    MEM_ALOGE("unsupported UnreachableMemoryInfo.version %zu in GetUnreachableMemory",
              info.version);
    return false;
  }

  int parent_pid = getpid();
  int parent_tid = gettid();

  Heap heap;

  Semaphore continue_parent_sem;
  LeakPipe pipe;

  PtracerThread thread{[&]() -> int {
    /////////////////////////////////////////////
    // Collection thread
    /////////////////////////////////////////////
    MEM_ALOGI("collecting thread info for process %d...", parent_pid);

    ThreadCapture thread_capture(parent_pid, heap);
    allocator::vector<ThreadInfo> thread_info(heap);
    allocator::vector<Mapping> mappings(heap);
    allocator::vector<uintptr_t> refs(heap);

    // ptrace all the threads
    if (!thread_capture.CaptureThreads()) {
      continue_parent_sem.Post();
      return 1;
    }

    // collect register contents and stacks
    if (!thread_capture.CapturedThreadInfo(thread_info)) {
      continue_parent_sem.Post();
      return 1;
    }

    // snapshot /proc/pid/maps
    if (!ProcessMappings(parent_pid, mappings)) {
      continue_parent_sem.Post();
      return 1;
    }

    if (!BinderReferences(refs)) {
      continue_parent_sem.Post();
      return 1;
    }

    // malloc must be enabled to call fork, at_fork handlers take the same
    // locks as ScopedDisableMalloc. All threads are paused in ptrace, so
    // memory state is still consistent. Unfreeze the original thread so it
    // can drop the malloc locks, it will block until the collection thread
    // exits.
    thread_capture.ReleaseThread(parent_tid);
    continue_parent_sem.Post();

    // fork a process to do the heap walking
    int ret = fork();
    if (ret < 0) {
      return 1;
    } else if (ret == 0) {
      /////////////////////////////////////////////
      // Heap walker process
      /////////////////////////////////////////////
      // Examine memory state in the child using the data collected above and
      // the CoW snapshot of the process memory contents.

      if (!pipe.OpenSender()) {
        _exit(1);
      }

      MemUnreachable unreachable{parent_pid, heap};

      if (!unreachable.CollectAllocations(thread_info, mappings, refs)) {
        _exit(2);
      }
      size_t num_allocations = unreachable.Allocations();
      size_t allocation_bytes = unreachable.AllocationBytes();

      allocator::vector<Leak> leaks{heap};

      size_t num_leaks = 0;
      size_t leak_bytes = 0;
      bool ok = unreachable.GetUnreachableMemory(leaks, limit, &num_leaks, &leak_bytes);

      ok = ok && pipe.Sender().Send(num_allocations);
      ok = ok && pipe.Sender().Send(allocation_bytes);
      ok = ok && pipe.Sender().Send(num_leaks);
      ok = ok && pipe.Sender().Send(leak_bytes);
      ok = ok && pipe.Sender().SendVector(leaks);

      if (!ok) {
        _exit(3);
      }

      _exit(0);
    } else {
      // Nothing left to do in the collection thread, return immediately,
      // releasing all the captured threads.
      MEM_ALOGI("collection thread done");
      return 0;
    }
  }};

  /////////////////////////////////////////////
  // Original thread
  /////////////////////////////////////////////

  {
    // Disable malloc to get a consistent view of memory
    ScopedDisableMalloc disable_malloc;

    // Start the collection thread
    thread.Start();

    // Wait for the collection thread to signal that it is ready to fork the
    // heap walker process.
    continue_parent_sem.Wait(30s);

    // Re-enable malloc so the collection thread can fork.
  }

  // Wait for the collection thread to exit
  int ret = thread.Join();
  if (ret != 0) {
    return false;
  }

  // Get a pipe from the heap walker process. Transferring a new pipe fd
  // ensures no other forked processes can have it open, so when the heap
  // walker process dies the remote side of the pipe will close.
  if (!pipe.OpenReceiver()) {
    return false;
  }

  bool ok = true;
  ok = ok && pipe.Receiver().Receive(&info.num_allocations);
  ok = ok && pipe.Receiver().Receive(&info.allocation_bytes);
  ok = ok && pipe.Receiver().Receive(&info.num_leaks);
  ok = ok && pipe.Receiver().Receive(&info.leak_bytes);
  ok = ok && pipe.Receiver().ReceiveVector(info.leaks);
  if (!ok) {
    return false;
  }

  MEM_ALOGI("unreachable memory detection done");
  MEM_ALOGE("%zu bytes in %zu allocation%s unreachable out of %zu bytes in %zu allocation%s",
            info.leak_bytes, info.num_leaks, plural(info.num_leaks), info.allocation_bytes,
            info.num_allocations, plural(info.num_allocations));
  return true;
}

std::string Leak::ToString(bool log_contents) const {
  std::ostringstream oss;

  oss << " " << std::dec << size;
  oss << " bytes unreachable at ";
  oss << std::hex << begin;
  oss << std::endl;
  if (referenced_count > 0) {
    oss << std::dec;
    oss << " referencing " << referenced_size << " unreachable bytes";
    oss << " in " << referenced_count << " allocation" << plural(referenced_count);
    oss << std::endl;
  }
  if (similar_count > 0) {
    oss << std::dec;
    oss << " and " << similar_size << " similar unreachable bytes";
    oss << " in " << similar_count << " allocation" << plural(similar_count);
    oss << std::endl;
    if (similar_referenced_count > 0) {
      oss << " referencing " << similar_referenced_size << " unreachable bytes";
      oss << " in " << similar_referenced_count << " allocation"
          << plural(similar_referenced_count);
      oss << std::endl;
    }
  }

  if (log_contents) {
    const int bytes_per_line = 16;
    const size_t bytes = std::min(size, contents_length);

    if (bytes == size) {
      oss << " contents:" << std::endl;
    } else {
      oss << " first " << bytes << " bytes of contents:" << std::endl;
    }

    for (size_t i = 0; i < bytes; i += bytes_per_line) {
      oss << " " << std::hex << begin + i << ": ";
      size_t j;
      oss << std::setfill('0');
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        oss << std::setw(2) << static_cast<int>(contents[j]) << " ";
      }
      oss << std::setfill(' ');
      for (; j < i + bytes_per_line; j++) {
        // pad to the width of a printed byte ("XX ") so the ASCII column
        // stays aligned on the final, partial line of the dump
        oss << "   ";
      }
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        char c = contents[j];
        if (c < ' ' || c >= 0x7f) {
          c = '.';
        }
        oss << c;
      }
      oss << std::endl;
    }
  }
  if (backtrace.num_frames > 0) {
    oss << backtrace_string(backtrace.frames, backtrace.num_frames);
  }

  return oss.str();
}

std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
  std::ostringstream oss;
  oss << " " << leak_bytes << " bytes in ";
  oss << num_leaks << " unreachable allocation" << plural(num_leaks);
  oss << std::endl;
  oss << " ABI: '" ABI_STRING "'" << std::endl;
  oss << std::endl;

  for (auto it = leaks.begin(); it != leaks.end(); it++) {
    oss << it->ToString(log_contents);
    oss << std::endl;
  }

  return oss.str();
}

UnreachableMemoryInfo::~UnreachableMemoryInfo() {
  // Clear the memory that holds the leaks, otherwise the next attempt to
  // detect leaks may find the old data (for example in the jemalloc tcache)
  // and consider all the leaks to be referenced.
  memset(leaks.data(), 0, leaks.capacity() * sizeof(Leak));

  std::vector<Leak> tmp;
  leaks.swap(tmp);

  // Disable and re-enable malloc to flush the jemalloc tcache to make sure
  // there are no copies of the leaked pointer addresses there.
  malloc_disable();
  malloc_enable();
}

std::string GetUnreachableMemoryString(bool log_contents, size_t limit) {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, limit)) {
    return "Failed to get unreachable memory\n"
           "If you are trying to get unreachable memory from a system app\n"
           "(like com.android.systemui), disable selinux first using\n"
           "setenforce 0\n";
  }

  return info.ToString(log_contents);
}

}  // namespace android

bool LogUnreachableMemory(bool log_contents, size_t limit) {
  android::UnreachableMemoryInfo info;
  if (!android::GetUnreachableMemory(info, limit)) {
    return false;
  }

  for (auto it = info.leaks.begin(); it != info.leaks.end(); it++) {
    MEM_ALOGE("%s", it->ToString(log_contents).c_str());
  }
  return true;
}

bool NoLeaks() {
  android::UnreachableMemoryInfo info;
  if (!android::GetUnreachableMemory(info, 0)) {
    return false;
  }

  return info.num_leaks == 0;
}
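
// Usage sketch (illustrative only, not part of this translation unit): the
// entry points defined above are declared in memunreachable/memunreachable.h.
// The function name CheckForLeaks below is hypothetical; the calls to
// LogUnreachableMemory() and NoLeaks() match the definitions in this file.
//
//   #include <memunreachable/memunreachable.h>
//
//   void CheckForLeaks() {
//     // Log up to 100 leaks, including a hex dump of their contents.
//     if (!LogUnreachableMemory(true /* log_contents */, 100 /* limit */)) {
//       // Collection failed, e.g. fork or ptrace of the process was denied.
//     }
//
//     // In tests, NoLeaks() returns true only when no unreachable
//     // allocations were found.
//   }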