/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <malloc.h>
#include <private/bionic_malloc.h>
#include <private/bionic_malloc_dispatch.h>
#include <sched.h>
#include <signal.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/system_properties.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <atomic>
#include <tuple>

#include "perfetto/base/build_config.h"
#include "perfetto/base/logging.h"
#include "perfetto/base/no_destructor.h"
#include "perfetto/base/unix_socket.h"
#include "perfetto/base/utils.h"
#include "src/profiling/memory/client.h"
#include "src/profiling/memory/proc_utils.h"
#include "src/profiling/memory/scoped_spinlock.h"
#include "src/profiling/memory/unhooked_allocator.h"
#include "src/profiling/memory/wire_protocol.h"

using perfetto::profiling::ScopedSpinlock;
using perfetto::profiling::UnhookedAllocator;

// This is so we can build a shared object (.so) that can be swapped in for the
// existing libc_malloc_hooks.so.
#ifndef HEAPPROFD_PREFIX
#define HEAPPROFD_PREFIX heapprofd
#endif

#define HEAPPROFD_ADD_PREFIX(name) \
  PERFETTO_BUILDFLAG_CAT(HEAPPROFD_PREFIX, name)
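
// With the default prefix, HEAPPROFD_ADD_PREFIX(_malloc) expands to
// heapprofd_malloc; these prefixed names are the symbols bionic looks up when
// it loads a malloc hooks library.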

#pragma GCC visibility push(default)
extern "C" {

bool HEAPPROFD_ADD_PREFIX(_initialize)(const MallocDispatch* malloc_dispatch,
                                       bool* zygote_child,
                                       const char* options);
void HEAPPROFD_ADD_PREFIX(_finalize)();
void HEAPPROFD_ADD_PREFIX(_dump_heap)(const char* file_name);
void HEAPPROFD_ADD_PREFIX(_get_malloc_leak_info)(uint8_t** info,
                                                 size_t* overall_size,
                                                 size_t* info_size,
                                                 size_t* total_memory,
                                                 size_t* backtrace_size);
bool HEAPPROFD_ADD_PREFIX(_write_malloc_leak_info)(FILE* fp);
ssize_t HEAPPROFD_ADD_PREFIX(_malloc_backtrace)(void* pointer,
                                                uintptr_t* frames,
                                                size_t frame_count);
void HEAPPROFD_ADD_PREFIX(_free_malloc_leak_info)(uint8_t* info);
size_t HEAPPROFD_ADD_PREFIX(_malloc_usable_size)(void* pointer);
void* HEAPPROFD_ADD_PREFIX(_malloc)(size_t size);
void HEAPPROFD_ADD_PREFIX(_free)(void* pointer);
void* HEAPPROFD_ADD_PREFIX(_aligned_alloc)(size_t alignment, size_t size);
void* HEAPPROFD_ADD_PREFIX(_memalign)(size_t alignment, size_t bytes);
void* HEAPPROFD_ADD_PREFIX(_realloc)(void* pointer, size_t bytes);
void* HEAPPROFD_ADD_PREFIX(_calloc)(size_t nmemb, size_t bytes);
struct mallinfo HEAPPROFD_ADD_PREFIX(_mallinfo)();
int HEAPPROFD_ADD_PREFIX(_mallopt)(int param, int value);
int HEAPPROFD_ADD_PREFIX(_malloc_info)(int options, FILE* fp);
int HEAPPROFD_ADD_PREFIX(_posix_memalign)(void** memptr,
                                          size_t alignment,
                                          size_t size);
int HEAPPROFD_ADD_PREFIX(_iterate)(uintptr_t base,
                                   size_t size,
                                   void (*callback)(uintptr_t base,
                                                    size_t size,
                                                    void* arg),
                                   void* arg);
void HEAPPROFD_ADD_PREFIX(_malloc_disable)();
void HEAPPROFD_ADD_PREFIX(_malloc_enable)();

#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
void* HEAPPROFD_ADD_PREFIX(_pvalloc)(size_t bytes);
void* HEAPPROFD_ADD_PREFIX(_valloc)(size_t size);
#endif
}
#pragma GCC visibility pop

namespace {

// The real malloc function pointers we get in initialize. Set once in the first
// initialize invocation, and never changed afterwards. Because bionic does a
// release write after initialization and an acquire read to retrieve the hooked
// malloc functions, we can use relaxed memory mode for both writing and
// reading.
std::atomic<const MallocDispatch*> g_dispatch{nullptr};

// Holds the active profiling client. Is empty at the start, or after we've
// started shutting down a profiling session. Hook invocations take shared_ptr
// copies (ensuring that the client stays alive until no longer needed), and do
// nothing if this master pointer is empty.
//
// This shared_ptr itself is protected by g_client_lock. Note that shared_ptr
// handles are not thread-safe by themselves:
// https://en.cppreference.com/w/cpp/memory/shared_ptr/atomic
//
// To avoid on-destruction re-entrancy issues, this shared_ptr needs to be
// constructed with an allocator that uses the unhooked malloc & free functions.
// See UnhookedAllocator.
//
// NoDestructor<> wrapper is used to avoid destructing the shared_ptr at program
// exit. The rationale is:
// * Avoiding the atexit destructor racing against other threads that are
//   possibly running within the hooks.
// * Making sure that atexit handlers running after this global's destructor
//   can still safely enter the hooks.
perfetto::base::NoDestructor<std::shared_ptr<perfetto::profiling::Client>>
    g_client;

// Protects g_client, and serves as an external lock for sampling decisions (see
// perfetto::profiling::Sampler).
//
// We rely on this atomic's destruction being a nop, as it is possible for the
// hooks to attempt to acquire the spinlock after its destructor should have run
// (technically a use-after-destruct scenario).
std::atomic<bool> g_client_lock{false};

constexpr char kHeapprofdBinPath[] = "/system/bin/heapprofd";

const MallocDispatch* GetDispatch() {
  return g_dispatch.load(std::memory_order_relaxed);
}

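// clone(2) with no termination signal: the resulting child does not send us
// SIGCHLD when it exits, which is why the parent later reaps it with
// waitpid(..., __WCLONE).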
int CloneWithoutSigchld() {
  return clone(nullptr, nullptr, 0, nullptr);
}

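// clone(2) with SIGCHLD and no other flags, i.e. semantically a fork(2), but
// without running any registered pthread_atfork(3) handlers.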
int ForklikeClone() {
  return clone(nullptr, nullptr, SIGCHLD, nullptr);
}

// Like daemon(), but using clone to avoid invoking pthread_atfork(3) handlers.
int Daemonize() {
  switch (ForklikeClone()) {
    case -1:
      PERFETTO_PLOG("Daemonize.clone");
      return -1;
      break;
    case 0:
      break;
    default:
      _exit(0);
      break;
  }
  if (setsid() == -1) {
    PERFETTO_PLOG("Daemonize.setsid");
    return -1;
  }
  // best effort chdir & fd close
  chdir("/");
  int fd = open("/dev/null", O_RDWR, 0);
  if (fd != -1) {
    dup2(fd, STDIN_FILENO);
    dup2(fd, STDOUT_FILENO);
    dup2(fd, STDERR_FILENO);
    if (fd > STDERR_FILENO)
      close(fd);
  }
  return 0;
}

// Called only if |g_client_lock| acquisition fails, which shouldn't happen
// unless we're in a completely unexpected state (which we won't know how to
// recover from). Tries to abort (SIGABRT) the whole process to serve as an
// explicit indication of a bug.
//
// Doesn't use PERFETTO_FATAL as that is a single attempt to self-signal (in
// practice - SIGTRAP), while abort() tries to make sure the process has
// exited one way or another.
__attribute__((noreturn, noinline)) void AbortOnSpinlockTimeout() {
  PERFETTO_ELOG(
      "Timed out on the spinlock - something is horribly wrong. "
      "Aborting whole process.");
  abort();
}

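// Reads an Android system property into a std::string. Returns an empty string
// if the property is not set.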
std::string ReadSystemProperty(const char* key) {
  std::string prop_value;
  const prop_info* prop = __system_property_find(key);
  if (!prop) {
    return prop_value;  // empty
  }
  __system_property_read_callback(
      prop,
      [](void* cookie, const char* name, const char* value, uint32_t) {
        std::string* prop_value = reinterpret_cast<std::string*>(cookie);
        *prop_value = value;
      },
      &prop_value);
  return prop_value;
}

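// Chooses the profiling mode: false means connect to the central system
// heapprofd daemon, true means fork a private heapprofd instance dedicated to
// this process.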
bool ShouldForkPrivateDaemon() {
  std::string build_type = ReadSystemProperty("ro.build.type");
  if (build_type.empty()) {
    PERFETTO_ELOG(
        "Cannot determine platform build type, proceeding in fork mode "
        "profiling.");
    return true;
  }

  // On development builds, we support both modes of profiling, depending on a
  // system property.
  if (build_type == "userdebug" || build_type == "eng") {
    // Note: if renaming the property, also update system_property.cc
    std::string mode = ReadSystemProperty("heapprofd.userdebug.mode");
    return mode == "fork";
  }

  // User/other builds - always fork private profiler.
  return true;
}

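// Central-daemon mode: connect to the already-running heapprofd service over
// its well-known socket and perform the client handshake.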
std::shared_ptr<perfetto::profiling::Client> CreateClientForCentralDaemon(
    UnhookedAllocator<perfetto::profiling::Client> unhooked_allocator) {
  PERFETTO_DLOG("Constructing client for central daemon.");
  using perfetto::profiling::Client;

  perfetto::base::Optional<perfetto::base::UnixSocketRaw> sock =
      Client::ConnectToHeapprofd(perfetto::profiling::kHeapprofdSocketFile);
  if (!sock)
    return nullptr;
  return Client::CreateAndHandshake(std::move(sock.value()),
                                    unhooked_allocator);
}

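// Fork mode: create a socketpair, spawn (via a double clone) a private
// heapprofd that profiles only this process, hand it one end of the socketpair
// through --inherit-socket-fd, and handshake over the end we keep.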
std::shared_ptr<perfetto::profiling::Client> CreateClientAndPrivateDaemon(
    UnhookedAllocator<perfetto::profiling::Client> unhooked_allocator) {
  PERFETTO_DLOG("Setting up fork mode profiling.");
  perfetto::base::UnixSocketRaw parent_sock;
  perfetto::base::UnixSocketRaw child_sock;
  std::tie(parent_sock, child_sock) = perfetto::base::UnixSocketRaw::CreatePair(
      perfetto::base::SockType::kStream);

  if (!parent_sock || !child_sock) {
    PERFETTO_PLOG("Failed to create socketpair.");
    return nullptr;
  }

  child_sock.RetainOnExec();

  // Record own pid and cmdline, to pass down to the forked heapprofd.
  pid_t target_pid = getpid();
  std::string target_cmdline;
  if (!perfetto::profiling::GetCmdlineForPID(target_pid, &target_cmdline)) {
    target_cmdline = "failed-to-read-cmdline";
    PERFETTO_ELOG(
        "Failed to read own cmdline, proceeding as this might be a by-pid "
        "profiling request (which will still work).");
  }

  // Prepare arguments for heapprofd.
  std::string pid_arg =
      std::string("--exclusive-for-pid=") + std::to_string(target_pid);
  std::string cmd_arg =
      std::string("--exclusive-for-cmdline=") + target_cmdline;
  std::string fd_arg =
      std::string("--inherit-socket-fd=") + std::to_string(child_sock.fd());
  const char* argv[] = {kHeapprofdBinPath, pid_arg.c_str(), cmd_arg.c_str(),
                        fd_arg.c_str(), nullptr};

  // Use fork-like clone to avoid invoking the host's pthread_atfork(3)
  // handlers. Also avoid sending the current process a SIGCHLD to further
  // reduce our interference.
  pid_t clone_pid = CloneWithoutSigchld();
  if (clone_pid == -1) {
    PERFETTO_PLOG("Failed to clone.");
    return nullptr;
  }
  if (clone_pid == 0) {  // child
    // Daemonize clones again, terminating the calling process (i.e. the direct
    // child of the original process). So the rest of this codepath will be
    // executed in a new reparented process.
    if (Daemonize() == -1) {
      PERFETTO_PLOG("Daemonization failed.");
      _exit(1);
    }
    execv(kHeapprofdBinPath, const_cast<char**>(argv));
    PERFETTO_PLOG("Failed to execute private heapprofd.");
    _exit(1);
  }  // else - parent continuing the client setup

  child_sock.ReleaseFd().reset();  // close child socket's fd
  if (!parent_sock.SetTxTimeout(perfetto::profiling::kClientSockTimeoutMs)) {
    PERFETTO_PLOG("Failed to set socket transmit timeout.");
    return nullptr;
  }

  if (!parent_sock.SetRxTimeout(perfetto::profiling::kClientSockTimeoutMs)) {
    PERFETTO_PLOG("Failed to set socket receive timeout.");
    return nullptr;
  }

  // Wait on the immediate child to exit (allow for ECHILD in the unlikely case
  // we're in a process that has made its children unwaitable).
  int unused = 0;
  if (PERFETTO_EINTR(waitpid(clone_pid, &unused, __WCLONE)) == -1 &&
      errno != ECHILD) {
    PERFETTO_PLOG("Failed to waitpid on immediate child.");
    return nullptr;
  }

  return perfetto::profiling::Client::CreateAndHandshake(std::move(parent_sock),
                                                         unhooked_allocator);
}

// Note: android_mallopt(M_RESET_HOOKS) is mutually exclusive with
// heapprofd_initialize. Concurrent calls get discarded, which might be our
// unpatching attempt if there is a concurrent re-initialization running due to
// a new signal.
//
// Note: g_client can be reset by heapprofd_initialize without calling this
// function.
void ShutdownLazy() {
  ScopedSpinlock s(&g_client_lock, ScopedSpinlock::Mode::Try);
  if (PERFETTO_UNLIKELY(!s.locked()))
    AbortOnSpinlockTimeout();

  if (!g_client.ref())  // other invocation already initiated shutdown
    return;

  // Clear primary shared pointer, such that later hook invocations become nops.
  g_client.ref().reset();

  if (!android_mallopt(M_RESET_HOOKS, nullptr, 0))
    PERFETTO_PLOG("Unpatching heapprofd hooks failed.");
}

}  // namespace

// Setup for the rest of profiling. The first time profiling is triggered in a
// process, this is called after this client library is dlopened, but before the
// rest of the hooks are patched in. However, as we support multiple profiling
// sessions within a process' lifetime, this function can also be legitimately
// called any number of times afterwards (note: bionic guarantees that at most
// one initialize call is active at a time).
//
// Note: if profiling is triggered at runtime, this runs on a dedicated pthread
// (which is safe to block). If profiling is triggered at startup, then this
// code runs synchronously.
bool HEAPPROFD_ADD_PREFIX(_initialize)(const MallocDispatch* malloc_dispatch,
                                       bool*,
                                       const char*) {
  using ::perfetto::profiling::Client;

  // Table of pointers to backing implementation.
  g_dispatch.store(malloc_dispatch, std::memory_order_relaxed);

  // TODO(fmayer): Check other destructions of client and make a decision
  // whether we want to ban heap objects in the client or not.
  std::shared_ptr<Client> old_client;
  {
    ScopedSpinlock s(&g_client_lock, ScopedSpinlock::Mode::Try);
    if (PERFETTO_UNLIKELY(!s.locked()))
      AbortOnSpinlockTimeout();

    if (g_client.ref()) {
      PERFETTO_LOG("Rejecting concurrent profiling initialization.");
      return true;  // success as we're in a valid state
    }
    old_client = g_client.ref();
    g_client.ref().reset();
  }

  old_client.reset();

  // The dispatch table never changes, so let the custom allocator retain the
  // function pointers directly.
  UnhookedAllocator<Client> unhooked_allocator(malloc_dispatch->malloc,
                                               malloc_dispatch->free);

  // These factory functions use heap objects, so we need to run them without
  // the spinlock held.
  std::shared_ptr<Client> client =
      ShouldForkPrivateDaemon()
          ? CreateClientAndPrivateDaemon(unhooked_allocator)
          : CreateClientForCentralDaemon(unhooked_allocator);

  if (!client) {
    PERFETTO_LOG("heapprofd_client not initialized, not installing hooks.");
    return false;
  }
  PERFETTO_LOG("heapprofd_client initialized.");
  {
    ScopedSpinlock s(&g_client_lock, ScopedSpinlock::Mode::Try);
    if (PERFETTO_UNLIKELY(!s.locked()))
      AbortOnSpinlockTimeout();

    // This cannot have been set in the meantime. There are never two concurrent
    // calls to this function, as Bionic uses atomics to guard against that.
    PERFETTO_DCHECK(g_client.ref() == nullptr);
    g_client.ref() = std::move(client);
  }
  return true;
}

void HEAPPROFD_ADD_PREFIX(_finalize)() {
  // At the time of writing, invoked only as an atexit handler. We don't have
  // any specific action to take, and cleanup can be left to the OS.
}

// Decides whether an allocation with the given address and size needs to be
// sampled, and if so, records it. Performs the necessary synchronization (holds
// |g_client_lock| spinlock) while accessing the shared sampler, and obtaining a
// profiling client handle (shared_ptr).
//
// If the allocation is to be sampled, the recording is done without holding
// |g_client_lock|. The client handle is guaranteed to not be invalidated while
// the allocation is being recorded.
//
// If the attempt to record the allocation fails, initiates lazy shutdown of the
// client & hooks.
static void MaybeSampleAllocation(size_t size, void* addr) {
  size_t sampled_alloc_sz = 0;
  std::shared_ptr<perfetto::profiling::Client> client;
  {
    ScopedSpinlock s(&g_client_lock, ScopedSpinlock::Mode::Try);
    if (PERFETTO_UNLIKELY(!s.locked()))
      AbortOnSpinlockTimeout();

    if (!g_client.ref())  // no active client (most likely shutting down)
      return;

    sampled_alloc_sz = g_client.ref()->GetSampleSizeLocked(size);
    if (sampled_alloc_sz == 0)  // not sampling
      return;

    client = g_client.ref();  // owning copy
  }  // unlock

  if (!client->RecordMalloc(size, sampled_alloc_sz,
                            reinterpret_cast<uint64_t>(addr))) {
    ShutdownLazy();
  }
}

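// Allocation hooks: each forwards to the real allocator first and then lets
// MaybeSampleAllocation decide whether to record the new allocation.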
void* HEAPPROFD_ADD_PREFIX(_malloc)(size_t size) {
  const MallocDispatch* dispatch = GetDispatch();
  void* addr = dispatch->malloc(size);
  MaybeSampleAllocation(size, addr);
  return addr;
}

void* HEAPPROFD_ADD_PREFIX(_calloc)(size_t nmemb, size_t size) {
  const MallocDispatch* dispatch = GetDispatch();
  void* addr = dispatch->calloc(nmemb, size);
  // Attribute the full allocation (nmemb * size bytes), not just the
  // per-member size.
  MaybeSampleAllocation(nmemb * size, addr);
  return addr;
}

void* HEAPPROFD_ADD_PREFIX(_aligned_alloc)(size_t alignment, size_t size) {
  const MallocDispatch* dispatch = GetDispatch();
  void* addr = dispatch->aligned_alloc(alignment, size);
  MaybeSampleAllocation(size, addr);
  return addr;
}

void* HEAPPROFD_ADD_PREFIX(_memalign)(size_t alignment, size_t size) {
  const MallocDispatch* dispatch = GetDispatch();
  void* addr = dispatch->memalign(alignment, size);
  MaybeSampleAllocation(size, addr);
  return addr;
}

int HEAPPROFD_ADD_PREFIX(_posix_memalign)(void** memptr,
                                          size_t alignment,
                                          size_t size) {
  const MallocDispatch* dispatch = GetDispatch();
  int res = dispatch->posix_memalign(memptr, alignment, size);
  if (res != 0)
    return res;

  MaybeSampleAllocation(size, *memptr);
  return 0;
}

// Note: we record the free before calling the backing implementation to make
// sure that the address is not reused before we've processed the deallocation
// (which includes assigning a sequence id to it).
void HEAPPROFD_ADD_PREFIX(_free)(void* pointer) {
  const MallocDispatch* dispatch = GetDispatch();
  std::shared_ptr<perfetto::profiling::Client> client;
  {
    ScopedSpinlock s(&g_client_lock, ScopedSpinlock::Mode::Try);
    if (PERFETTO_UNLIKELY(!s.locked()))
      AbortOnSpinlockTimeout();

    client = g_client.ref();  // owning copy (or empty)
  }

  if (client) {
    if (!client->RecordFree(reinterpret_cast<uint64_t>(pointer)))
      ShutdownLazy();
  }
  return dispatch->free(pointer);
}

// Approach to recording realloc: under the initial lock, get a safe copy of the
// client, and make the sampling decision in advance. Then record the
// deallocation, call the real realloc, and finally record the sample if one is
// necessary.
//
// As with the free, we record the deallocation before calling the backing
// implementation to make sure the address is still exclusive while we're
// processing it.
void* HEAPPROFD_ADD_PREFIX(_realloc)(void* pointer, size_t size) {
  const MallocDispatch* dispatch = GetDispatch();

  size_t sampled_alloc_sz = 0;
  std::shared_ptr<perfetto::profiling::Client> client;
  {
    ScopedSpinlock s(&g_client_lock, ScopedSpinlock::Mode::Try);
    if (PERFETTO_UNLIKELY(!s.locked()))
      AbortOnSpinlockTimeout();

    // If there is no active client, we still want to reach the backing realloc,
    // so keep going.
    if (g_client.ref()) {
      client = g_client.ref();  // owning copy
      sampled_alloc_sz = g_client.ref()->GetSampleSizeLocked(size);
    }
  }  // unlock

  if (client && pointer) {
    if (!client->RecordFree(reinterpret_cast<uint64_t>(pointer)))
      ShutdownLazy();
  }
  void* addr = dispatch->realloc(pointer, size);

  if (size == 0 || sampled_alloc_sz == 0)
    return addr;

  if (!client->RecordMalloc(size, sampled_alloc_sz,
                            reinterpret_cast<uint64_t>(addr))) {
    ShutdownLazy();
  }
  return addr;
}

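// The remaining entry points mirror the rest of the hooked allocator interface;
// the leak-info style calls are stubbed out, and the others pass straight
// through to the real allocator.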
void HEAPPROFD_ADD_PREFIX(_dump_heap)(const char*) {}

void HEAPPROFD_ADD_PREFIX(
    _get_malloc_leak_info)(uint8_t**, size_t*, size_t*, size_t*, size_t*) {}

bool HEAPPROFD_ADD_PREFIX(_write_malloc_leak_info)(FILE*) {
  return false;
}

ssize_t HEAPPROFD_ADD_PREFIX(_malloc_backtrace)(void*, uintptr_t*, size_t) {
  return -1;
}

void HEAPPROFD_ADD_PREFIX(_free_malloc_leak_info)(uint8_t*) {}

size_t HEAPPROFD_ADD_PREFIX(_malloc_usable_size)(void* pointer) {
  const MallocDispatch* dispatch = GetDispatch();
  return dispatch->malloc_usable_size(pointer);
}

struct mallinfo HEAPPROFD_ADD_PREFIX(_mallinfo)() {
  const MallocDispatch* dispatch = GetDispatch();
  return dispatch->mallinfo();
}

int HEAPPROFD_ADD_PREFIX(_mallopt)(int param, int value) {
  const MallocDispatch* dispatch = GetDispatch();
  return dispatch->mallopt(param, value);
}

int HEAPPROFD_ADD_PREFIX(_malloc_info)(int options, FILE* fp) {
  const MallocDispatch* dispatch = GetDispatch();
  return dispatch->malloc_info(options, fp);
}

int HEAPPROFD_ADD_PREFIX(_iterate)(uintptr_t,
                                   size_t,
                                   void (*)(uintptr_t base,
                                            size_t size,
                                            void* arg),
                                   void*) {
  return 0;
}

void HEAPPROFD_ADD_PREFIX(_malloc_disable)() {
  const MallocDispatch* dispatch = GetDispatch();
  return dispatch->malloc_disable();
}

void HEAPPROFD_ADD_PREFIX(_malloc_enable)() {
  const MallocDispatch* dispatch = GetDispatch();
  return dispatch->malloc_enable();
}

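// Deprecated allocator entry points, only compiled in when the platform still
// exposes them; they pass straight through to the real allocator.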
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
void* HEAPPROFD_ADD_PREFIX(_pvalloc)(size_t size) {
  const MallocDispatch* dispatch = GetDispatch();
  return dispatch->pvalloc(size);
}

void* HEAPPROFD_ADD_PREFIX(_valloc)(size_t size) {
  const MallocDispatch* dispatch = GetDispatch();
  return dispatch->valloc(size);
}

#endif