/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "perfetto_hprof"

#include "perfetto_hprof.h"

#include <fcntl.h>
#include <fnmatch.h>
#include <inttypes.h>
#include <sched.h>
#include <signal.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/wait.h>
#include <thread>
#include <time.h>

#include <limits>
#include <optional>
#include <type_traits>

#include "android-base/file.h"
#include "android-base/logging.h"
#include "android-base/properties.h"
#include "base/fast_exit.h"
#include "base/systrace.h"
#include "gc/heap-visit-objects-inl.h"
#include "gc/heap.h"
#include "gc/scoped_gc_critical_section.h"
#include "mirror/object-refvisitor-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "perfetto/profiling/parse_smaps.h"
#include "perfetto/trace/interned_data/interned_data.pbzero.h"
#include "perfetto/trace/profiling/heap_graph.pbzero.h"
#include "perfetto/trace/profiling/profile_common.pbzero.h"
#include "perfetto/trace/profiling/smaps.pbzero.h"
#include "perfetto/config/profiling/java_hprof_config.pbzero.h"
#include "perfetto/protozero/packed_repeated_fields.h"
#include "perfetto/tracing.h"
#include "runtime-inl.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"
#include "dex/descriptors_names.h"

// There are three threads involved in this:
// * listener thread: this is idle in the background when this plugin gets loaded, and waits
//   for data on g_signal_pipe_fds.
// * signal thread: an arbitrary thread that handles the signal and writes data to
//   g_signal_pipe_fds.
// * perfetto producer thread: once the signal is received, the app forks. In the newly forked
//   child, the Perfetto Client API spawns a thread to communicate with traced.

namespace perfetto_hprof {

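// Signal heapprofd uses to ask this process for a Java heap dump. The
// accompanying sigval payload carries the requesting tracing session id,
// e.g. (illustrative only):
//   sigqueue(pid, kJavaHeapprofdSignal, sigval{.sival_int = session_id});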
constexpr int kJavaHeapprofdSignal = __SIGRTMIN + 6;
constexpr time_t kWatchdogTimeoutSec = 120;
// This needs to be lower than the maximum acceptable chunk size, because this
// is checked *before* writing another submessage. We conservatively assume
// submessages can be up to 100k here for a 500k chunk size.
// DropBox has a 500k chunk limit, and each chunk needs to parse as a proto.
constexpr uint32_t kPacketSizeThreshold = 400000;
constexpr char kByte[1] = {'x'};

static art::Mutex& GetStateMutex() {
  static art::Mutex state_mutex("perfetto_hprof_state_mutex", art::LockLevel::kGenericBottomLock);
  return state_mutex;
}

static art::ConditionVariable& GetStateCV() {
  static art::ConditionVariable state_cv("perfetto_hprof_state_cv", GetStateMutex());
  return state_cv;
}

static int requested_tracing_session_id = 0;
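// Global dump state machine, protected by GetStateMutex():
// kUninitialized -> kWaitForListener -> kWaitForStart -> kStart -> kEnd.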
static State g_state = State::kUninitialized;
static bool g_oome_triggered = false;
static uint32_t g_oome_sessions_pending = 0;

// Pipe to signal from the signal handler into a worker thread that handles the
// dump requests.
int g_signal_pipe_fds[2];
static struct sigaction g_orig_act = {};

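// Interning helper: returns the id of `s` in `*m`, assigning the next
// sequential id (== the current map size) on first use.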
template <typename T>
uint64_t FindOrAppend(std::map<T, uint64_t>* m, const T& s) {
  auto it = m->find(s);
  if (it == m->end()) {
    std::tie(it, std::ignore) = m->emplace(s, m->size());
  }
  return it->second;
}

void ArmWatchdogOrDie() {
  timer_t timerid{};
  struct sigevent sev {};
  sev.sigev_notify = SIGEV_SIGNAL;
  sev.sigev_signo = SIGKILL;

  if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1) {
    // This only gets called in the child, so we can fatal without impacting
    // the app.
    PLOG(FATAL) << "failed to create watchdog timer";
  }

  struct itimerspec its {};
  its.it_value.tv_sec = kWatchdogTimeoutSec;

  if (timer_settime(timerid, 0, &its, nullptr) == -1) {
    // This only gets called in the child, so we can fatal without impacting
    // the app.
    PLOG(FATAL) << "failed to arm watchdog timer";
  }
}

bool StartsWith(const std::string& str, const std::string& prefix) {
  return str.compare(0, prefix.length(), prefix) == 0;
}

// Sample entries that match one of the following:
// * start with /system/
// * start with /vendor/
// * start with /data/app/
// * contain "extracted in memory from Y", where Y matches any of the above
bool ShouldSampleSmapsEntry(const perfetto::profiling::SmapsEntry& e) {
  if (StartsWith(e.pathname, "/system/") || StartsWith(e.pathname, "/vendor/") ||
      StartsWith(e.pathname, "/data/app/")) {
    return true;
  }
  if (StartsWith(e.pathname, "[anon:")) {
    if (e.pathname.find("extracted in memory from /system/") != std::string::npos) {
      return true;
    }
    if (e.pathname.find("extracted in memory from /vendor/") != std::string::npos) {
      return true;
    }
    if (e.pathname.find("extracted in memory from /data/app/") != std::string::npos) {
      return true;
    }
  }
  return false;
}

uint64_t GetCurrentBootClockNs() {
  struct timespec ts = {};
  if (clock_gettime(CLOCK_BOOTTIME, &ts) != 0) {
    LOG(FATAL) << "Failed to get boottime.";
  }
  return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

bool IsDebugBuild() {
  std::string build_type = android::base::GetProperty("ro.build.type", "");
  return !build_type.empty() && build_type != "user";
}

// Verifies the manifest restrictions are respected.
// For regular heap dumps this is already handled by heapprofd.
bool IsOomeHeapDumpAllowed(const perfetto::DataSourceConfig& ds_config) {
  if (art::Runtime::Current()->IsJavaDebuggable() || IsDebugBuild()) {
    return true;
  }

  if (ds_config.session_initiator() ==
      perfetto::DataSourceConfig::SESSION_INITIATOR_TRUSTED_SYSTEM) {
    return art::Runtime::Current()->IsProfileable() || art::Runtime::Current()->IsSystemServer();
  } else {
    return art::Runtime::Current()->IsProfileableFromShell();
  }
}

class JavaHprofDataSource : public perfetto::DataSource<JavaHprofDataSource> {
 public:
  constexpr static perfetto::BufferExhaustedPolicy kBufferExhaustedPolicy =
      perfetto::BufferExhaustedPolicy::kStall;

  explicit JavaHprofDataSource(bool is_oome_heap) : is_oome_heap_(is_oome_heap) {}

  void OnSetup(const SetupArgs& args) override {
    if (!is_oome_heap_) {
      uint64_t normalized_tracing_session_id =
          args.config->tracing_session_id() % std::numeric_limits<int32_t>::max();
      if (requested_tracing_session_id < 0) {
        LOG(ERROR) << "invalid requested tracing session id " << requested_tracing_session_id;
        return;
      }
      if (static_cast<uint64_t>(requested_tracing_session_id) != normalized_tracing_session_id) {
        return;
      }
    }

    // This is on the heap as it triggers -Wframe-larger-than.
    std::unique_ptr<perfetto::protos::pbzero::JavaHprofConfig::Decoder> cfg(
        new perfetto::protos::pbzero::JavaHprofConfig::Decoder(
            args.config->java_hprof_config_raw()));

    dump_smaps_ = cfg->dump_smaps();
    for (auto it = cfg->ignored_types(); it; ++it) {
      std::string name = (*it).ToStdString();
      ignored_types_.emplace_back(art::InversePrettyDescriptor(name));
    }
    // This tracing session ID matches the requesting tracing session ID, so we know heapprofd
    // has verified it targets this process.
    enabled_ =
        !is_oome_heap_ || (IsOomeHeapDumpAllowed(*args.config) && IsOomeDumpEnabled(*cfg.get()));
  }

  bool dump_smaps() { return dump_smaps_; }

  // Per-DataSource enable bit. Invoked by the ::Trace method.
  bool enabled() { return enabled_; }

  void OnStart(const StartArgs&) override {
    art::MutexLock lk(art_thread(), GetStateMutex());
    // In case there are multiple tracing sessions waiting for an OOME error,
    // there will be a data source instance for each of them. Before the
    // transition to kStart and signaling the dumping thread, we need to make
    // sure all the data sources are ready.
    if (is_oome_heap_ && g_oome_sessions_pending > 0) {
      --g_oome_sessions_pending;
    }
    if (g_state == State::kWaitForStart) {
      // WriteHeapPackets is responsible for checking whether the DataSource is
      // actually enabled.
      if (!is_oome_heap_ || g_oome_sessions_pending == 0) {
        g_state = State::kStart;
        GetStateCV().Broadcast(art_thread());
      }
    }
  }

  // This datasource can be used with a trace config with a short duration_ms
  // but a long datasource_stop_timeout_ms. In that case, OnStop is called (in
  // general) before the dump is done. In that case, we handle the stop
  // asynchronously, and notify the tracing service once we are done.
  // In case OnStop is called after the dump is done (but before the process
  // has exited), we just acknowledge the request.
  void OnStop(const StopArgs& a) override {
    art::MutexLock lk(art_thread(), finish_mutex_);
    if (is_finished_) {
      return;
    }
    is_stopped_ = true;
    async_stop_ = std::move(a.HandleStopAsynchronously());
  }

  static art::Thread* art_thread() {
    // TODO(fmayer): Attach the Perfetto producer thread to ART and give it a name. This is
    // not trivial, we cannot just attach the first time this method is called, because
    // AttachCurrentThread deadlocks with the ConditionVariable::Wait in WaitForDataSource.
    //
    // We should attach the thread as soon as the Client API spawns it, but that needs more
    // complicated plumbing.
    return nullptr;
  }

  std::vector<std::string> ignored_types() { return ignored_types_; }

  void Finish() {
    art::MutexLock lk(art_thread(), finish_mutex_);
    if (is_stopped_) {
      async_stop_();
    } else {
      is_finished_ = true;
    }
  }

 private:
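  // Returns true if the process's cmdline (argv0 from /proc/self/cmdline)
  // matches any of the config's process_cmdline fnmatch(3) patterns.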
  static bool IsOomeDumpEnabled(const perfetto::protos::pbzero::JavaHprofConfig::Decoder& cfg) {
    std::string cmdline;
    if (!android::base::ReadFileToString("/proc/self/cmdline", &cmdline)) {
      return false;
    }
    const char* argv0 = cmdline.c_str();

    for (auto it = cfg.process_cmdline(); it; ++it) {
      std::string pattern = (*it).ToStdString();
      if (fnmatch(pattern.c_str(), argv0, FNM_NOESCAPE) == 0) {
        return true;
      }
    }
    return false;
  }

  bool is_oome_heap_ = false;
  bool enabled_ = false;
  bool dump_smaps_ = false;
  std::vector<std::string> ignored_types_;

  art::Mutex finish_mutex_{"perfetto_hprof_ds_mutex", art::LockLevel::kGenericBottomLock};
  bool is_finished_ = false;
  bool is_stopped_ = false;
  std::function<void()> async_stop_;
};

void SetupDataSource(const std::string& ds_name, bool is_oome_heap) {
  perfetto::TracingInitArgs args;
  args.backends = perfetto::BackendType::kSystemBackend;
  perfetto::Tracing::Initialize(args);

  perfetto::DataSourceDescriptor dsd;
  dsd.set_name(ds_name);
  dsd.set_will_notify_on_stop(true);
  JavaHprofDataSource::Register(dsd, is_oome_heap);
  LOG(INFO) << "registered data source " << ds_name;
}

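// For reference, a data source registered under `ds_name` is enabled by a
// trace config along these lines (illustrative snippet only; see the Perfetto
// docs for the full TraceConfig schema):
//
//   data_sources {
//     config {
//       name: "android.java_hprof"
//       java_hprof_config {
//         process_cmdline: "com.example.app"
//       }
//     }
//   }
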
// Waits for the data source OnStart.
void WaitForDataSource(art::Thread* self) {
  art::MutexLock lk(self, GetStateMutex());
  while (g_state != State::kStart) {
    GetStateCV().Wait(self);
  }
}

// Waits for the data source OnStart with a timeout. Returns false on timeout.
bool TimedWaitForDataSource(art::Thread* self, int64_t timeout_ms) {
  const uint64_t cutoff_ns = GetCurrentBootClockNs() + timeout_ms * 1000000;
  art::MutexLock lk(self, GetStateMutex());
  while (g_state != State::kStart) {
    const uint64_t current_ns = GetCurrentBootClockNs();
    if (current_ns >= cutoff_ns) {
      return false;
    }
    GetStateCV().TimedWait(self, (cutoff_ns - current_ns) / 1000000, 0);
  }
  return true;
}

// Helper class to write Java heap dumps to `ctx`. The whole heap dump can be
// split into more perfetto.protos.HeapGraph messages, to avoid making each
// message too big.
class Writer {
 public:
  Writer(pid_t pid, JavaHprofDataSource::TraceContext* ctx, uint64_t timestamp)
      : pid_(pid), ctx_(ctx), timestamp_(timestamp),
        last_written_(ctx_->written()) {}

  // Return whether the next call to GetHeapGraph will create a new TracePacket.
  bool will_create_new_packet() const {
    return !heap_graph_ || ctx_->written() - last_written_ > kPacketSizeThreshold;
  }

  perfetto::protos::pbzero::HeapGraph* GetHeapGraph() {
    if (will_create_new_packet()) {
      CreateNewHeapGraph();
    }
    return heap_graph_;
  }

  void Finalize() {
    if (trace_packet_) {
      trace_packet_->Finalize();
    }
    heap_graph_ = nullptr;
  }

  ~Writer() { Finalize(); }

 private:
  Writer(const Writer&) = delete;
  Writer& operator=(const Writer&) = delete;
  Writer(Writer&&) = delete;
  Writer& operator=(Writer&&) = delete;

  void CreateNewHeapGraph() {
    if (heap_graph_) {
      heap_graph_->set_continued(true);
    }
    Finalize();

    uint64_t written = ctx_->written();

    trace_packet_ = ctx_->NewTracePacket();
    trace_packet_->set_timestamp(timestamp_);
    heap_graph_ = trace_packet_->set_heap_graph();
    heap_graph_->set_pid(pid_);
    heap_graph_->set_index(index_++);

    last_written_ = written;
  }

  const pid_t pid_;
  JavaHprofDataSource::TraceContext* const ctx_;
  const uint64_t timestamp_;

  uint64_t last_written_ = 0;

  perfetto::DataSource<JavaHprofDataSource>::TraceContext::TracePacketHandle
      trace_packet_;
  perfetto::protos::pbzero::HeapGraph* heap_graph_ = nullptr;

  uint64_t index_ = 0;
};

class ReferredObjectsFinder {
 public:
  explicit ReferredObjectsFinder(
      std::vector<std::pair<std::string, art::mirror::Object*>>* referred_objects,
      bool emit_field_ids)
      : referred_objects_(referred_objects), emit_field_ids_(emit_field_ids) {}

  // For art::mirror::Object::VisitReferences.
  void operator()(art::ObjPtr<art::mirror::Object> obj, art::MemberOffset offset,
                  bool is_static) const
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (offset.Uint32Value() == art::mirror::Object::ClassOffset().Uint32Value()) {
      // Skip shadow$klass pointer.
      return;
    }
    art::mirror::Object* ref = obj->GetFieldObject<art::mirror::Object>(offset);
    art::ArtField* field;
    if (is_static) {
      field = art::ArtField::FindStaticFieldWithOffset(obj->AsClass(), offset.Uint32Value());
    } else {
      field = art::ArtField::FindInstanceFieldWithOffset(obj->GetClass(), offset.Uint32Value());
    }
    std::string field_name = "";
    if (field != nullptr && emit_field_ids_) {
      field_name = field->PrettyField(/*with_type=*/true);
    }
    referred_objects_->emplace_back(std::move(field_name), ref);
  }

  void VisitRootIfNonNull(
      [[maybe_unused]] art::mirror::CompressedReference<art::mirror::Object>* root) const {}
  void VisitRoot(
      [[maybe_unused]] art::mirror::CompressedReference<art::mirror::Object>* root) const {}

 private:
  // We can use a raw Object* pointer here, because there are no concurrent GC threads after the
  // fork.
  std::vector<std::pair<std::string, art::mirror::Object*>>* referred_objects_;
  // Prettifying field names is expensive; avoid if field name will not be used.
  bool emit_field_ids_;
};

class RootFinder : public art::SingleRootVisitor {
 public:
  explicit RootFinder(
      std::map<art::RootType, std::vector<art::mirror::Object*>>* root_objects)
      : root_objects_(root_objects) {}

  void VisitRoot(art::mirror::Object* root, const art::RootInfo& info) override {
    (*root_objects_)[info.GetType()].emplace_back(root);
  }

 private:
  // We can use a raw Object* pointer here, because there are no concurrent GC threads after the
  // fork.
  std::map<art::RootType, std::vector<art::mirror::Object*>>* root_objects_;
};

perfetto::protos::pbzero::HeapGraphRoot::Type ToProtoType(art::RootType art_type) {
  using perfetto::protos::pbzero::HeapGraphRoot;
  switch (art_type) {
    case art::kRootUnknown:
      return HeapGraphRoot::ROOT_UNKNOWN;
    case art::kRootJNIGlobal:
      return HeapGraphRoot::ROOT_JNI_GLOBAL;
    case art::kRootJNILocal:
      return HeapGraphRoot::ROOT_JNI_LOCAL;
    case art::kRootJavaFrame:
      return HeapGraphRoot::ROOT_JAVA_FRAME;
    case art::kRootNativeStack:
      return HeapGraphRoot::ROOT_NATIVE_STACK;
    case art::kRootStickyClass:
      return HeapGraphRoot::ROOT_STICKY_CLASS;
    case art::kRootThreadBlock:
      return HeapGraphRoot::ROOT_THREAD_BLOCK;
    case art::kRootMonitorUsed:
      return HeapGraphRoot::ROOT_MONITOR_USED;
    case art::kRootThreadObject:
      return HeapGraphRoot::ROOT_THREAD_OBJECT;
    case art::kRootInternedString:
      return HeapGraphRoot::ROOT_INTERNED_STRING;
    case art::kRootFinalizing:
      return HeapGraphRoot::ROOT_FINALIZING;
    case art::kRootDebugger:
      return HeapGraphRoot::ROOT_DEBUGGER;
    case art::kRootReferenceCleanup:
      return HeapGraphRoot::ROOT_REFERENCE_CLEANUP;
    case art::kRootVMInternal:
      return HeapGraphRoot::ROOT_VM_INTERNAL;
    case art::kRootJNIMonitor:
      return HeapGraphRoot::ROOT_JNI_MONITOR;
  }
}

perfetto::protos::pbzero::HeapGraphType::Kind ProtoClassKind(uint32_t class_flags) {
  using perfetto::protos::pbzero::HeapGraphType;
  switch (class_flags) {
    case art::mirror::kClassFlagNormal:
    case art::mirror::kClassFlagRecord:
      return HeapGraphType::KIND_NORMAL;
    case art::mirror::kClassFlagNoReferenceFields:
    case art::mirror::kClassFlagNoReferenceFields | art::mirror::kClassFlagRecord:
      return HeapGraphType::KIND_NOREFERENCES;
    case art::mirror::kClassFlagString | art::mirror::kClassFlagNoReferenceFields:
      return HeapGraphType::KIND_STRING;
    case art::mirror::kClassFlagObjectArray:
      return HeapGraphType::KIND_ARRAY;
    case art::mirror::kClassFlagClass:
      return HeapGraphType::KIND_CLASS;
    case art::mirror::kClassFlagClassLoader:
      return HeapGraphType::KIND_CLASSLOADER;
    case art::mirror::kClassFlagDexCache:
      return HeapGraphType::KIND_DEXCACHE;
    case art::mirror::kClassFlagSoftReference:
      return HeapGraphType::KIND_SOFT_REFERENCE;
    case art::mirror::kClassFlagWeakReference:
      return HeapGraphType::KIND_WEAK_REFERENCE;
    case art::mirror::kClassFlagFinalizerReference:
      return HeapGraphType::KIND_FINALIZER_REFERENCE;
    case art::mirror::kClassFlagPhantomReference:
      return HeapGraphType::KIND_PHANTOM_REFERENCE;
    default:
      return HeapGraphType::KIND_UNKNOWN;
  }
}

std::string PrettyType(art::mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS {
  if (klass == nullptr) {
    return "(raw)";
  }
  std::string temp;
  std::string result(art::PrettyDescriptor(klass->GetDescriptor(&temp)));
  return result;
}

void DumpSmaps(JavaHprofDataSource::TraceContext* ctx) {
  FILE* smaps = fopen("/proc/self/smaps", "re");
  if (smaps != nullptr) {
    auto trace_packet = ctx->NewTracePacket();
    auto* smaps_packet = trace_packet->set_smaps_packet();
    smaps_packet->set_pid(getpid());
    perfetto::profiling::ParseSmaps(smaps,
                                    [&smaps_packet](const perfetto::profiling::SmapsEntry& e) {
                                      if (ShouldSampleSmapsEntry(e)) {
                                        auto* smaps_entry = smaps_packet->add_entries();
                                        smaps_entry->set_path(e.pathname);
                                        smaps_entry->set_size_kb(e.size_kb);
                                        smaps_entry->set_private_dirty_kb(e.private_dirty_kb);
                                        smaps_entry->set_swap_kb(e.swap_kb);
                                      }
                                    });
    fclose(smaps);
  } else {
    PLOG(ERROR) << "failed to open smaps";
  }
}

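// Object ids are raw object addresses scaled down by the object alignment.
// This keeps ids small (cheaper to varint-encode) and reserves 0 for null.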
uint64_t GetObjectId(const art::mirror::Object* obj) {
  return reinterpret_cast<uint64_t>(obj) / std::alignment_of<art::mirror::Object>::value;
}

template <typename F>
void ForInstanceReferenceField(art::mirror::Class* klass, F fn) NO_THREAD_SAFETY_ANALYSIS {
  for (art::ArtField& af : klass->GetIFields()) {
    if (af.IsPrimitiveType() ||
        af.GetOffset().Uint32Value() == art::mirror::Object::ClassOffset().Uint32Value()) {
      continue;
    }
    fn(af.GetOffset());
  }
}

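// Number of bytes needed to varint-encode `n` (7 payload bits per byte).
// E.g. EncodedSize(127) == 1 and EncodedSize(128) == 2.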
size_t EncodedSize(uint64_t n) {
  if (n == 0) return 1;
  return 1 + static_cast<size_t>(art::MostSignificantBit(n)) / 7;
}

// Returns all the references that `*obj` (an object of type `*klass`) is holding.
std::vector<std::pair<std::string, art::mirror::Object*>> GetReferences(art::mirror::Object* obj,
                                                                        art::mirror::Class* klass,
                                                                        bool emit_field_ids)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  std::vector<std::pair<std::string, art::mirror::Object*>> referred_objects;
  ReferredObjectsFinder objf(&referred_objects, emit_field_ids);

  uint32_t klass_flags = klass->GetClassFlags();
  if (klass_flags != art::mirror::kClassFlagNormal &&
      klass_flags != art::mirror::kClassFlagSoftReference &&
      klass_flags != art::mirror::kClassFlagWeakReference &&
      klass_flags != art::mirror::kClassFlagFinalizerReference &&
      klass_flags != art::mirror::kClassFlagPhantomReference) {
    obj->VisitReferences(objf, art::VoidFunctor());
  } else {
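    // Ordinary instances and java.lang.ref references take this path: walk the
    // declared instance reference fields directly, up the class hierarchy. For
    // the reference classes this also captures the referent field.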
    for (art::mirror::Class* cls = klass; cls != nullptr; cls = cls->GetSuperClass().Ptr()) {
      ForInstanceReferenceField(cls,
                                [obj, objf](art::MemberOffset offset) NO_THREAD_SAFETY_ANALYSIS {
                                  objf(art::ObjPtr<art::mirror::Object>(obj),
                                       offset,
                                       /*is_static=*/false);
                                });
    }
  }
  return referred_objects;
}

// Returns the base for delta encoding all the `referred_objects`. If delta
// encoding would waste space, returns 0.
uint64_t EncodeBaseObjId(
    const std::vector<std::pair<std::string, art::mirror::Object*>>& referred_objects,
    const art::mirror::Object* min_nonnull_ptr) REQUIRES_SHARED(art::Locks::mutator_lock_) {
  uint64_t base_obj_id = GetObjectId(min_nonnull_ptr);
  if (base_obj_id <= 1) {
    return 0;
  }

  // We need to decrement the base for object ids so that we can tell apart
  // null references.
  base_obj_id--;
  uint64_t bytes_saved = 0;
  for (const auto& p : referred_objects) {
    art::mirror::Object* referred_obj = p.second;
    if (!referred_obj) {
      continue;
    }
    uint64_t referred_obj_id = GetObjectId(referred_obj);
    bytes_saved += EncodedSize(referred_obj_id) - EncodedSize(referred_obj_id - base_obj_id);
  }

  // +1 for storing the field id.
  if (bytes_saved <= EncodedSize(base_obj_id) + 1) {
    // Subtracting the base ptr gains fewer bytes than it takes to store it.
    return 0;
  }
  return base_obj_id;
}

// Helper to keep intermediate state while dumping objects and classes from ART into
// perfetto.protos.HeapGraph.
class HeapGraphDumper {
 public:
  // Instances of classes whose name is in `ignored_types` will be ignored.
  explicit HeapGraphDumper(const std::vector<std::string>& ignored_types)
      : ignored_types_(ignored_types),
        reference_field_ids_(std::make_unique<protozero::PackedVarInt>()),
        reference_object_ids_(std::make_unique<protozero::PackedVarInt>()) {}

  // Dumps a heap graph from `*runtime` and writes it to `writer`.
  void Dump(art::Runtime* runtime, Writer& writer) REQUIRES(art::Locks::mutator_lock_) {
    DumpRootObjects(runtime, writer);

    DumpObjects(runtime, writer);

    WriteInternedData(writer);
  }

 private:
  // Dumps the root objects from `*runtime` to `writer`.
  void DumpRootObjects(art::Runtime* runtime, Writer& writer)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    std::map<art::RootType, std::vector<art::mirror::Object*>> root_objects;
    RootFinder rcf(&root_objects);
    runtime->VisitRoots(&rcf);
    std::unique_ptr<protozero::PackedVarInt> object_ids(new protozero::PackedVarInt);
    for (const auto& p : root_objects) {
      const art::RootType root_type = p.first;
      const std::vector<art::mirror::Object*>& children = p.second;
      perfetto::protos::pbzero::HeapGraphRoot* root_proto = writer.GetHeapGraph()->add_roots();
      root_proto->set_root_type(ToProtoType(root_type));
      for (art::mirror::Object* obj : children) {
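        // If writing the next submessage could exceed the packet size
        // threshold, flush the accumulated ids and continue this root list in
        // a fresh packet.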
        if (writer.will_create_new_packet()) {
          root_proto->set_object_ids(*object_ids);
          object_ids->Reset();
          root_proto = writer.GetHeapGraph()->add_roots();
          root_proto->set_root_type(ToProtoType(root_type));
        }
        object_ids->Append(GetObjectId(obj));
      }
      root_proto->set_object_ids(*object_ids);
      object_ids->Reset();
    }
  }

  // Dumps all the objects from `*runtime` to `writer`.
  void DumpObjects(art::Runtime* runtime, Writer& writer) REQUIRES(art::Locks::mutator_lock_) {
    runtime->GetHeap()->VisitObjectsPaused(
        [this, &writer](art::mirror::Object* obj)
            REQUIRES_SHARED(art::Locks::mutator_lock_) { WriteOneObject(obj, writer); });
  }

  // Writes all the previously accumulated (while dumping objects and roots) interned data to
  // `writer`.
  void WriteInternedData(Writer& writer) {
    for (const auto& p : interned_locations_) {
      const std::string& str = p.first;
      uint64_t id = p.second;

      perfetto::protos::pbzero::InternedString* location_proto =
          writer.GetHeapGraph()->add_location_names();
      location_proto->set_iid(id);
      location_proto->set_str(reinterpret_cast<const uint8_t*>(str.c_str()), str.size());
    }
    for (const auto& p : interned_fields_) {
      const std::string& str = p.first;
      uint64_t id = p.second;

      perfetto::protos::pbzero::InternedString* field_proto =
          writer.GetHeapGraph()->add_field_names();
      field_proto->set_iid(id);
      field_proto->set_str(reinterpret_cast<const uint8_t*>(str.c_str()), str.size());
    }
  }

  // Writes `*obj` into `writer`.
  void WriteOneObject(art::mirror::Object* obj, Writer& writer)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (obj->IsClass()) {
      WriteClass(obj->AsClass().Ptr(), writer);
    }

    art::mirror::Class* klass = obj->GetClass();
    uintptr_t class_ptr = reinterpret_cast<uintptr_t>(klass);
    // We need to synthesize a new type for Class<Foo>, which does not exist
    // in the runtime. Otherwise, all the static members of all classes would be
    // attributed to java.lang.Class.
    if (klass->IsClassClass()) {
      class_ptr = WriteSyntheticClassFromObj(obj, writer);
    }

    if (IsIgnored(obj)) {
      return;
    }

    auto class_id = FindOrAppend(&interned_classes_, class_ptr);

    uint64_t object_id = GetObjectId(obj);
    perfetto::protos::pbzero::HeapGraphObject* object_proto = writer.GetHeapGraph()->add_objects();
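    // Delta-encode the object id against the previously written one where
    // possible; the heap visit tends to emit objects in increasing address
    // order, so the deltas are usually small.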
    if (prev_object_id_ && prev_object_id_ < object_id) {
      object_proto->set_id_delta(object_id - prev_object_id_);
    } else {
      object_proto->set_id(object_id);
    }
    prev_object_id_ = object_id;
    object_proto->set_type_id(class_id);

    // Arrays / strings are magic and have an instance-dependent size.
    if (obj->SizeOf() != klass->GetObjectSize()) {
      object_proto->set_self_size(obj->SizeOf());
    }

    FillReferences(obj, klass, object_proto);

    FillFieldValues(obj, klass, object_proto);
  }

  // Writes `*klass` into `writer`.
  void WriteClass(art::mirror::Class* klass, Writer& writer)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    perfetto::protos::pbzero::HeapGraphType* type_proto = writer.GetHeapGraph()->add_types();
    type_proto->set_id(FindOrAppend(&interned_classes_, reinterpret_cast<uintptr_t>(klass)));
    type_proto->set_class_name(PrettyType(klass));
    type_proto->set_location_id(FindOrAppend(&interned_locations_, klass->GetLocation()));
    type_proto->set_object_size(klass->GetObjectSize());
    type_proto->set_kind(ProtoClassKind(klass->GetClassFlags()));
    type_proto->set_classloader_id(GetObjectId(klass->GetClassLoader().Ptr()));
    if (klass->GetSuperClass().Ptr()) {
      type_proto->set_superclass_id(FindOrAppend(
          &interned_classes_, reinterpret_cast<uintptr_t>(klass->GetSuperClass().Ptr())));
    }
    ForInstanceReferenceField(
        klass, [klass, this](art::MemberOffset offset) NO_THREAD_SAFETY_ANALYSIS {
          auto art_field = art::ArtField::FindInstanceFieldWithOffset(klass, offset.Uint32Value());
          reference_field_ids_->Append(
              FindOrAppend(&interned_fields_, art_field->PrettyField(true)));
        });
    type_proto->set_reference_field_id(*reference_field_ids_);
    reference_field_ids_->Reset();
  }

  // Writes into `writer` a fake class that represents a type used only by `*obj`.
  uintptr_t WriteSyntheticClassFromObj(art::mirror::Object* obj, Writer& writer)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    CHECK(obj->IsClass());
    perfetto::protos::pbzero::HeapGraphType* type_proto = writer.GetHeapGraph()->add_types();
    // All pointers are at least multiples of two, so this way we can make sure
    // we are not colliding with a real class.
    uintptr_t class_ptr = reinterpret_cast<uintptr_t>(obj) | 1;
    auto class_id = FindOrAppend(&interned_classes_, class_ptr);
    type_proto->set_id(class_id);
    type_proto->set_class_name(obj->PrettyTypeOf());
    type_proto->set_location_id(FindOrAppend(&interned_locations_, obj->AsClass()->GetLocation()));
    return class_ptr;
  }

  // Fills `*object_proto` with all the references held by `*obj` (an object of type `*klass`).
  void FillReferences(art::mirror::Object* obj,
                      art::mirror::Class* klass,
                      perfetto::protos::pbzero::HeapGraphObject* object_proto)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    const uint32_t klass_flags = klass->GetClassFlags();
    const bool emit_field_ids = klass_flags != art::mirror::kClassFlagObjectArray &&
                                klass_flags != art::mirror::kClassFlagNormal &&
                                klass_flags != art::mirror::kClassFlagSoftReference &&
                                klass_flags != art::mirror::kClassFlagWeakReference &&
                                klass_flags != art::mirror::kClassFlagFinalizerReference &&
                                klass_flags != art::mirror::kClassFlagPhantomReference;
    std::vector<std::pair<std::string, art::mirror::Object*>> referred_objects =
        GetReferences(obj, klass, emit_field_ids);

    art::mirror::Object* min_nonnull_ptr = FilterIgnoredReferencesAndFindMin(referred_objects);

    uint64_t base_obj_id = EncodeBaseObjId(referred_objects, min_nonnull_ptr);

    for (const auto& p : referred_objects) {
      const std::string& field_name = p.first;
      art::mirror::Object* referred_obj = p.second;
      if (emit_field_ids) {
        reference_field_ids_->Append(FindOrAppend(&interned_fields_, field_name));
      }
      uint64_t referred_obj_id = GetObjectId(referred_obj);
      if (referred_obj_id) {
        referred_obj_id -= base_obj_id;
      }
      reference_object_ids_->Append(referred_obj_id);
    }
    if (emit_field_ids) {
      object_proto->set_reference_field_id(*reference_field_ids_);
      reference_field_ids_->Reset();
    }
    if (base_obj_id) {
      // The field is called `reference_field_id_base`, but it has always been used as a base for
      // `reference_object_id`. It should be called `reference_object_id_base`.
      object_proto->set_reference_field_id_base(base_obj_id);
    }
    object_proto->set_reference_object_id(*reference_object_ids_);
    reference_object_ids_->Reset();
  }

  // Iterates all the `referred_objects` and sets all the objects that are supposed to be ignored
  // to nullptr. Returns the object with the smallest address (ignoring nullptr).
  art::mirror::Object* FilterIgnoredReferencesAndFindMin(
      std::vector<std::pair<std::string, art::mirror::Object*>>& referred_objects) const
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::mirror::Object* min_nonnull_ptr = nullptr;
    for (auto& p : referred_objects) {
      art::mirror::Object*& referred_obj = p.second;
      if (referred_obj == nullptr)
        continue;
      if (IsIgnored(referred_obj)) {
        referred_obj = nullptr;
        continue;
      }
      if (min_nonnull_ptr == nullptr || min_nonnull_ptr > referred_obj) {
        min_nonnull_ptr = referred_obj;
      }
    }
    return min_nonnull_ptr;
  }

  // Fills `*object_proto` with the value of a subset of potentially interesting fields of `*obj`
  // (an object of type `*klass`).
  void FillFieldValues(art::mirror::Object* obj,
                       art::mirror::Class* klass,
                       perfetto::protos::pbzero::HeapGraphObject* object_proto) const
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (obj->IsClass() || klass->IsClassClass()) {
      return;
    }

    for (art::mirror::Class* cls = klass; cls != nullptr; cls = cls->GetSuperClass().Ptr()) {
      if (cls->IsArrayClass()) {
        continue;
      }

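      // Special-case libcore's NativeAllocationRegistry: its `size` field
      // records how much native memory the registry attributes to this object.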
      if (cls->DescriptorEquals("Llibcore/util/NativeAllocationRegistry;")) {
        art::ArtField* af = cls->FindDeclaredInstanceField(
            "size", art::Primitive::Descriptor(art::Primitive::kPrimLong));
        if (af) {
          object_proto->set_native_allocation_registry_size_field(af->GetLong(obj));
        }
      }
    }
  }

  // Returns true if `*obj` has a type that's supposed to be ignored.
  bool IsIgnored(art::mirror::Object* obj) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (obj->IsClass()) {
      return false;
    }
    art::mirror::Class* klass = obj->GetClass();
    std::string temp;
    std::string_view name(klass->GetDescriptor(&temp));
    return std::find(ignored_types_.begin(), ignored_types_.end(), name) != ignored_types_.end();
  }

  // Name of classes whose instances should be ignored.
  const std::vector<std::string> ignored_types_;

  // Make sure that intern ID 0 (default proto value for a uint64_t) always maps to ""
  // (default proto value for a string) or to 0 (default proto value for a uint64).

  // Map from string (the field name) to its index in perfetto.protos.HeapGraph.field_names
  std::map<std::string, uint64_t> interned_fields_{{"", 0}};
  // Map from string (the location name) to its index in perfetto.protos.HeapGraph.location_names
  std::map<std::string, uint64_t> interned_locations_{{"", 0}};
  // Map from addr (the class pointer) to its id in perfetto.protos.HeapGraph.types
  std::map<uintptr_t, uint64_t> interned_classes_{{0, 0}};

  // Temporary buffers: used locally in some methods and then cleared.
  std::unique_ptr<protozero::PackedVarInt> reference_field_ids_;
  std::unique_ptr<protozero::PackedVarInt> reference_object_ids_;

  // Id of the previous object that was dumped. Used for delta encoding.
  uint64_t prev_object_id_ = 0;
};

// waitpid with a timeout implemented by ~busy-waiting.
// See b/181031512 for rationale.
void BusyWaitpid(pid_t pid, uint32_t timeout_ms) {
  for (size_t i = 0;; ++i) {
    if (i == timeout_ms) {
      // The child hasn't exited.
      // Give up and SIGKILL it. The next waitpid should succeed.
      LOG(ERROR) << "perfetto_hprof child timed out. Sending SIGKILL.";
      kill(pid, SIGKILL);
    }
    int stat_loc;
    pid_t wait_result = waitpid(pid, &stat_loc, WNOHANG);
    if (wait_result == -1 && errno != EINTR) {
      if (errno != ECHILD) {
        // This hopefully never happens (should only be EINVAL).
        PLOG(FATAL_WITHOUT_ABORT) << "waitpid";
      }
      // If we get ECHILD, the parent process was handling SIGCHLD, or did a wildcard wait.
      // The child is no longer here either way, so that's good enough for us.
      break;
    } else if (wait_result > 0) {
      break;
    } else {  // wait_result == 0 || errno == EINTR.
      usleep(1000);
    }
  }
}

enum class ResumeParentPolicy {
  IMMEDIATELY,
  DEFERRED
};

void ForkAndRun(art::Thread* self,
                ResumeParentPolicy resume_parent_policy,
                const std::function<void(pid_t child)>& parent_runnable,
                const std::function<void(pid_t parent, uint64_t timestamp)>& child_runnable) {
  pid_t parent_pid = getpid();
  LOG(INFO) << "forking for " << parent_pid;
  // Need to take a heap dump while GC isn't running. See the comment in
  // Heap::VisitObjects(). Also we need the critical section to avoid visiting
  // the same object twice. See b/34967844.
  //
  // We need to do this before the fork, because otherwise it can deadlock
  // waiting for the GC, as all other threads get terminated by the clone, but
  // their locks are not released.
  // This does not perfectly solve all fork-related issues, as there could still be threads that
  // are unaffected by ScopedSuspendAll and in a non-fork-friendly situation
  // (e.g. inside a malloc holding a lock). This situation is quite rare, and in that case we will
  // hit the watchdog in the grand-child process if it gets stuck.
  std::optional<art::gc::ScopedGCCriticalSection> gcs(std::in_place, self, art::gc::kGcCauseHprof,
                                                      art::gc::kCollectorTypeHprof);

  std::optional<art::ScopedSuspendAll> ssa(std::in_place, __FUNCTION__, /*long_suspend=*/ true);

  pid_t pid = fork();
  if (pid == -1) {
    // Fork error.
    PLOG(ERROR) << "fork";
    return;
  }
  if (pid != 0) {
    // Parent
    if (resume_parent_policy == ResumeParentPolicy::IMMEDIATELY) {
      // Stop the thread suspension as soon as possible to allow the rest of the application to
      // continue while we waitpid here.
      ssa.reset();
      gcs.reset();
    }
    parent_runnable(pid);
    if (resume_parent_policy != ResumeParentPolicy::IMMEDIATELY) {
      ssa.reset();
      gcs.reset();
    }
    return;
  }
  // The following code is only executed by the child of the original process.
  // Uninstall the signal handler, so we don't trigger a profile on it.
  if (sigaction(kJavaHeapprofdSignal, &g_orig_act, nullptr) != 0) {
    close(g_signal_pipe_fds[0]);
    close(g_signal_pipe_fds[1]);
    PLOG(FATAL) << "Failed to sigaction";
    return;
  }

  uint64_t ts = GetCurrentBootClockNs();
  child_runnable(parent_pid, ts);
  // Prevent the `atexit` handlers from running. We do not want to call cleanup
  // functions the parent process has registered.
  art::FastExit(0);
}

void WriteHeapPackets(pid_t parent_pid, uint64_t timestamp) {
  JavaHprofDataSource::Trace(
      [parent_pid, timestamp](JavaHprofDataSource::TraceContext ctx)
          NO_THREAD_SAFETY_ANALYSIS {
        bool dump_smaps;
        std::vector<std::string> ignored_types;
        {
          auto ds = ctx.GetDataSourceLocked();
          if (!ds || !ds->enabled()) {
            if (ds) ds->Finish();
            LOG(INFO) << "skipping irrelevant data source.";
            return;
          }
          dump_smaps = ds->dump_smaps();
          ignored_types = ds->ignored_types();
        }
        LOG(INFO) << "dumping heap for " << parent_pid;
        if (dump_smaps) {
          DumpSmaps(&ctx);
        }
        Writer writer(parent_pid, &ctx, timestamp);
        HeapGraphDumper dumper(ignored_types);

        dumper.Dump(art::Runtime::Current(), writer);

        writer.Finalize();
        ctx.Flush([] {
          art::MutexLock lk(JavaHprofDataSource::art_thread(), GetStateMutex());
          g_state = State::kEnd;
          GetStateCV().Broadcast(JavaHprofDataSource::art_thread());
        });
        // Wait for the Flush that will happen on the Perfetto thread.
        {
          art::MutexLock lk(JavaHprofDataSource::art_thread(), GetStateMutex());
          while (g_state != State::kEnd) {
            GetStateCV().Wait(JavaHprofDataSource::art_thread());
          }
        }
        {
          auto ds = ctx.GetDataSourceLocked();
          if (ds) {
            ds->Finish();
          } else {
            LOG(ERROR) << "datasource timed out (duration_ms + datasource_stop_timeout_ms) "
                          "before dump finished";
          }
        }
      });
}

void DumpPerfetto(art::Thread* self) {
  ForkAndRun(
      self,
      ResumeParentPolicy::IMMEDIATELY,
      // parent thread
      [](pid_t child) {
        // Busy waiting here will introduce some extra latency, but that is okay because we have
        // already unsuspended all other threads. This runs on the perfetto_hprof_listener, which
        // is not needed for progress of the app itself.
        // We daemonize the child process, so effectively we only need to wait
        // for it to fork and exit.
        BusyWaitpid(child, 1000);
      },
      // child thread
      [self](pid_t dumped_pid, uint64_t timestamp) {
        // Daemon creates a new process that is the grand-child of the original process, and exits.
        if (daemon(0, 0) == -1) {
          PLOG(FATAL) << "daemon";
        }
        // The following code is only executed by the grand-child of the original process.

        // Make sure that this is the first thing we do after forking, so if anything
        // below hangs, the fork will go away from the watchdog.
        ArmWatchdogOrDie();
        SetupDataSource("android.java_hprof", false);
        WaitForDataSource(self);
        WriteHeapPackets(dumped_pid, timestamp);
        LOG(INFO) << "finished dumping heap for " << dumped_pid;
      });
}

void DumpPerfettoOutOfMemory() REQUIRES_SHARED(art::Locks::mutator_lock_) {
  art::Thread* self = art::Thread::Current();
  if (!self) {
    LOG(FATAL_WITHOUT_ABORT) << "no thread in DumpPerfettoOutOfMemory";
    return;
  }

  // Ensure that there is an active, armed tracing session.
  uint32_t session_cnt =
      android::base::GetUintProperty<uint32_t>("traced.oome_heap_session.count", 0);
  if (session_cnt == 0) {
    return;
  }
  {
    // OutOfMemoryErrors are reentrant, make sure we do not fork and process
    // more than once.
    art::MutexLock lk(self, GetStateMutex());
    if (g_oome_triggered) {
      return;
    }
    g_oome_triggered = true;
    g_oome_sessions_pending = session_cnt;
  }

  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
  // If we fork & resume the original process execution it will most likely exit
  // ~immediately due to the OOME error thrown. When the system detects that, it
  // will clean up by killing all processes in the cgroup (including the process
  // we just forked).
  // We need to avoid the race between the heap dump and the process group
  // cleanup, and the only way to do this is to avoid resuming the original
  // process until the heap dump is complete.
  // Given we are already about to crash anyway, the diagnostic data we get
  // outweighs the cost of introducing some latency.
  ForkAndRun(
      self,
      ResumeParentPolicy::DEFERRED,
      // parent process
      [](pid_t child) {
        // waitpid to reap the zombie; we are explicitly waiting for the child
        // to exit.
        // The reason for the timeout on top of the watchdog is that it is
        // possible (albeit unlikely) that even the watchdog will fail to be
        // activated in the case of an atfork handler.
        BusyWaitpid(child, kWatchdogTimeoutSec * 1000);
      },
      // child process
      [self](pid_t dumped_pid, uint64_t timestamp) {
        ArmWatchdogOrDie();
        art::SetThreadName("perfetto_oome_hprof");
        art::ScopedTrace trace("perfetto_hprof oome");
        SetupDataSource("android.java_hprof.oom", true);
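        // Fire the OOME trigger so that tracing sessions armed on it can
        // start recording before we wait for them below.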
        perfetto::Tracing::ActivateTriggers({"com.android.telemetry.art-outofmemory"}, 500);

        // A pre-armed tracing session might not exist, so we should wait for a
        // limited amount of time before we decide to let the execution continue.
        if (!TimedWaitForDataSource(self, 1000)) {
          LOG(INFO) << "OOME hprof timeout (state " << g_state << ")";
          return;
        }
        WriteHeapPackets(dumped_pid, timestamp);
        LOG(INFO) << "OOME hprof complete for " << dumped_pid;
      });
}

// The plugin initialization function.
extern "C" bool ArtPlugin_Initialize() {
  if (art::Runtime::Current() == nullptr) {
    return false;
  }
  art::Thread* self = art::Thread::Current();
  {
    art::MutexLock lk(self, GetStateMutex());
    if (g_state != State::kUninitialized) {
      LOG(ERROR) << "perfetto_hprof already initialized. state: " << g_state;
      return false;
    }
    g_state = State::kWaitForListener;
  }

  if (pipe2(g_signal_pipe_fds, O_CLOEXEC) == -1) {
    PLOG(ERROR) << "Failed to pipe";
    return false;
  }

  struct sigaction act = {};
  act.sa_flags = SA_SIGINFO | SA_RESTART;
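  // The signal's sival_int payload carries the id of the tracing session that
  // requested the dump; the handler records it and wakes the listener thread
  // through the pipe.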
  act.sa_sigaction = [](int, siginfo_t* si, void*) {
    requested_tracing_session_id = si->si_value.sival_int;
    if (write(g_signal_pipe_fds[1], kByte, sizeof(kByte)) == -1) {
      PLOG(ERROR) << "Failed to trigger heap dump";
    }
  };

  // TODO(fmayer): We can probably use the SignalCatcher thread here to not
  // have an idle thread.
  if (sigaction(kJavaHeapprofdSignal, &act, &g_orig_act) != 0) {
    close(g_signal_pipe_fds[0]);
    close(g_signal_pipe_fds[1]);
    PLOG(ERROR) << "Failed to sigaction";
    return false;
  }

  std::thread th([] {
    art::Runtime* runtime = art::Runtime::Current();
    if (!runtime) {
      LOG(FATAL_WITHOUT_ABORT) << "no runtime in perfetto_hprof_listener";
      return;
    }
    if (!runtime->AttachCurrentThread("perfetto_hprof_listener", /*as_daemon=*/ true,
                                      runtime->GetSystemThreadGroup(), /*create_peer=*/ false)) {
      LOG(ERROR) << "failed to attach thread.";
      {
        art::MutexLock lk(nullptr, GetStateMutex());
        g_state = State::kUninitialized;
        GetStateCV().Broadcast(nullptr);
      }

      return;
    }
    art::Thread* self = art::Thread::Current();
    if (!self) {
      LOG(FATAL_WITHOUT_ABORT) << "no thread in perfetto_hprof_listener";
      return;
    }
    {
      art::MutexLock lk(self, GetStateMutex());
      if (g_state == State::kWaitForListener) {
        g_state = State::kWaitForStart;
        GetStateCV().Broadcast(self);
      }
    }
    char buf[1];
    for (;;) {
      int res;
      do {
        res = read(g_signal_pipe_fds[0], buf, sizeof(buf));
      } while (res == -1 && errno == EINTR);

      if (res <= 0) {
        if (res == -1) {
          PLOG(ERROR) << "failed to read";
        }
        close(g_signal_pipe_fds[0]);
        return;
      }

      perfetto_hprof::DumpPerfetto(self);
    }
  });
  th.detach();

  // Register the OOM error handler.
  art::Runtime::Current()->SetOutOfMemoryErrorHook(perfetto_hprof::DumpPerfettoOutOfMemory);

  return true;
}

extern "C" bool ArtPlugin_Deinitialize() {
  art::Runtime::Current()->SetOutOfMemoryErrorHook(nullptr);

  if (sigaction(kJavaHeapprofdSignal, &g_orig_act, nullptr) != 0) {
    PLOG(ERROR) << "failed to reset signal handler";
    // We cannot close the pipe if the signal handler wasn't unregistered,
    // to avoid receiving SIGPIPE.
    return false;
  }
  close(g_signal_pipe_fds[1]);

  art::Thread* self = art::Thread::Current();
  art::MutexLock lk(self, GetStateMutex());
  // Wait until after the thread was registered to the runtime. This is so
  // we do not attempt to register it with the runtime after it had been torn
  // down (ArtPlugin_Deinitialize gets called in the Runtime dtor).
  while (g_state == State::kWaitForListener) {
    GetStateCV().Wait(art::Thread::Current());
  }
  g_state = State::kUninitialized;
  GetStateCV().Broadcast(self);
  return true;
}

}  // namespace perfetto_hprof

namespace perfetto {

PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(perfetto_hprof::JavaHprofDataSource);

}  // namespace perfetto