1 /* 2 * Copyright (C) 2018 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #ifndef SRC_PROFILING_MEMORY_UNWINDING_H_ 18 #define SRC_PROFILING_MEMORY_UNWINDING_H_ 19 20 #include <unwindstack/Regs.h> 21 22 #include "perfetto/base/time.h" 23 #include "perfetto/ext/base/scoped_file.h" 24 #include "perfetto/ext/base/thread_task_runner.h" 25 #include "perfetto/ext/tracing/core/basic_types.h" 26 #include "src/profiling/common/unwind_support.h" 27 #include "src/profiling/memory/bookkeeping.h" 28 #include "src/profiling/memory/unwound_messages.h" 29 #include "src/profiling/memory/wire_protocol.h" 30 31 namespace perfetto { 32 namespace profiling { 33 34 std::unique_ptr<unwindstack::Regs> CreateRegsFromRawData( 35 unwindstack::ArchEnum arch, 36 void* raw_data); 37 38 bool DoUnwind(WireMessage*, UnwindingMetadata* metadata, AllocRecord* out); 39 40 // AllocRecords are expensive to construct and destruct. We have seen up to 41 // 10 % of total CPU of heapprofd being used to destruct them. That is why 42 // we re-use them to cut CPU usage significantly. 43 class AllocRecordArena { 44 public: AllocRecordArena()45 AllocRecordArena() : alloc_records_mutex_(new std::mutex()) {} 46 47 void ReturnAllocRecord(std::unique_ptr<AllocRecord>); 48 std::unique_ptr<AllocRecord> BorrowAllocRecord(); 49 50 void Enable(); 51 void Disable(); 52 53 private: 54 std::unique_ptr<std::mutex> alloc_records_mutex_; 55 std::vector<std::unique_ptr<AllocRecord>> alloc_records_; 56 bool enabled_ = true; 57 }; 58 59 class UnwindingWorker : public base::UnixSocket::EventListener { 60 public: 61 class Delegate { 62 public: 63 virtual void PostAllocRecord(UnwindingWorker*, 64 std::unique_ptr<AllocRecord>) = 0; 65 virtual void PostFreeRecord(UnwindingWorker*, std::vector<FreeRecord>) = 0; 66 virtual void PostHeapNameRecord(UnwindingWorker*, HeapNameRecord rec) = 0; 67 virtual void PostSocketDisconnected(UnwindingWorker*, 68 DataSourceInstanceID, 69 pid_t pid, 70 SharedRingBuffer::Stats stats) = 0; 71 virtual ~Delegate(); 72 }; 73 74 struct HandoffData { 75 DataSourceInstanceID data_source_instance_id; 76 base::UnixSocketRaw sock; 77 base::ScopedFile maps_fd; 78 base::ScopedFile mem_fd; 79 SharedRingBuffer shmem; 80 ClientConfiguration client_config; 81 bool stream_allocations; 82 }; 83 UnwindingWorker(Delegate * delegate,base::ThreadTaskRunner thread_task_runner)84 UnwindingWorker(Delegate* delegate, base::ThreadTaskRunner thread_task_runner) 85 : delegate_(delegate), 86 thread_task_runner_(std::move(thread_task_runner)) {} 87 88 // Public API safe to call from other threads. 89 void PostDisconnectSocket(pid_t pid); 90 void PostHandoffSocket(HandoffData); ReturnAllocRecord(std::unique_ptr<AllocRecord> record)91 void ReturnAllocRecord(std::unique_ptr<AllocRecord> record) { 92 alloc_record_arena_.ReturnAllocRecord(std::move(record)); 93 } 94 95 // Implementation of UnixSocket::EventListener. 96 // Do not call explicitly. 97 void OnDisconnect(base::UnixSocket* self) override; OnNewIncomingConnection(base::UnixSocket *,std::unique_ptr<base::UnixSocket>)98 void OnNewIncomingConnection(base::UnixSocket*, 99 std::unique_ptr<base::UnixSocket>) override { 100 PERFETTO_DFATAL_OR_ELOG("This should not happen."); 101 } 102 void OnDataAvailable(base::UnixSocket* self) override; 103 104 public: 105 // public for testing/fuzzer 106 struct ClientData { 107 DataSourceInstanceID data_source_instance_id; 108 std::unique_ptr<base::UnixSocket> sock; 109 UnwindingMetadata metadata; 110 SharedRingBuffer shmem; 111 ClientConfiguration client_config; 112 bool stream_allocations; 113 std::vector<FreeRecord> free_records; 114 }; 115 116 // public for testing/fuzzing 117 static void HandleBuffer(UnwindingWorker* self, 118 AllocRecordArena* alloc_record_arena, 119 const SharedRingBuffer::Buffer& buf, 120 ClientData* client_data, 121 pid_t peer_pid, 122 Delegate* delegate); 123 124 private: 125 void HandleHandoffSocket(HandoffData data); 126 void HandleDisconnectSocket(pid_t pid); 127 std::unique_ptr<AllocRecord> BorrowAllocRecord(); 128 129 enum class ReadAndUnwindBatchResult { 130 kHasMore, 131 kReadSome, 132 kReadNone, 133 }; 134 ReadAndUnwindBatchResult ReadAndUnwindBatch(ClientData* client_data); 135 void BatchUnwindJob(pid_t); 136 137 AllocRecordArena alloc_record_arena_; 138 std::map<pid_t, ClientData> client_data_; 139 Delegate* delegate_; 140 141 // Task runner with a dedicated thread. Keep last as instances this class are 142 // currently (incorrectly) being destroyed on the main thread, instead of the 143 // task thread. By destroying this task runner first, we ensure that the 144 // UnwindingWorker is not active while the rest of its state is being 145 // destroyed. Additionally this ensures that the destructing thread sees a 146 // consistent view of the memory due to the ThreadTaskRunner's destructor 147 // joining a thread. 148 // 149 // Additionally, keep the destructor defaulted, as its body would still race 150 // against an active task thread. 151 // 152 // TODO(rsavitski): make the task thread own the object's lifetime (likely by 153 // refactoring base::ThreadTaskRunner). 154 base::ThreadTaskRunner thread_task_runner_; 155 }; 156 157 } // namespace profiling 158 } // namespace perfetto 159 160 #endif // SRC_PROFILING_MEMORY_UNWINDING_H_ 161