/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define ATRACE_TAG ATRACE_TAG_ALWAYS
#include "event_fd.h"

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <atomic>
#include <memory>
#include <cutils/trace.h>
#include <utils/Trace.h>

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/stringprintf.h>

#include "environment.h"
#include "event_attr.h"
#include "event_type.h"
#include "perf_event.h"
#include "utils.h"

static int perf_event_open(const perf_event_attr& attr, pid_t pid, int cpu,
                           int group_fd, unsigned long flags) {  // NOLINT
  return syscall(__NR_perf_event_open, &attr, pid, cpu, group_fd, flags);
}

std::unique_ptr<EventFd> EventFd::OpenEventFile(const perf_event_attr& attr, pid_t tid, int cpu,
                                                EventFd* group_event_fd,
                                                const std::string& event_name, bool report_error) {
  int group_fd = -1;
  if (group_event_fd != nullptr) {
    group_fd = group_event_fd->perf_event_fd_;
  }
  perf_event_attr real_attr = attr;
  if (attr.freq) {
    uint64_t max_sample_freq;
    if (GetMaxSampleFrequency(&max_sample_freq) && max_sample_freq < attr.sample_freq) {
      static bool warned = false;
      if (!warned) {
        warned = true;
        LOG(INFO) << "Adjust sample freq to max allowed sample freq " << max_sample_freq;
      }
      real_attr.sample_freq = max_sample_freq;
    }
  }
  int perf_event_fd = perf_event_open(real_attr, tid, cpu, group_fd, 0);
  if (perf_event_fd == -1) {
    if (report_error) {
      PLOG(ERROR) << "open perf_event_file (event " << event_name << ", tid "
                  << tid << ", cpu " << cpu << ", group_fd " << group_fd
                  << ") failed";
    } else {
      PLOG(DEBUG) << "open perf_event_file (event " << event_name << ", tid "
                  << tid << ", cpu " << cpu << ", group_fd " << group_fd
                  << ") failed";
    }
    return nullptr;
  }
  if (fcntl(perf_event_fd, F_SETFD, FD_CLOEXEC) == -1) {
    if (report_error) {
      PLOG(ERROR) << "fcntl(FD_CLOEXEC) for perf_event_file (event "
                  << event_name << ", tid " << tid << ", cpu " << cpu
                  << ", group_fd " << group_fd << ") failed";
    } else {
      PLOG(DEBUG) << "fcntl(FD_CLOEXEC) for perf_event_file (event "
                  << event_name << ", tid " << tid << ", cpu " << cpu
                  << ", group_fd " << group_fd << ") failed";
    }
    return nullptr;
  }
  return std::unique_ptr<EventFd>(
      new EventFd(real_attr, perf_event_fd, event_name, tid, cpu));
}
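
// A minimal usage sketch of OpenEventFile() (illustrative only, not code used
// by simpleperf itself): count cpu-cycles for the calling thread on any cpu.
// The attr setup below follows the perf_event_open(2) ABI and is an
// assumption, not something defined in this file.
//
//   perf_event_attr attr = {};
//   attr.size = sizeof(attr);
//   attr.type = PERF_TYPE_HARDWARE;
//   attr.config = PERF_COUNT_HW_CPU_CYCLES;
//   attr.disabled = 1;
//   std::unique_ptr<EventFd> event_fd =
//       EventFd::OpenEventFile(attr, getpid(), -1, nullptr, "cpu-cycles", true);
//   if (event_fd != nullptr) {
//     event_fd->SetEnableEvent(true);
//   }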

EventFd::~EventFd() {
  DestroyMappedBuffer();
  DestroyAuxBuffer();
  close(perf_event_fd_);
}

std::string EventFd::Name() const {
  return android::base::StringPrintf(
      "perf_event_file(event %s, tid %d, cpu %d)", event_name_.c_str(), tid_,
      cpu_);
}

uint64_t EventFd::Id() const {
  if (id_ == 0) {
    PerfCounter counter;
    if (InnerReadCounter(&counter)) {
      id_ = counter.id;
    }
  }
  return id_;
}

bool EventFd::SetEnableEvent(bool enable) {
  int result = ioctl(perf_event_fd_, enable ? PERF_EVENT_IOC_ENABLE : PERF_EVENT_IOC_DISABLE, 0);
  if (result < 0) {
    PLOG(ERROR) << "ioctl(" << (enable ? "enable" : "disable") << ") " << Name() << " failed";
    return false;
  }
  return true;
}

bool EventFd::SetFilter(const std::string& filter) {
  bool success = ioctl(perf_event_fd_, PERF_EVENT_IOC_SET_FILTER, filter.c_str()) >= 0;
  if (!success) {
    PLOG(ERROR) << "failed to set filter";
  }
  return success;
}

bool EventFd::InnerReadCounter(PerfCounter* counter) const {
  CHECK(counter != nullptr);
  if (!android::base::ReadFully(perf_event_fd_, counter, sizeof(*counter))) {
    PLOG(ERROR) << "ReadCounter from " << Name() << " failed";
    return false;
  }
  return true;
}

bool EventFd::ReadCounter(PerfCounter* counter) {
  if (!InnerReadCounter(counter)) {
    return false;
  }
  // Report the delta since the last read to systrace (atrace), which picks it
  // up whenever tracing is enabled.
  if (tid_ > 0) {
    ATRACE_INT64(android::base::StringPrintf(
                     "%s_tid%d_cpu%d", event_name_.c_str(), tid_,
                     cpu_).c_str(), counter->value - last_counter_value_);
  } else {
    ATRACE_INT64(android::base::StringPrintf(
                     "%s_cpu%d", event_name_.c_str(),
                     cpu_).c_str(), counter->value - last_counter_value_);
  }
  last_counter_value_ = counter->value;
  return true;
}

bool EventFd::CreateMappedBuffer(size_t mmap_pages, bool report_error) {
  CHECK(IsPowerOfTwo(mmap_pages));
  size_t page_size = sysconf(_SC_PAGE_SIZE);
  size_t mmap_len = (mmap_pages + 1) * page_size;
  void* mmap_addr = mmap(nullptr, mmap_len, PROT_READ | PROT_WRITE, MAP_SHARED,
                         perf_event_fd_, 0);
  if (mmap_addr == MAP_FAILED) {
    bool is_perm_error = (errno == EPERM);
    if (report_error) {
      PLOG(ERROR) << "mmap(" << mmap_pages << ") failed for " << Name();
    } else {
      PLOG(DEBUG) << "mmap(" << mmap_pages << ") failed for " << Name();
    }
    if (report_error && is_perm_error) {
      LOG(ERROR)
          << "It seems the kernel doesn't allow allocating enough "
          << "buffer for dumping samples, consider decreasing mmap pages(-m).";
    }
    return false;
  }
  // The first page is the metadata page (perf_event_mmap_page); the remaining
  // mmap_pages pages form the data ring buffer.
  mmap_addr_ = mmap_addr;
  mmap_len_ = mmap_len;
  mmap_metadata_page_ = reinterpret_cast<perf_event_mmap_page*>(mmap_addr_);
  mmap_data_buffer_ = reinterpret_cast<char*>(mmap_addr_) + page_size;
  mmap_data_buffer_size_ = mmap_len_ - page_size;
  return true;
}

bool EventFd::ShareMappedBuffer(const EventFd& event_fd, bool report_error) {
  CHECK(!HasMappedBuffer());
  CHECK(event_fd.HasMappedBuffer());
  int result =
      ioctl(perf_event_fd_, PERF_EVENT_IOC_SET_OUTPUT, event_fd.perf_event_fd_);
  if (result != 0) {
    if (report_error) {
      PLOG(ERROR) << "failed to share mapped buffer of "
                  << event_fd.perf_event_fd_ << " with " << perf_event_fd_;
    }
    return false;
  }
  return true;
}

void EventFd::DestroyMappedBuffer() {
  if (HasMappedBuffer()) {
    munmap(mmap_addr_, mmap_len_);
    mmap_addr_ = nullptr;
    mmap_len_ = 0;
    mmap_metadata_page_ = nullptr;
    mmap_data_buffer_ = nullptr;
    mmap_data_buffer_size_ = 0;
  }
}
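
// A sketch of how the ring-buffer methods below are typically driven
// (illustrative only; the polling loop and record parsing shown here are
// assumptions, not code from this file):
//
//   event_fd->CreateMappedBuffer(16 /* pages, must be a power of two */, true);
//   event_fd->StartPolling(loop, [&]() {
//     std::vector<char> data = event_fd->GetAvailableMmapData();
//     // `data` holds zero or more raw perf records, already linearized across
//     // any wrap of the ring buffer, and the read position has been advanced.
//     return ParseRecords(data);  // hypothetical record parser
//   });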

std::vector<char> EventFd::GetAvailableMmapData() {
  size_t data_pos;
  size_t data_size = GetAvailableMmapDataSize(data_pos);
  std::vector<char> data(data_size);
  if (data_size > 0) {
    // Copy the readable region out of the ring buffer, handling a possible
    // wrap around the end of the buffer, then advance the read position.
    size_t copy_size = std::min(data_size, mmap_data_buffer_size_ - data_pos);
    memcpy(&data[0], mmap_data_buffer_ + data_pos, copy_size);
    if (copy_size < data_size) {
      memcpy(&data[copy_size], mmap_data_buffer_, data_size - copy_size);
    }
    DiscardMmapData(data_size);
  }
  return data;
}

size_t EventFd::GetAvailableMmapDataSize(size_t& data_pos) {
  // The mmap_data_buffer is used as a ring buffer between the kernel and
  // simpleperf. The kernel continuously writes records into the buffer, and
  // simpleperf continuously reads records out.
  //
  //        __________________________________________
  // buffer | can write | can read    | can write    |
  //                    ^             ^
  //                    read_head     write_head
  //
  // So simpleperf can read records in [read_head, write_head), and the kernel
  // can write records in [write_head, read_head) (wrapping around the end of
  // the buffer). The kernel is responsible for updating write_head, and
  // simpleperf is responsible for updating read_head.

  uint64_t write_head = mmap_metadata_page_->data_head;
  uint64_t read_head = mmap_metadata_page_->data_tail;
  // The kernel may decrease data_head temporarily (http://b/132446871), making
  // write_head < read_head. So check it to avoid available data size underflow.
  if (write_head <= read_head) {
    // No available data.
    return 0;
  }
  // rmb() used to ensure reading data after reading data_head.
  __sync_synchronize();
  data_pos = read_head & (mmap_data_buffer_size_ - 1);
  return write_head - read_head;
}

void EventFd::DiscardMmapData(size_t discard_size) {
  // mb() used to ensure finish reading data before writing data_tail.
  __sync_synchronize();
  mmap_metadata_page_->data_tail += discard_size;
}

bool EventFd::CreateAuxBuffer(size_t aux_buffer_size, bool report_error) {
  CHECK(HasMappedBuffer());
  CHECK(IsPowerOfTwo(aux_buffer_size));
  mmap_metadata_page_->aux_offset = mmap_len_;
  mmap_metadata_page_->aux_size = aux_buffer_size;
  mmap_metadata_page_->aux_head = 0;
  mmap_metadata_page_->aux_tail = 0;
  void* mmap_addr = mmap(nullptr, aux_buffer_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                         perf_event_fd_, mmap_metadata_page_->aux_offset);
  if (mmap_addr == MAP_FAILED) {
    if (report_error) {
      PLOG(ERROR) << "failed to mmap aux buffer of size " << aux_buffer_size << " for " << Name();
    } else {
      PLOG(DEBUG) << "failed to mmap aux buffer of size " << aux_buffer_size << " for " << Name();
    }
    return false;
  }
  aux_buffer_ = static_cast<char*>(mmap_addr);
  aux_buffer_size_ = aux_buffer_size;
  return true;
}

void EventFd::DestroyAuxBuffer() {
  if (HasAuxBuffer()) {
    munmap(aux_buffer_, aux_buffer_size_);
    aux_buffer_ = nullptr;
    aux_buffer_size_ = 0;
  }
}
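
// Return the aux data currently available for reading. Since the aux buffer is
// a ring buffer, the readable region may wrap around its end, so it is handed
// back as up to two chunks: [*buf1, *buf1 + *size1) followed by
// [*buf2, *buf2 + *size2) (*size2 is 0 when no wrap occurred). The return value
// is the aux_tail offset the data starts at; after copying the data out, the
// caller is expected to release it with DiscardAuxData().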

uint64_t EventFd::GetAvailableAuxData(char** buf1, size_t* size1, char** buf2, size_t* size2) {
  // The aux buffer is a ring buffer similar to mmap_data_buffer_. See comments
  // in GetAvailableMmapDataSize().
  uint64_t write_head = mmap_metadata_page_->aux_head;
  uint64_t read_head = mmap_metadata_page_->aux_tail;
  if (write_head <= read_head) {
    *size1 = *size2 = 0;
    return 0;  // No available data.
  }
  // rmb() used to ensure reading data after reading aux_head.
  __sync_synchronize();
  size_t data_pos = read_head & (aux_buffer_size_ - 1);
  size_t data_size = write_head - read_head;
  *buf1 = aux_buffer_ + data_pos;
  if (data_size <= aux_buffer_size_ - data_pos) {
    *size1 = data_size;
    *size2 = 0;
  } else {
    *size1 = aux_buffer_size_ - data_pos;
    *buf2 = aux_buffer_;
    *size2 = data_size - *size1;
  }
  return read_head;
}

void EventFd::DiscardAuxData(size_t discard_size) {
  // mb() used to ensure finish reading data before writing aux_tail.
  __sync_synchronize();
  mmap_metadata_page_->aux_tail += discard_size;
}

bool EventFd::StartPolling(IOEventLoop& loop,
                           const std::function<bool()>& callback) {
  ioevent_ref_ = loop.AddReadEvent(perf_event_fd_, callback);
  return ioevent_ref_ != nullptr;
}

bool EventFd::StopPolling() { return IOEventLoop::DelEvent(ioevent_ref_); }

bool IsEventAttrSupported(const perf_event_attr& attr, const std::string& event_name) {
  // Probe support by trying to open the event for the current process on any
  // cpu, without reporting errors.
  return EventFd::OpenEventFile(attr, getpid(), -1, nullptr, event_name, false) != nullptr;
}
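
// Putting the pieces together, a typical EventFd lifecycle driven by the rest
// of simpleperf looks roughly like this (a sketch for orientation only, not a
// call sequence taken from this file):
//
//   IsEventAttrSupported(attr, name);        // probe that the kernel accepts attr
//   auto event_fd = EventFd::OpenEventFile(attr, tid, cpu, nullptr, name, true);
//   event_fd->CreateMappedBuffer(mmap_pages, true);
//   event_fd->StartPolling(loop, callback);  // callback drains GetAvailableMmapData()
//   ...
//   event_fd->StopPolling();                 // buffers and the fd are released on destruction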