/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/profiling/memory/shared_ring_buffer.h"

#include <atomic>
#include <type_traits>

#include <errno.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#include "perfetto/base/build_config.h"
#include "perfetto/base/scoped_file.h"
#include "perfetto/base/temp_file.h"
#include "src/profiling/memory/scoped_spinlock.h"

#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
#include <linux/memfd.h>
#include <sys/syscall.h>
#endif

namespace perfetto {
namespace profiling {

namespace {

constexpr auto kMetaPageSize = base::kPageSize;
constexpr auto kAlignment = 8;  // 64 bits to use aligned memcpy().
constexpr auto kHeaderSize = kAlignment;
constexpr auto kGuardSize = base::kPageSize * 1024 * 16;  // 64 MB.
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
constexpr auto kFDSeals = F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL;
#endif

}  // namespace

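// Creates the shared memory region backing the ring buffer. On Android this
// uses a memfd, so the fd can be sealed against shrinking or growing before
// being shared; elsewhere (or if memfd_create() is unavailable) it falls back
// to an unlinked temporary file.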
SharedRingBuffer::SharedRingBuffer(CreateFlag, size_t size) {
  size_t size_with_meta = size + kMetaPageSize;
  base::ScopedFile fd;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  bool is_memfd = false;
  fd.reset(static_cast<int>(syscall(__NR_memfd_create, "heapprofd_ringbuf",
                                    MFD_CLOEXEC | MFD_ALLOW_SEALING)));
  is_memfd = !!fd;

  if (!fd) {
#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
    // In-tree builds should only allow mem_fd, so we can inspect the seals
    // to verify the fd is appropriately sealed.
    PERFETTO_ELOG("memfd_create() failed");
    return;
#else
    PERFETTO_DPLOG("memfd_create() failed");
#endif
  }
#endif

  if (!fd)
    fd = base::TempFile::CreateUnlinked().ReleaseFD();

  PERFETTO_CHECK(fd);
  int res = ftruncate(fd.get(), static_cast<off_t>(size_with_meta));
  PERFETTO_CHECK(res == 0);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  if (is_memfd) {
    res = fcntl(*fd, F_ADD_SEALS, kFDSeals);
    if (res != 0) {
      PERFETTO_PLOG("Failed to seal FD.");
      return;
    }
  }
#endif
  Initialize(std::move(fd));
  if (!is_valid())
    return;

  new (meta_) MetadataPage();
}

SharedRingBuffer::~SharedRingBuffer() {
  static_assert(std::is_trivially_constructible<MetadataPage>::value,
                "MetadataPage must be trivially constructible");
  static_assert(std::is_trivially_destructible<MetadataPage>::value,
                "MetadataPage must be trivially destructible");

  if (is_valid()) {
    size_t outer_size = kMetaPageSize + size_ * 2 + kGuardSize;
    munmap(meta_, outer_size);
  }
}

void SharedRingBuffer::Initialize(base::ScopedFile mem_fd) {
#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
  int seals = fcntl(*mem_fd, F_GET_SEALS);
  if (seals == -1) {
    PERFETTO_PLOG("Failed to get seals of FD.");
    return;
  }
  if ((seals & kFDSeals) != kFDSeals) {
    PERFETTO_ELOG("FD not properly sealed. Expected %x, got %x", kFDSeals,
                  seals);
    return;
  }
#endif

  struct stat stat_buf = {};
  int res = fstat(*mem_fd, &stat_buf);
  if (res != 0 || stat_buf.st_size == 0) {
    PERFETTO_PLOG("Could not attach to fd.");
    return;
  }
  auto size_with_meta = static_cast<size_t>(stat_buf.st_size);
  auto size = size_with_meta - kMetaPageSize;

  // |size_with_meta| must be a power-of-two number of pages + 1 page (for
  // metadata).
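  // Note: (size & (size - 1)) is non-zero iff |size| is not a power of two.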
  if (size_with_meta < 2 * base::kPageSize || size % base::kPageSize ||
      (size & (size - 1))) {
    PERFETTO_ELOG("SharedRingBuffer size is invalid (%zu)", size_with_meta);
    return;
  }

  // First of all, reserve the whole virtual region to fit the buffer twice
  // + metadata page + red zone at the end.
  size_t outer_size = kMetaPageSize + size * 2 + kGuardSize;
  uint8_t* region = reinterpret_cast<uint8_t*>(
      mmap(nullptr, outer_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (region == MAP_FAILED) {
    PERFETTO_PLOG("mmap(PROT_NONE) failed");
    return;
  }

  // Map the whole buffer (including the initial metadata page) first, at
  // offset 0.
  void* reg1 = mmap(region, size_with_meta, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_FIXED, *mem_fd, 0);

  // Then map the buffer again, skipping the metadata page. The final result
  // is:
  // [ METADATA ] [ RING BUFFER SHMEM ] [ RING BUFFER SHMEM ]
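  // Mapping the buffer twice back-to-back means a record that starts near the
  // end of the buffer wraps transparently: accesses that run past the first
  // mapping land in the second one, which aliases the same file pages, so a
  // single contiguous memcpy() always suffices.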
  void* reg2 = mmap(region + size_with_meta, size, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_FIXED, *mem_fd,
                    /*offset=*/kMetaPageSize);

  if (reg1 != region || reg2 != region + size_with_meta) {
    PERFETTO_PLOG("mmap(MAP_SHARED) failed");
    munmap(region, outer_size);
    return;
  }
  size_ = size;
  meta_ = reinterpret_cast<MetadataPage*>(region);
  mem_ = region + kMetaPageSize;
  mem_fd_ = std::move(mem_fd);
}

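// Record layout in the buffer: an 8-byte (kAlignment) header whose first four
// bytes hold the payload size as a uint32_t, followed by the payload, with
// the total padded up to a multiple of kAlignment. BeginWrite() stores a zero
// header to reserve the slot; EndWrite() publishes the record by writing the
// real size.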
SharedRingBuffer::Buffer SharedRingBuffer::BeginWrite(
    const ScopedSpinlock& spinlock,
    size_t size) {
  PERFETTO_DCHECK(spinlock.locked());
  Buffer result;

  base::Optional<PointerPositions> opt_pos = GetPointerPositions(spinlock);
  if (!opt_pos) {
    meta_->stats.num_writes_corrupt++;
    errno = EBADF;
    return result;
  }
  auto pos = opt_pos.value();

  const uint64_t size_with_header =
      base::AlignUp<kAlignment>(size + kHeaderSize);

  // |size_with_header| < |size| catches overflow of |size_with_header|.
  if (PERFETTO_UNLIKELY(size_with_header < size)) {
    errno = EINVAL;
    return result;
  }

  if (size_with_header > write_avail(pos)) {
    meta_->stats.num_writes_overflow++;
    errno = EAGAIN;
    return result;
  }

  uint8_t* wr_ptr = at(pos.write_pos);

  result.size = size;
  result.data = wr_ptr + kHeaderSize;
  meta_->write_pos += size_with_header;
  meta_->stats.bytes_written += size;
  meta_->stats.num_writes_succeeded++;
  // By making this a release store, we can save grabbing the spinlock in
  // EndWrite.
  reinterpret_cast<std::atomic<uint32_t>*>(wr_ptr)->store(
      0, std::memory_order_release);
  return result;
}

void SharedRingBuffer::EndWrite(Buffer buf) {
  if (!buf)
    return;
  uint8_t* wr_ptr = buf.data - kHeaderSize;
  PERFETTO_DCHECK(reinterpret_cast<uintptr_t>(wr_ptr) % kAlignment == 0);
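  // Publish the record: this release store of the real size pairs with the
  // acquire load in BeginRead(), making the payload visible to the reader.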
  reinterpret_cast<std::atomic<uint32_t>*>(wr_ptr)->store(
      static_cast<uint32_t>(buf.size), std::memory_order_release);
}

SharedRingBuffer::Buffer SharedRingBuffer::BeginRead() {
  ScopedSpinlock spinlock(&meta_->spinlock, ScopedSpinlock::Mode::Blocking);

  base::Optional<PointerPositions> opt_pos = GetPointerPositions(spinlock);
  if (!opt_pos) {
    meta_->stats.num_reads_corrupt++;
    errno = EBADF;
    return Buffer();
  }
  auto pos = opt_pos.value();

  size_t avail_read = read_avail(pos);

  if (avail_read < kHeaderSize) {
    meta_->stats.num_reads_nodata++;
    errno = EAGAIN;
    return Buffer();  // No data.
  }

  uint8_t* rd_ptr = at(pos.read_pos);
  PERFETTO_DCHECK(reinterpret_cast<uintptr_t>(rd_ptr) % kAlignment == 0);
  const size_t size = reinterpret_cast<std::atomic<uint32_t>*>(rd_ptr)->load(
      std::memory_order_acquire);
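  // A zero header means BeginWrite() has reserved this slot but the matching
  // EndWrite() has not published the record yet.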
  if (size == 0) {
    meta_->stats.num_reads_nodata++;
    errno = EAGAIN;
    return Buffer();
  }
  const size_t size_with_header = base::AlignUp<kAlignment>(size + kHeaderSize);

  if (size_with_header > avail_read) {
    PERFETTO_ELOG(
        "Corrupted header detected, size=%zu"
        ", read_avail=%zu, rd=%" PRIu64 ", wr=%" PRIu64,
        size, avail_read, pos.read_pos, pos.write_pos);
    meta_->stats.num_reads_corrupt++;
    errno = EBADF;
    return Buffer();
  }

  rd_ptr += kHeaderSize;
  PERFETTO_DCHECK(reinterpret_cast<uintptr_t>(rd_ptr) % kAlignment == 0);
  return Buffer(rd_ptr, size);
}

void SharedRingBuffer::EndRead(Buffer buf) {
  if (!buf)
    return;
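  // Advance read_pos only now that the caller is done with the payload, so
  // the writer cannot reclaim the space while it is still being read.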
  ScopedSpinlock spinlock(&meta_->spinlock, ScopedSpinlock::Mode::Blocking);
  size_t size_with_header = base::AlignUp<kAlignment>(buf.size + kHeaderSize);
  meta_->read_pos += size_with_header;
  meta_->stats.num_reads_succeeded++;
}

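// The pointer positions live in shared memory and may be written by another,
// potentially misbehaving, process, so they are validated rather than
// trusted: they are free-running byte offsets that must satisfy
// read_pos <= write_pos <= read_pos + size_ and be kAlignment-aligned.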
bool SharedRingBuffer::IsCorrupt(const PointerPositions& pos) {
  if (pos.write_pos < pos.read_pos || pos.write_pos - pos.read_pos > size_ ||
      pos.write_pos % kAlignment || pos.read_pos % kAlignment) {
    PERFETTO_ELOG("Ring buffer corrupted, rd=%" PRIu64 ", wr=%" PRIu64
                  ", size=%zu",
                  pos.read_pos, pos.write_pos, size_);
    return true;
  }
  return false;
}

SharedRingBuffer::SharedRingBuffer(SharedRingBuffer&& other) noexcept {
  *this = std::move(other);
}

SharedRingBuffer& SharedRingBuffer::operator=(SharedRingBuffer&& other) {
  mem_fd_ = std::move(other.mem_fd_);
  std::tie(meta_, mem_, size_) = std::tie(other.meta_, other.mem_, other.size_);
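  // Reset |other| so that its destructor sees !is_valid() and does not unmap
  // the region this instance now owns.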
  std::tie(other.meta_, other.mem_, other.size_) =
      std::make_tuple(nullptr, nullptr, 0);
  return *this;
}

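// Example usage (sketch; the fd-transfer mechanism, e.g. a unix socket, is
// elided): the creating process constructs the buffer and sends its file
// descriptor to the peer, which attaches to the same memory:
//
//   auto writer = SharedRingBuffer::Create(8 * base::kPageSize);
//   PERFETTO_CHECK(writer.has_value());
//   // ... send the underlying memfd to the peer, which then does:
//   // auto reader = SharedRingBuffer::Attach(std::move(received_fd));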
// static
base::Optional<SharedRingBuffer> SharedRingBuffer::Create(size_t size) {
  auto buf = SharedRingBuffer(CreateFlag(), size);
  if (!buf.is_valid())
    return base::nullopt;
  return base::make_optional(std::move(buf));
}

// static
base::Optional<SharedRingBuffer> SharedRingBuffer::Attach(
    base::ScopedFile mem_fd) {
  auto buf = SharedRingBuffer(AttachFlag(), std::move(mem_fd));
  if (!buf.is_valid())
    return base::nullopt;
  return base::make_optional(std::move(buf));
}

}  // namespace profiling
}  // namespace perfetto