// Copyright 2020 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "host-common/address_space_shared_slots_host_memory_allocator.h"
#include "host-common/address_space_device.hpp"
#include "host-common/vm_operations.h"
#include "host-common/crash-handler.h"
#include "host-common/crash_reporter.h"
#include "aemu/base/AlignedBuf.h"
#include "aemu/base/synchronization/Lock.h"
#include <algorithm>      // std::max
#include <iterator>       // std::next, std::prev
#include <map>
#include <unordered_set>
#include <unordered_map>
#include <utility>

namespace android {
namespace emulation {
namespace {
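// Rounds |value| up to the next multiple of |alignment|; |alignment| must be
// a power of two (the mask trick below relies on that).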
size_t align(size_t value, size_t alignment) {
    return (value + alignment - 1) & (~(alignment - 1));
}

typedef AddressSpaceSharedSlotsHostMemoryAllocatorContext ASSSHMAC;
typedef ASSSHMAC::MemBlock MemBlock;
typedef MemBlock::FreeSubblocks_t FreeSubblocks_t;

using base::AutoLock;
using base::Lock;

#if defined(__APPLE__) && defined(__arm64__)
constexpr uint32_t kAllocAlignment = 16384;
#else
constexpr uint32_t kAllocAlignment = 4096;
#endif

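// Reserves a |size|-byte shared host region from the address space device and
// returns its guest physical address, or 0 on failure.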
uint64_t allocateAddressSpaceBlock(const AddressSpaceHwFuncs* hw, uint32_t size) {
    uint64_t offset;
    if (hw->allocSharedHostRegionLocked(size, &offset)) {
        return 0;
    } else {
        return hw->getPhysAddrStartLocked() + offset;
    }
}

uint64_t allocateAddressSpaceBlockFixed(uint64_t gpa, const AddressSpaceHwFuncs* hw, uint32_t size) {
    uint64_t offset = gpa - hw->getPhysAddrStartLocked();
    if (hw->allocSharedHostRegionFixedLocked(size, offset)) {
        // Note: even if we do not succeed in allocSharedHostRegionFixedLocked,
        // assume this is because we're doing a snapshot load, and the VMSTATE
        // description of memory slots in hw/pci/goldfish_address_space.c
        // already contains the entry we wanted. TODO: Consider always
        // allowing allocSharedHostRegionFixedLocked to succeed if it encounters
        // an unavailable block at the same offset and size, and/or add a
        // "forSnapshotLoad" flag to allocSharedHostRegionFixedLocked in order
        // to specifically account for this case.
        return hw->getPhysAddrStartLocked() + offset;
    } else {
        return hw->getPhysAddrStartLocked() + offset;
    }
}

int freeAddressBlock(const AddressSpaceHwFuncs* hw, uint64_t phys) {
    const uint64_t start = hw->getPhysAddrStartLocked();
    if (phys < start) { return -1; }
    return hw->freeSharedHostRegionLocked(phys - start);
}

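// All allocated MemBlocks, keyed by their guest physical base address. The
// blocks are shared by every context of this device type and are guarded by
// g_blocksLock.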
std::map<uint64_t, MemBlock> g_blocks;
Lock g_blocksLock;

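// After a snapshot load, the block that lived at physBaseLoaded may have been
// recreated at a different physBase. Translates a saved physical address into
// its current location and returns the owning block, or {0, nullptr} if the
// address does not fall into any known block.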
std::pair<uint64_t, MemBlock*> translatePhysAddr(uint64_t p) {
    for (auto& kv: g_blocks) {
        MemBlock& block = kv.second;
        if (p >= block.physBaseLoaded && p < block.physBaseLoaded + block.bitsSize) {
            return {block.physBase + (p - block.physBaseLoaded), &block};
        }
    }

    return {0, nullptr};
}
} // namespace

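// A MemBlock is one contiguous chunk of host memory that is mapped into the
// guest physical address space and handed out piecewise as subblocks. The
// constructor allocates page-aligned host memory, reserves a shared region
// with the address space device, maps the memory there, and seeds the free
// list with a single subblock covering the whole block.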
MemBlock::MemBlock(const address_space_device_control_ops* o, const AddressSpaceHwFuncs* h, uint32_t sz)
    : ops(o), hw(h) {
    bits = android::aligned_buf_alloc(kAllocAlignment, sz);
    bitsSize = sz;
    physBase = allocateAddressSpaceBlock(hw, sz);
    if (!physBase) {
        crashhandler_die("%s:%d: allocateAddressSpaceBlock", __func__, __LINE__);
    }
    physBaseLoaded = 0;
    if (!ops->add_memory_mapping(physBase, bits, bitsSize)) {
        crashhandler_die("%s:%d: add_memory_mapping", __func__, __LINE__);
    }

    if (!freeSubblocks.insert({0, sz}).second) {
        crashhandler_die("%s:%d: freeSubblocks.insert", __func__, __LINE__);
    }
}

MemBlock::MemBlock(MemBlock&& rhs)
    : ops(std::exchange(rhs.ops, nullptr)),
      hw(std::exchange(rhs.hw, nullptr)),
      physBase(std::exchange(rhs.physBase, 0)),
      physBaseLoaded(std::exchange(rhs.physBaseLoaded, 0)),
      bits(std::exchange(rhs.bits, nullptr)),
      bitsSize(std::exchange(rhs.bitsSize, 0)),
      freeSubblocks(std::move(rhs.freeSubblocks)) {
}

MemBlock& MemBlock::operator=(MemBlock rhs) {
    swap(*this, rhs);
    return *this;
}

MemBlock::~MemBlock() {
    if (physBase) {
        ops->remove_memory_mapping(physBase, bits, bitsSize);
        freeAddressBlock(hw, physBase);
        android::aligned_buf_free(bits);
    }
}

void swap(MemBlock& lhs, MemBlock& rhs) {
    using std::swap;

    // Also swap the device ops/hw pointers so the destructor of the
    // swapped-out block releases its mapping through the right interfaces.
    swap(lhs.ops, rhs.ops);
    swap(lhs.hw, rhs.hw);
    swap(lhs.physBase, rhs.physBase);
    swap(lhs.physBaseLoaded, rhs.physBaseLoaded);
    swap(lhs.bits, rhs.bits);
    swap(lhs.bitsSize, rhs.bitsSize);
    swap(lhs.freeSubblocks, rhs.freeSubblocks);
}


bool MemBlock::isAllFree() const {
    if (freeSubblocks.size() == 1) {
        const auto kv = *freeSubblocks.begin();
        return (kv.first == 0) && (kv.second == bitsSize);
    } else {
        return false;
    }
}

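// Carves |requestedSize| bytes out of this block using the free-subblock map.
// Returns the guest physical address of the new subblock, or 0 if no free
// subblock is large enough; any leftover space is returned to the free list.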
uint64_t MemBlock::allocate(const size_t requestedSize) {
    FreeSubblocks_t::iterator i = findFreeSubblock(&freeSubblocks, requestedSize);
    if (i == freeSubblocks.end()) {
        return 0;
    }

    const uint32_t subblockOffset = i->first;
    const uint32_t subblockSize = i->second;

    freeSubblocks.erase(i);
    if (subblockSize > requestedSize) {
        if (!freeSubblocks.insert({subblockOffset + requestedSize,
                                   subblockSize - requestedSize}).second) {
            crashhandler_die("%s:%d: freeSubblocks.insert", __func__, __LINE__);
        }
    }

    return physBase + subblockOffset;
}

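// Returns the subblock starting at |phys| to the free list and merges it with
// adjacent free subblocks so the block can eventually be reclaimed whole.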
void MemBlock::unallocate(
        uint64_t phys, uint32_t subblockSize) {
    if (phys >= physBase + bitsSize) {
        crashhandler_die("%s:%d: phys >= physBase + bitsSize", __func__, __LINE__);
    }

    auto r = freeSubblocks.insert({phys - physBase, subblockSize});
    if (!r.second) {
        crashhandler_die("%s:%d: freeSubblocks.insert", __func__, __LINE__);
    }

    FreeSubblocks_t::iterator i = r.first;
    if (i != freeSubblocks.begin()) {
        i = tryMergeSubblocks(&freeSubblocks, i, std::prev(i), i);
    }
    FreeSubblocks_t::iterator next = std::next(i);
    if (next != freeSubblocks.end()) {
        i = tryMergeSubblocks(&freeSubblocks, i, i, next);
    }
}

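// Best-fit search: returns the smallest free subblock that can hold |sz|
// bytes, or fsb->end() if none fits.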
FreeSubblocks_t::iterator MemBlock::findFreeSubblock(FreeSubblocks_t* fsb,
                                                     const size_t sz) {
    if (fsb->empty()) {
        return fsb->end();
    } else {
        auto best = fsb->end();
        size_t bestSize = ~size_t(0);

        for (auto i = fsb->begin(); i != fsb->end(); ++i) {
            // Keep the smallest subblock that is still large enough.
            if (i->second >= sz && i->second < bestSize) {
                best = i;
                bestSize = i->second;
            }
        }

        return best;
    }
}

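// If |lhs| and |rhs| are adjacent free subblocks, replaces them with a single
// merged entry and returns an iterator to it; otherwise returns |ret|.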
FreeSubblocks_t::iterator MemBlock::tryMergeSubblocks(
    FreeSubblocks_t* fsb,
    FreeSubblocks_t::iterator ret,
    FreeSubblocks_t::iterator lhs,
    FreeSubblocks_t::iterator rhs) {
    if (lhs->first + lhs->second == rhs->first) {
        const uint32_t subblockOffset = lhs->first;
        const uint32_t subblockSize = lhs->second + rhs->second;

        fsb->erase(lhs);
        fsb->erase(rhs);
        auto r = fsb->insert({subblockOffset, subblockSize});
        if (!r.second) {
            crashhandler_die("%s:%d: fsb->insert", __func__, __LINE__);
        }

        return r.first;
    } else {
        return ret;
    }
}

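// Snapshot format: physBase (Be64), bitsSize (Be32), the raw contents of the
// block, then the free-subblock count followed by (offset, size) pairs.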
void MemBlock::save(base::Stream* stream) const {
    stream->putBe64(physBase);
    stream->putBe32(bitsSize);
    stream->write(bits, bitsSize);
    stream->putBe32(freeSubblocks.size());
    for (const auto& kv: freeSubblocks) {
        stream->putBe32(kv.first);
        stream->putBe32(kv.second);
    }
}

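// Rebuilds a MemBlock from a snapshot stream: reallocates host memory, asks
// the address space device for the same guest physical range the block had
// when it was saved, remaps it, and restores the free-subblock list. Returns
// false on any allocation or read failure.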
bool MemBlock::load(base::Stream* stream,
                    const address_space_device_control_ops* ops,
                    const AddressSpaceHwFuncs* hw,
                    MemBlock* block) {
    const uint64_t physBaseLoaded = stream->getBe64();
    const uint32_t bitsSize = stream->getBe32();
    void* const bits = android::aligned_buf_alloc(kAllocAlignment, bitsSize);
    if (!bits) {
        return false;
    }
    if (stream->read(bits, bitsSize) != static_cast<ssize_t>(bitsSize)) {
        android::aligned_buf_free(bits);
        return false;
    }
    const uint64_t physBase = allocateAddressSpaceBlockFixed(physBaseLoaded, hw, bitsSize);
    if (!physBase) {
        android::aligned_buf_free(bits);
        return false;
    }
    if (!ops->add_memory_mapping(physBase, bits, bitsSize)) {
        freeAddressBlock(hw, physBase);
        android::aligned_buf_free(bits);
        return false;
    }

    FreeSubblocks_t freeSubblocks;
    for (uint32_t freeSubblocksSize = stream->getBe32();
         freeSubblocksSize > 0;
         --freeSubblocksSize) {
        const uint32_t off = stream->getBe32();
        const uint32_t sz = stream->getBe32();
        if (!freeSubblocks.insert({off, sz}).second) {
            crashhandler_die("%s:%d: freeSubblocks.insert", __func__, __LINE__);
        }
    }

    block->hw = hw;
    block->ops = ops;
    block->physBase = physBase;
    block->physBaseLoaded = physBaseLoaded;
    block->bits = bits;
    block->bitsSize = bitsSize;
    block->freeSubblocks = std::move(freeSubblocks);

    return true;
}

AddressSpaceSharedSlotsHostMemoryAllocatorContext::AddressSpaceSharedSlotsHostMemoryAllocatorContext(
    const address_space_device_control_ops *ops, const AddressSpaceHwFuncs* hw)
  : m_ops(ops),
    m_hw(hw) {}

AddressSpaceSharedSlotsHostMemoryAllocatorContext::~AddressSpaceSharedSlotsHostMemoryAllocatorContext() {
    clear();
}

void AddressSpaceSharedSlotsHostMemoryAllocatorContext::perform(AddressSpaceDevicePingInfo *info) {
    uint64_t result;

    switch (static_cast<HostMemoryAllocatorCommand>(info->metadata)) {
    case HostMemoryAllocatorCommand::Allocate:
        result = allocate(info);
        break;

    case HostMemoryAllocatorCommand::Unallocate:
        result = unallocate(info->phys_addr);
        break;

    case HostMemoryAllocatorCommand::CheckIfSharedSlotsSupported:
        result = 0;
        break;

    default:
        result = -1;
        break;
    }

    info->metadata = result;
}

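// Guest-facing allocate: rounds the requested size up to the guest page size,
// tries to carve the allocation out of an existing shared block, and only
// creates a new block (at least 64 MiB) when none has room. Returns 0 on
// success (with the result written back into |info|) or -1 on failure.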
uint64_t
AddressSpaceSharedSlotsHostMemoryAllocatorContext::allocate(
    AddressSpaceDevicePingInfo *info) {
    const uint32_t alignedSize = align(info->size, (*m_hw->getGuestPageSize)());

    AutoLock lock(g_blocksLock);
    for (auto& kv : g_blocks) {
        uint64_t physAddr = kv.second.allocate(alignedSize);
        if (physAddr) {
            return populatePhysAddr(info, physAddr, alignedSize, &kv.second);
        }
    }

    const uint32_t defaultSize = 64u << 20;
    MemBlock newBlock(m_ops, m_hw, std::max(alignedSize, defaultSize));
    const uint64_t physAddr = newBlock.allocate(alignedSize);
    if (!physAddr) {
        return -1;
    }

    const uint64_t physBase = newBlock.physBase;
    auto r = g_blocks.insert({physBase, std::move(newBlock)});
    if (!r.second) {
        crashhandler_die("%s:%d: g_blocks.insert", __func__, __LINE__);
    }

    return populatePhysAddr(info, physAddr, alignedSize, &r.first->second);
}

uint64_t
AddressSpaceSharedSlotsHostMemoryAllocatorContext::unallocate(
    const uint64_t physAddr) {
    AutoLock lock(g_blocksLock);

    auto i = m_allocations.find(physAddr);
    if (i == m_allocations.end()) {
        return -1;
    }

    MemBlock* block = i->second.second;
    block->unallocate(physAddr, i->second.first);
    m_allocations.erase(physAddr);

    if (block->isAllFree()) {
        gcEmptyBlocks(1);
    }

    return 0;
}

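// Destroys completely free blocks, keeping at most |allowedEmpty| of them
// around as a cache; erasing a block runs ~MemBlock, which unmaps the memory
// and returns the shared region to the address space device.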
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::gcEmptyBlocks(int allowedEmpty) {
    auto i = g_blocks.begin();
    while (i != g_blocks.end()) {
        if (i->second.isAllFree()) {
            if (allowedEmpty > 0) {
                --allowedEmpty;
                ++i;
            } else {
                i = g_blocks.erase(i);
            }
        } else {
            ++i;
        }
    }
}

uint64_t AddressSpaceSharedSlotsHostMemoryAllocatorContext::populatePhysAddr(
    AddressSpaceDevicePingInfo *info,
    const uint64_t physAddr,
    const uint32_t alignedSize,
    MemBlock* owner) {
    info->phys_addr = physAddr - get_address_space_device_hw_funcs()->getPhysAddrStartLocked();
    info->size = alignedSize;
    if (!m_allocations.insert({physAddr, {alignedSize, owner}}).second) {
        crashhandler_die("%s:%d: m_allocations.insert", __func__, __LINE__);
    }
    return 0;
}

AddressSpaceDeviceType AddressSpaceSharedSlotsHostMemoryAllocatorContext::getDeviceType() const {
    return AddressSpaceDeviceType::SharedSlotsHostMemoryAllocator;
}

void AddressSpaceSharedSlotsHostMemoryAllocatorContext::save(base::Stream* stream) const {
    AutoLock lock(g_blocksLock);

    stream->putBe32(m_allocations.size());
    for (const auto& kv: m_allocations) {
        stream->putBe64(kv.first);
        stream->putBe32(kv.second.first);
    }
}

bool AddressSpaceSharedSlotsHostMemoryAllocatorContext::load(base::Stream* stream) {
    clear();

    AutoLock lock(g_blocksLock);
    for (uint32_t sz = stream->getBe32(); sz > 0; --sz) {
        const uint64_t phys = stream->getBe64();
        const uint32_t size = stream->getBe32();
        // Saved physical addresses are translated into the (possibly
        // relocated) blocks restored by globalStateLoad.
        const auto r = translatePhysAddr(phys);
        if (r.second) {
            if (!m_allocations.insert({r.first, {size, r.second}}).second) {
                crashhandler_die("%s:%d: m_allocations.insert", __func__, __LINE__);
            }
        } else {
            crashhandler_die("%s:%d: translatePhysAddr", __func__, __LINE__);
        }
    }

    return true;
}

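// Returns every allocation owned by this context to its block; called from
// the destructor and before loading a snapshot.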
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::clear() {
    AutoLock lock(g_blocksLock);
    for (const auto& kv: m_allocations) {
        MemBlock* block = kv.second.second;
        block->unallocate(kv.first, kv.second.first);
    }
    m_allocations.clear();
}

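// globalStateSave/globalStateLoad serialize the shared g_blocks map itself;
// the per-context save/load above only record which subblocks each context
// owns within those blocks.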
void AddressSpaceSharedSlotsHostMemoryAllocatorContext::globalStateSave(base::Stream* stream) {
    AutoLock lock(g_blocksLock);

    stream->putBe32(g_blocks.size());
    for (const auto& kv: g_blocks) {
        kv.second.save(stream);
    }
}

// get_address_space_device_hw_funcs()

bool AddressSpaceSharedSlotsHostMemoryAllocatorContext::globalStateLoad(
    base::Stream* stream,
    const address_space_device_control_ops *ops,
    const AddressSpaceHwFuncs* hw) {
    AutoLock lock(g_blocksLock);

    for (uint32_t sz = stream->getBe32(); sz > 0; --sz) {
        MemBlock block;
        if (!MemBlock::load(stream, ops, hw, &block)) { return false; }

        const uint64_t physBase = block.physBase;
        if (!g_blocks.insert({physBase, std::move(block)}).second) {
            crashhandler_die("%s:%d: g_blocks.insert", __func__, __LINE__);
        }
    }

    return true;
}

void AddressSpaceSharedSlotsHostMemoryAllocatorContext::globalStateClear() {
    AutoLock lock(g_blocksLock);
    g_blocks.clear();
}

} // namespace emulation
} // namespace android