// Copyright 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "host-common/address_space_graphics.h"

#include "host-common/address_space_device.hpp"
#include "host-common/address_space_device.h"
#include "host-common/vm_operations.h"
#include "host-common/crash-handler.h"
#include "host-common/crash_reporter.h"
#include "host-common/GfxstreamFatalError.h"
#include "host-common/globals.h"
#include "aemu/base/AlignedBuf.h"
#include "aemu/base/SubAllocator.h"
#include "aemu/base/synchronization/Lock.h"

#include <memory>
#include <optional>
#include <string>
#include <vector>

#define ASGFX_DEBUG 0

#if ASGFX_DEBUG
#define ASGFX_LOG(fmt,...) printf("%s:%d " fmt "\n", __func__, __LINE__, ##__VA_ARGS__);
#else
#define ASGFX_LOG(fmt,...)
#endif

using android::base::AutoLock;
using android::base::Lock;
using android::base::SubAllocator;
using emugl::ABORT_REASON_OTHER;
using emugl::FatalError;

namespace android {
namespace emulation {
namespace asg {

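// Parameters controlling how a backing Block is created; filled in by the
// alloc* helpers below and consumed by fillBlockLocked().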
struct AllocationCreateInfo {
    bool dedicated;
    bool virtioGpu;
    bool hostmemRegisterFixed;
    bool fromLoad;
    uint64_t size;
    uint64_t hostmemId;
    void* externalAddr;
};

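// A Block is one large host memory region from which individual ring/buffer
// allocations are carved out by a SubAllocator. Non-dedicated blocks are all
// ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE bytes; dedicated blocks (virtio-gpu
// hostmem or externally provided memory) record their own size.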
struct Block {
    char* buffer = nullptr;
    SubAllocator* subAlloc = nullptr;
    uint64_t offsetIntoPhys = 0; // guest claimShared/mmap uses this
    // size: implicitly ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE
    bool isEmpty = true;
    bool dedicated = false;
    size_t dedicatedSize = 0;
    bool usesVirtioGpuHostmem = false;
    uint64_t hostmemId = 0;
    bool external = false;
};

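// Process-wide state for address space graphics: owns the pools of ring,
// buffer, and combined (virtio-gpu) blocks, the consumer interface, and the
// per-context buffer size. Mutating operations take mLock.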
class Globals {
public:
    Globals() :
        mPerContextBufferSize(
            aemu_get_android_hw()->hw_gltransport_asg_writeBufferSize) { }

    ~Globals() { clear(); }

    void initialize(const address_space_device_control_ops* ops) {
        AutoLock lock(mLock);

        if (mInitialized) return;

        mControlOps = ops;
        mInitialized = true;
    }

    void setConsumer(ConsumerInterface iface) {
        mConsumerInterface = iface;
    }

    ConsumerInterface getConsumerInterface() {
        if (!mConsumerInterface.create ||
            !mConsumerInterface.destroy ||
            !mConsumerInterface.preSave ||
            !mConsumerInterface.globalPreSave ||
            !mConsumerInterface.save ||
            !mConsumerInterface.globalPostSave ||
            !mConsumerInterface.postSave) {
            crashhandler_die("Consumer interface has not been set\n");
        }
        return mConsumerInterface;
    }

    const address_space_device_control_ops* controlOps() {
        return mControlOps;
    }

    void clear() {
        for (auto& block : mRingBlocks) {
            if (block.isEmpty) continue;
            destroyBlockLocked(block);
        }

        for (auto& block : mBufferBlocks) {
            if (block.isEmpty) continue;
            destroyBlockLocked(block);
        }

        for (auto& block : mCombinedBlocks) {
            if (block.isEmpty) continue;
            destroyBlockLocked(block);
        }

        mRingBlocks.clear();
        mBufferBlocks.clear();
        mCombinedBlocks.clear();
    }

    uint64_t perContextBufferSize() const {
        return mPerContextBufferSize;
    }

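    // First-fit allocation: try each existing block's suballocator (creating
    // the block's backing storage on first use), and append a fresh block if
    // every existing one is full.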
    Allocation newAllocation(struct AllocationCreateInfo& create,
                             std::vector<Block>& existingBlocks) {
        AutoLock lock(mLock);

        if (create.size > ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE) {
            crashhandler_die(
                "wanted size 0x%llx which is "
                "greater than block size 0x%llx",
                (unsigned long long)create.size,
                (unsigned long long)ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);
        }

        size_t index = 0;

        Allocation res;

        for (auto& block : existingBlocks) {

            if (block.isEmpty) {
                fillBlockLocked(block, create);
            }

            auto buf = block.subAlloc->alloc(create.size);

            if (buf) {
                res.buffer = (char*)buf;
                res.blockIndex = index;
                res.offsetIntoPhys =
                    block.offsetIntoPhys +
                    block.subAlloc->getOffset(buf);
                res.size = create.size;
                res.dedicated = create.dedicated;
                res.hostmemId = create.hostmemId;
                return res;
            } else {
                // block full
            }

            ++index;
        }

        Block newBlock;
        fillBlockLocked(newBlock, create);

        auto buf = newBlock.subAlloc->alloc(create.size);

        if (!buf) {
            crashhandler_die(
                "failed to allocate size 0x%llx "
                "(no free slots or out of host memory)",
                (unsigned long long)create.size);
        }

        existingBlocks.push_back(newBlock);

        res.buffer = (char*)buf;
        res.blockIndex = index;
        res.offsetIntoPhys =
            newBlock.offsetIntoPhys +
            newBlock.subAlloc->getOffset(buf);
        res.size = create.size;
        res.dedicated = create.dedicated;
        res.hostmemId = create.hostmemId;

        return res;
    }

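    // Returns an allocation to its block. Dedicated blocks are destroyed
    // outright; shared blocks are destroyed once their suballocator is empty.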
    void deleteAllocation(const Allocation& alloc, std::vector<Block>& existingBlocks) {
        if (!alloc.buffer) return;

        AutoLock lock(mLock);

        if (existingBlocks.size() <= alloc.blockIndex) {
            crashhandler_die(
                "should be a block at index %zu "
                "but it is not found", alloc.blockIndex);
        }

        auto& block = existingBlocks[alloc.blockIndex];

        if (block.dedicated) {
            destroyBlockLocked(block);
            return;
        }

        if (!block.subAlloc->free(alloc.buffer)) {
            crashhandler_die(
                "failed to free %p (block start: %p)",
                alloc.buffer,
                block.buffer);
        }

        if (shouldDestroyBlockLocked(block)) {
            destroyBlockLocked(block);
        }
    }

    Allocation allocRingStorage() {
        struct AllocationCreateInfo create = {0};
        create.size = sizeof(struct asg_ring_storage);
        return newAllocation(create, mRingBlocks);
    }

    void freeRingStorage(const Allocation& alloc) {
        if (alloc.isView) return;
        deleteAllocation(alloc, mRingBlocks);
    }

    Allocation allocBuffer() {
        struct AllocationCreateInfo create = {0};
        create.size = mPerContextBufferSize;
        return newAllocation(create, mBufferBlocks);
    }

    void freeBuffer(const Allocation& alloc) {
        if (alloc.isView) return;
        deleteAllocation(alloc, mBufferBlocks);
    }

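    // virtio-gpu path: the ring storage and the transfer buffer live in a
    // single dedicated allocation (optionally backed by externally provided
    // memory), and the ring/buffer Allocations below are views into it.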
    Allocation allocRingAndBufferStorageDedicated(const struct AddressSpaceCreateInfo& asgCreate) {
        struct AllocationCreateInfo create = {0};
        create.size = sizeof(struct asg_ring_storage) + mPerContextBufferSize;
        create.dedicated = true;
        create.virtioGpu = true;
        if (asgCreate.externalAddr) {
            create.externalAddr = asgCreate.externalAddr;
            if (asgCreate.externalAddrSize < static_cast<uint64_t>(create.size)) {
                crashhandler_die("External address size too small\n");
            }

            create.size = asgCreate.externalAddrSize;
        }

        return newAllocation(create, mCombinedBlocks);
    }

    Allocation allocRingViewIntoCombined(const Allocation& alloc) {
        Allocation res = alloc;
        res.buffer = alloc.buffer;
        res.size = sizeof(struct asg_ring_storage);
        res.isView = true;
        return res;
    }

    Allocation allocBufferViewIntoCombined(const Allocation& alloc) {
        Allocation res = alloc;
        res.buffer = alloc.buffer + sizeof(asg_ring_storage);
        res.size = mPerContextBufferSize;
        res.isView = true;
        return res;
    }

    void freeRingAndBuffer(const Allocation& alloc) {
        deleteAllocation(alloc, mCombinedBlocks);
    }

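    // Global snapshot: block counts for each pool, followed by each block's
    // payload (see saveBlockLocked()/loadBlockLocked() for the per-block
    // format).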
    void preSave() {
        // mConsumerInterface.globalPreSave();
    }

    void save(base::Stream* stream) {
        stream->putBe64(mRingBlocks.size());
        stream->putBe64(mBufferBlocks.size());
        stream->putBe64(mCombinedBlocks.size());

        for (const auto& block : mRingBlocks) {
            saveBlockLocked(stream, block);
        }

        for (const auto& block : mBufferBlocks) {
            saveBlockLocked(stream, block);
        }

        for (const auto& block : mCombinedBlocks) {
            saveBlockLocked(stream, block);
        }
    }

    void postSave() {
        // mConsumerInterface.globalPostSave();
    }

    bool load(base::Stream* stream) {
        clear();
        mConsumerInterface.globalPreLoad();

        uint64_t ringBlockCount = stream->getBe64();
        uint64_t bufferBlockCount = stream->getBe64();
        uint64_t combinedBlockCount = stream->getBe64();

        mRingBlocks.resize(ringBlockCount);
        mBufferBlocks.resize(bufferBlockCount);
        mCombinedBlocks.resize(combinedBlockCount);

        for (auto& block : mRingBlocks) {
            loadBlockLocked(stream, block);
        }

        for (auto& block : mBufferBlocks) {
            loadBlockLocked(stream, block);
        }

        for (auto& block : mCombinedBlocks) {
            loadBlockLocked(stream, block);
        }

        return true;
    }

    // Assumes that blocks have been loaded,
    // and that alloc has its blockIndex/offsetIntoPhys fields filled already
    void fillAllocFromLoad(Allocation& alloc, AddressSpaceGraphicsContext::AllocType allocType) {
        switch (allocType) {
            case AddressSpaceGraphicsContext::AllocType::AllocTypeRing:
                if (mRingBlocks.size() <= alloc.blockIndex) return;
                fillAllocFromLoad(mRingBlocks[alloc.blockIndex], alloc);
                break;
            case AddressSpaceGraphicsContext::AllocType::AllocTypeBuffer:
                if (mBufferBlocks.size() <= alloc.blockIndex) return;
                fillAllocFromLoad(mBufferBlocks[alloc.blockIndex], alloc);
                break;
            case AddressSpaceGraphicsContext::AllocType::AllocTypeCombined:
                if (mCombinedBlocks.size() <= alloc.blockIndex) return;
                fillAllocFromLoad(mCombinedBlocks[alloc.blockIndex], alloc);
                break;
            default:
                GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER));
                break;
        }
    }

private:

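    // Per-block snapshot format: a u32 occupancy marker, then (for occupied
    // blocks) offsetIntoPhys, dedicated flag, dedicated size, virtio-gpu
    // hostmem flag, hostmem id, the suballocator state, and finally the block
    // size followed by its raw contents.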
    void saveBlockLocked(
        base::Stream* stream,
        const Block& block) {

        if (block.isEmpty) {
            stream->putBe32(0);
            return;
        } else {
            stream->putBe32(1);
        }

        stream->putBe64(block.offsetIntoPhys);
        stream->putBe32(block.dedicated);
        stream->putBe64(block.dedicatedSize);
        stream->putBe32(block.usesVirtioGpuHostmem);
        stream->putBe64(block.hostmemId);

        block.subAlloc->save(stream);

        stream->putBe64(ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);
        stream->write(block.buffer, ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);
    }

    void loadBlockLocked(
        base::Stream* stream,
        Block& block) {

        uint32_t filled = stream->getBe32();
        struct AllocationCreateInfo create = {0};

        if (!filled) {
            block.isEmpty = true;
            return;
        } else {
            block.isEmpty = false;
        }

        block.offsetIntoPhys = stream->getBe64();

        create.dedicated = stream->getBe32();
        create.size = stream->getBe64();
        create.virtioGpu = stream->getBe32();
        create.hostmemRegisterFixed = true;
        create.fromLoad = true;
        create.hostmemId = stream->getBe64();

        fillBlockLocked(block, create);

        block.subAlloc->load(stream);

        // Discard the saved block size; it is always
        // ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE (see saveBlockLocked()).
        stream->getBe64();
        stream->read(block.buffer, ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);
    }

    void fillAllocFromLoad(const Block& block, Allocation& alloc) {
        alloc.buffer = block.buffer + (alloc.offsetIntoPhys - block.offsetIntoPhys);
        alloc.dedicated = block.dedicated;
        alloc.hostmemId = block.hostmemId;
    }

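    // Creates a block's backing storage. Two paths:
    // - dedicated + virtioGpu: either wrap externally provided memory or
    //   allocate page-aligned host memory and register it as virtio-gpu
    //   hostmem;
    // - neither flag set (legacy ping path): reserve a shared host region in
    //   guest-physical space and map page-aligned host memory there.
    // Any other flag combination aborts.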
    void fillBlockLocked(Block& block, struct AllocationCreateInfo& create) {
        if (create.dedicated) {
            if (create.virtioGpu) {
                void* buf;

                if (create.externalAddr) {
                    buf = create.externalAddr;
                    block.external = true;
                } else {
                    buf = aligned_buf_alloc(ADDRESS_SPACE_GRAPHICS_PAGE_SIZE, create.size);

                    struct MemEntry entry = { 0 };
                    entry.hva = buf;
                    entry.size = create.size;
                    entry.register_fixed = create.hostmemRegisterFixed;
                    entry.fixed_id = create.hostmemId ? create.hostmemId : 0;
                    entry.caching = MAP_CACHE_CACHED;

                    create.hostmemId = mControlOps->hostmem_register(&entry);
                }

                block.buffer = (char*)buf;
                block.subAlloc =
                    new SubAllocator(buf, create.size, ADDRESS_SPACE_GRAPHICS_PAGE_SIZE);
                block.offsetIntoPhys = 0;

                block.isEmpty = false;
                block.usesVirtioGpuHostmem = create.virtioGpu;
                block.hostmemId = create.hostmemId;
                block.dedicated = create.dedicated;
                block.dedicatedSize = create.size;

            } else {
                crashhandler_die(
                    "Cannot use dedicated allocation without virtio-gpu hostmem id");
            }
        } else {
            if (create.virtioGpu) {
                crashhandler_die(
                    "Only dedicated allocation allowed in virtio-gpu hostmem id path");
            } else {
                uint64_t offsetIntoPhys;
                int allocRes = 0;

                if (create.fromLoad) {
                    offsetIntoPhys = block.offsetIntoPhys;
                    allocRes = get_address_space_device_hw_funcs()->
                        allocSharedHostRegionFixedLocked(
                            ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE, offsetIntoPhys);
                    if (allocRes) {
                        // Disregard alloc failures here: when a fixed
                        // allocation fails on load, the correct allocation is
                        // assumed to already exist at this offset (tested).
                    }
                } else {
                    allocRes = get_address_space_device_hw_funcs()->
                        allocSharedHostRegionLocked(
                            ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE, &offsetIntoPhys);

                    if (allocRes) {
                        crashhandler_die(
                            "Failed to allocate physical address graphics backing memory.");
                    }
                }

                void* buf =
                    aligned_buf_alloc(
                        ADDRESS_SPACE_GRAPHICS_PAGE_SIZE,
                        ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);

                mControlOps->add_memory_mapping(
                    get_address_space_device_hw_funcs()->getPhysAddrStartLocked() +
                        offsetIntoPhys, buf,
                    ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);

                block.buffer = (char*)buf;
                block.subAlloc =
                    new SubAllocator(
                        buf, ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE,
                        ADDRESS_SPACE_GRAPHICS_PAGE_SIZE);
                block.offsetIntoPhys = offsetIntoPhys;

                block.isEmpty = false;
            }
        }
    }

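    // Tears down whatever fillBlockLocked() set up: unregister hostmem or
    // remove the memory mapping and free the shared host region, then release
    // the suballocator and (for non-external blocks) the host buffer itself.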
    void destroyBlockLocked(Block& block) {

        if (block.usesVirtioGpuHostmem && !block.external) {
            mControlOps->hostmem_unregister(block.hostmemId);
        } else if (!block.external) {
            mControlOps->remove_memory_mapping(
                get_address_space_device_hw_funcs()->getPhysAddrStartLocked() +
                    block.offsetIntoPhys,
                block.buffer,
                ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);

            get_address_space_device_hw_funcs()->freeSharedHostRegionLocked(
                block.offsetIntoPhys);
        }

        delete block.subAlloc;
        if (!block.external) {
            aligned_buf_free(block.buffer);
        }

        block.isEmpty = true;
    }

    bool shouldDestroyBlockLocked(const Block& block) const {
        return block.subAlloc->empty();
    }

    Lock mLock;
    uint64_t mPerContextBufferSize;
    bool mInitialized = false;
    const address_space_device_control_ops* mControlOps = 0;
    ConsumerInterface mConsumerInterface;
    std::vector<Block> mRingBlocks;
    std::vector<Block> mBufferBlocks;
    std::vector<Block> mCombinedBlocks;
};

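// Lazily constructed and intentionally never deleted (presumably to avoid
// destruction-order problems during process exit).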
static Globals* sGlobals() {
    static Globals* g = new Globals;
    return g;
}

// static
void AddressSpaceGraphicsContext::init(const address_space_device_control_ops* ops) {
    sGlobals()->initialize(ops);
}

// static
void AddressSpaceGraphicsContext::clear() {
    sGlobals()->clear();
}

// static
void AddressSpaceGraphicsContext::setConsumer(
    ConsumerInterface iface) {
    sGlobals()->setConsumer(iface);
}

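// Sets up a fresh context: allocate the ring and transfer buffer (a combined
// dedicated allocation for virtio-gpu, separate pool allocations otherwise),
// initialize the shared ring config, and optionally start a consumer (render
// thread) right away. When restoring from a snapshot, all of this is deferred
// to load().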
AddressSpaceGraphicsContext::AddressSpaceGraphicsContext(
    const struct AddressSpaceCreateInfo& create)
    : mConsumerCallbacks((ConsumerCallbacks){
          [this] { return onUnavailableRead(); },
          [](uint64_t physAddr) { return (char*)sGlobals()->controlOps()->get_host_ptr(physAddr); },
      }),
      mConsumerInterface(sGlobals()->getConsumerInterface()),
      mIsVirtio(false) {
    mIsVirtio = (create.type == AddressSpaceDeviceType::VirtioGpuGraphics);
    if (create.fromSnapshot) {
        // Use load() instead to initialize
        return;
    }

    if (mIsVirtio) {
        mCombinedAllocation = sGlobals()->allocRingAndBufferStorageDedicated(create);
        mRingAllocation = sGlobals()->allocRingViewIntoCombined(mCombinedAllocation);
        mBufferAllocation = sGlobals()->allocBufferViewIntoCombined(mCombinedAllocation);
    } else {
        mRingAllocation = sGlobals()->allocRingStorage();
        mBufferAllocation = sGlobals()->allocBuffer();
    }

    if (!mRingAllocation.buffer) {
        crashhandler_die(
            "Failed to allocate ring for ASG context");
    }

    if (!mBufferAllocation.buffer) {
        crashhandler_die(
            "Failed to allocate buffer for ASG context");
    }

    mHostContext = asg_context_create(
        mRingAllocation.buffer,
        mBufferAllocation.buffer,
        sGlobals()->perContextBufferSize());
    mHostContext.ring_config->buffer_size =
        sGlobals()->perContextBufferSize();
    mHostContext.ring_config->flush_interval =
        aemu_get_android_hw()->hw_gltransport_asg_writeStepSize;
    mHostContext.ring_config->host_consumed_pos = 0;
    mHostContext.ring_config->guest_write_pos = 0;
    mHostContext.ring_config->transfer_mode = 1;
    mHostContext.ring_config->transfer_size = 0;
    mHostContext.ring_config->in_error = 0;

    mSavedConfig = *mHostContext.ring_config;

    std::optional<std::string> nameOpt;
    if (create.contextNameSize) {
        std::string name(create.contextName, create.contextNameSize);
        nameOpt = name;
    }

    if (create.createRenderThread) {
        mCurrentConsumer = mConsumerInterface.create(
            mHostContext, nullptr, mConsumerCallbacks, create.virtioGpuContextId, create.virtioGpuCapsetId,
            std::move(nameOpt));
    }
}

AddressSpaceGraphicsContext::~AddressSpaceGraphicsContext() {
    if (mCurrentConsumer) {
        mExiting = 1;
        *(mHostContext.host_state) = ASG_HOST_STATE_EXIT;
        mConsumerMessages.send(ConsumerCommand::Exit);
        mConsumerInterface.destroy(mCurrentConsumer);
    }

    sGlobals()->freeBuffer(mBufferAllocation);
    sGlobals()->freeRingStorage(mRingAllocation);
    sGlobals()->freeRingAndBuffer(mCombinedAllocation);
}

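// Dispatches a guest "ping": info->metadata carries the ASG command on entry
// and the reply (e.g. ring/buffer physical offset, negotiated version, or
// hostmem id) on return.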
void AddressSpaceGraphicsContext::perform(AddressSpaceDevicePingInfo* info) {
    switch (static_cast<asg_command>(info->metadata)) {
    case ASG_GET_RING:
        info->metadata = mRingAllocation.offsetIntoPhys;
        info->size = mRingAllocation.size;
        break;
    case ASG_GET_BUFFER:
        info->metadata = mBufferAllocation.offsetIntoPhys;
        info->size = mBufferAllocation.size;
        break;
    case ASG_SET_VERSION: {
        auto guestVersion = (uint32_t)info->size;
        info->size = (uint64_t)(mVersion > guestVersion ? guestVersion : mVersion);
        mVersion = (uint32_t)info->size;
        mCurrentConsumer = mConsumerInterface.create(
            mHostContext, nullptr /* no load stream */, mConsumerCallbacks, 0, 0,
            std::nullopt);

        if (mIsVirtio) {
            info->metadata = mCombinedAllocation.hostmemId;
        }
        break;
    }
    case ASG_NOTIFY_AVAILABLE:
        mConsumerMessages.trySend(ConsumerCommand::Wakeup);
        info->metadata = 0;
        break;
    case ASG_GET_CONFIG:
        *mHostContext.ring_config = mSavedConfig;
        info->metadata = 0;
        break;
    }
}

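// Called by the consumer when the ring has no data. After enough consecutive
// empty reads the consumer parks here until the guest pings
// ASG_NOTIFY_AVAILABLE (or a snapshot/exit command arrives). Returns 0 to
// keep spinning, 1 after a wakeup, -1 on exit, -2 to pause for a snapshot,
// and -3 on resume.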
int AddressSpaceGraphicsContext::onUnavailableRead() {
    static const uint32_t kMaxUnavailableReads = 8;

    ++mUnavailableReadCount;
    ring_buffer_yield();

    ConsumerCommand cmd;

    if (mExiting) {
        mUnavailableReadCount = kMaxUnavailableReads;
    }

    if (mUnavailableReadCount >= kMaxUnavailableReads) {
        mUnavailableReadCount = 0;

    sleep:
        *(mHostContext.host_state) = ASG_HOST_STATE_NEED_NOTIFY;
        mConsumerMessages.receive(&cmd);

        switch (cmd) {
        case ConsumerCommand::Wakeup:
            *(mHostContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;
            break;
        case ConsumerCommand::Exit:
            *(mHostContext.host_state) = ASG_HOST_STATE_EXIT;
            return -1;
        case ConsumerCommand::Sleep:
            goto sleep;
        case ConsumerCommand::PausePreSnapshot:
            return -2;
        case ConsumerCommand::ResumePostSnapshot:
            return -3;
        default:
            crashhandler_die(
                "AddressSpaceGraphicsContext::onUnavailableRead: "
                "Unknown command: 0x%x\n",
                (uint32_t)cmd);
        }

        return 1;
    }
    return 0;
}

AddressSpaceDeviceType AddressSpaceGraphicsContext::getDeviceType() const {
    return AddressSpaceDeviceType::Graphics;
}

void AddressSpaceGraphicsContext::preSave() const {
    if (mCurrentConsumer) {
        mConsumerInterface.preSave(mCurrentConsumer);
        mConsumerMessages.send(ConsumerCommand::PausePreSnapshot);
    }
}

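// Per-context snapshot format: virtio flag, version, exiting flag,
// unavailable-read count, the three allocations, the saved ring config, and
// finally a consumer-present flag followed by the consumer's own state.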
void AddressSpaceGraphicsContext::save(base::Stream* stream) const {
    stream->putBe32(mIsVirtio);
    stream->putBe32(mVersion);
    stream->putBe32(mExiting);
    stream->putBe32(mUnavailableReadCount);

    saveAllocation(stream, mRingAllocation);
    saveAllocation(stream, mBufferAllocation);
    saveAllocation(stream, mCombinedAllocation);

    saveRingConfig(stream, mSavedConfig);

    if (mCurrentConsumer) {
        stream->putBe32(1);
        mConsumerInterface.save(mCurrentConsumer, stream);
    } else {
        stream->putBe32(0);
    }
}

void AddressSpaceGraphicsContext::postSave() const {
    if (mCurrentConsumer) {
        mConsumerMessages.send(ConsumerCommand::ResumePostSnapshot);
        mConsumerInterface.postSave(mCurrentConsumer);
    }
}

bool AddressSpaceGraphicsContext::load(base::Stream* stream) {
    mIsVirtio = stream->getBe32();
    mVersion = stream->getBe32();
    mExiting = stream->getBe32();
    mUnavailableReadCount = stream->getBe32();

    loadAllocation(stream, mRingAllocation, AllocType::AllocTypeRing);
    loadAllocation(stream, mBufferAllocation, AllocType::AllocTypeBuffer);
    loadAllocation(stream, mCombinedAllocation, AllocType::AllocTypeCombined);

    mHostContext = asg_context_create(
        mRingAllocation.buffer,
        mBufferAllocation.buffer,
        sGlobals()->perContextBufferSize());
    mHostContext.ring_config->buffer_size =
        sGlobals()->perContextBufferSize();
    mHostContext.ring_config->flush_interval =
        aemu_get_android_hw()->hw_gltransport_asg_writeStepSize;

    // In load, the live ring config state is in shared host/guest ram.
    //
    // mHostContext.ring_config->host_consumed_pos = 0;
    // mHostContext.ring_config->transfer_mode = 1;
    // mHostContext.ring_config->transfer_size = 0;
    // mHostContext.ring_config->in_error = 0;

    loadRingConfig(stream, mSavedConfig);

    uint32_t consumerExists = stream->getBe32();

    if (consumerExists) {
        mCurrentConsumer = mConsumerInterface.create(
            mHostContext, stream, mConsumerCallbacks, 0, 0, std::nullopt);
        mConsumerInterface.postLoad(mCurrentConsumer);
    }

    return true;
}

void AddressSpaceGraphicsContext::globalStatePreSave() {
    sGlobals()->preSave();
}

void AddressSpaceGraphicsContext::globalStateSave(base::Stream* stream) {
    sGlobals()->save(stream);
}

void AddressSpaceGraphicsContext::globalStatePostSave() {
    sGlobals()->postSave();
}

bool AddressSpaceGraphicsContext::globalStateLoad(base::Stream* stream) {
    return sGlobals()->load(stream);
}

void AddressSpaceGraphicsContext::saveRingConfig(base::Stream* stream, const struct asg_ring_config& config) const {
    stream->putBe32(config.buffer_size);
    stream->putBe32(config.flush_interval);
    stream->putBe32(config.host_consumed_pos);
    stream->putBe32(config.guest_write_pos);
    stream->putBe32(config.transfer_mode);
    stream->putBe32(config.transfer_size);
    stream->putBe32(config.in_error);
}

void AddressSpaceGraphicsContext::saveAllocation(base::Stream* stream, const Allocation& alloc) const {
    stream->putBe64(alloc.blockIndex);
    stream->putBe64(alloc.offsetIntoPhys);
    stream->putBe64(alloc.size);
    stream->putBe32(alloc.isView);
}

void AddressSpaceGraphicsContext::loadRingConfig(base::Stream* stream, struct asg_ring_config& config) {
    config.buffer_size = stream->getBe32();
    config.flush_interval = stream->getBe32();
    config.host_consumed_pos = stream->getBe32();
    config.guest_write_pos = stream->getBe32();
    config.transfer_mode = stream->getBe32();
    config.transfer_size = stream->getBe32();
    config.in_error = stream->getBe32();
}

void AddressSpaceGraphicsContext::loadAllocation(base::Stream* stream, Allocation& alloc, AddressSpaceGraphicsContext::AllocType type) {
    alloc.blockIndex = stream->getBe64();
    alloc.offsetIntoPhys = stream->getBe64();
    alloc.size = stream->getBe64();
    alloc.isView = stream->getBe32();

    sGlobals()->fillAllocFromLoad(alloc, type);
}

} // namespace asg
} // namespace emulation
} // namespace android