// Copyright (C) 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "RingStream.h"

#include "aemu/base/system/System.h"

#define EMUGL_DEBUG_LEVEL  0

#include "host-common/crash_reporter.h"
#include "host-common/debug.h"
#include "host-common/dma_device.h"
#include "host-common/GfxstreamFatalError.h"

#include <assert.h>
#include <memory.h>
#include <stdio.h>

#include <algorithm>

using emugl::ABORT_REASON_OTHER;
using emugl::FatalError;

namespace gfxstream {

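// RingStream implements IOStream on top of an address-space graphics (asg)
// ring context: guest-to-host commands arrive through the to_host rings,
// and host-to-guest replies leave through the from_host large-xfer ring.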
RingStream::RingStream(
    struct asg_context context,
    android::emulation::asg::ConsumerCallbacks callbacks,
    size_t bufsize) :
    IOStream(bufsize),
    mContext(context),
    mCallbacks(callbacks) { }

RingStream::~RingStream() = default;

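// Presumably the guest flushes to_host data in chunks of
// ring_config->flush_interval bytes, so a commit may need at least that
// much free tail space.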
int RingStream::getNeededFreeTailSize() const {
    return mContext.ring_config->flush_interval;
}

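// Scratch space for outgoing (host-to-guest) data. The write buffer only
// grows, so repeated small allocations reuse the same storage.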
void* RingStream::allocBuffer(size_t minSize) {
    if (mWriteBuffer.size() < minSize) {
        mWriteBuffer.resize_noinit(minSize);
    }
    return mWriteBuffer.data();
}

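// Pushes `size` bytes of mWriteBuffer into the from_host large-xfer ring.
// While the ring is full the host spins and yields; after kBackoffIters
// iterations it also sleeps 10us per retry so a stalled guest does not pin
// a host core. Returns a partial count only if host_state reports
// ASG_HOST_STATE_EXIT (e.g. the guest went away).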
int RingStream::commitBuffer(size_t size) {
    size_t sent = 0;
    auto data = mWriteBuffer.data();

    size_t iters = 0;
    size_t backedOffIters = 0;
    const size_t kBackoffIters = 10000000ULL;
    while (sent < size) {
        ++iters;
        auto avail = ring_buffer_available_write(
            mContext.from_host_large_xfer.ring,
            &mContext.from_host_large_xfer.view);

        // Check if the guest process crashed.
        if (!avail) {
            if (*(mContext.host_state) == ASG_HOST_STATE_EXIT) {
                return sent;
            } else {
                ring_buffer_yield();
                if (iters > kBackoffIters) {
                    android::base::sleepUs(10);
                    ++backedOffIters;
                }
            }
            continue;
        }

        auto remaining = size - sent;
        auto todo = remaining < avail ? remaining : avail;

        ring_buffer_view_write(
            mContext.from_host_large_xfer.ring,
            &mContext.from_host_large_xfer.view,
            data + sent, todo, 1);

        sent += todo;
    }

    if (backedOffIters > 0) {
        fprintf(stderr, "%s: warning: backed off %zu times due to guest slowness.\n",
                __func__,
                backedOffIters);
    }
    return sent;
}

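// Reads up to *inout_len bytes of guest-to-host traffic. The loop first
// drains bytes left over in mReadBuffer from a previous call, then polls
// both to_host rings: the small ring carries type1/type2 transfer
// descriptors, the large-xfer ring carries raw bytes (type3). When nothing
// is available, it yields for up to maxSpins iterations before deferring to
// mCallbacks.onUnavailableRead(), whose return value requests exit (-1),
// a pre-snapshot pause (-2), or a post-snapshot resume (-3).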
const unsigned char* RingStream::readRaw(void* buf, size_t* inout_len) {
    size_t wanted = *inout_len;
    size_t count = 0U;
    auto dst = static_cast<char*>(buf);

    uint32_t ringAvailable = 0;
    uint32_t ringLargeXferAvailable = 0;

    const uint32_t maxSpins = 30;
    uint32_t spins = 0;
    bool inLargeXfer = true;

    *(mContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;

    while (count < wanted) {

        if (mReadBufferLeft) {
            size_t avail = std::min<size_t>(wanted - count, mReadBufferLeft);
            memcpy(dst + count,
                   mReadBuffer.data() + (mReadBuffer.size() - mReadBufferLeft),
                   avail);
            count += avail;
            mReadBufferLeft -= avail;
            continue;
        }

        mReadBuffer.clear();

        // No read buffer left...
        if (count > 0) {  // There is some data to return.
            break;
        }

        *(mContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;

        // if (mInSnapshotOperation) {
        //     fprintf(stderr, "%s: %p in snapshot operation, exit\n", __func__, mRenderThreadPtr);
        //     // In a snapshot operation, exit
        //     return nullptr;
        // }

        if (mShouldExit) {
            return nullptr;
        }

        ringAvailable =
            ring_buffer_available_read(mContext.to_host, 0);
        ringLargeXferAvailable =
            ring_buffer_available_read(
                mContext.to_host_large_xfer.ring,
                &mContext.to_host_large_xfer.view);

        auto current = dst + count;
        auto ptrEnd = dst + wanted;

        if (ringAvailable) {
            inLargeXfer = false;
            uint32_t transferMode =
                mContext.ring_config->transfer_mode;
            switch (transferMode) {
                case 1:
                    type1Read(ringAvailable, dst, &count, &current, ptrEnd);
                    break;
                case 2:
                    type2Read(ringAvailable, &count, &current, ptrEnd);
                    break;
                case 3:
                    // emugl::emugl_crash_reporter(
                    //     "Guest should never set to "
                    //     "transfer mode 3 with ringAvailable != 0\n");
                default:
                    // emugl::emugl_crash_reporter(
                    //     "Unknown transfer mode %u\n",
                    //     transferMode);
                    break;
            }
        } else if (ringLargeXferAvailable) {
            type3Read(ringLargeXferAvailable,
                      &count, &current, ptrEnd);
            inLargeXfer = true;
            if (0 == __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                inLargeXfer = false;
            }
        } else {
            if (inLargeXfer && 0 != __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                continue;
            }

            if (inLargeXfer && 0 == __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE)) {
                inLargeXfer = false;
            }

            if (++spins < maxSpins) {
                ring_buffer_yield();
                continue;
            } else {
                spins = 0;
            }

            if (mShouldExit) {
                return nullptr;
            }

            if (mShouldExitForSnapshot && mInSnapshotOperation) {
                return nullptr;
            }

            int unavailReadResult = mCallbacks.onUnavailableRead();

            if (-1 == unavailReadResult) {
                mShouldExit = true;
            }

            // Pause pre-snapshot.
            if (-2 == unavailReadResult) {
                mShouldExitForSnapshot = true;
            }

            // Resume post-snapshot.
            if (-3 == unavailReadResult) {
                mShouldExitForSnapshot = false;
            }

            continue;
        }
    }

    *inout_len = count;
    ++mXmits;
    mTotalRecv += count;
    D("read %d bytes", (int)count);

    *(mContext.host_state) = ASG_HOST_STATE_RENDERING;
    return (const unsigned char*)buf;
}

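// Type 1 transfers: the small to_host ring holds asg_type1_xfer
// descriptors, each an (offset, size) pair into the shared mContext.buffer.
// If the first transfer does not fit in the caller's buffer, its payload is
// staged in mReadBuffer so the descriptor can still be consumed and the
// guest can make progress; readRaw() hands out the staged bytes on the
// next call.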
void RingStream::type1Read(
    uint32_t available,
    char* begin,
    size_t* count, char** current, const char* ptrEnd) {

    uint32_t xferTotal = available / sizeof(struct asg_type1_xfer);

    if (mType1Xfers.size() < xferTotal) {
        mType1Xfers.resize(xferTotal * 2);
    }

    auto xfersPtr = mType1Xfers.data();

    ring_buffer_copy_contents(
        mContext.to_host, 0, xferTotal * sizeof(struct asg_type1_xfer), (uint8_t*)xfersPtr);

    for (uint32_t i = 0; i < xferTotal; ++i) {
        if (*current + xfersPtr[i].size > ptrEnd) {
            // Save in a temp buffer or we'll get stuck.
            if (begin == *current && i == 0) {
                const char* src = mContext.buffer + xfersPtr[i].offset;
                mReadBuffer.resize_noinit(xfersPtr[i].size);
                memcpy(mReadBuffer.data(), src, xfersPtr[i].size);
                mReadBufferLeft = xfersPtr[i].size;
                ring_buffer_advance_read(
                        mContext.to_host, sizeof(struct asg_type1_xfer), 1);
                __atomic_fetch_add(&mContext.ring_config->host_consumed_pos, xfersPtr[i].size, __ATOMIC_RELEASE);
            }
            return;
        }
        const char* src = mContext.buffer + xfersPtr[i].offset;
        memcpy(*current, src, xfersPtr[i].size);
        ring_buffer_advance_read(
                mContext.to_host, sizeof(struct asg_type1_xfer), 1);
        __atomic_fetch_add(&mContext.ring_config->host_consumed_pos, xfersPtr[i].size, __ATOMIC_RELEASE);
        *current += xfersPtr[i].size;
        *count += xfersPtr[i].size;

        // TODO: Figure out why running multiple xfers here can result in data
        // corruption.
        return;
    }
}

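// Type 2 transfers: descriptors carry guest physical addresses that are
// resolved to host pointers via mCallbacks.getPtr(). Not yet implemented;
// the abort below fires before the (currently unreachable) copy loop runs.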
void RingStream::type2Read(
    uint32_t available,
    size_t* count, char** current, const char* ptrEnd) {

    GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "nyi. abort";

    uint32_t xferTotal = available / sizeof(struct asg_type2_xfer);

    if (mType2Xfers.size() < xferTotal) {
        mType2Xfers.resize(xferTotal * 2);
    }

    auto xfersPtr = mType2Xfers.data();

    ring_buffer_copy_contents(
        mContext.to_host, 0, available, (uint8_t*)xfersPtr);

    for (uint32_t i = 0; i < xferTotal; ++i) {

        if (*current + xfersPtr[i].size > ptrEnd) return;

        const char* src =
            mCallbacks.getPtr(xfersPtr[i].physAddr);

        memcpy(*current, src, xfersPtr[i].size);

        // Advance by one type2 descriptor (the original advanced by
        // sizeof(struct asg_type1_xfer), which looks like a copy-paste slip).
        ring_buffer_advance_read(
            mContext.to_host, sizeof(struct asg_type2_xfer), 1);

        *current += xfersPtr[i].size;
        *count += xfersPtr[i].size;
    }
}

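// Type 3 (large) transfers: the guest publishes the total transfer size in
// ring_config->transfer_size and streams raw bytes through the
// to_host_large_xfer ring, with no per-chunk descriptors.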
void RingStream::type3Read(
    uint32_t available,
    size_t* count, char** current, const char* ptrEnd) {

    uint32_t xferTotal = __atomic_load_n(&mContext.ring_config->transfer_size, __ATOMIC_ACQUIRE);
    uint32_t maxCanRead = ptrEnd - *current;
    uint32_t ringAvail = available;
    uint32_t actuallyRead = std::min(ringAvail, std::min(xferTotal, maxCanRead));

    // Decrement transfer_size before letting the guest proceed in
    // ring_buffer funcs, or we will race to the next time the guest sets
    // transfer_size.
    __atomic_fetch_sub(&mContext.ring_config->transfer_size, actuallyRead, __ATOMIC_RELEASE);

    ring_buffer_read_fully_with_abort(
            mContext.to_host_large_xfer.ring,
            &mContext.to_host_large_xfer.view,
            *current, actuallyRead,
            1, &mContext.ring_config->in_error);

    *current += actuallyRead;
    *count += actuallyRead;
}

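// DMA helpers: map a guest physical address to a host pointer and release
// it again through the global emugl DMA device hooks.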
void* RingStream::getDmaForReading(uint64_t guest_paddr) {
    return emugl::g_emugl_dma_get_host_addr(guest_paddr);
}

void RingStream::unlockDma(uint64_t guest_paddr) { emugl::g_emugl_dma_unlock(guest_paddr); }

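// Stages len bytes via alloc() and flushes them to the guest in one call.
// Always returns 0; commitBuffer() handles blocking when the ring is full.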
int RingStream::writeFully(const void* buf, size_t len) {
    void* dstBuf = alloc(len);
    memcpy(dstBuf, buf, len);
    flush();
    return 0;
}

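// RingStream consumes data through readRaw(); a blocking readFully() is
// intentionally unsupported and aborts if called.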
const unsigned char* RingStream::readFully(void* buf, size_t len) {
    GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER)) << "not intended for use with RingStream";
    return nullptr;  // Unreachable; satisfies the non-void return type.
}

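// Snapshot support: persist the unconsumed tail of mReadBuffer plus the
// pending mWriteBuffer so a loaded snapshot can resume mid-stream without
// dropping buffered bytes.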
void RingStream::onSave(android::base::Stream* stream) {
    stream->putBe32(mReadBufferLeft);
    stream->write(mReadBuffer.data() + mReadBuffer.size() - mReadBufferLeft,
                  mReadBufferLeft);
    android::base::saveBuffer(stream, mWriteBuffer);
}

unsigned char* RingStream::onLoad(android::base::Stream* stream) {
    android::base::loadBuffer(stream, &mReadBuffer);
    mReadBufferLeft = mReadBuffer.size();
    android::base::loadBuffer(stream, &mWriteBuffer);
    return reinterpret_cast<unsigned char*>(mWriteBuffer.data());
}

}  // namespace gfxstream