1 /*
2 * Copyright (C) 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 //#define LOG_NDEBUG 0
18 #define LOG_TAG "C2DmaBufAllocator"
19
20 #include <BufferAllocator/BufferAllocator.h>
21 #include <C2Buffer.h>
22 #include <C2Debug.h>
23 #include <C2DmaBufAllocator.h>
24 #include <C2ErrnoUtils.h>
25
26 #include <linux/ion.h>
27 #include <sys/mman.h>
28 #include <unistd.h> // getpagesize, size_t, close, dup
29 #include <utils/Log.h>
30
31 #include <list>
32
33 #include <android-base/properties.h>
34 #include <media/stagefright/foundation/Mutexed.h>
35
36 namespace android {
37
namespace {
// Maximum number of entries retained in the usage-mapper LRU cache (see mapUsage()).
constexpr size_t USAGE_LRU_CACHE_SIZE = 1024;

// max padding after ion/dmabuf allocations in bytes
constexpr uint32_t MAX_PADDING = 0x8000; // 32KB
}  // namespace
44
45 /* =========================== BUFFER HANDLE =========================== */
46 /**
47 * Buffer handle
48 *
49 * Stores dmabuf fd & metadata
50 *
51 * This handle will not capture mapped fd-s as updating that would require a
52 * global mutex.
53 */
54
struct C2HandleBuf : public C2Handle {
    /**
     * Flattens a dmabuf fd and its size into a native handle.
     *
     * @param bufferFd dmabuf file descriptor (may be negative, producing a non-working handle)
     * @param size     advertised capacity in bytes, split into two 32-bit ints below
     */
    C2HandleBuf(int bufferFd, size_t size)
        : C2Handle(cHeader),
          mFds{bufferFd},
          mInts{int(size & 0xFFFFFFFF), int((uint64_t(size) >> 32) & 0xFFFFFFFF), kMagic} {}

    // Returns true if |o| has this handle type's header layout and magic tag.
    static bool IsValid(const C2Handle* const o);

    int bufferFd() const { return mFds.mBuffer; }
    // Reassembles the 64-bit size from its two stored 32-bit halves.
    size_t size() const {
        return size_t(unsigned(mInts.mSizeLo)) | size_t(uint64_t(unsigned(mInts.mSizeHi)) << 32);
    }

protected:
    struct {
        int mBuffer;  // dmabuf fd
    } mFds;
    struct {
        int mSizeLo;  // low 32-bits of size
        int mSizeHi;  // high 32-bits of size
        int mMagic;   // kMagic tag checked by IsValid()
    } mInts;

private:
    typedef C2HandleBuf _type;
    enum {
        kMagic = '\xc2io\x00',
        numFds = sizeof(mFds) / sizeof(int),
        numInts = sizeof(mInts) / sizeof(int),
        version = sizeof(C2Handle)
    };
    // constexpr static C2Handle cHeader = { version, numFds, numInts, {} };
    const static C2Handle cHeader;
};
89
// Canonical header used to stamp new C2HandleBuf handles and to recognize them in IsValid().
const C2Handle C2HandleBuf::cHeader = {
        C2HandleBuf::version, C2HandleBuf::numFds, C2HandleBuf::numInts, {}};
92
93 // static
IsValid(const C2Handle * const o)94 bool C2HandleBuf::IsValid(const C2Handle* const o) {
95 if (!o || memcmp(o, &cHeader, sizeof(cHeader))) {
96 return false;
97 }
98 const C2HandleBuf* other = static_cast<const C2HandleBuf*>(o);
99 return other->mInts.mMagic == kMagic;
100 }
101
102 /* =========================== DMABUF ALLOCATION =========================== */
class C2DmaBufAllocation : public C2LinearAllocation {
  public:
    /* Interface methods */
    virtual c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence,
                            void** addr /* nonnull */) override;
    virtual c2_status_t unmap(void* addr, size_t size, C2Fence* fenceFd) override;
    virtual ~C2DmaBufAllocation() override;
    virtual const C2Handle* handle() const override;
    virtual id_t getAllocatorId() const override;
    virtual bool equals(const std::shared_ptr<C2LinearAllocation>& other) const override;

    // internal methods

    /**
     * Constructs an allocation via a new allocation.
     *
     * @param alloc allocator
     * @param allocSize size used for the allocator
     * @param capacity capacity advertised to the client
     * @param heap_name name of the dmabuf heap (device)
     * @param flags flags
     * @param id allocator id
     */
    C2DmaBufAllocation(BufferAllocator& alloc, size_t allocSize, size_t capacity,
                       C2String heap_name, unsigned flags, C2Allocator::id_t id);

    /**
     * Constructs an allocation by wrapping an existing allocation.
     *
     * @param size capacity advertised to the client
     * @param shareFd dmabuf fd of the wrapped allocation
     * @param id allocator id
     */
    C2DmaBufAllocation(size_t size, int shareFd, C2Allocator::id_t id);

    /** Returns the construction status (C2_OK on success). */
    c2_status_t status() const;

  protected:
    /**
     * mmap()s the dmabuf into this process.
     *
     * @param mapSize        bytes to map (already includes |alignmentBytes| of slack)
     * @param mapOffset      page-aligned offset into the buffer
     * @param alignmentBytes distance from the page-aligned base to the client's offset
     * @param prot           mmap protection bits
     * @param flags          mmap flags
     * @param base           [out] page-aligned mapping base (nullptr on failure)
     * @param addr           [out] client-visible address at the requested offset
     *                       (nullptr on failure)
     */
    virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
                                    int prot, int flags, void** base, void** addr) {
        c2_status_t err = C2_OK;
        *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
        ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
              "returned (%d)",
              mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
        if (*base == MAP_FAILED) {
            *base = *addr = nullptr;
            err = c2_map_errno<EINVAL>(errno);
        } else {
            // expose the address at the requested (possibly page-unaligned) offset
            *addr = (uint8_t*)*base + alignmentBytes;
        }
        return err;
    }

    C2Allocator::id_t mId;  // id of the allocator that produced this allocation
    C2HandleBuf mHandle;    // dmabuf fd + size, flattened as a native handle
    c2_status_t mInit;      // construction status
    // One entry per successful map(); unmap() removes the matching entry.
    struct Mapping {
        void* addr;             // page-aligned base returned by mmap
        size_t alignmentBytes;  // offset of the client address within the mapping
        size_t size;            // total mapped size (client size + alignmentBytes)
    };
    Mutexed<std::list<Mapping>> mMappings;

    // TODO: we could make this encapsulate shared_ptr and copiable
    C2_DO_NOT_COPY(C2DmaBufAllocation);
};
170
// Maps [offset, offset + size) of the dmabuf into this process. *addr points at
// the requested offset; the page-aligned mmap base and total size are recorded
// in mMappings so unmap() can undo the alignment adjustment.
c2_status_t C2DmaBufAllocation::map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence* fence,
                                    void** addr) {
    static const size_t kPageSize = getpagesize();
    (void)fence;  // TODO: wait for fence
    *addr = nullptr;
    if (!mMappings.lock()->empty()) {
        ALOGV("multiple map");
        // TODO: technically we should return DUPLICATE here, but our block views
        // don't actually unmap, so we end up remapping the buffer multiple times.
        //
        // return C2_DUPLICATE;
    }
    if (size == 0) {
        return C2_BAD_VALUE;
    }

    // translate requested CPU usage into mmap protection bits
    int prot = PROT_NONE;
    int flags = MAP_SHARED;
    if (usage.expected & C2MemoryUsage::CPU_READ) {
        prot |= PROT_READ;
    }
    if (usage.expected & C2MemoryUsage::CPU_WRITE) {
        prot |= PROT_WRITE;
    }

    // mmap requires a page-aligned file offset: map from the enclosing page
    // boundary and keep the slack so the client address can be reconstructed
    size_t alignmentBytes = offset % kPageSize;
    size_t mapOffset = offset - alignmentBytes;
    size_t mapSize = size + alignmentBytes;
    Mapping map = {nullptr, alignmentBytes, mapSize};

    c2_status_t err =
            mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
    if (map.addr) {
        mMappings.lock()->push_back(map);
    }
    return err;
}
208
// Unmaps a region previously returned by map(). The (addr, size) pair must
// exactly match one recorded mapping; partial unmaps are not supported.
c2_status_t C2DmaBufAllocation::unmap(void* addr, size_t size, C2Fence* fence) {
    Mutexed<std::list<Mapping>>::Locked mappings(mMappings);
    if (mappings->empty()) {
        ALOGD("tried to unmap unmapped buffer");
        return C2_NOT_FOUND;
    }
    for (auto it = mappings->begin(); it != mappings->end(); ++it) {
        // the client address is the page-aligned base plus the alignment slack,
        // and the recorded size includes that slack (see map())
        if (addr != (uint8_t*)it->addr + it->alignmentBytes ||
            size + it->alignmentBytes != it->size) {
            continue;
        }
        int err = munmap(it->addr, it->size);
        if (err != 0) {
            ALOGD("munmap failed");
            return c2_map_errno<EINVAL>(errno);
        }
        if (fence) {
            *fence = C2Fence();  // not using fences
        }
        (void)mappings->erase(it);
        ALOGV("successfully unmapped: %d", mHandle.bufferFd());
        return C2_OK;
    }
    ALOGD("unmap failed to find specified map");
    return C2_BAD_VALUE;
}
235
status() const236 c2_status_t C2DmaBufAllocation::status() const {
237 return mInit;
238 }
239
getAllocatorId() const240 C2Allocator::id_t C2DmaBufAllocation::getAllocatorId() const {
241 return mId;
242 }
243
equals(const std::shared_ptr<C2LinearAllocation> & other) const244 bool C2DmaBufAllocation::equals(const std::shared_ptr<C2LinearAllocation>& other) const {
245 if (!other || other->getAllocatorId() != getAllocatorId()) {
246 return false;
247 }
248 // get user handle to compare objects
249 std::shared_ptr<C2DmaBufAllocation> otherAsBuf =
250 std::static_pointer_cast<C2DmaBufAllocation>(other);
251 return mHandle.bufferFd() == otherAsBuf->mHandle.bufferFd();
252 }
253
handle() const254 const C2Handle* C2DmaBufAllocation::handle() const {
255 return &mHandle;
256 }
257
// Tears down the allocation: force-unmaps any mappings the client leaked, then
// closes the dmabuf fd via the handle (only when construction succeeded, so a
// non-working handle's invalid fd is never closed).
C2DmaBufAllocation::~C2DmaBufAllocation() {
    Mutexed<std::list<Mapping>>::Locked mappings(mMappings);
    if (!mappings->empty()) {
        ALOGD("Dangling mappings!");
        for (const Mapping& map : *mappings) {
            int err = munmap(map.addr, map.size);
            if (err) ALOGD("munmap failed");
        }
    }
    if (mInit == C2_OK) {
        native_handle_close(&mHandle);
    }
}
271
// Allocates |allocSize| bytes from the named dmabuf heap and wraps the result.
// On allocation failure a non-working handle (fd < 0) is still created and
// mInit records the error; callers must check status() before using this.
C2DmaBufAllocation::C2DmaBufAllocation(BufferAllocator& alloc, size_t allocSize, size_t capacity,
                                       C2String heap_name, unsigned flags, C2Allocator::id_t id)
    : C2LinearAllocation(capacity), mHandle(-1, 0) {
    int bufferFd = -1;
    int ret = 0;

    bufferFd = alloc.Alloc(heap_name, allocSize, flags);
    if (bufferFd < 0) {
        // NOTE(review): a failed Alloc() appears to return a negative errno and
        // |ret| stays negative here — confirm c2_map_errno maps negative values
        // to the intended error status (any nonzero value yields a failure).
        ret = bufferFd;
    }

    // this may be a non-working handle if bufferFd is negative
    mHandle = C2HandleBuf(bufferFd, capacity);
    mId = id;
    mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(ret));
}
288
// Wraps an existing dmabuf fd without allocating. The fd is stored as-is (no
// dup() here); the destructor will close it through the handle.
C2DmaBufAllocation::C2DmaBufAllocation(size_t size, int shareFd, C2Allocator::id_t id)
    : C2LinearAllocation(size), mHandle(-1, 0) {
    mHandle = C2HandleBuf(shareFd, size);
    mId = id;
    // errno 0 maps to the success status
    mInit = c2_status_t(c2_map_errno<ENOMEM, EACCES, EINVAL>(0));
}
295
296 /* =========================== DMABUF ALLOCATOR =========================== */
C2DmaBufAllocator(id_t id)297 C2DmaBufAllocator::C2DmaBufAllocator(id_t id) : mInit(C2_OK) {
298 C2MemoryUsage minUsage = {0, 0};
299 C2MemoryUsage maxUsage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
300 Traits traits = {"android.allocator.dmabuf", id, LINEAR, minUsage, maxUsage};
301 mTraits = std::make_shared<Traits>(traits);
302 }
303
getId() const304 C2Allocator::id_t C2DmaBufAllocator::getId() const {
305 std::lock_guard<std::mutex> lock(mUsageMapperLock);
306 return mTraits->id;
307 }
308
getName() const309 C2String C2DmaBufAllocator::getName() const {
310 std::lock_guard<std::mutex> lock(mUsageMapperLock);
311 return mTraits->name;
312 }
313
getTraits() const314 std::shared_ptr<const C2Allocator::Traits> C2DmaBufAllocator::getTraits() const {
315 std::lock_guard<std::mutex> lock(mUsageMapperLock);
316 return mTraits;
317 }
318
// Installs a custom usage -> (heap name, flags) mapper, invalidates the mapper
// cache, and republishes the traits with the new usage range and block size.
void C2DmaBufAllocator::setUsageMapper(const UsageMapperFn& mapper __unused, uint64_t minUsage,
                                       uint64_t maxUsage, uint64_t blockSize) {
    std::lock_guard<std::mutex> lock(mUsageMapperLock);
    // results cached from the previous mapper are no longer valid
    mUsageMapperCache.clear();
    mUsageMapperLru.clear();
    mUsageMapper = mapper;
    // keep name/id; only the advertised usage range changes
    Traits traits = {mTraits->name, mTraits->id, LINEAR, C2MemoryUsage(minUsage),
                     C2MemoryUsage(maxUsage)};
    mTraits = std::make_shared<Traits>(traits);
    mBlockSize = blockSize;
}
330
operator ()(const MapperKey & k) const331 std::size_t C2DmaBufAllocator::MapperKeyHash::operator()(const MapperKey& k) const {
332 return std::hash<uint64_t>{}(k.first) ^ std::hash<size_t>{}(k.second);
333 }
334
mapUsage(C2MemoryUsage usage,size_t capacity,C2String * heap_name,unsigned * flags)335 c2_status_t C2DmaBufAllocator::mapUsage(C2MemoryUsage usage, size_t capacity, C2String* heap_name,
336 unsigned* flags) {
337 std::lock_guard<std::mutex> lock(mUsageMapperLock);
338 c2_status_t res = C2_OK;
339 // align capacity
340 capacity = (capacity + mBlockSize - 1) & ~(mBlockSize - 1);
341 MapperKey key = std::make_pair(usage.expected, capacity);
342 auto entry = mUsageMapperCache.find(key);
343 if (entry == mUsageMapperCache.end()) {
344 if (mUsageMapper) {
345 res = mUsageMapper(usage, capacity, heap_name, flags);
346 } else {
347 if (C2DmaBufAllocator::system_uncached_supported() &&
348 !(usage.expected & (C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE)))
349 *heap_name = "system-uncached";
350 else
351 *heap_name = "system";
352 *flags = 0;
353 res = C2_NO_INIT;
354 }
355 // add usage to cache
356 MapperValue value = std::make_tuple(*heap_name, *flags, res);
357 mUsageMapperLru.emplace_front(key, value);
358 mUsageMapperCache.emplace(std::make_pair(key, mUsageMapperLru.begin()));
359 if (mUsageMapperCache.size() > USAGE_LRU_CACHE_SIZE) {
360 // remove LRU entry
361 MapperKey lruKey = mUsageMapperLru.front().first;
362 mUsageMapperCache.erase(lruKey);
363 mUsageMapperLru.pop_back();
364 }
365 } else {
366 // move entry to MRU
367 mUsageMapperLru.splice(mUsageMapperLru.begin(), mUsageMapperLru, entry->second);
368 const MapperValue& value = entry->second->second;
369 std::tie(*heap_name, *flags, res) = value;
370 }
371 return res;
372 }
373
// Allocates a new dmabuf-backed linear allocation of |capacity| bytes, plus
// optional padding configured via the media.c2.dmabuf.padding sysprop. The
// padding is hidden from the client: the advertised capacity excludes it.
c2_status_t C2DmaBufAllocator::newLinearAllocation(
        uint32_t capacity, C2MemoryUsage usage, std::shared_ptr<C2LinearAllocation>* allocation) {
    if (allocation == nullptr) {
        return C2_BAD_VALUE;
    }

    allocation->reset();
    if (mInit != C2_OK) {
        return mInit;
    }

    // translate usage into a dmabuf heap name + allocation flags
    C2String heap_name;
    unsigned flags = 0;
    c2_status_t ret = mapUsage(usage, capacity, &heap_name, &flags);
    // C2_NO_INIT only signals that the built-in default mapping was used
    if (ret && ret != C2_NO_INIT) {
        return ret;
    }

    // TODO: should we pad before mapping usage?

    // NOTE: read this property directly from the property as this code has to run on
    // Android Q, but the sysprop was only introduced in Android S.
    static size_t sPadding =
            base::GetUintProperty("media.c2.dmabuf.padding", (uint32_t)0, MAX_PADDING);
    if (sPadding > SIZE_MAX - capacity) {
        // size would overflow
        ALOGD("dmabuf_alloc: size #%x cannot accommodate padding #%zx", capacity, sPadding);
        return C2_NO_MEMORY;
    }

    size_t allocSize = (size_t)capacity + sPadding;
    // TODO: should we align allocation size to mBlockSize to reflect the true allocation size?
    std::shared_ptr<C2DmaBufAllocation> alloc = std::make_shared<C2DmaBufAllocation>(
            mBufferAllocator, allocSize, allocSize - sPadding, heap_name, flags, getId());
    ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
    }
    return ret;
}
414
// Recreates an allocation from a previously flattened handle. On success this
// takes ownership of |handle| and deletes its container — the fd itself lives
// on inside the new allocation.
c2_status_t C2DmaBufAllocator::priorLinearAllocation(
        const C2Handle* handle, std::shared_ptr<C2LinearAllocation>* allocation) {
    *allocation = nullptr;
    if (mInit != C2_OK) {
        return mInit;
    }

    if (!C2HandleBuf::IsValid(handle)) {
        return C2_BAD_VALUE;
    }

    // TODO: get capacity and validate it
    const C2HandleBuf* h = static_cast<const C2HandleBuf*>(handle);
    std::shared_ptr<C2DmaBufAllocation> alloc =
            std::make_shared<C2DmaBufAllocation>(h->size(), h->bufferFd(), getId());
    c2_status_t ret = alloc->status();
    if (ret == C2_OK) {
        *allocation = alloc;
        // the handle container is no longer needed; the allocation now owns the fd
        native_handle_delete(
                const_cast<native_handle_t*>(reinterpret_cast<const native_handle_t*>(handle)));
    }
    return ret;
}
438
439 // static
CheckHandle(const C2Handle * const o)440 bool C2DmaBufAllocator::CheckHandle(const C2Handle* const o) {
441 return C2HandleBuf::IsValid(o);
442 }
443
444 } // namespace android
445