1 #include <base/logging.h>
2 #include <binder/Parcel.h>
3 #include <private/dvr/buffer_hub_client.h>
4 #include <private/dvr/buffer_hub_queue_client.h>
5
6 #include <gtest/gtest.h>
7 #include <poll.h>
8 #include <sys/eventfd.h>
9
10 #include <vector>
11
12 // Enable/disable debug logging.
13 #define TRACE 0
14
15 namespace android {
16 namespace dvr {
17
18 using pdx::LocalChannelHandle;
19 using pdx::LocalHandle;
20
21 namespace {
22
// Default buffer parameters shared by all tests below.
constexpr uint32_t kBufferWidth = 100;
constexpr uint32_t kBufferHeight = 1;
constexpr uint32_t kBufferLayerCount = 1;
constexpr uint32_t kBufferFormat = HAL_PIXEL_FORMAT_BLOB;
constexpr uint64_t kBufferUsage = GRALLOC_USAGE_SW_READ_RARELY;
// Dequeue/poll timeouts, in milliseconds. kNoTimeout makes calls non-blocking.
constexpr int kTimeoutMs = 100;
constexpr int kNoTimeout = 0;
30
31 class BufferHubQueueTest : public ::testing::Test {
32 public:
CreateProducerQueue(const ProducerQueueConfig & config,const UsagePolicy & usage)33 bool CreateProducerQueue(const ProducerQueueConfig& config,
34 const UsagePolicy& usage) {
35 producer_queue_ = ProducerQueue::Create(config, usage);
36 return producer_queue_ != nullptr;
37 }
38
CreateConsumerQueue()39 bool CreateConsumerQueue() {
40 if (producer_queue_) {
41 consumer_queue_ = producer_queue_->CreateConsumerQueue();
42 return consumer_queue_ != nullptr;
43 } else {
44 return false;
45 }
46 }
47
CreateQueues(const ProducerQueueConfig & config,const UsagePolicy & usage)48 bool CreateQueues(const ProducerQueueConfig& config,
49 const UsagePolicy& usage) {
50 return CreateProducerQueue(config, usage) && CreateConsumerQueue();
51 }
52
AllocateBuffer(size_t * slot_out=nullptr)53 void AllocateBuffer(size_t* slot_out = nullptr) {
54 // Create producer buffer.
55 auto status = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
56 kBufferLayerCount,
57 kBufferFormat, kBufferUsage);
58
59 ASSERT_TRUE(status.ok());
60 size_t slot = status.take();
61 if (slot_out)
62 *slot_out = slot;
63 }
64
WaitAndHandleOnce(BufferHubQueue * queue,int timeout_ms)65 bool WaitAndHandleOnce(BufferHubQueue* queue, int timeout_ms) {
66 pollfd pfd{queue->queue_fd(), POLLIN, 0};
67 int ret;
68 do {
69 ret = poll(&pfd, 1, timeout_ms);
70 } while (ret == -1 && errno == EINTR);
71
72 if (ret < 0) {
73 ALOGW("Failed to poll queue %d's event fd, error: %s.", queue->id(),
74 strerror(errno));
75 return false;
76 } else if (ret == 0) {
77 return false;
78 }
79 return queue->HandleQueueEvents();
80 }
81
82 protected:
83 ProducerQueueConfigBuilder config_builder_;
84 std::unique_ptr<ProducerQueue> producer_queue_;
85 std::unique_ptr<ConsumerQueue> consumer_queue_;
86 };
87
// A single allocated buffer can be dequeued/posted/acquired/released
// repeatedly, cycling through the producer and consumer queues.
TEST_F(BufferHubQueueTest, TestDequeue) {
  const int64_t nb_dequeue_times = 16;

  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  // Allocate only one buffer.
  AllocateBuffer();

  // But dequeue multiple times.
  for (int64_t i = 0; i < nb_dequeue_times; i++) {
    size_t slot;
    LocalHandle fence;
    // Zero-initialize so PostAsync() never reads indeterminate fields
    // (e.g. user_metadata_size) from an uninitialized POD.
    DvrNativeBufferMetadata mi = {};
    DvrNativeBufferMetadata mo = {};

    // Producer gains a buffer.
    auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    EXPECT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(p1, nullptr);

    // Producer posts the buffer.
    mi.index = i;
    EXPECT_EQ(p1->PostAsync(&mi, LocalHandle()), 0);

    // Consumer acquires a buffer.
    auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    EXPECT_TRUE(c1_status.ok());
    auto c1 = c1_status.take();
    ASSERT_NE(c1, nullptr);
    // The index posted by the producer must round-trip to the consumer.
    EXPECT_EQ(mi.index, i);
    EXPECT_EQ(mo.index, i);

    // Consumer releases the buffer.
    EXPECT_EQ(c1->ReleaseAsync(&mi, LocalHandle()), 0);
  }
}
124
// Exercises incremental allocation/import and the post/acquire handshake,
// verifying count()/capacity() bookkeeping on both ends.
TEST_F(BufferHubQueueTest, TestProducerConsumer) {
  const size_t kBufferCount = 16;
  size_t slot;
  // Zero-initialize the metadata PODs so PostAsync() never reads
  // indeterminate fields (e.g. user_metadata_size).
  DvrNativeBufferMetadata mi = {};
  DvrNativeBufferMetadata mo = {};
  LocalHandle fence;

  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  for (size_t i = 0; i < kBufferCount; i++) {
    AllocateBuffer();

    // Producer queue has all the available buffers on initialize.
    ASSERT_EQ(producer_queue_->count(), i + 1);
    ASSERT_EQ(producer_queue_->capacity(), i + 1);

    // Consumer queue has no available buffer on initialize.
    ASSERT_EQ(consumer_queue_->count(), 0U);
    // Consumer queue does not import buffers until a dequeue is issued.
    ASSERT_EQ(consumer_queue_->capacity(), i);
    // Dequeue returns timeout since no buffer is ready for the consumer, but
    // this implicitly triggers buffer import and bumps up |capacity|.
    auto status = consumer_queue_->Dequeue(kNoTimeout, &slot, &mo, &fence);
    ASSERT_FALSE(status.ok());
    ASSERT_EQ(ETIMEDOUT, status.error());
    ASSERT_EQ(consumer_queue_->capacity(), i + 1);
  }

  // Use eventfd as a stand-in for a fence.
  LocalHandle post_fence(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));

  for (size_t i = 0; i < kBufferCount; i++) {
    // First time there is no buffer available to dequeue.
    auto consumer_status =
        consumer_queue_->Dequeue(kNoTimeout, &slot, &mo, &fence);
    ASSERT_FALSE(consumer_status.ok());
    ASSERT_EQ(consumer_status.error(), ETIMEDOUT);

    // Make sure the producer buffer is POSTED so that it's ready to acquire
    // in the consumer's Dequeue() call.
    auto producer_status =
        producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(producer_status.ok());
    auto producer = producer_status.take();
    ASSERT_NE(nullptr, producer);

    mi.index = static_cast<int64_t>(i);
    ASSERT_EQ(producer->PostAsync(&mi, post_fence), 0);

    // Second time, exactly the POSTED buffer should be dequeued.
    consumer_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(consumer_status.ok());
    EXPECT_TRUE(fence.IsValid());

    auto consumer = consumer_status.take();
    ASSERT_NE(nullptr, consumer);
    ASSERT_EQ(mi.index, mo.index);
  }
}
183
// Verifies RemoveBuffer() semantics: the producer queue shrinks immediately,
// while the consumer queue only notices once the removed buffer's last
// reference dies and the resulting hangup event is handled.
TEST_F(BufferHubQueueTest, TestRemoveBuffer) {
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));
  // Zero-initialize so no indeterminate POD fields are ever read.
  DvrNativeBufferMetadata mo = {};

  // Allocate buffers.
  const size_t kBufferCount = 4u;
  for (size_t i = 0; i < kBufferCount; i++) {
    AllocateBuffer();
  }
  ASSERT_EQ(kBufferCount, producer_queue_->count());
  ASSERT_EQ(kBufferCount, producer_queue_->capacity());

  consumer_queue_ = producer_queue_->CreateConsumerQueue();
  ASSERT_NE(nullptr, consumer_queue_);

  // Check that buffers are correctly imported on construction.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_EQ(0u, consumer_queue_->count());

  // Dequeue all the buffers and keep track of them in an array. This prevents
  // the producer queue ring buffer ref counts from interfering with the tests.
  struct Entry {
    std::shared_ptr<BufferProducer> buffer;
    LocalHandle fence;
    size_t slot;
  };
  std::array<Entry, kBufferCount> buffers;

  for (size_t i = 0; i < kBufferCount; i++) {
    Entry* entry = &buffers[i];
    auto producer_status = producer_queue_->Dequeue(
        kTimeoutMs, &entry->slot, &mo, &entry->fence);
    ASSERT_TRUE(producer_status.ok());
    entry->buffer = producer_status.take();
    ASSERT_NE(nullptr, entry->buffer);
  }

  // Remove a buffer and make sure both queues reflect the change.
  ASSERT_TRUE(producer_queue_->RemoveBuffer(buffers[0].slot));
  EXPECT_EQ(kBufferCount - 1, producer_queue_->capacity());

  // As long as the removed buffer is still alive the consumer queue won't know
  // it's gone.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Release the removed buffer.
  buffers[0].buffer = nullptr;

  // Now the consumer queue should know it's gone.
  EXPECT_FALSE(WaitAndHandleOnce(consumer_queue_.get(), kTimeoutMs));
  ASSERT_EQ(kBufferCount - 1, consumer_queue_->capacity());

  // Allocate a new buffer. This should take the first empty slot.
  size_t slot;
  AllocateBuffer(&slot);
  ALOGE_IF(TRACE, "ALLOCATE %zu", slot);
  EXPECT_EQ(buffers[0].slot, slot);
  EXPECT_EQ(kBufferCount, producer_queue_->capacity());

  // The consumer queue should pick up the new buffer.
  EXPECT_EQ(kBufferCount - 1, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Remove and allocate a buffer.
  ASSERT_TRUE(producer_queue_->RemoveBuffer(buffers[1].slot));
  EXPECT_EQ(kBufferCount - 1, producer_queue_->capacity());
  buffers[1].buffer = nullptr;

  AllocateBuffer(&slot);
  ALOGE_IF(TRACE, "ALLOCATE %zu", slot);
  EXPECT_EQ(buffers[1].slot, slot);
  EXPECT_EQ(kBufferCount, producer_queue_->capacity());

  // The consumer queue should pick up the new buffer but the count shouldn't
  // change.
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Remove and allocate a buffer, but don't free the buffer right away.
  ASSERT_TRUE(producer_queue_->RemoveBuffer(buffers[2].slot));
  EXPECT_EQ(kBufferCount - 1, producer_queue_->capacity());

  AllocateBuffer(&slot);
  ALOGE_IF(TRACE, "ALLOCATE %zu", slot);
  EXPECT_EQ(buffers[2].slot, slot);
  EXPECT_EQ(kBufferCount, producer_queue_->capacity());

  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());

  // Release the producer buffer to trigger a POLLHUP event for an already
  // removed buffer.
  buffers[2].buffer = nullptr;
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
  EXPECT_FALSE(consumer_queue_->HandleQueueEvents());
  EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
}
286
// Exercises multi-consumer behavior: a silent consumer queue that never
// imports buffers, and a regular consumer queue derived from it.
TEST_F(BufferHubQueueTest, TestMultipleConsumers) {
  // ProducerConfigureBuilder doesn't set Metadata{size}, which means there
  // is no metadata associated with this BufferQueue's buffer.
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));

  // Allocate buffers.
  const size_t kBufferCount = 4u;
  for (size_t i = 0; i < kBufferCount; i++) {
    AllocateBuffer();
  }
  ASSERT_EQ(kBufferCount, producer_queue_->count());

  // Build a silent consumer queue to test multi-consumer queue features.
  auto silent_queue = producer_queue_->CreateSilentConsumerQueue();
  ASSERT_NE(nullptr, silent_queue);

  // Check that silent queue doesn't import buffers on creation.
  EXPECT_EQ(silent_queue->capacity(), 0U);

  // Dequeue and post a buffer.
  size_t slot;
  LocalHandle fence;
  // Zero-initialize so PostAsync() never reads indeterminate POD fields.
  DvrNativeBufferMetadata mi = {};
  DvrNativeBufferMetadata mo = {};
  auto producer_status =
      producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  EXPECT_TRUE(producer_status.ok());
  auto producer_buffer = producer_status.take();
  ASSERT_NE(producer_buffer, nullptr);
  EXPECT_EQ(producer_buffer->PostAsync(&mi, {}), 0);
  // After post, check the number of remaining available buffers.
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);

  // Currently we expect no buffer to be available prior to calling
  // WaitForBuffers/HandleQueueEvents.
  // TODO(eieio): Note this behavior may change in the future.
  EXPECT_EQ(silent_queue->count(), 0U);
  EXPECT_FALSE(silent_queue->HandleQueueEvents());
  EXPECT_EQ(silent_queue->count(), 0U);

  // Build a new consumer queue to test multi-consumer queue features.
  consumer_queue_ = silent_queue->CreateConsumerQueue();
  ASSERT_NE(consumer_queue_, nullptr);

  // Check that buffers are correctly imported on construction.
  EXPECT_EQ(consumer_queue_->capacity(), kBufferCount);
  // Buffers are only imported, but their availability is not checked until
  // first call to Dequeue().
  EXPECT_EQ(consumer_queue_->count(), 0U);

  // Reclaim released/ignored buffers.
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);

  usleep(10000);
  WaitAndHandleOnce(producer_queue_.get(), kTimeoutMs);
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);

  // Post another buffer.
  producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  EXPECT_TRUE(producer_status.ok());
  producer_buffer = producer_status.take();
  ASSERT_NE(producer_buffer, nullptr);
  EXPECT_EQ(producer_buffer->PostAsync(&mi, {}), 0);

  // Verify that the consumer queue receives it.
  size_t consumer_queue_count = consumer_queue_->count();
  WaitAndHandleOnce(consumer_queue_.get(), kTimeoutMs);
  EXPECT_GT(consumer_queue_->count(), consumer_queue_count);

  // Save the current consumer queue buffer count to compare after the dequeue.
  consumer_queue_count = consumer_queue_->count();

  // Dequeue and acquire/release (discard) buffers on the consumer end.
  auto consumer_status =
      consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  EXPECT_TRUE(consumer_status.ok());
  auto consumer_buffer = consumer_status.take();
  ASSERT_NE(consumer_buffer, nullptr);
  consumer_buffer->Discard();

  // Buffer should be returned to the producer queue without being handled by
  // the silent consumer queue.
  EXPECT_LT(consumer_queue_->count(), consumer_queue_count);
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 2);

  WaitAndHandleOnce(producer_queue_.get(), kTimeoutMs);
  EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);
}
374
// Sample user-defined metadata payload carried alongside a buffer; sized to
// include padding-sensitive members (char + int32 + int64) so serialization
// of the full struct is exercised.
struct TestUserMetadata {
  char a;
  int32_t b;
  int64_t c;
};

// Byte size of the user metadata, as passed in
// DvrNativeBufferMetadata::user_metadata_size.
constexpr uint64_t kUserMetadataSize =
    static_cast<uint64_t>(sizeof(TestUserMetadata));
383
// User metadata posted by the producer must arrive intact on the consumer.
TEST_F(BufferHubQueueTest, TestUserMetadata) {
  ASSERT_TRUE(CreateQueues(
      config_builder_.SetMetadata<TestUserMetadata>().Build(), UsagePolicy{}));

  AllocateBuffer();

  std::vector<TestUserMetadata> user_metadata_list = {
      {'0', 0, 0}, {'1', 10, 3333}, {'@', 123, 1000000000}};

  for (auto user_metadata : user_metadata_list) {
    size_t slot;
    LocalHandle fence;
    // Zero-initialize so PostAsync()/ReleaseAsync() never read indeterminate
    // fields (e.g. index) from an uninitialized POD.
    DvrNativeBufferMetadata mi = {};
    DvrNativeBufferMetadata mo = {};

    auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    EXPECT_TRUE(p1_status.ok());
    auto p1 = p1_status.take();
    ASSERT_NE(p1, nullptr);

    // TODO(b/69469185): Test against metadata from consumer once we implement
    // release metadata properly.
    // EXPECT_EQ(mo.user_metadata_ptr, 0U);
    // EXPECT_EQ(mo.user_metadata_size, 0U);

    mi.user_metadata_size = kUserMetadataSize;
    mi.user_metadata_ptr = reinterpret_cast<uint64_t>(&user_metadata);
    EXPECT_EQ(p1->PostAsync(&mi, {}), 0);
    auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    EXPECT_TRUE(c1_status.ok());
    auto c1 = c1_status.take();
    ASSERT_NE(c1, nullptr);

    // The payload must round-trip unmodified.
    EXPECT_EQ(mo.user_metadata_size, kUserMetadataSize);
    auto out_user_metadata =
        reinterpret_cast<TestUserMetadata*>(mo.user_metadata_ptr);
    EXPECT_EQ(user_metadata.a, out_user_metadata->a);
    EXPECT_EQ(user_metadata.b, out_user_metadata->b);
    EXPECT_EQ(user_metadata.c, out_user_metadata->c);

    // On release, empty metadata is also legitimate.
    mi.user_metadata_size = 0U;
    mi.user_metadata_ptr = 0U;
    c1->ReleaseAsync(&mi, {});
  }
}
429
// Posting with a user metadata size different from the queue's configured
// size must fail with E2BIG and leave the producer buffer usable.
TEST_F(BufferHubQueueTest, TestUserMetadataMismatch) {
  ASSERT_TRUE(CreateQueues(
      config_builder_.SetMetadata<TestUserMetadata>().Build(), UsagePolicy{}));

  AllocateBuffer();

  TestUserMetadata user_metadata = {};
  size_t slot;
  LocalHandle fence;
  // Zero-initialize so PostAsync() never reads indeterminate POD fields.
  DvrNativeBufferMetadata mi = {};
  DvrNativeBufferMetadata mo = {};
  auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  EXPECT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(p1, nullptr);

  // Post with mismatched user metadata size will fail. But the producer buffer
  // itself should stay untouched.
  mi.user_metadata_ptr = reinterpret_cast<uint64_t>(&user_metadata);
  mi.user_metadata_size = kUserMetadataSize + 1;
  EXPECT_EQ(p1->PostAsync(&mi, {}), -E2BIG);
  // Post with the exact same user metadata size can succeed.
  mi.user_metadata_ptr = reinterpret_cast<uint64_t>(&user_metadata);
  mi.user_metadata_size = kUserMetadataSize;
  EXPECT_EQ(p1->PostAsync(&mi, {}), 0);
}
455
// A buffer re-enqueued on the producer side (not posted) must not become
// visible to the consumer: its Dequeue() should time out.
TEST_F(BufferHubQueueTest, TestEnqueue) {
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{}));
  AllocateBuffer();

  size_t slot;
  LocalHandle fence;
  DvrNativeBufferMetadata metadata_out;
  auto producer_status =
      producer_queue_->Dequeue(kTimeoutMs, &slot, &metadata_out, &fence);
  ASSERT_TRUE(producer_status.ok());
  auto producer_buffer = producer_status.take();
  ASSERT_NE(nullptr, producer_buffer);

  // Return the buffer to the producer queue without posting it.
  producer_queue_->Enqueue(producer_buffer, slot, 0ULL);
  auto consumer_status =
      consumer_queue_->Dequeue(kTimeoutMs, &slot, &metadata_out, &fence);
  ASSERT_FALSE(consumer_status.ok());
}
473
// Buffers can be allocated dynamically after the queue is exhausted, and
// slots seen by the consumer match the producer's.
TEST_F(BufferHubQueueTest, TestAllocateBuffer) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  size_t ps1;
  AllocateBuffer();
  LocalHandle fence;
  // Zero-initialize so PostAsync() never reads indeterminate POD fields.
  DvrNativeBufferMetadata mi = {};
  DvrNativeBufferMetadata mo = {};
  auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &ps1, &mo, &fence);
  ASSERT_TRUE(p1_status.ok());
  auto p1 = p1_status.take();
  ASSERT_NE(p1, nullptr);

  // Producer queue is exhausted.
  size_t ps2;
  auto p2_status = producer_queue_->Dequeue(kTimeoutMs, &ps2, &mo, &fence);
  ASSERT_FALSE(p2_status.ok());
  ASSERT_EQ(ETIMEDOUT, p2_status.error());

  // Dynamically add a buffer.
  AllocateBuffer();
  ASSERT_EQ(producer_queue_->count(), 1U);
  ASSERT_EQ(producer_queue_->capacity(), 2U);

  // Now we can dequeue again.
  p2_status = producer_queue_->Dequeue(kTimeoutMs, &ps2, &mo, &fence);
  ASSERT_TRUE(p2_status.ok());
  auto p2 = p2_status.take();
  ASSERT_NE(p2, nullptr);
  ASSERT_EQ(producer_queue_->count(), 0U);
  // p1 and p2 should have different slot numbers.
  ASSERT_NE(ps1, ps2);

  // Consumer queue does not import buffers until |Dequeue| or |ImportBuffers|
  // are called. So far consumer_queue_ should be empty.
  ASSERT_EQ(consumer_queue_->count(), 0U);

  int64_t seq = 1;
  mi.index = seq;
  ASSERT_EQ(p1->PostAsync(&mi, {}), 0);

  size_t cs1, cs2;
  auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &cs1, &mo, &fence);
  ASSERT_TRUE(c1_status.ok());
  auto c1 = c1_status.take();
  ASSERT_NE(c1, nullptr);
  ASSERT_EQ(consumer_queue_->count(), 0U);
  ASSERT_EQ(consumer_queue_->capacity(), 2U);
  ASSERT_EQ(cs1, ps1);

  ASSERT_EQ(p2->PostAsync(&mi, {}), 0);
  auto c2_status = consumer_queue_->Dequeue(kTimeoutMs, &cs2, &mo, &fence);
  ASSERT_TRUE(c2_status.ok());
  auto c2 = c2_status.take();
  ASSERT_NE(c2, nullptr);
  ASSERT_EQ(cs2, ps2);
}
530
// UsagePolicy's set mask must be OR-ed into the usage bits of every
// allocation, even when the caller leaves those bits out.
TEST_F(BufferHubQueueTest, TestUsageSetMask) {
  const uint32_t set_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(
      CreateQueues(config_builder_.Build(), UsagePolicy{set_mask, 0, 0, 0}));

  // Deliberately allocate with |set_mask| stripped from the usage bits.
  auto alloc_status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage & ~set_mask);
  ASSERT_TRUE(alloc_status.ok());

  size_t slot;
  LocalHandle fence;
  DvrNativeBufferMetadata metadata_out;
  auto dequeue_status =
      producer_queue_->Dequeue(kTimeoutMs, &slot, &metadata_out, &fence);
  ASSERT_TRUE(dequeue_status.ok());
  auto buffer = dequeue_status.take();
  // The policy must have forced |set_mask| on.
  ASSERT_EQ(buffer->usage() & set_mask, set_mask);
}
550
// UsagePolicy's clear mask must be stripped from the usage bits of every
// allocation, even when the caller asks for those bits.
TEST_F(BufferHubQueueTest, TestUsageClearMask) {
  const uint32_t clear_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(
      CreateQueues(config_builder_.Build(), UsagePolicy{0, clear_mask, 0, 0}));

  // Deliberately allocate with |clear_mask| added to the usage bits.
  auto alloc_status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage | clear_mask);
  ASSERT_TRUE(alloc_status.ok());

  size_t slot;
  LocalHandle fence;
  DvrNativeBufferMetadata metadata_out;
  auto dequeue_status =
      producer_queue_->Dequeue(kTimeoutMs, &slot, &metadata_out, &fence);
  ASSERT_TRUE(dequeue_status.ok());
  auto buffer = dequeue_status.take();
  // The policy must have forced |clear_mask| off.
  ASSERT_EQ(buffer->usage() & clear_mask, 0U);
}
570
// UsagePolicy's deny-set mask must reject any allocation that requests the
// forbidden bits, while allocations without them still succeed.
TEST_F(BufferHubQueueTest, TestUsageDenySetMask) {
  const uint32_t deny_set_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{0, 0, deny_set_mask, 0}));

  // Since |deny_set_mask| bits are illegal, an allocation without them
  // should succeed.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage & ~deny_set_mask);
  ASSERT_TRUE(status.ok());

  // Whereas an allocation that requests them must fail with EINVAL.
  status = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                           kBufferLayerCount, kBufferFormat,
                                           kBufferUsage | deny_set_mask);
  ASSERT_FALSE(status.ok());
  ASSERT_EQ(EINVAL, status.error());
}
590
// UsagePolicy's deny-clear mask makes certain bits mandatory: allocations
// that include them succeed, allocations that omit them fail.
TEST_F(BufferHubQueueTest, TestUsageDenyClearMask) {
  const uint32_t deny_clear_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
                           UsagePolicy{0, 0, 0, deny_clear_mask}));

  // Clearing |deny_clear_mask| is illegal (i.e. setting these bits is
  // mandatory), so an allocation that includes them should succeed.
  auto status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage | deny_clear_mask);
  ASSERT_TRUE(status.ok());

  // Whereas an allocation without them must fail with EINVAL.
  status = producer_queue_->AllocateBuffer(kBufferWidth, kBufferHeight,
                                           kBufferLayerCount, kBufferFormat,
                                           kBufferUsage & ~deny_clear_mask);
  ASSERT_FALSE(status.ok());
  ASSERT_EQ(EINVAL, status.error());
}
610
// Queue configuration (defaults and async flag) must be visible identically
// on both the producer and consumer sides.
TEST_F(BufferHubQueueTest, TestQueueInfo) {
  static const bool kIsAsync = true;
  ASSERT_TRUE(CreateQueues(config_builder_.SetIsAsync(kIsAsync)
                               .SetDefaultWidth(kBufferWidth)
                               .SetDefaultHeight(kBufferHeight)
                               .SetDefaultFormat(kBufferFormat)
                               .Build(),
                           UsagePolicy{}));

  // Producer side reports the configured values.
  EXPECT_EQ(producer_queue_->default_width(), kBufferWidth);
  EXPECT_EQ(producer_queue_->default_height(), kBufferHeight);
  EXPECT_EQ(producer_queue_->default_format(), kBufferFormat);
  EXPECT_EQ(producer_queue_->is_async(), kIsAsync);

  // And so does the consumer side.
  EXPECT_EQ(consumer_queue_->default_width(), kBufferWidth);
  EXPECT_EQ(consumer_queue_->default_height(), kBufferHeight);
  EXPECT_EQ(consumer_queue_->default_format(), kBufferFormat);
  EXPECT_EQ(consumer_queue_->is_async(), kIsAsync);
}
630
// FreeAllBuffers() must succeed regardless of the buffers' states:
// available, dequeued, posted, or acquired.
TEST_F(BufferHubQueueTest, TestFreeAllBuffers) {
  constexpr size_t kBufferCount = 2;

#define CHECK_NO_BUFFER_THEN_ALLOCATE(num_buffers)  \
  EXPECT_EQ(consumer_queue_->count(), 0U);          \
  EXPECT_EQ(consumer_queue_->capacity(), 0U);       \
  EXPECT_EQ(producer_queue_->count(), 0U);          \
  EXPECT_EQ(producer_queue_->capacity(), 0U);       \
  for (size_t i = 0; i < num_buffers; i++) {        \
    AllocateBuffer();                               \
  }                                                 \
  EXPECT_EQ(producer_queue_->count(), num_buffers); \
  EXPECT_EQ(producer_queue_->capacity(), num_buffers);

  size_t slot;
  LocalHandle fence;
  pdx::Status<void> status;
  pdx::Status<std::shared_ptr<BufferConsumer>> consumer_status;
  pdx::Status<std::shared_ptr<BufferProducer>> producer_status;
  std::shared_ptr<BufferConsumer> consumer_buffer;
  std::shared_ptr<BufferProducer> producer_buffer;
  // Zero-initialize so PostAsync() never reads indeterminate POD fields.
  DvrNativeBufferMetadata mi = {};
  DvrNativeBufferMetadata mo = {};

  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  // Free all buffers when buffers are available for dequeue.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when one buffer is dequeued.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_TRUE(producer_status.ok());
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when all buffers are dequeued.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  for (size_t i = 0; i < kBufferCount; i++) {
    producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(producer_status.ok());
  }
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when one buffer is posted.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
  ASSERT_TRUE(producer_status.ok());
  producer_buffer = producer_status.take();
  ASSERT_NE(nullptr, producer_buffer);
  ASSERT_EQ(0, producer_buffer->PostAsync(&mi, fence));
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when all buffers are posted.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  for (size_t i = 0; i < kBufferCount; i++) {
    producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(producer_status.ok());
    producer_buffer = producer_status.take();
    ASSERT_NE(producer_buffer, nullptr);
    ASSERT_EQ(producer_buffer->PostAsync(&mi, fence), 0);
  }
  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // Free all buffers when all buffers are acquired.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
  for (size_t i = 0; i < kBufferCount; i++) {
    producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(producer_status.ok());
    producer_buffer = producer_status.take();
    ASSERT_NE(producer_buffer, nullptr);
    ASSERT_EQ(producer_buffer->PostAsync(&mi, fence), 0);
    consumer_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
    ASSERT_TRUE(consumer_status.ok());
  }

  status = producer_queue_->FreeAllBuffers();
  EXPECT_TRUE(status.ok());

  // In addition to FreeAllBuffers() from the queue, it is also required to
  // delete all references to the ProducerBuffer (i.e. the PDX client).
  producer_buffer = nullptr;

  // Crank consumer queue events to pick up EPOLLHUP events on the queue.
  consumer_queue_->HandleQueueEvents();

  // One last check.
  CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);

#undef CHECK_NO_BUFFER_THEN_ALLOCATE
}
726
// A producer queue that still holds buffers cannot be exported as a
// parcelable.
TEST_F(BufferHubQueueTest, TestProducerToParcelableNotEmpty) {
  ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<uint64_t>().Build(),
                           UsagePolicy{}));

  // Put exactly one buffer in the queue.
  AllocateBuffer();

  // Export must be rejected because the queue is not empty.
  auto export_status = producer_queue_->TakeAsParcelable();
  EXPECT_FALSE(export_status.ok());
}
738
// Round-trips an empty producer queue through a Parcel and verifies the
// re-imported queue is fully functional (allocate, post, consume).
TEST_F(BufferHubQueueTest, TestProducerExportToParcelable) {
  ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));

  auto take_status = producer_queue_->TakeAsParcelable();
  EXPECT_TRUE(take_status.ok());

  ProducerQueueParcelable output_parcelable = take_status.take();
  EXPECT_TRUE(output_parcelable.IsValid());

  Parcel parcel;
  status_t res;
  res = output_parcelable.writeToParcel(&parcel);
  EXPECT_EQ(res, NO_ERROR);

  // After being written into the parcel, the output_parcelable is still valid
  // and keeps the producer channel alive.
  EXPECT_TRUE(output_parcelable.IsValid());

  // Creating a producer buffer should fail while the queue is taken.
  auto alloc_status = producer_queue_->AllocateBuffer(
      kBufferWidth, kBufferHeight, kBufferLayerCount, kBufferFormat,
      kBufferUsage);
  ASSERT_FALSE(alloc_status.ok());

  // Reset the data position so that we can read back from the same parcel
  // without doing actual Binder IPC.
  parcel.setDataPosition(0);
  producer_queue_ = nullptr;

  // Recreate the producer queue from the parcel.
  ProducerQueueParcelable input_parcelable;
  EXPECT_FALSE(input_parcelable.IsValid());

  res = input_parcelable.readFromParcel(&parcel);
  EXPECT_EQ(res, NO_ERROR);
  EXPECT_TRUE(input_parcelable.IsValid());

  EXPECT_EQ(producer_queue_, nullptr);
  producer_queue_ = ProducerQueue::Import(input_parcelable.TakeChannelHandle());
  EXPECT_FALSE(input_parcelable.IsValid());
  ASSERT_NE(producer_queue_, nullptr);

  // The queue recreated from the parcel can allocate a buffer and post it
  // to the consumer.
  EXPECT_NO_FATAL_FAILURE(AllocateBuffer());
  EXPECT_EQ(producer_queue_->count(), 1U);
  EXPECT_EQ(producer_queue_->capacity(), 1U);

  size_t slot;
  DvrNativeBufferMetadata producer_meta;
  DvrNativeBufferMetadata consumer_meta;
  LocalHandle fence;
  auto dequeue_status =
      producer_queue_->Dequeue(0, &slot, &producer_meta, &fence);
  EXPECT_TRUE(dequeue_status.ok());

  std::shared_ptr<BufferProducer> producer_buffer = dequeue_status.take();
  ASSERT_NE(producer_buffer, nullptr);

  producer_meta.timestamp = 42;
  EXPECT_EQ(producer_buffer->PostAsync(&producer_meta, LocalHandle()), 0);

  // Make sure the buffer can be dequeued from the consumer side.
  auto consumer_status =
      consumer_queue_->Dequeue(kTimeoutMs, &slot, &consumer_meta, &fence);
  EXPECT_TRUE(consumer_status.ok());
  EXPECT_EQ(consumer_queue_->capacity(), 1U);

  auto consumer = consumer_status.take();
  ASSERT_NE(consumer, nullptr);
  EXPECT_EQ(producer_meta.timestamp, consumer_meta.timestamp);
}
809
// Exports a consumer queue as a parcelable, verifies type-safety on read
// (a consumer parcel must not import as a producer), then re-imports it and
// checks end-to-end post/dequeue.
TEST_F(BufferHubQueueTest, TestCreateConsumerParcelable) {
  ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));

  auto s1 = producer_queue_->CreateConsumerQueueParcelable();
  EXPECT_TRUE(s1.ok());
  ConsumerQueueParcelable output_parcelable = s1.take();
  EXPECT_TRUE(output_parcelable.IsValid());

  // Write to a new Parcel object.
  Parcel parcel;
  status_t res;
  res = output_parcelable.writeToParcel(&parcel);
  // Check the write result; the sibling producer-export test does the same.
  EXPECT_EQ(res, NO_ERROR);

  // Reset the data position so that we can read back from the same parcel
  // without doing actual Binder IPC.
  parcel.setDataPosition(0);

  // No consumer queue created yet.
  EXPECT_EQ(consumer_queue_, nullptr);

  // If the parcel contains a consumer queue, reading it into a
  // ProducerQueueParcelable should fail.
  ProducerQueueParcelable wrongly_typed_parcelable;
  EXPECT_FALSE(wrongly_typed_parcelable.IsValid());
  res = wrongly_typed_parcelable.readFromParcel(&parcel);
  EXPECT_EQ(res, -EINVAL);
  parcel.setDataPosition(0);

  // Create the consumer queue from the parcel.
  ConsumerQueueParcelable input_parcelable;
  EXPECT_FALSE(input_parcelable.IsValid());

  res = input_parcelable.readFromParcel(&parcel);
  EXPECT_EQ(res, NO_ERROR);
  EXPECT_TRUE(input_parcelable.IsValid());

  consumer_queue_ = ConsumerQueue::Import(input_parcelable.TakeChannelHandle());
  EXPECT_FALSE(input_parcelable.IsValid());
  ASSERT_NE(consumer_queue_, nullptr);

  EXPECT_NO_FATAL_FAILURE(AllocateBuffer());
  EXPECT_EQ(producer_queue_->count(), 1U);
  EXPECT_EQ(producer_queue_->capacity(), 1U);

  size_t slot;
  DvrNativeBufferMetadata producer_meta;
  DvrNativeBufferMetadata consumer_meta;
  LocalHandle fence;
  auto s2 = producer_queue_->Dequeue(0, &slot, &producer_meta, &fence);
  EXPECT_TRUE(s2.ok());

  std::shared_ptr<BufferProducer> p1 = s2.take();
  ASSERT_NE(p1, nullptr);

  producer_meta.timestamp = 42;
  EXPECT_EQ(p1->PostAsync(&producer_meta, LocalHandle()), 0);

  // Make sure the buffer can be dequeued from the consumer side.
  auto s3 = consumer_queue_->Dequeue(kTimeoutMs, &slot, &consumer_meta, &fence);
  EXPECT_TRUE(s3.ok());
  EXPECT_EQ(consumer_queue_->capacity(), 1U);

  auto consumer = s3.take();
  ASSERT_NE(consumer, nullptr);
  EXPECT_EQ(producer_meta.timestamp, consumer_meta.timestamp);
}
876
877 } // namespace
878
879 } // namespace dvr
880 } // namespace android
881