1 /*
2  * Copyright (C) 2018 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "Camera3-DepthCompositeStream"
18 #define ATRACE_TAG ATRACE_TAG_CAMERA
19 //#define LOG_NDEBUG 0
20 
21 #include "api1/client2/JpegProcessor.h"
22 #include "common/CameraProviderManager.h"
23 #include <gui/Surface.h>
24 #include <utils/Log.h>
25 #include <utils/Trace.h>
26 
27 #include "DepthCompositeStream.h"
28 
29 namespace android {
30 namespace camera3 {
31 
// Constructs the composite stream and caches the static camera
// characteristics that depth photo composition relies on: maximum jpeg
// size, lens intrinsic calibration, lens distortion coefficients, whether
// the device is a logical multi-camera, and the supported depth map sizes.
DepthCompositeStream::DepthCompositeStream(sp<CameraDeviceBase> device,
        wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
        CompositeStream(device, cb),
        mBlobStreamId(-1),
        mBlobSurfaceId(-1),
        mDepthStreamId(-1),
        mDepthSurfaceId(-1),
        mBlobWidth(0),
        mBlobHeight(0),
        mDepthBufferAcquired(false),
        mBlobBufferAcquired(false),
        mProducerListener(new ProducerListener()),
        mMaxJpegSize(-1),
        mIsLogicalCamera(false) {
    if (device != nullptr) {
        CameraMetadata staticInfo = device->info();
        auto entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
        if (entry.count > 0) {
            mMaxJpegSize = entry.data.i32[0];
        } else {
            ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
        }

        // Intrinsic calibration layout:
        // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
        entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
        if (entry.count == 5) {
            mIntrinsicCalibration.reserve(5);
            mIntrinsicCalibration.insert(mIntrinsicCalibration.end(), entry.data.f,
                    entry.data.f + 5);
        } else {
            ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
        }

        // Lens distortion correction coefficients [kappa_1 .. kappa_5].
        entry = staticInfo.find(ANDROID_LENS_DISTORTION);
        if (entry.count == 5) {
            mLensDistortion.reserve(5);
            mLensDistortion.insert(mLensDistortion.end(), entry.data.f, entry.data.f + 5);
        } else {
            ALOGW("%s: Lens distortion absent from camera characteristics!", __FUNCTION__);
        }

        // Logical multi-camera devices get flagged so the depth photo can be
        // tagged accordingly during processing.
        entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
        for (size_t i = 0; i < entry.count; ++i) {
            uint8_t capability = entry.data.u8[i];
            if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
                mIsLogicalCamera = true;
                break;
            }
        }

        getSupportedDepthSizes(staticInfo, &mSupportedDepthSizes);
    }
}
84 
~DepthCompositeStream()85 DepthCompositeStream::~DepthCompositeStream() {
86     mBlobConsumer.clear(),
87     mBlobSurface.clear(),
88     mBlobStreamId = -1;
89     mBlobSurfaceId = -1;
90     mDepthConsumer.clear();
91     mDepthSurface.clear();
92     mDepthConsumer = nullptr;
93     mDepthSurface = nullptr;
94 }
95 
// Moves all newly arrived data into 'mPendingInputFrames', keyed by sensor
// timestamp: locks at most one jpeg and one depth buffer from the internal
// CPU consumers, matches capture results and frame numbers to their frames,
// and flags frames that were reported as failed. Caller must hold 'mMutex'.
void DepthCompositeStream::compilePendingInputLocked() {
    CpuConsumer::LockedBuffer imgBuffer;

    // Acquire at most one jpeg buffer at a time; 'mBlobBufferAcquired'
    // guards the consumer's single locked-buffer budget.
    while (!mInputJpegBuffers.empty() && !mBlobBufferAcquired) {
        auto it = mInputJpegBuffers.begin();
        auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputJpegBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting jpeg buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        // Buffers of frames that are already marked as failed are returned
        // to the consumer right away.
        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mBlobConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
            mBlobBufferAcquired = true;
        }
        mInputJpegBuffers.erase(it);
    }

    // Same acquisition logic for the depth map buffers.
    while (!mInputDepthBuffers.empty() && !mDepthBufferAcquired) {
        auto it = mInputDepthBuffers.begin();
        auto res = mDepthConsumer->lockNextBuffer(&imgBuffer);
        if (res == NOT_ENOUGH_DATA) {
            // Can not lock any more buffers.
            break;
        } else if (res != OK) {
            ALOGE("%s: Error receiving depth image buffer: %s (%d)", __FUNCTION__,
                    strerror(-res), res);
            mPendingInputFrames[*it].error = true;
            mInputDepthBuffers.erase(it);
            continue;
        }

        if (*it != imgBuffer.timestamp) {
            ALOGW("%s: Expecting depth buffer with time stamp: %" PRId64 " received buffer with "
                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
        }

        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
                (mPendingInputFrames[imgBuffer.timestamp].error)) {
            mDepthConsumer->unlockBuffer(imgBuffer);
        } else {
            mPendingInputFrames[imgBuffer.timestamp].depthBuffer = imgBuffer;
            mDepthBufferAcquired = true;
        }
        mInputDepthBuffers.erase(it);
    }

    // Attach the pending capture results (frame number + metadata).
    while (!mCaptureResults.empty()) {
        auto it = mCaptureResults.begin();
        // Negative timestamp indicates that something went wrong during the capture result
        // collection process.
        if (it->first >= 0) {
            mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
            mPendingInputFrames[it->first].result = std::get<1>(it->second);
        }
        mCaptureResults.erase(it);
    }

    // Distribute the shutter frame numbers (keyed by timestamp) to their
    // pending frames.
    while (!mFrameNumberMap.empty()) {
        auto it = mFrameNumberMap.begin();
        mPendingInputFrames[it->second].frameNumber = it->first;
        mFrameNumberMap.erase(it);
    }

    // Flag pending frames reported as failed; error frame numbers that
    // cannot be matched yet are kept for a later attempt.
    auto it = mErrorFrameNumbers.begin();
    while (it != mErrorFrameNumbers.end()) {
        bool frameFound = false;
        for (auto &inputFrame : mPendingInputFrames) {
            if (inputFrame.second.frameNumber == *it) {
                inputFrame.second.error = true;
                frameFound = true;
                break;
            }
        }

        if (frameFound) {
            it = mErrorFrameNumbers.erase(it);
        } else {
            ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
                    *it);
            it++;
        }
    }
}
194 
getNextReadyInputLocked(int64_t * currentTs)195 bool DepthCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) {
196     if (currentTs == nullptr) {
197         return false;
198     }
199 
200     bool newInputAvailable = false;
201     for (const auto& it : mPendingInputFrames) {
202         if ((!it.second.error) && (it.second.depthBuffer.data != nullptr) &&
203                 (it.second.jpegBuffer.data != nullptr) && (it.first < *currentTs)) {
204             *currentTs = it.first;
205             newInputAvailable = true;
206         }
207     }
208 
209     return newInputAvailable;
210 }
211 
getNextFailingInputLocked(int64_t * currentTs)212 int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) {
213     int64_t ret = -1;
214     if (currentTs == nullptr) {
215         return ret;
216     }
217 
218     for (const auto& it : mPendingInputFrames) {
219         if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
220             *currentTs = it.first;
221             ret = it.second.frameNumber;
222         }
223     }
224 
225     return ret;
226 }
227 
processInputFrame(nsecs_t ts,const InputFrame & inputFrame)228 status_t DepthCompositeStream::processInputFrame(nsecs_t ts, const InputFrame &inputFrame) {
229     status_t res;
230     sp<ANativeWindow> outputANW = mOutputSurface;
231     ANativeWindowBuffer *anb;
232     int fenceFd;
233     void *dstBuffer;
234 
235     auto jpegSize = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
236             inputFrame.jpegBuffer.width);
237     if (jpegSize == 0) {
238         ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!", __FUNCTION__);
239         jpegSize = inputFrame.jpegBuffer.width;
240     }
241 
242     size_t maxDepthJpegSize;
243     if (mMaxJpegSize > 0) {
244         maxDepthJpegSize = mMaxJpegSize;
245     } else {
246         maxDepthJpegSize = std::max<size_t> (jpegSize,
247                 inputFrame.depthBuffer.width * inputFrame.depthBuffer.height * 3 / 2);
248     }
249     uint8_t jpegQuality = 100;
250     auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
251     if (entry.count > 0) {
252         jpegQuality = entry.data.u8[0];
253     }
254 
255     // The final depth photo will consist of the main jpeg buffer, the depth map buffer (also in
256     // jpeg format) and confidence map (jpeg as well). Assume worst case that all 3 jpeg need
257     // max jpeg size.
258     size_t finalJpegBufferSize = maxDepthJpegSize * 3;
259 
260     if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), finalJpegBufferSize, 1))
261             != OK) {
262         ALOGE("%s: Unable to configure stream buffer dimensions"
263                 " %zux%u for stream %d", __FUNCTION__, finalJpegBufferSize, 1U, mBlobStreamId);
264         return res;
265     }
266 
267     res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
268     if (res != OK) {
269         ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
270                 res);
271         return res;
272     }
273 
274     sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
275     res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, fenceFd);
276     if (res != OK) {
277         ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
278                 strerror(-res), res);
279         outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
280         return res;
281     }
282 
283     if ((gb->getWidth() < finalJpegBufferSize) || (gb->getHeight() != 1)) {
284         ALOGE("%s: Blob buffer size mismatch, expected %dx%d received %zux%u", __FUNCTION__,
285                 gb->getWidth(), gb->getHeight(), finalJpegBufferSize, 1U);
286         outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
287         return BAD_VALUE;
288     }
289 
290     DepthPhotoInputFrame depthPhoto;
291     depthPhoto.mMainJpegBuffer = reinterpret_cast<const char*> (inputFrame.jpegBuffer.data);
292     depthPhoto.mMainJpegWidth = mBlobWidth;
293     depthPhoto.mMainJpegHeight = mBlobHeight;
294     depthPhoto.mMainJpegSize = jpegSize;
295     depthPhoto.mDepthMapBuffer = reinterpret_cast<uint16_t*> (inputFrame.depthBuffer.data);
296     depthPhoto.mDepthMapWidth = inputFrame.depthBuffer.width;
297     depthPhoto.mDepthMapHeight = inputFrame.depthBuffer.height;
298     depthPhoto.mDepthMapStride = inputFrame.depthBuffer.stride;
299     depthPhoto.mJpegQuality = jpegQuality;
300     depthPhoto.mIsLogical = mIsLogicalCamera;
301     depthPhoto.mMaxJpegSize = maxDepthJpegSize;
302     // The camera intrinsic calibration layout is as follows:
303     // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
304     if (mIntrinsicCalibration.size() == 5) {
305         memcpy(depthPhoto.mIntrinsicCalibration, mIntrinsicCalibration.data(),
306                 sizeof(depthPhoto.mIntrinsicCalibration));
307         depthPhoto.mIsIntrinsicCalibrationValid = 1;
308     } else {
309         depthPhoto.mIsIntrinsicCalibrationValid = 0;
310     }
311     // The camera lens distortion contains the following lens correction coefficients.
312     // [kappa_1, kappa_2, kappa_3 kappa_4, kappa_5]
313     if (mLensDistortion.size() == 5) {
314         memcpy(depthPhoto.mLensDistortion, mLensDistortion.data(),
315                 sizeof(depthPhoto.mLensDistortion));
316         depthPhoto.mIsLensDistortionValid = 1;
317     } else {
318         depthPhoto.mIsLensDistortionValid = 0;
319     }
320     entry = inputFrame.result.find(ANDROID_JPEG_ORIENTATION);
321     if (entry.count > 0) {
322         // The camera jpeg orientation values must be within [0, 90, 180, 270].
323         switch (entry.data.i32[0]) {
324             case 0:
325             case 90:
326             case 180:
327             case 270:
328                 depthPhoto.mOrientation = static_cast<DepthPhotoOrientation> (entry.data.i32[0]);
329                 break;
330             default:
331                 ALOGE("%s: Unexpected jpeg orientation value: %d, default to 0 degrees",
332                         __FUNCTION__, entry.data.i32[0]);
333         }
334     }
335 
336     size_t actualJpegSize = 0;
337     res = processDepthPhotoFrame(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
338     if (res != 0) {
339         ALOGE("%s: Depth photo processing failed: %s (%d)", __FUNCTION__, strerror(-res), res);
340         outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
341         return res;
342     }
343 
344     size_t finalJpegSize = actualJpegSize + sizeof(struct camera3_jpeg_blob);
345     if (finalJpegSize > finalJpegBufferSize) {
346         ALOGE("%s: Final jpeg buffer not large enough for the jpeg blob header", __FUNCTION__);
347         outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
348         return NO_MEMORY;
349     }
350 
351     res = native_window_set_buffers_timestamp(mOutputSurface.get(), ts);
352     if (res != OK) {
353         ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)", __FUNCTION__,
354                 getStreamId(), strerror(-res), res);
355         return res;
356     }
357 
358     ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
359     uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
360         (gb->getWidth() - sizeof(struct camera3_jpeg_blob));
361     struct camera3_jpeg_blob *blob = reinterpret_cast<struct camera3_jpeg_blob*> (header);
362     blob->jpeg_blob_id = CAMERA3_JPEG_BLOB_ID;
363     blob->jpeg_size = actualJpegSize;
364     outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
365 
366     return res;
367 }
368 
releaseInputFrameLocked(InputFrame * inputFrame)369 void DepthCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
370     if (inputFrame == nullptr) {
371         return;
372     }
373 
374     if (inputFrame->depthBuffer.data != nullptr) {
375         mDepthConsumer->unlockBuffer(inputFrame->depthBuffer);
376         inputFrame->depthBuffer.data = nullptr;
377         mDepthBufferAcquired = false;
378     }
379 
380     if (inputFrame->jpegBuffer.data != nullptr) {
381         mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer);
382         inputFrame->jpegBuffer.data = nullptr;
383         mBlobBufferAcquired = false;
384     }
385 
386     if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
387         //TODO: Figure out correct requestId
388         notifyError(inputFrame->frameNumber, -1 /*requestId*/);
389         inputFrame->errorNotified = true;
390     }
391 }
392 
releaseInputFramesLocked(int64_t currentTs)393 void DepthCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
394     auto it = mPendingInputFrames.begin();
395     while (it != mPendingInputFrames.end()) {
396         if (it->first <= currentTs) {
397             releaseInputFrameLocked(&it->second);
398             it = mPendingInputFrames.erase(it);
399         } else {
400             it++;
401         }
402     }
403 }
404 
// Main processing loop. Waits (under 'mMutex') until a frame has both input
// buffers, composes the depth photo outside the lock, then releases all
// pending frames up to and including the processed timestamp. Returning
// false stops the thread.
bool DepthCompositeStream::threadLoop() {
    int64_t currentTs = INT64_MAX;
    bool newInputAvailable = false;

    {
        Mutex::Autolock l(mMutex);

        if (mErrorState) {
            // In case we landed in error state, return any pending buffers and
            // halt all further processing.
            compilePendingInputLocked();
            releaseInputFramesLocked(currentTs);
            return false;
        }

        while (!newInputAvailable) {
            compilePendingInputLocked();
            newInputAvailable = getNextReadyInputLocked(&currentTs);
            if (!newInputAvailable) {
                auto failingFrameNumber = getNextFailingInputLocked(&currentTs);
                if (failingFrameNumber >= 0) {
                    // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
                    // possible for two internal stream buffers to fail. In such scenario the
                    // composite stream should notify the client about a stream buffer error only
                    // once and this information is kept within 'errorNotified'.
                    // Any present failed input frames will be removed on a subsequent call to
                    // 'releaseInputFramesLocked()'.
                    releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
                    currentTs = INT64_MAX;
                }

                // Wait for onFrameAvailable()/result notifications; timeouts
                // are benign and simply trigger another loop iteration.
                auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
                if (ret == TIMED_OUT) {
                    return true;
                } else if (ret != OK) {
                    ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
                            strerror(-ret), ret);
                    return false;
                }
            }
        }
    }

    // Compose without holding the lock so new input can keep arriving.
    auto res = processInputFrame(currentTs, mPendingInputFrames[currentTs]);
    Mutex::Autolock l(mMutex);
    if (res != OK) {
        ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)", __FUNCTION__,
                currentTs, strerror(-res), res);
        mPendingInputFrames[currentTs].error = true;
    }

    releaseInputFramesLocked(currentTs);

    return true;
}
460 
isDepthCompositeStream(const sp<Surface> & surface)461 bool DepthCompositeStream::isDepthCompositeStream(const sp<Surface> &surface) {
462     ANativeWindow *anw = surface.get();
463     status_t err;
464     int format;
465     if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
466         String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err),
467                 err);
468         ALOGE("%s: %s", __FUNCTION__, msg.string());
469         return false;
470     }
471 
472     int dataspace;
473     if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
474         String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err),
475                 err);
476         ALOGE("%s: %s", __FUNCTION__, msg.string());
477         return false;
478     }
479 
480     if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_DYNAMIC_DEPTH)) {
481         return true;
482     }
483 
484     return false;
485 }
486 
// Creates the two internal streams backing the composite: a jpeg/blob stream
// with the client-requested dimensions and a depth map stream with the best
// matching supported size. The blob stream's id/surface id are reported back
// through 'id'/'surfaceIds' as the composite's public identity; the depth
// stream ids stay private. The client surface becomes the final output.
status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
        bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
        camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
        std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
    if (mSupportedDepthSizes.empty()) {
        ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    // Pick a supported depth size with a compatible aspect ratio.
    size_t depthWidth, depthHeight;
    auto ret = getMatchingDepthSize(width, height, mSupportedDepthSizes, &depthWidth, &depthHeight);
    if (ret != OK) {
        ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
        return ret;
    }

    sp<CameraDeviceBase> device = mDevice.promote();
    if (!device.get()) {
        ALOGE("%s: Invalid camera device!", __FUNCTION__);
        return NO_INIT;
    }

    // Internal jpeg stream: a CPU consumer wrapped in a surface so the
    // composite can read back the compressed main image.
    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer);
    mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/1, /*controlledByApp*/ true);
    mBlobConsumer->setFrameAvailableListener(this);
    mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
    mBlobSurface = new Surface(producer);

    ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
            id, physicalCameraId, surfaceIds);
    if (ret == OK) {
        mBlobStreamId = *id;
        mBlobSurfaceId = (*surfaceIds)[0];
        mOutputSurface = consumers[0];
    } else {
        return ret;
    }

    // Internal depth map stream, same consumer/surface arrangement.
    BufferQueue::createBufferQueue(&producer, &consumer);
    mDepthConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
    mDepthConsumer->setFrameAvailableListener(this);
    mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
    mDepthSurface = new Surface(producer);
    std::vector<int> depthSurfaceId;
    ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
            kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, &depthSurfaceId);
    if (ret == OK) {
        mDepthSurfaceId = depthSurfaceId[0];
    } else {
        return ret;
    }

    // Receive buffer/result notifications for both internal streams.
    ret = registerCompositeStreamListener(getStreamId());
    if (ret != OK) {
        ALOGE("%s: Failed to register blob stream listener!", __FUNCTION__);
        return ret;
    }

    ret = registerCompositeStreamListener(mDepthStreamId);
    if (ret != OK) {
        ALOGE("%s: Failed to register depth stream listener!", __FUNCTION__);
        return ret;
    }

    mBlobWidth = width;
    mBlobHeight = height;

    return ret;
}
558 
// Connects the client output surface, configures it for blob output, sizes
// its buffer pool to cover both the producer and the consumer side, and
// starts the processing thread. Safe to call repeatedly; subsequent calls
// while the thread is running are no-ops.
status_t DepthCompositeStream::configureStream() {
    if (isRunning()) {
        // Processing thread is already running, nothing more to do.
        return NO_ERROR;
    }

    if (mOutputSurface.get() == nullptr) {
        ALOGE("%s: No valid output surface set!", __FUNCTION__);
        return NO_INIT;
    }

    auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
    if (res != OK) {
        ALOGE("%s: Unable to connect to native window for stream %d",
                __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
            != OK) {
        ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
                mBlobStreamId);
        return res;
    }

    // Combine the undequeued-buffer requirements of the internal blob
    // producer and the client consumer to size the output buffer pool.
    int maxProducerBuffers;
    ANativeWindow *anw = mBlobSurface.get();
    if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    ANativeWindow *anwConsumer = mOutputSurface.get();
    int maxConsumerBuffers;
    if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
                    &maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to query consumer undequeued"
                " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    if ((res = native_window_set_buffer_count(
                    anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
        ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mBlobStreamId);
        return res;
    }

    run("DepthCompositeStreamProc");

    return NO_ERROR;
}
611 
// Stops the processing thread, deletes the internal depth stream and
// disconnects the output surface. Note: a failed join() result may be
// returned if no stream deletion takes place afterwards to overwrite 'ret'.
status_t DepthCompositeStream::deleteInternalStreams() {
    // The 'CameraDeviceClient' parent will delete the blob stream
    requestExit();

    auto ret = join();
    if (ret != OK) {
        ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
                strerror(-ret), ret);
    }

    if (mDepthStreamId >= 0) {
        // Camera devices may not be valid after switching to offline mode.
        // In this case, all offline streams including internal composite streams
        // are managed and released by the offline session.
        sp<CameraDeviceBase> device = mDevice.promote();
        if (device.get() != nullptr) {
            ret = device->deleteStream(mDepthStreamId);
        }

        mDepthStreamId = -1;
    }

    if (mOutputSurface != nullptr) {
        mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA);
        mOutputSurface.clear();
    }

    return ret;
}
641 
onFrameAvailable(const BufferItem & item)642 void DepthCompositeStream::onFrameAvailable(const BufferItem& item) {
643     if (item.mDataSpace == kJpegDataSpace) {
644         ALOGV("%s: Jpeg buffer with ts: %" PRIu64 " ms. arrived!",
645                 __func__, ns2ms(item.mTimestamp));
646 
647         Mutex::Autolock l(mMutex);
648         if (!mErrorState) {
649             mInputJpegBuffers.push_back(item.mTimestamp);
650             mInputReadyCondition.signal();
651         }
652     } else if (item.mDataSpace == kDepthMapDataSpace) {
653         ALOGV("%s: Depth buffer with ts: %" PRIu64 " ms. arrived!", __func__,
654                 ns2ms(item.mTimestamp));
655 
656         Mutex::Autolock l(mMutex);
657         if (!mErrorState) {
658             mInputDepthBuffers.push_back(item.mTimestamp);
659             mInputReadyCondition.signal();
660         }
661     } else {
662         ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
663     }
664 }
665 
insertGbp(SurfaceMap * outSurfaceMap,Vector<int32_t> * outputStreamIds,int32_t * currentStreamId)666 status_t DepthCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
667         Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
668     if (outSurfaceMap->find(mDepthStreamId) == outSurfaceMap->end()) {
669         (*outSurfaceMap)[mDepthStreamId] = std::vector<size_t>();
670         outputStreamIds->push_back(mDepthStreamId);
671     }
672     (*outSurfaceMap)[mDepthStreamId].push_back(mDepthSurfaceId);
673 
674     if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) {
675         (*outSurfaceMap)[mBlobStreamId] = std::vector<size_t>();
676         outputStreamIds->push_back(mBlobStreamId);
677     }
678     (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId);
679 
680     if (currentStreamId != nullptr) {
681         *currentStreamId = mBlobStreamId;
682     }
683 
684     return NO_ERROR;
685 }
686 
insertCompositeStreamIds(std::vector<int32_t> * compositeStreamIds)687 status_t DepthCompositeStream::insertCompositeStreamIds(
688         std::vector<int32_t>* compositeStreamIds /*out*/) {
689     if (compositeStreamIds == nullptr) {
690         return BAD_VALUE;
691     }
692 
693     compositeStreamIds->push_back(mDepthStreamId);
694     compositeStreamIds->push_back(mBlobStreamId);
695 
696     return OK;
697 }
698 
onResultError(const CaptureResultExtras & resultExtras)699 void DepthCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
700     // Processing can continue even in case of result errors.
701     // At the moment depth composite stream processing relies mainly on static camera
702     // characteristics data. The actual result data can be used for the jpeg quality but
703     // in case it is absent we can default to maximum.
704     eraseResult(resultExtras.frameNumber);
705 }
706 
onStreamBufferError(const CaptureResultExtras & resultExtras)707 bool DepthCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
708     bool ret = false;
709     // Buffer errors concerning internal composite streams should not be directly visible to
710     // camera clients. They must only receive a single buffer error with the public composite
711     // stream id.
712     if ((resultExtras.errorStreamId == mDepthStreamId) ||
713             (resultExtras.errorStreamId == mBlobStreamId)) {
714         flagAnErrorFrameNumber(resultExtras.frameNumber);
715         ret = true;
716     }
717 
718     return ret;
719 }
720 
getMatchingDepthSize(size_t width,size_t height,const std::vector<std::tuple<size_t,size_t>> & supporedDepthSizes,size_t * depthWidth,size_t * depthHeight)721 status_t DepthCompositeStream::getMatchingDepthSize(size_t width, size_t height,
722         const std::vector<std::tuple<size_t, size_t>>& supporedDepthSizes,
723         size_t *depthWidth /*out*/, size_t *depthHeight /*out*/) {
724     if ((depthWidth == nullptr) || (depthHeight == nullptr)) {
725         return BAD_VALUE;
726     }
727 
728     float arTol = CameraProviderManager::kDepthARTolerance;
729     *depthWidth = *depthHeight = 0;
730 
731     float aspectRatio = static_cast<float> (width) / static_cast<float> (height);
732     for (const auto& it : supporedDepthSizes) {
733         auto currentWidth = std::get<0>(it);
734         auto currentHeight = std::get<1>(it);
735         if ((currentWidth == width) && (currentHeight == height)) {
736             *depthWidth = width;
737             *depthHeight = height;
738             break;
739         } else {
740             float currentRatio = static_cast<float> (currentWidth) /
741                     static_cast<float> (currentHeight);
742             auto currentSize = currentWidth * currentHeight;
743             auto oldSize = (*depthWidth) * (*depthHeight);
744             if ((fabs(aspectRatio - currentRatio) <= arTol) && (currentSize > oldSize)) {
745                 *depthWidth = currentWidth;
746                 *depthHeight = currentHeight;
747             }
748         }
749     }
750 
751     return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
752 }
753 
getSupportedDepthSizes(const CameraMetadata & ch,std::vector<std::tuple<size_t,size_t>> * depthSizes)754 void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch,
755         std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
756     if (depthSizes == nullptr) {
757         return;
758     }
759 
760     auto entry = ch.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS);
761     if (entry.count > 0) {
762         // Depth stream dimensions have four int32_t components
763         // (pixelformat, width, height, type)
764         size_t entryCount = entry.count / 4;
765         depthSizes->reserve(entryCount);
766         for (size_t i = 0; i < entry.count; i += 4) {
767             if ((entry.data.i32[i] == kDepthMapPixelFormat) &&
768                     (entry.data.i32[i+3] ==
769                      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
770                 depthSizes->push_back(std::make_tuple(entry.data.i32[i+1],
771                             entry.data.i32[i+2]));
772             }
773         }
774     }
775 }
776 
getCompositeStreamInfo(const OutputStreamInfo & streamInfo,const CameraMetadata & ch,std::vector<OutputStreamInfo> * compositeOutput)777 status_t DepthCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
778             const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
779     if (compositeOutput == nullptr) {
780         return BAD_VALUE;
781     }
782 
783     std::vector<std::tuple<size_t, size_t>> depthSizes;
784     getSupportedDepthSizes(ch, &depthSizes);
785     if (depthSizes.empty()) {
786         ALOGE("%s: No depth stream configurations present", __FUNCTION__);
787         return BAD_VALUE;
788     }
789 
790     size_t depthWidth, depthHeight;
791     auto ret = getMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes, &depthWidth,
792             &depthHeight);
793     if (ret != OK) {
794         ALOGE("%s: No matching depth stream size found", __FUNCTION__);
795         return ret;
796     }
797 
798     compositeOutput->clear();
799     compositeOutput->insert(compositeOutput->end(), 2, streamInfo);
800 
801     // Jpeg/Blob stream info
802     (*compositeOutput)[0].dataSpace = kJpegDataSpace;
803     (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
804 
805     // Depth stream info
806     (*compositeOutput)[1].width = depthWidth;
807     (*compositeOutput)[1].height = depthHeight;
808     (*compositeOutput)[1].format = kDepthMapPixelFormat;
809     (*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
810     (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
811 
812     return NO_ERROR;
813 }
814 
815 }; // namespace camera3
816 }; // namespace android
817