/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
//#define LOG_NNDEBUG 0
#define LOG_TAG "EmulatedCamera2_Sensor"

#ifdef LOG_NNDEBUG
#define ALOGVV(...) ALOGV(__VA_ARGS__)
#else
#define ALOGVV(...) ((void)0)
#endif
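
// Note on logging: uncommenting LOG_NDEBUG above enables ALOGV messages;
// the per-frame/per-buffer ALOGVV messages additionally require LOG_NNDEBUG,
// and otherwise compile to no-ops.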

#include <utils/Log.h>

#include "../EmulatedFakeCamera2.h"
#include "Sensor.h"
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "system/camera_metadata.h"

namespace android {

const unsigned int Sensor::kResolution[2] = {640, 480};
const unsigned int Sensor::kActiveArray[4] = {0, 0, 640, 480};

//const nsecs_t Sensor::kExposureTimeRange[2] =
//    {1000L, 30000000000L}; // 1 us - 30 sec
//const nsecs_t Sensor::kFrameDurationRange[2] =
//    {33331760L, 30000000000L}; // ~1/30 s - 30 sec
const nsecs_t Sensor::kExposureTimeRange[2] =
    {1000L, 300000000L}; // 1 us - 0.3 sec
const nsecs_t Sensor::kFrameDurationRange[2] =
    {33331760L, 300000000L}; // ~1/30 s - 0.3 sec

const nsecs_t Sensor::kMinVerticalBlank = 10000L;
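// The constructor's default exposure time is the minimum frame duration
// minus this blanking interval.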

const uint8_t Sensor::kColorFilterArrangement =
    ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGGB;

// Output image data characteristics
const uint32_t Sensor::kMaxRawValue = 4000;
const uint32_t Sensor::kBlackLevel = 1000;

// Sensor sensitivity
const float Sensor::kSaturationVoltage = 0.520f;
const uint32_t Sensor::kSaturationElectrons = 2000;
const float Sensor::kVoltsPerLuxSecond = 0.100f;

const float Sensor::kElectronsPerLuxSecond =
        Sensor::kSaturationElectrons / Sensor::kSaturationVoltage
        * Sensor::kVoltsPerLuxSecond;
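// With the values above, this works out to (2000 / 0.520) * 0.100
// ≈ 385 electrons per lux-second.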

const float Sensor::kBaseGainFactor = (float)Sensor::kMaxRawValue /
        Sensor::kSaturationElectrons;
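// That is, 4000 / 2000 = 2.0 raw counts per electron at the default
// sensitivity of ISO 100.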

const float Sensor::kReadNoiseStddevBeforeGain = 1.177; // in electrons
const float Sensor::kReadNoiseStddevAfterGain = 2.100; // in digital counts
const float Sensor::kReadNoiseVarBeforeGain =
        Sensor::kReadNoiseStddevBeforeGain *
        Sensor::kReadNoiseStddevBeforeGain;
const float Sensor::kReadNoiseVarAfterGain =
        Sensor::kReadNoiseStddevAfterGain *
        Sensor::kReadNoiseStddevAfterGain;

// While each row has to read out, reset, and then expose, the (reset +
// expose) sequence can be overlapped by other row readouts, so the final
// minimum frame duration is purely a function of row readout time, at least
// if there's a reasonable number of rows.
const nsecs_t Sensor::kRowReadoutTime =
        Sensor::kFrameDurationRange[0] / Sensor::kResolution[1];
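// With the constants above: 33331760 ns / 480 rows ≈ 69.4 us per row.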

const int32_t Sensor::kSensitivityRange[2] = {100, 1600};
const uint32_t Sensor::kDefaultSensitivity = 100;

/** A few utility functions for math, normal distributions */

// Take advantage of IEEE floating-point format to calculate an approximate
// square root. Accurate to within +-3.6%
float sqrtf_approx(float r) {
    // Modifier is based on IEEE floating-point representation; the
    // manipulations boil down to finding approximate log2, dividing by two, and
    // then inverting the log2. A bias is added to make the relative error
    // symmetric about the real answer.
    const int32_t modifier = 0x1FBB4000;

    // Use memcpy for the bit-level reinterpretation; casting pointers
    // between float and int32_t would violate strict-aliasing rules.
    int32_t r_i;
    std::memcpy(&r_i, &r, sizeof(r_i));
    r_i = (r_i >> 1) + modifier;

    float result;
    std::memcpy(&result, &r_i, sizeof(result));
    return result;
}
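
// Rough sanity check (not part of the build): by the accuracy bound above,
// sqrtf_approx(4.0f) is within +-3.6% of 2.0, and sqrtf_approx(100.0f) is
// within +-3.6% of 10.0.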

Sensor::Sensor():
        Thread(false),
        mGotVSync(false),
        mExposureTime(kFrameDurationRange[0] - kMinVerticalBlank),
        mFrameDuration(kFrameDurationRange[0]),
        mGainFactor(kDefaultSensitivity),
        mNextBuffers(NULL),
        mFrameNumber(0),
        mCapturedBuffers(NULL),
        mListener(NULL),
        mScene(kResolution[0], kResolution[1], kElectronsPerLuxSecond)
{
}

Sensor::~Sensor() {
    shutDown();
}

status_t Sensor::startUp() {
    ALOGV("%s: E", __FUNCTION__);

    int res;
    mCapturedBuffers = NULL;
    res = run("EmulatedFakeCamera2::Sensor",
            ANDROID_PRIORITY_URGENT_DISPLAY);

    if (res != OK) {
        ALOGE("Unable to start up sensor capture thread: %d", res);
    }
    return res;
}

status_t Sensor::shutDown() {
    ALOGV("%s: E", __FUNCTION__);

    int res;
    res = requestExitAndWait();
    if (res != OK) {
        ALOGE("Unable to shut down sensor capture thread: %d", res);
    }
    return res;
}

Scene &Sensor::getScene() {
    return mScene;
}

void Sensor::setExposureTime(uint64_t ns) {
    Mutex::Autolock lock(mControlMutex);
    ALOGVV("Exposure set to %f ms", ns/1000000.f);
    mExposureTime = ns;
}

void Sensor::setFrameDuration(uint64_t ns) {
    Mutex::Autolock lock(mControlMutex);
    ALOGVV("Frame duration set to %f ms", ns/1000000.f);
    mFrameDuration = ns;
}

void Sensor::setSensitivity(uint32_t gain) {
    Mutex::Autolock lock(mControlMutex);
    ALOGVV("Gain set to %d", gain);
    mGainFactor = gain;
}

void Sensor::setDestinationBuffers(Buffers *buffers) {
    Mutex::Autolock lock(mControlMutex);
    mNextBuffers = buffers;
}

void Sensor::setFrameNumber(uint32_t frameNumber) {
    Mutex::Autolock lock(mControlMutex);
    mFrameNumber = frameNumber;
}

bool Sensor::waitForVSync(nsecs_t reltime) {
    int res;
    Mutex::Autolock lock(mControlMutex);

    mGotVSync = false;
    res = mVSync.waitRelative(mControlMutex, reltime);
    if (res != OK && res != TIMED_OUT) {
        ALOGE("%s: Error waiting for VSync signal: %d", __FUNCTION__, res);
        return false;
    }
    return mGotVSync;
}

bool Sensor::waitForNewFrame(nsecs_t reltime,
        nsecs_t *captureTime) {
    Mutex::Autolock lock(mReadoutMutex);
    if (mCapturedBuffers == NULL) {
        int res;
        res = mReadoutAvailable.waitRelative(mReadoutMutex, reltime);
        if (res == TIMED_OUT) {
            return false;
        } else if (res != OK || mCapturedBuffers == NULL) {
            ALOGE("Error waiting for sensor readout signal: %d", res);
            return false;
        }
    } else {
        mReadoutComplete.signal();
    }

    *captureTime = mCaptureTime;
    mCapturedBuffers = NULL;
    return true;
}

Sensor::SensorListener::~SensorListener() {
}

void Sensor::setSensorListener(SensorListener *listener) {
    Mutex::Autolock lock(mControlMutex);
    mListener = listener;
}

status_t Sensor::readyToRun() {
    ALOGV("Starting up sensor thread");
    mStartupTime = systemTime();
    mNextCaptureTime = 0;
    mNextCapturedBuffers = NULL;
    return OK;
}

bool Sensor::threadLoop() {
    /**
     * Sensor capture operation main loop.
     *
     * Stages are out-of-order relative to a single frame's processing, but
     * in-order in time.
     */
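
    /*
     * Within a single threadLoop() iteration, Stage 1 latches the controls
     * for the next frame, Stage 3 completes readout of the previously
     * exposed frame, and Stage 2 begins exposing the next frame; this
     * mimics the pipelining of a real rolling-shutter sensor.
     */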

    /**
     * Stage 1: Read in latest control parameters
     */
    uint64_t exposureDuration;
    uint64_t frameDuration;
    uint32_t gain;
    Buffers *nextBuffers;
    uint32_t frameNumber;
    SensorListener *listener = NULL;
    {
        Mutex::Autolock lock(mControlMutex);
        exposureDuration = mExposureTime;
        frameDuration    = mFrameDuration;
        gain             = mGainFactor;
        nextBuffers      = mNextBuffers;
        frameNumber      = mFrameNumber;
        listener         = mListener;
        // Don't reuse a buffer set
        mNextBuffers = NULL;

        // Signal VSync for start of readout
        ALOGVV("Sensor VSync");
        mGotVSync = true;
        mVSync.signal();
    }

    /**
     * Stage 3: Read out latest captured image
     */

    Buffers *capturedBuffers = NULL;
    nsecs_t captureTime = 0;

    nsecs_t startRealTime = systemTime();
    // Stagefright cares about system time for timestamps, so base simulated
    // time on that.
    nsecs_t simulatedTime = startRealTime;
    nsecs_t frameEndRealTime = startRealTime + frameDuration;
    nsecs_t frameReadoutEndRealTime = startRealTime +
            kRowReadoutTime * kResolution[1];

    if (mNextCapturedBuffers != NULL) {
        ALOGVV("Sensor starting readout");
        // Pretend we're doing readout now; will signal once enough time has elapsed
        capturedBuffers = mNextCapturedBuffers;
        captureTime = mNextCaptureTime;
    }
    simulatedTime += kRowReadoutTime + kMinVerticalBlank;

    // TODO: Move this signal to another thread to simulate readout
    // time properly
    if (capturedBuffers != NULL) {
        ALOGVV("Sensor readout complete");
        Mutex::Autolock lock(mReadoutMutex);
        if (mCapturedBuffers != NULL) {
            ALOGV("Waiting for readout thread to catch up!");
            mReadoutComplete.wait(mReadoutMutex);
        }

        mCapturedBuffers = capturedBuffers;
        mCaptureTime = captureTime;
        mReadoutAvailable.signal();
        capturedBuffers = NULL;
    }

    /**
     * Stage 2: Capture new image
     */
    mNextCaptureTime = simulatedTime;
    mNextCapturedBuffers = nextBuffers;

    if (mNextCapturedBuffers != NULL) {
        if (listener != NULL) {
            listener->onSensorEvent(frameNumber, SensorListener::EXPOSURE_START,
                    mNextCaptureTime);
        }
        ALOGVV("Starting next capture: Exposure: %f ms, gain: %d",
                (float)exposureDuration/1e6, gain);
        mScene.setExposureDuration((float)exposureDuration/1e9);
        mScene.calculateScene(mNextCaptureTime);

        // Might be adding more buffers, so size isn't constant
        for (size_t i = 0; i < mNextCapturedBuffers->size(); i++) {
            const StreamBuffer &b = (*mNextCapturedBuffers)[i];
            ALOGVV("Sensor capturing buffer %d: stream %d,"
                    " %d x %d, format %x, stride %d, buf %p, img %p",
                    i, b.streamId, b.width, b.height, b.format, b.stride,
                    b.buffer, b.img);
            switch (b.format) {
                case HAL_PIXEL_FORMAT_RAW16:
                    captureRaw(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_RGB_888:
                    captureRGB(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_RGBA_8888:
                    captureRGBA(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_BLOB:
                    if (b.dataSpace != HAL_DATASPACE_DEPTH) {
                        // Add an auxiliary buffer of the right size
                        // Assumes only one BLOB (JPEG) buffer in
                        // mNextCapturedBuffers
                        StreamBuffer bAux;
                        bAux.streamId = 0;
                        bAux.width = b.width;
                        bAux.height = b.height;
                        bAux.format = HAL_PIXEL_FORMAT_RGB_888;
                        bAux.stride = b.width;
                        bAux.buffer = NULL;
                        // TODO: Reuse these
                        bAux.img = new uint8_t[b.width * b.height * 3];
                        mNextCapturedBuffers->push_back(bAux);
                    } else {
                        captureDepthCloud(b.img);
                    }
                    break;
                case HAL_PIXEL_FORMAT_YCrCb_420_SP:
                    captureNV21(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_YV12:
                    // TODO:
                    ALOGE("%s: Format %x is TODO", __FUNCTION__, b.format);
                    break;
                case HAL_PIXEL_FORMAT_Y16:
                    captureDepth(b.img, gain, b.stride);
                    break;
                default:
                    ALOGE("%s: Unknown format %x, no output", __FUNCTION__,
                            b.format);
                    break;
            }
        }
    }

    ALOGVV("Sensor vertical blanking interval");
    nsecs_t workDoneRealTime = systemTime();
    const nsecs_t timeAccuracy = 2e6; // 2 ms of imprecision is ok
    if (workDoneRealTime < frameEndRealTime - timeAccuracy) {
        timespec t;
        t.tv_sec = (frameEndRealTime - workDoneRealTime) / 1000000000L;
        t.tv_nsec = (frameEndRealTime - workDoneRealTime) % 1000000000L;

        int ret;
        do {
            ret = nanosleep(&t, &t);
        } while (ret != 0);
    }
    nsecs_t endRealTime = systemTime();
    ALOGVV("Frame cycle took %d ms, target %d ms",
            (int)((endRealTime - startRealTime)/1000000),
            (int)(frameDuration / 1000000));
    return true;
}

void Sensor::captureRaw(uint8_t *img, uint32_t gain, uint32_t stride) {
    float totalGain = gain/100.0 * kBaseGainFactor;
    float noiseVarGain = totalGain * totalGain;
    float readNoiseVar = kReadNoiseVarBeforeGain * noiseVarGain
            + kReadNoiseVarAfterGain;
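
    // Read noise referred to the output: the pre-gain variance is amplified
    // by the square of the total gain, while the post-gain variance adds
    // directly in digital counts.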

    int bayerSelect[4] = {Scene::R, Scene::Gr, Scene::Gb, Scene::B}; // RGGB
    mScene.setReadoutPixel(0, 0);
    for (unsigned int y = 0; y < kResolution[1]; y++) {
        int *bayerRow = bayerSelect + (y & 0x1) * 2;
        uint16_t *px = (uint16_t*)img + y * stride;
        for (unsigned int x = 0; x < kResolution[0]; x++) {
            uint32_t electronCount;
            electronCount = mScene.getPixelElectrons()[bayerRow[x & 0x1]];

            // TODO: Better pixel saturation curve?
            electronCount = (electronCount < kSaturationElectrons) ?
                    electronCount : kSaturationElectrons;

            // TODO: Better A/D saturation curve?
            uint16_t rawCount = electronCount * totalGain;
            rawCount = (rawCount < kMaxRawValue) ? rawCount : kMaxRawValue;

            // Calculate noise value
            // TODO: Use more-correct Gaussian instead of uniform noise
            float photonNoiseVar = electronCount * noiseVarGain;
            float noiseStddev = sqrtf_approx(readNoiseVar + photonNoiseVar);
            // Scaled to roughly match gaussian/uniform noise stddev
            float noiseSample = std::rand() * (2.5 / (1.0 + RAND_MAX)) - 1.25;
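            // Note: a uniform sample on [-1.25, 1.25) has a standard
            // deviation of 2.5/sqrt(12) ~= 0.72, so this only loosely
            // approximates a unit-stddev Gaussian, per the TODO above.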

            rawCount += kBlackLevel;
            rawCount += noiseStddev * noiseSample;

            *px++ = rawCount;
        }
        // TODO: Handle this better
        //simulatedTime += kRowReadoutTime;
    }
    ALOGVV("Raw sensor image captured");
}

void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t stride) {
    float totalGain = gain/100.0 * kBaseGainFactor;
    // In fixed-point math, calculate total scaling from electrons to 8bpp
    int scale64x = 64 * totalGain * 255 / kMaxRawValue;
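    // scale64x carries 6 fractional bits: the final 8-bit value is
    // (electrons * scale64x) / 64, i.e. electrons * totalGain * 255 /
    // kMaxRawValue.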
    uint32_t inc = kResolution[0] / stride;

    for (unsigned int y = 0, outY = 0; y < kResolution[1]; y += inc, outY++) {
        uint8_t *px = img + outY * stride * 4;
        mScene.setReadoutPixel(0, y);
        for (unsigned int x = 0; x < kResolution[0]; x += inc) {
            uint32_t rCount, gCount, bCount;
            // TODO: Perfect demosaicing is a cheat
            const uint32_t *pixel = mScene.getPixelElectrons();
            rCount = pixel[Scene::R]  * scale64x;
            gCount = pixel[Scene::Gr] * scale64x;
            bCount = pixel[Scene::B]  * scale64x;

            *px++ = rCount < 255*64 ? rCount / 64 : 255;
            *px++ = gCount < 255*64 ? gCount / 64 : 255;
            *px++ = bCount < 255*64 ? bCount / 64 : 255;
            *px++ = 255;
            for (unsigned int j = 1; j < inc; j++)
                mScene.getPixelElectrons();
        }
        // TODO: Handle this better
        //simulatedTime += kRowReadoutTime;
    }
    ALOGVV("RGBA sensor image captured");
}

void Sensor::captureRGB(uint8_t *img, uint32_t gain, uint32_t stride) {
    float totalGain = gain/100.0 * kBaseGainFactor;
    // In fixed-point math, calculate total scaling from electrons to 8bpp
    int scale64x = 64 * totalGain * 255 / kMaxRawValue;
    uint32_t inc = kResolution[0] / stride;

    for (unsigned int y = 0, outY = 0; y < kResolution[1]; y += inc, outY++) {
        mScene.setReadoutPixel(0, y);
        uint8_t *px = img + outY * stride * 3;
        for (unsigned int x = 0; x < kResolution[0]; x += inc) {
            uint32_t rCount, gCount, bCount;
            // TODO: Perfect demosaicing is a cheat
            const uint32_t *pixel = mScene.getPixelElectrons();
            rCount = pixel[Scene::R]  * scale64x;
            gCount = pixel[Scene::Gr] * scale64x;
            bCount = pixel[Scene::B]  * scale64x;

            *px++ = rCount < 255*64 ? rCount / 64 : 255;
            *px++ = gCount < 255*64 ? gCount / 64 : 255;
            *px++ = bCount < 255*64 ? bCount / 64 : 255;
            for (unsigned int j = 1; j < inc; j++)
                mScene.getPixelElectrons();
        }
        // TODO: Handle this better
        //simulatedTime += kRowReadoutTime;
    }
    ALOGVV("RGB sensor image captured");
}

void Sensor::captureNV21(uint8_t *img, uint32_t gain, uint32_t stride) {
    float totalGain = gain/100.0 * kBaseGainFactor;
    // Using fixed-point math with 6 bits of fractional precision.
    // In fixed-point math, calculate total scaling from electrons to 8bpp
    const int scale64x = 64 * totalGain * 255 / kMaxRawValue;
    // In fixed-point math, saturation point of sensor after gain
    const int saturationPoint = 64 * 255;
    // Fixed-point coefficients for RGB-YUV transform
    // Based on JFIF RGB->YUV transform.
    // Cb/Cr offset scaled by 64x twice since they're applied post-multiply
    const int rgbToY[]  = {19, 37, 7};
    const int rgbToCb[] = {-10, -21, 32, 524288};
    const int rgbToCr[] = {32, -26, -5, 524288};
    // Scale back to 8bpp non-fixed-point
    const int scaleOut = 64;
    const int scaleOutSq = scaleOut * scaleOut; // after multiplies
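
    // For reference, these approximate the JFIF coefficients scaled by 64:
    //   Y  =  0.299 R + 0.587 G + 0.114 B        -> {19, 37, 7}
    //   Cb = -0.169 R - 0.331 G + 0.500 B + 128  -> {-10, -21, 32}
    //   Cr =  0.500 R - 0.419 G - 0.081 B + 128  -> {32, -26, -5}
    // The 128 chroma offset is scaled by 64 twice (128 * 64 * 64 = 524288)
    // since it is added after the already-64x-scaled multiplies.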

    uint32_t inc = kResolution[0] / stride;
    uint32_t outH = kResolution[1] / inc;
    for (unsigned int y = 0, outY = 0;
         y < kResolution[1]; y += inc, outY++) {
        uint8_t *pxY  = img + outY * stride;
        uint8_t *pxVU = img + (outH + outY / 2) * stride;
        mScene.setReadoutPixel(0, y);
        for (unsigned int outX = 0; outX < stride; outX++) {
            int32_t rCount, gCount, bCount;
            // TODO: Perfect demosaicing is a cheat
            const uint32_t *pixel = mScene.getPixelElectrons();
            rCount = pixel[Scene::R]  * scale64x;
            rCount = rCount < saturationPoint ? rCount : saturationPoint;
            gCount = pixel[Scene::Gr] * scale64x;
            gCount = gCount < saturationPoint ? gCount : saturationPoint;
            bCount = pixel[Scene::B]  * scale64x;
            bCount = bCount < saturationPoint ? bCount : saturationPoint;

            *pxY++ = (rgbToY[0] * rCount +
                      rgbToY[1] * gCount +
                      rgbToY[2] * bCount) / scaleOutSq;
            if (outY % 2 == 0 && outX % 2 == 0) {
                *pxVU++ = (rgbToCr[0] * rCount +
                           rgbToCr[1] * gCount +
                           rgbToCr[2] * bCount +
                           rgbToCr[3]) / scaleOutSq;
                *pxVU++ = (rgbToCb[0] * rCount +
                           rgbToCb[1] * gCount +
                           rgbToCb[2] * bCount +
                           rgbToCb[3]) / scaleOutSq;
            }
            for (unsigned int j = 1; j < inc; j++)
                mScene.getPixelElectrons();
        }
    }
    ALOGVV("NV21 sensor image captured");
}

void Sensor::captureDepth(uint8_t *img, uint32_t gain, uint32_t stride) {
    float totalGain = gain/100.0 * kBaseGainFactor;
    // In fixed-point math, calculate scaling factor to 13bpp millimeters
    int scale64x = 64 * totalGain * 8191 / kMaxRawValue;
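    // 8191 = 2^13 - 1, the maximum of the 13-bit millimeter depth range.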
    uint32_t inc = kResolution[0] / stride;

    for (unsigned int y = 0, outY = 0; y < kResolution[1]; y += inc, outY++) {
        mScene.setReadoutPixel(0, y);
        uint16_t *px = ((uint16_t*)img) + outY * stride;
        for (unsigned int x = 0; x < kResolution[0]; x += inc) {
            uint32_t depthCount;
            // TODO: Make up real depth scene instead of using green channel
            // as depth
            const uint32_t *pixel = mScene.getPixelElectrons();
            depthCount = pixel[Scene::Gr] * scale64x;

            *px++ = depthCount < 8191*64 ? depthCount / 64 : 0;
            for (unsigned int j = 1; j < inc; j++)
                mScene.getPixelElectrons();
        }
        // TODO: Handle this better
        //simulatedTime += kRowReadoutTime;
    }
    ALOGVV("Depth sensor image captured");
}

void Sensor::captureDepthCloud(uint8_t *img) {
    android_depth_points *cloud = reinterpret_cast<android_depth_points*>(img);

    cloud->num_points = 16;

    // TODO: Create point cloud values that match RGB scene
    const int FLOATS_PER_POINT = 4;
    const float JITTER_STDDEV = 0.1f;
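    // A static 4x4 grid of points centered at (0, 0, 3) meters, each
    // jittered by a uniform sample in [-1.25, 1.25) scaled by
    // JITTER_STDDEV, with a fixed confidence value of 0.8.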
    for (size_t y = 0, i = 0; y < 4; y++) {
        for (size_t x = 0; x < 4; x++, i++) {
            float randSampleX = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
            randSampleX *= JITTER_STDDEV;

            float randSampleY = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
            randSampleY *= JITTER_STDDEV;

            float randSampleZ = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
            randSampleZ *= JITTER_STDDEV;

            cloud->xyzc_points[i * FLOATS_PER_POINT + 0] = x - 1.5f + randSampleX;
            cloud->xyzc_points[i * FLOATS_PER_POINT + 1] = y - 1.5f + randSampleY;
            cloud->xyzc_points[i * FLOATS_PER_POINT + 2] = 3.f + randSampleZ;
            cloud->xyzc_points[i * FLOATS_PER_POINT + 3] = 0.8f;
        }
    }

    ALOGVV("Depth point cloud captured");
}

} // namespace android