/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
//#define LOG_NNDEBUG 0
#define LOG_TAG "EmulatedCamera2_Sensor"

#ifdef LOG_NNDEBUG
#define ALOGVV(...) ALOGV(__VA_ARGS__)
#else
#define ALOGVV(...) ((void)0)
#endif

#include <utils/Log.h>

#include <cmath>
#include <cstdlib>
#include <cstring>  // for std::memcpy in sqrtf_approx
#include "../EmulatedFakeCamera2.h"
#include "Sensor.h"
#include "guest/libs/platform_support/api_level_fixes.h"
#include "system/camera_metadata.h"

namespace android {

// const nsecs_t Sensor::kExposureTimeRange[2] =
//    {1000L, 30000000000L} ; // 1 us - 30 sec
// const nsecs_t Sensor::kFrameDurationRange[2] =
//    {33331760L, 30000000000L}; // ~1/30 s - 30 sec
const nsecs_t Sensor::kExposureTimeRange[2] = {1000L,
                                               300000000L};  // 1 us - 0.3 sec
const nsecs_t Sensor::kFrameDurationRange[2] = {
    33331760L, 300000000L};  // ~1/30 s - 0.3 sec

const nsecs_t Sensor::kMinVerticalBlank = 10000L;

const uint8_t Sensor::kColorFilterArrangement =
    ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGGB;

// Output image data characteristics
const uint32_t Sensor::kMaxRawValue = 4000;
const uint32_t Sensor::kBlackLevel = 1000;

// Sensor sensitivity
const float Sensor::kSaturationVoltage = 0.520f;
const uint32_t Sensor::kSaturationElectrons = 2000;
const float Sensor::kVoltsPerLuxSecond = 0.100f;

const float Sensor::kElectronsPerLuxSecond = Sensor::kSaturationElectrons /
                                             Sensor::kSaturationVoltage *
                                             Sensor::kVoltsPerLuxSecond;
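// (== 2000 / 0.520 * 0.100, roughly 385 electrons per lux-second)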

const float Sensor::kBaseGainFactor =
    (float)Sensor::kMaxRawValue / Sensor::kSaturationElectrons;

const float Sensor::kReadNoiseStddevBeforeGain = 1.177;  // in electrons
const float Sensor::kReadNoiseStddevAfterGain = 2.100;   // in digital counts
const float Sensor::kReadNoiseVarBeforeGain =
    Sensor::kReadNoiseStddevBeforeGain * Sensor::kReadNoiseStddevBeforeGain;
const float Sensor::kReadNoiseVarAfterGain =
    Sensor::kReadNoiseStddevAfterGain * Sensor::kReadNoiseStddevAfterGain;

const int32_t Sensor::kSensitivityRange[2] = {100, 1600};
const uint32_t Sensor::kDefaultSensitivity = 100;

/** A few utility functions for math, normal distributions */

// Take advantage of the IEEE floating-point format to calculate an
// approximate square root. Accurate to within +-3.6%.
float sqrtf_approx(float r) {
  // Modifier is based on IEEE floating-point representation; the
  // manipulations boil down to finding approximate log2, dividing by two, and
  // then inverting the log2. A bias is added to make the relative error
  // symmetric about the real answer.
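  // For example, r = 4.0f (bits 0x40800000) yields roughly 1.96f, about 2%
  // below the exact root and within the stated error bound.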
  const int32_t modifier = 0x1FBB4000;

  // Use memcpy rather than a pointer cast to reinterpret the bits without
  // violating strict-aliasing rules.
  int32_t r_i;
  std::memcpy(&r_i, &r, sizeof(r_i));
  r_i = (r_i >> 1) + modifier;

  float result;
  std::memcpy(&result, &r_i, sizeof(result));
  return result;
}

Sensor::Sensor(uint32_t width, uint32_t height)
    : Thread(false),
      mResolution{width, height},
      mActiveArray{0, 0, width, height},
      mRowReadoutTime(kFrameDurationRange[0] / height),
      mGotVSync(false),
      mExposureTime(kFrameDurationRange[0] - kMinVerticalBlank),
      mFrameDuration(kFrameDurationRange[0]),
      mGainFactor(kDefaultSensitivity),
      mNextBuffers(NULL),
      mFrameNumber(0),
      mCapturedBuffers(NULL),
      mListener(NULL),
      mScene(width, height, kElectronsPerLuxSecond) {
  ALOGV("Sensor created with pixel array %d x %d", width, height);
}

Sensor::~Sensor() { shutDown(); }

status_t Sensor::startUp() {
  ALOGV("%s: E", __FUNCTION__);

  int res;
  mCapturedBuffers = NULL;
  res = run("EmulatedFakeCamera2::Sensor", ANDROID_PRIORITY_URGENT_DISPLAY);

  if (res != OK) {
    ALOGE("Unable to start up sensor capture thread: %d", res);
  }
  return res;
}

status_t Sensor::shutDown() {
  ALOGV("%s: E", __FUNCTION__);

  int res;
  res = requestExitAndWait();
  if (res != OK) {
    ALOGE("Unable to shut down sensor capture thread: %d", res);
  }
  return res;
}

Scene &Sensor::getScene() { return mScene; }

void Sensor::setExposureTime(uint64_t ns) {
  Mutex::Autolock lock(mControlMutex);
  ALOGVV("Exposure set to %f ms", ns / 1000000.f);
  mExposureTime = ns;
}

void Sensor::setFrameDuration(uint64_t ns) {
  Mutex::Autolock lock(mControlMutex);
  ALOGVV("Frame duration set to %f ms", ns / 1000000.f);
  mFrameDuration = ns;
}

void Sensor::setSensitivity(uint32_t gain) {
  Mutex::Autolock lock(mControlMutex);
  ALOGVV("Gain set to %d", gain);
  mGainFactor = gain;
}

void Sensor::setDestinationBuffers(Buffers *buffers) {
  Mutex::Autolock lock(mControlMutex);
  mNextBuffers = buffers;
}

void Sensor::setFrameNumber(uint32_t frameNumber) {
  Mutex::Autolock lock(mControlMutex);
  mFrameNumber = frameNumber;
}

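// Blocks until the sensor thread signals the start of its next frame cycle
// (stage 1 of threadLoop) or until reltime elapses; returns whether the
// VSync signal actually arrived.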
bool Sensor::waitForVSync(nsecs_t reltime) {
  int res;
  Mutex::Autolock lock(mControlMutex);

  mGotVSync = false;
  res = mVSync.waitRelative(mControlMutex, reltime);
  if (res != OK && res != TIMED_OUT) {
    ALOGE("%s: Error waiting for VSync signal: %d", __FUNCTION__, res);
    return false;
  }
  return mGotVSync;
}

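// Blocks until the sensor thread publishes a captured buffer set, then
// acknowledges via mReadoutComplete so the sensor thread may safely publish
// the next set.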
bool Sensor::waitForNewFrame(nsecs_t reltime, nsecs_t *captureTime) {
  Mutex::Autolock lock(mReadoutMutex);

  if (mCapturedBuffers == NULL) {
    int res;
    res = mReadoutAvailable.waitRelative(mReadoutMutex, reltime);
    if (res == TIMED_OUT) {
      return false;
    } else if (res != OK || mCapturedBuffers == NULL) {
      ALOGE("Error waiting for sensor readout signal: %d", res);
      return false;
    }
  }
  mReadoutComplete.signal();

  *captureTime = mCaptureTime;
  mCapturedBuffers = NULL;
  return true;
}

Sensor::SensorListener::~SensorListener() {}

void Sensor::setSensorListener(SensorListener *listener) {
  Mutex::Autolock lock(mControlMutex);
  mListener = listener;
}

status_t Sensor::readyToRun() {
  ALOGV("Starting up sensor thread");
  mStartupTime = systemTime();
  mNextCaptureTime = 0;
  mNextCapturedBuffers = NULL;
  return OK;
}

bool Sensor::threadLoop() {
  /**
   * Sensor capture operation main loop.
   *
   * Stages are out-of-order relative to a single frame's processing, but
   * in-order in time.
   */
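  // Within one pass: Stage 1 latches the latest control settings and signals
  // VSync, Stage 3 hands off the image exposed during the previous pass to
  // any waiting reader, and Stage 2 renders the next image.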

  /**
   * Stage 1: Read in latest control parameters
   */
  uint64_t exposureDuration;
  uint64_t frameDuration;
  uint32_t gain;
  Buffers *nextBuffers;
  uint32_t frameNumber;
  SensorListener *listener = NULL;
  {
    Mutex::Autolock lock(mControlMutex);
    exposureDuration = mExposureTime;
    frameDuration = mFrameDuration;
    gain = mGainFactor;
    nextBuffers = mNextBuffers;
    frameNumber = mFrameNumber;
    listener = mListener;
    // Don't reuse a buffer set
    mNextBuffers = NULL;

    // Signal VSync for start of readout
    ALOGVV("Sensor VSync");
    mGotVSync = true;
    mVSync.signal();
  }

  /**
   * Stage 3: Read out latest captured image
   */

  Buffers *capturedBuffers = NULL;
  nsecs_t captureTime = 0;

  nsecs_t startRealTime = systemTime();
  // Stagefright cares about system time for timestamps, so base simulated
  // time on that.
  nsecs_t simulatedTime = startRealTime;
  nsecs_t frameEndRealTime = startRealTime + frameDuration;

  if (mNextCapturedBuffers != NULL) {
    ALOGVV("Sensor starting readout");
    // Pretend we're doing readout now; will signal once enough time has elapsed
    capturedBuffers = mNextCapturedBuffers;
    captureTime = mNextCaptureTime;
  }
  simulatedTime += mRowReadoutTime + kMinVerticalBlank;

  // TODO: Move this signal to another thread to simulate readout
  // time properly
  if (capturedBuffers != NULL) {
    ALOGVV("Sensor readout complete");
    Mutex::Autolock lock(mReadoutMutex);
    if (mCapturedBuffers != NULL) {
      ALOGV("Waiting for readout thread to catch up!");
      mReadoutComplete.wait(mReadoutMutex);
    }

    mCapturedBuffers = capturedBuffers;
    mCaptureTime = captureTime;
    mReadoutAvailable.signal();
    capturedBuffers = NULL;
  }

  /**
   * Stage 2: Capture new image
   */
  mNextCaptureTime = simulatedTime;
  mNextCapturedBuffers = nextBuffers;

  if (mNextCapturedBuffers != NULL) {
    if (listener != NULL) {
      listener->onSensorEvent(frameNumber, SensorListener::EXPOSURE_START,
                              mNextCaptureTime);
    }
    ALOGVV("Starting next capture: Exposure: %f ms, gain: %d",
           (float)exposureDuration / 1e6, gain);
    mScene.setExposureDuration((float)exposureDuration / 1e9);
    mScene.calculateScene(mNextCaptureTime);

    // Might be adding more buffers, so size isn't constant
    for (size_t i = 0; i < mNextCapturedBuffers->size(); i++) {
      const StreamBuffer &b = (*mNextCapturedBuffers)[i];
      ALOGVV(
          "Sensor capturing buffer %zu: stream %d,"
          " %d x %d, format %x, stride %d, buf %p, img %p",
          i, b.streamId, b.width, b.height, b.format, b.stride, b.buffer,
          b.img);
      switch (b.format) {
#if VSOC_PLATFORM_SDK_AFTER(K)
        case HAL_PIXEL_FORMAT_RAW16:
          captureRaw(b.img, gain, b.stride);
          break;
#endif
        case HAL_PIXEL_FORMAT_RGB_888:
          captureRGB(b.img, gain, b.stride);
          break;
        case HAL_PIXEL_FORMAT_RGBA_8888:
          captureRGBA(b.img, gain, b.stride);
          break;
        case HAL_PIXEL_FORMAT_BLOB:
#if defined HAL_DATASPACE_DEPTH
          if (b.dataSpace != HAL_DATASPACE_DEPTH) {
#endif
            // Add auxiliary buffer of the right size
            // Assumes only one BLOB (JPEG) buffer in
            // mNextCapturedBuffers
            StreamBuffer bAux;
            bAux.streamId = 0;
            bAux.width = b.width;
            bAux.height = b.height;
            bAux.format = HAL_PIXEL_FORMAT_RGB_888;
            bAux.stride = b.width;
            bAux.buffer = NULL;
            // TODO: Reuse these
            bAux.img = new uint8_t[b.width * b.height * 3];
            mNextCapturedBuffers->push_back(bAux);
#if defined HAL_DATASPACE_DEPTH
          } else {
            captureDepthCloud(b.img);
          }
#endif
          break;
        case HAL_PIXEL_FORMAT_YCrCb_420_SP:
        case HAL_PIXEL_FORMAT_YCbCr_420_888:
          captureNV21(b.img, gain, b.stride);
          break;
        case HAL_PIXEL_FORMAT_YV12:
          // TODO:
          ALOGE("%s: Format %x is TODO", __FUNCTION__, b.format);
          break;
        case HAL_PIXEL_FORMAT_Y16:
          captureDepth(b.img, gain, b.stride);
          break;
        default:
          ALOGE("%s: Unknown format %x, no output", __FUNCTION__, b.format);
          break;
      }
    }
  }

  ALOGVV("Sensor vertical blanking interval");
  nsecs_t workDoneRealTime = systemTime();
  const nsecs_t timeAccuracy = 2e6;  // 2 ms of imprecision is ok
  if (workDoneRealTime < frameEndRealTime - timeAccuracy) {
    timespec t;
    t.tv_sec = (frameEndRealTime - workDoneRealTime) / 1000000000L;
    t.tv_nsec = (frameEndRealTime - workDoneRealTime) % 1000000000L;

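    // nanosleep() writes the unslept remainder back into t, so looping until
    // it returns 0 sleeps out the full interval even if interrupted.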
    int ret;
    do {
      ret = nanosleep(&t, &t);
    } while (ret != 0);
  }
  nsecs_t endRealTime __unused = systemTime();
  ALOGVV("Frame cycle took %d ms, target %d ms",
         (int)((endRealTime - startRealTime) / 1000000),
         (int)(frameDuration / 1000000));
  return true;
}

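// Simulates a Bayer sensor per pixel: scene electrons are clamped at the
// full-well capacity, scaled by the analog gain into raw counts, offset by
// the black level, and perturbed by an approximation of read + photon noise.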
void Sensor::captureRaw(uint8_t *img, uint32_t gain, uint32_t stride) {
  float totalGain = gain / 100.0 * kBaseGainFactor;
  float noiseVarGain = totalGain * totalGain;
  float readNoiseVar =
      kReadNoiseVarBeforeGain * noiseVarGain + kReadNoiseVarAfterGain;

  int bayerSelect[4] = {Scene::R, Scene::Gr, Scene::Gb, Scene::B};  // RGGB
  mScene.setReadoutPixel(0, 0);
  for (unsigned int y = 0; y < mResolution[1]; y++) {
    int *bayerRow = bayerSelect + (y & 0x1) * 2;
    uint16_t *px = (uint16_t *)img + y * stride;
    for (unsigned int x = 0; x < mResolution[0]; x++) {
      uint32_t electronCount;
      electronCount = mScene.getPixelElectrons()[bayerRow[x & 0x1]];

      // TODO: Better pixel saturation curve?
      electronCount = (electronCount < kSaturationElectrons)
                          ? electronCount
                          : kSaturationElectrons;

      // TODO: Better A/D saturation curve?
      uint16_t rawCount = electronCount * totalGain;
      rawCount = (rawCount < kMaxRawValue) ? rawCount : kMaxRawValue;

      // Calculate noise value
      // TODO: Use more-correct Gaussian instead of uniform noise
      float photonNoiseVar = electronCount * noiseVarGain;
      float noiseStddev = sqrtf_approx(readNoiseVar + photonNoiseVar);
      // Scaled to roughly match gaussian/uniform noise stddev
      float noiseSample = std::rand() * (2.5 / (1.0 + RAND_MAX)) - 1.25;

      rawCount += kBlackLevel;
      rawCount += noiseStddev * noiseSample;

      *px++ = rawCount;
    }
    // TODO: Handle this better
    // simulatedTime += mRowReadoutTime;
  }
  ALOGVV("Raw sensor image captured");
}

void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t stride) {
  float totalGain = gain / 100.0 * kBaseGainFactor;
  // In fixed-point math, calculate total scaling from electrons to 8bpp
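  // (At the default sensitivity of 100, totalGain == kBaseGainFactor == 2.0,
  // so scale64x == 64 * 2 * 255 / 4000 == 8.)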
  int scale64x = 64 * totalGain * 255 / kMaxRawValue;
  uint32_t inc = ceil((float)mResolution[0] / stride);

  for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++) {
    uint8_t *px = img + outY * stride * 4;
    mScene.setReadoutPixel(0, y);
    for (unsigned int x = 0; x < mResolution[0]; x += inc) {
      uint32_t rCount, gCount, bCount;
      // TODO: Perfect demosaicing is a cheat
      const uint32_t *pixel = mScene.getPixelElectrons();
      rCount = pixel[Scene::R] * scale64x;
      gCount = pixel[Scene::Gr] * scale64x;
      bCount = pixel[Scene::B] * scale64x;

      *px++ = rCount < 255 * 64 ? rCount / 64 : 255;
      *px++ = gCount < 255 * 64 ? gCount / 64 : 255;
      *px++ = bCount < 255 * 64 ? bCount / 64 : 255;
      *px++ = 255;
      for (unsigned int j = 1; j < inc; j++) mScene.getPixelElectrons();
    }
    // TODO: Handle this better
    // simulatedTime += mRowReadoutTime;
  }
  ALOGVV("RGBA sensor image captured");
}

void Sensor::captureRGB(uint8_t *img, uint32_t gain, uint32_t stride) {
  float totalGain = gain / 100.0 * kBaseGainFactor;
  // In fixed-point math, calculate total scaling from electrons to 8bpp
  int scale64x = 64 * totalGain * 255 / kMaxRawValue;
  uint32_t inc = ceil((float)mResolution[0] / stride);

  for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++) {
    mScene.setReadoutPixel(0, y);
    uint8_t *px = img + outY * stride * 3;
    for (unsigned int x = 0; x < mResolution[0]; x += inc) {
      uint32_t rCount, gCount, bCount;
      // TODO: Perfect demosaicing is a cheat
      const uint32_t *pixel = mScene.getPixelElectrons();
      rCount = pixel[Scene::R] * scale64x;
      gCount = pixel[Scene::Gr] * scale64x;
      bCount = pixel[Scene::B] * scale64x;

      *px++ = rCount < 255 * 64 ? rCount / 64 : 255;
      *px++ = gCount < 255 * 64 ? gCount / 64 : 255;
      *px++ = bCount < 255 * 64 ? bCount / 64 : 255;
      for (unsigned int j = 1; j < inc; j++) mScene.getPixelElectrons();
    }
    // TODO: Handle this better
    // simulatedTime += mRowReadoutTime;
  }
  ALOGVV("RGB sensor image captured");
}

void Sensor::captureNV21(uint8_t *img, uint32_t gain, uint32_t stride) {
  float totalGain = gain / 100.0 * kBaseGainFactor;
  // Using fixed-point math with 6 bits of fractional precision.
  // In fixed-point math, calculate total scaling from electrons to 8bpp
  const int scale64x = 64 * totalGain * 255 / kMaxRawValue;
  // In fixed-point math, saturation point of sensor after gain
  const int saturationPoint = 64 * 255;
  // Fixed-point coefficients for RGB-YUV transform
  // Based on JFIF RGB->YUV transform.
  // Cb/Cr offset scaled by 64x twice since they're applied post-multiply
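  // (so the usual +128 chroma bias appears here as 128 * 64 * 64 = 524288)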
  const int rgbToY[] = {19, 37, 7};
  const int rgbToCb[] = {-10, -21, 32, 524288};
  const int rgbToCr[] = {32, -26, -5, 524288};
  // Scale back to 8bpp non-fixed-point
  const int scaleOut = 64;
  const int scaleOutSq = scaleOut * scaleOut;  // after multiplies

  // inc = how many pixels to skip while reading every next pixel
  // horizontally.
  uint32_t inc = ceil((float)mResolution[0] / stride);
  // outH = projected vertical resolution based on stride.
  uint32_t outH = mResolution[1] / inc;
  for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++) {
    uint8_t *pxY = img + outY * stride;
    uint8_t *pxVU = img + (outH + outY / 2) * stride;
    mScene.setReadoutPixel(0, y);
    for (unsigned int outX = 0; outX < stride; outX++) {
      int32_t rCount, gCount, bCount;
      // TODO: Perfect demosaicing is a cheat
      const uint32_t *pixel = mScene.getPixelElectrons();
      rCount = pixel[Scene::R] * scale64x;
      rCount = rCount < saturationPoint ? rCount : saturationPoint;
      gCount = pixel[Scene::Gr] * scale64x;
      gCount = gCount < saturationPoint ? gCount : saturationPoint;
      bCount = pixel[Scene::B] * scale64x;
      bCount = bCount < saturationPoint ? bCount : saturationPoint;

      *pxY++ = (rgbToY[0] * rCount + rgbToY[1] * gCount + rgbToY[2] * bCount) /
               scaleOutSq;
      if (outY % 2 == 0 && outX % 2 == 0) {
        *pxVU++ = (rgbToCb[0] * rCount + rgbToCb[1] * gCount +
                   rgbToCb[2] * bCount + rgbToCb[3]) /
                  scaleOutSq;
        *pxVU++ = (rgbToCr[0] * rCount + rgbToCr[1] * gCount +
                   rgbToCr[2] * bCount + rgbToCr[3]) /
                  scaleOutSq;
      }

      // Skip unprocessed pixels from sensor.
      for (unsigned int j = 1; j < inc; j++) mScene.getPixelElectrons();
    }
  }
  ALOGVV("NV21 sensor image captured");
}

void Sensor::captureDepth(uint8_t *img, uint32_t gain, uint32_t stride) {
  float totalGain = gain / 100.0 * kBaseGainFactor;
  // In fixed-point math, calculate scaling factor to 13bpp millimeters
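  // (13 bits of depth: values saturate at 8191 mm, and saturated samples are
  // written out as 0 below)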
  int scale64x = 64 * totalGain * 8191 / kMaxRawValue;
  uint32_t inc = ceil((float)mResolution[0] / stride);

  for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++) {
    mScene.setReadoutPixel(0, y);
    uint16_t *px = ((uint16_t *)img) + outY * stride;
    for (unsigned int x = 0; x < mResolution[0]; x += inc) {
      uint32_t depthCount;
      // TODO: Make up real depth scene instead of using green channel
      // as depth
      const uint32_t *pixel = mScene.getPixelElectrons();
      depthCount = pixel[Scene::Gr] * scale64x;

      *px++ = depthCount < 8191 * 64 ? depthCount / 64 : 0;
      for (unsigned int j = 1; j < inc; j++) mScene.getPixelElectrons();
    }
    // TODO: Handle this better
    // simulatedTime += mRowReadoutTime;
  }
  ALOGVV("Depth sensor image captured");
}

void Sensor::captureDepthCloud(uint8_t *img) {
#if defined HAL_DATASPACE_DEPTH
  android_depth_points *cloud = reinterpret_cast<android_depth_points *>(img);

  cloud->num_points = 16;

  // TODO: Create point cloud values that match RGB scene
  const int FLOATS_PER_POINT = 4;
  const float JITTER_STDDEV = 0.1f;
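  // Uniform samples in [-1.25, 1.25) stand in for Gaussian jitter here, as
  // in the noise model of captureRaw() above.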
  for (size_t y = 0, i = 0; y < 4; y++) {
    for (size_t x = 0; x < 4; x++, i++) {
      float randSampleX = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
      randSampleX *= JITTER_STDDEV;

      float randSampleY = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
      randSampleY *= JITTER_STDDEV;

      float randSampleZ = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
      randSampleZ *= JITTER_STDDEV;

      cloud->xyzc_points[i * FLOATS_PER_POINT + 0] = x - 1.5f + randSampleX;
      cloud->xyzc_points[i * FLOATS_PER_POINT + 1] = y - 1.5f + randSampleY;
      cloud->xyzc_points[i * FLOATS_PER_POINT + 2] = 3.f + randSampleZ;
      cloud->xyzc_points[i * FLOATS_PER_POINT + 3] = 0.8f;
    }
  }

  ALOGVV("Depth point cloud captured");
#else
  (void)img;  // silence unused-parameter warnings when depth is unsupported
#endif
}

}  // namespace android