/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
//#define LOG_NNDEBUG 0
#define LOG_TAG "EmulatedCamera2_Sensor"

#ifdef LOG_NNDEBUG
#define ALOGVV(...) ALOGV(__VA_ARGS__)
#else
#define ALOGVV(...) ((void)0)
#endif

#include <utils/Log.h>

#include "../EmulatedFakeCamera2.h"
#include "Sensor.h"
#include <cmath>
#include <cstdlib>
#include <cstring> // for memcpy in sqrtf_approx
#include "system/camera_metadata.h"

namespace android {

//const nsecs_t Sensor::kExposureTimeRange[2] =
//    {1000L, 30000000000L} ; // 1 us - 30 sec
//const nsecs_t Sensor::kFrameDurationRange[2] =
//    {33331760L, 30000000000L}; // ~1/30 s - 30 sec
const nsecs_t Sensor::kExposureTimeRange[2] =
    {1000L, 300000000L}; // 1 us - 0.3 sec
const nsecs_t Sensor::kFrameDurationRange[2] =
    {33331760L, 300000000L}; // ~1/30 s - 0.3 sec

const nsecs_t Sensor::kMinVerticalBlank = 10000L;

const uint8_t Sensor::kColorFilterArrangement =
    ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGGB;

// Output image data characteristics
const uint32_t Sensor::kMaxRawValue = 4000;
const uint32_t Sensor::kBlackLevel  = 1000;

// Sensor sensitivity
const float Sensor::kSaturationVoltage      = 0.520f;
const uint32_t Sensor::kSaturationElectrons = 2000;
const float Sensor::kVoltsPerLuxSecond      = 0.100f;

const float Sensor::kElectronsPerLuxSecond =
        Sensor::kSaturationElectrons / Sensor::kSaturationVoltage
        * Sensor::kVoltsPerLuxSecond;

const float Sensor::kBaseGainFactor = (float)Sensor::kMaxRawValue /
            Sensor::kSaturationElectrons;
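
// With the constants above, kElectronsPerLuxSecond works out to
// 2000 / 0.520 * 0.100 ~= 384.6 electrons per lux-second, and
// kBaseGainFactor to 4000 / 2000 = 2.0 raw counts per electron at
// unity gain.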

const float Sensor::kReadNoiseStddevBeforeGain = 1.177; // in electrons
const float Sensor::kReadNoiseStddevAfterGain  = 2.100; // in digital counts
const float Sensor::kReadNoiseVarBeforeGain =
            Sensor::kReadNoiseStddevBeforeGain *
            Sensor::kReadNoiseStddevBeforeGain;
const float Sensor::kReadNoiseVarAfterGain =
            Sensor::kReadNoiseStddevAfterGain *
            Sensor::kReadNoiseStddevAfterGain;

const int32_t Sensor::kSensitivityRange[2] = {100, 1600};
const uint32_t Sensor::kDefaultSensitivity = 100;

/** A few utility functions for math, normal distributions */

// Take advantage of the IEEE floating-point format to calculate an
// approximate square root, accurate to within +-3.6%.
float sqrtf_approx(float r) {
    // The modifier is based on the IEEE floating-point representation; the
    // manipulations boil down to finding the approximate log2, dividing it
    // by two, and then inverting the log2. A bias is added to make the
    // relative error symmetric about the real answer.
    const int32_t modifier = 0x1FBB4000;

    // Use memcpy for the bit-level reinterpretation; casting between
    // unrelated pointer types is undefined behavior in C++.
    int32_t r_i;
    memcpy(&r_i, &r, sizeof(r_i));
    r_i = (r_i >> 1) + modifier;

    memcpy(&r, &r_i, sizeof(r));
    return r;
}
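
// As a quick sanity check, sqrtf_approx(4.0f) returns roughly 2.0f,
// within the +-3.6% relative error bound noted above.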

Sensor::Sensor(uint32_t width, uint32_t height):
        Thread(false),
        mResolution{width, height},
        mActiveArray{0, 0, width, height},
        mRowReadoutTime(kFrameDurationRange[0] / height),
        mGotVSync(false),
        mExposureTime(kFrameDurationRange[0] - kMinVerticalBlank),
        mFrameDuration(kFrameDurationRange[0]),
        mGainFactor(kDefaultSensitivity),
        mNextBuffers(NULL),
        mFrameNumber(0),
        mCapturedBuffers(NULL),
        mListener(NULL),
        mScene(width, height, kElectronsPerLuxSecond)
{
    ALOGV("Sensor created with pixel array %d x %d", width, height);
}

Sensor::~Sensor() {
    shutDown();
}

status_t Sensor::startUp() {
    ALOGV("%s: E", __FUNCTION__);

    int res;
    mCapturedBuffers = NULL;
    res = run("EmulatedFakeCamera2::Sensor",
            ANDROID_PRIORITY_URGENT_DISPLAY);

    if (res != OK) {
        ALOGE("Unable to start up sensor capture thread: %d", res);
    }
    return res;
}

status_t Sensor::shutDown() {
    ALOGV("%s: E", __FUNCTION__);

    int res;
    res = requestExitAndWait();
    if (res != OK) {
        ALOGE("Unable to shut down sensor capture thread: %d", res);
    }
    return res;
}

Scene &Sensor::getScene() {
    return mScene;
}

void Sensor::setExposureTime(uint64_t ns) {
    Mutex::Autolock lock(mControlMutex);
    ALOGVV("Exposure set to %f", ns/1000000.f);
    mExposureTime = ns;
}

void Sensor::setFrameDuration(uint64_t ns) {
    Mutex::Autolock lock(mControlMutex);
    ALOGVV("Frame duration set to %f", ns/1000000.f);
    mFrameDuration = ns;
}

void Sensor::setSensitivity(uint32_t gain) {
    Mutex::Autolock lock(mControlMutex);
    ALOGVV("Gain set to %d", gain);
    mGainFactor = gain;
}

void Sensor::setDestinationBuffers(Buffers *buffers) {
    Mutex::Autolock lock(mControlMutex);
    mNextBuffers = buffers;
}

void Sensor::setFrameNumber(uint32_t frameNumber) {
    Mutex::Autolock lock(mControlMutex);
    mFrameNumber = frameNumber;
}

bool Sensor::waitForVSync(nsecs_t reltime) {
    int res;
    Mutex::Autolock lock(mControlMutex);

    mGotVSync = false;
    res = mVSync.waitRelative(mControlMutex, reltime);
    if (res != OK && res != TIMED_OUT) {
        ALOGE("%s: Error waiting for VSync signal: %d", __FUNCTION__, res);
        return false;
    }
    return mGotVSync;
}
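
// Note: this pairs with the capture thread in threadLoop(). The capture
// thread signals mReadoutAvailable once a frame's buffers are ready, and
// this consumer signals mReadoutComplete back so the capture thread can
// publish its next frame without overwriting an unconsumed one.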

bool Sensor::waitForNewFrame(nsecs_t reltime,
        nsecs_t *captureTime) {
    Mutex::Autolock lock(mReadoutMutex);
    if (mCapturedBuffers == NULL) {
        int res;
        res = mReadoutAvailable.waitRelative(mReadoutMutex, reltime);
        if (res == TIMED_OUT) {
            return false;
        } else if (res != OK || mCapturedBuffers == NULL) {
            ALOGE("Error waiting for sensor readout signal: %d", res);
            return false;
        }
    }
    mReadoutComplete.signal();

    *captureTime = mCaptureTime;
    mCapturedBuffers = NULL;
    return true;
}

Sensor::SensorListener::~SensorListener() {
}

void Sensor::setSensorListener(SensorListener *listener) {
    Mutex::Autolock lock(mControlMutex);
    mListener = listener;
}

status_t Sensor::readyToRun() {
    ALOGV("Starting up sensor thread");
    mStartupTime = systemTime();
    mNextCaptureTime = 0;
    mNextCapturedBuffers = NULL;
    return OK;
}

bool Sensor::threadLoop() {
    /**
     * Sensor capture operation main loop.
     *
     * Stages are out-of-order relative to a single frame's processing, but
     * in-order in time.
     */
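
    /*
     * Rough timeline of one pass through this loop (one simulated frame):
     *   Stage 1: latch the control parameters for the upcoming frame and
     *            signal VSync.
     *   Stage 3: hand the previous frame's buffers off to the readout
     *            waiter (waitForNewFrame).
     *   Stage 2: render the upcoming frame into its destination buffers.
     *   Finally, sleep out the remainder of the frame duration.
     */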

    /**
     * Stage 1: Read in latest control parameters
     */
    uint64_t exposureDuration;
    uint64_t frameDuration;
    uint32_t gain;
    Buffers *nextBuffers;
    uint32_t frameNumber;
    SensorListener *listener = NULL;
    {
        Mutex::Autolock lock(mControlMutex);
        exposureDuration = mExposureTime;
        frameDuration    = mFrameDuration;
        gain             = mGainFactor;
        nextBuffers      = mNextBuffers;
        frameNumber      = mFrameNumber;
        listener         = mListener;
        // Don't reuse a buffer set
        mNextBuffers = NULL;

        // Signal VSync for start of readout
        ALOGVV("Sensor VSync");
        mGotVSync = true;
        mVSync.signal();
    }

    /**
     * Stage 3: Read out latest captured image
     */

    Buffers *capturedBuffers = NULL;
    nsecs_t captureTime = 0;

    nsecs_t startRealTime  = systemTime();
    // Stagefright cares about system time for timestamps, so base simulated
    // time on that.
    nsecs_t simulatedTime    = startRealTime;
    nsecs_t frameEndRealTime = startRealTime + frameDuration;
    nsecs_t frameReadoutEndRealTime = startRealTime +
            mRowReadoutTime * mResolution[1];
    (void)frameReadoutEndRealTime; // unused; readout end is not simulated separately

    if (mNextCapturedBuffers != NULL) {
        ALOGVV("Sensor starting readout");
        // Pretend we're doing readout now; will signal once enough time has elapsed
        capturedBuffers = mNextCapturedBuffers;
        captureTime    = mNextCaptureTime;
    }
    simulatedTime += mRowReadoutTime + kMinVerticalBlank;

    // TODO: Move this signal to another thread to simulate readout
    // time properly
    if (capturedBuffers != NULL) {
        ALOGVV("Sensor readout complete");
        Mutex::Autolock lock(mReadoutMutex);
        if (mCapturedBuffers != NULL) {
            ALOGV("Waiting for readout thread to catch up!");
            mReadoutComplete.wait(mReadoutMutex);
        }

        mCapturedBuffers = capturedBuffers;
        mCaptureTime = captureTime;
        mReadoutAvailable.signal();
        capturedBuffers = NULL;
    }

    /**
     * Stage 2: Capture new image
     */
    mNextCaptureTime = simulatedTime;
    mNextCapturedBuffers = nextBuffers;

    if (mNextCapturedBuffers != NULL) {
        if (listener != NULL) {
            listener->onSensorEvent(frameNumber, SensorListener::EXPOSURE_START,
                    mNextCaptureTime);
        }
        ALOGVV("Starting next capture: Exposure: %f ms, gain: %d",
                (float)exposureDuration/1e6, gain);
        mScene.setExposureDuration((float)exposureDuration/1e9);
        mScene.calculateScene(mNextCaptureTime);

        // Might be adding more buffers, so size isn't constant
        for (size_t i = 0; i < mNextCapturedBuffers->size(); i++) {
            const StreamBuffer &b = (*mNextCapturedBuffers)[i];
            ALOGVV("Sensor capturing buffer %zu: stream %d,"
                    " %d x %d, format %x, stride %d, buf %p, img %p",
                    i, b.streamId, b.width, b.height, b.format, b.stride,
                    b.buffer, b.img);
            switch (b.format) {
                case HAL_PIXEL_FORMAT_RAW16:
                    captureRaw(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_RGB_888:
                    captureRGB(b.img, gain, b.width, b.height);
                    break;
                case HAL_PIXEL_FORMAT_RGBA_8888:
                    captureRGBA(b.img, gain, b.width, b.height);
                    break;
                case HAL_PIXEL_FORMAT_BLOB:
                    if (b.dataSpace != HAL_DATASPACE_DEPTH) {
                        // Add an auxiliary buffer of the right size
                        // Assumes only one BLOB (JPEG) buffer in
                        // mNextCapturedBuffers
                        StreamBuffer bAux;
                        bAux.streamId = 0;
                        bAux.width = b.width;
                        bAux.height = b.height;
                        bAux.format = HAL_PIXEL_FORMAT_YCbCr_420_888;
                        bAux.stride = b.width;
                        bAux.buffer = NULL;
                        // TODO: Reuse these
                        bAux.img = new uint8_t[b.width * b.height * 3];
                        mNextCapturedBuffers->push_back(bAux);
                    } else {
                        captureDepthCloud(b.img);
                    }
                    break;
                case HAL_PIXEL_FORMAT_YCbCr_420_888:
                    captureNV21(b.img, gain, b.width, b.height);
                    break;
                case HAL_PIXEL_FORMAT_YV12:
                    // TODO:
                    ALOGE("%s: Format %x is TODO", __FUNCTION__, b.format);
                    break;
                case HAL_PIXEL_FORMAT_Y16:
                    captureDepth(b.img, gain, b.width, b.height);
                    break;
                default:
                    ALOGE("%s: Unknown format %x, no output", __FUNCTION__,
                            b.format);
                    break;
            }
        }
    }

    ALOGVV("Sensor vertical blanking interval");
    nsecs_t workDoneRealTime = systemTime();
    const nsecs_t timeAccuracy = 2e6; // 2 ms of imprecision is ok
    if (workDoneRealTime < frameEndRealTime - timeAccuracy) {
        timespec t;
        t.tv_sec  = (frameEndRealTime - workDoneRealTime) / 1000000000L;
        t.tv_nsec = (frameEndRealTime - workDoneRealTime) % 1000000000L;

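        // nanosleep() writes the unslept remainder back into 't' when it is
        // interrupted by a signal, so the loop below resumes the wait rather
        // than restarting it with the full duration.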
        int ret;
        do {
            ret = nanosleep(&t, &t);
        } while (ret != 0);
    }
    nsecs_t endRealTime = systemTime();
    ALOGVV("Frame cycle took %d ms, target %d ms",
            (int)((endRealTime - startRealTime)/1000000),
            (int)(frameDuration / 1000000));
    return true;
}

void Sensor::captureRaw(uint8_t *img, uint32_t gain, uint32_t stride) {
    float totalGain = gain/100.0 * kBaseGainFactor;
    float noiseVarGain = totalGain * totalGain;
    float readNoiseVar = kReadNoiseVarBeforeGain * noiseVarGain
            + kReadNoiseVarAfterGain;
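    // For example, at the default sensitivity (gain == 100) totalGain equals
    // kBaseGainFactor (2.0), so noiseVarGain is 4.0 and readNoiseVar is about
    // 1.177^2 * 4.0 + 2.1^2 ~= 9.95 squared digital counts.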

    int bayerSelect[4] = {Scene::R, Scene::Gr, Scene::Gb, Scene::B}; // RGGB
    mScene.setReadoutPixel(0, 0);
    for (unsigned int y = 0; y < mResolution[1]; y++) {
        int *bayerRow = bayerSelect + (y & 0x1) * 2;
        uint16_t *px = (uint16_t*)img + y * stride;
        for (unsigned int x = 0; x < mResolution[0]; x++) {
            uint32_t electronCount;
            electronCount = mScene.getPixelElectrons()[bayerRow[x & 0x1]];

            // TODO: Better pixel saturation curve?
            electronCount = (electronCount < kSaturationElectrons) ?
                    electronCount : kSaturationElectrons;

            // TODO: Better A/D saturation curve?
            uint16_t rawCount = electronCount * totalGain;
            rawCount = (rawCount < kMaxRawValue) ? rawCount : kMaxRawValue;

            // Calculate noise value
            // TODO: Use more-correct Gaussian instead of uniform noise
            float photonNoiseVar = electronCount * noiseVarGain;
            float noiseStddev = sqrtf_approx(readNoiseVar + photonNoiseVar);
            // Scaled to roughly match gaussian/uniform noise stddev
            float noiseSample = std::rand() * (2.5 / (1.0 + RAND_MAX)) - 1.25;

            rawCount += kBlackLevel;
            rawCount += noiseStddev * noiseSample;

            *px++ = rawCount;
        }
        // TODO: Handle this better
        //simulatedTime += mRowReadoutTime;
    }
    ALOGVV("Raw sensor image captured");
}

void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) {
    float totalGain = gain/100.0 * kBaseGainFactor;
    // In fixed-point math, calculate total scaling from electrons to 8bpp
    int scale64x = 64 * totalGain * 255 / kMaxRawValue;
    unsigned int DivH = (float)mResolution[1]/height * (0x1 << 10);
    unsigned int DivW = (float)mResolution[0]/width * (0x1 << 10);
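    // DivH/DivW are 10-bit fixed-point downscale ratios from the sensor
    // resolution to the output size. For example, scaling a 640-wide sensor
    // to a 320-wide output gives DivW = 2 << 10, so x = (outX * DivW) >> 10
    // steps through every second sensor column.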

    for (unsigned int outY = 0; outY < height; outY++) {
        unsigned int y = outY * DivH >> 10;
        uint8_t *px = img + outY * width * 4;
        mScene.setReadoutPixel(0, y);
        unsigned int lastX = 0;
        const uint32_t *pixel = mScene.getPixelElectrons();
        for (unsigned int outX = 0; outX < width; outX++) {
            uint32_t rCount, gCount, bCount;
            unsigned int x = outX * DivW >> 10;
            if (x - lastX > 0) {
                for (unsigned int k = 0; k < (x-lastX); k++) {
                    pixel = mScene.getPixelElectrons();
                }
            }
            lastX = x;
            // TODO: Perfect demosaicing is a cheat
            rCount = (pixel[Scene::R]+(outX+outY)%64)  * scale64x;
            gCount = (pixel[Scene::Gr]+(outX+outY)%64) * scale64x;
            bCount = (pixel[Scene::B]+(outX+outY)%64)  * scale64x;

            *px++ = rCount < 255*64 ? rCount / 64 : 255;
            *px++ = gCount < 255*64 ? gCount / 64 : 255;
            *px++ = bCount < 255*64 ? bCount / 64 : 255;
            *px++ = 255;
        }
        // TODO: Handle this better
        //simulatedTime += mRowReadoutTime;
    }
    ALOGVV("RGBA sensor image captured");
}

void Sensor::captureRGB(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) {
    float totalGain = gain/100.0 * kBaseGainFactor;
    // In fixed-point math, calculate total scaling from electrons to 8bpp
    int scale64x = 64 * totalGain * 255 / kMaxRawValue;
    unsigned int DivH = (float)mResolution[1]/height * (0x1 << 10);
    unsigned int DivW = (float)mResolution[0]/width * (0x1 << 10);

    for (unsigned int outY = 0; outY < height; outY++) {
        unsigned int y = outY * DivH >> 10;
        uint8_t *px = img + outY * width * 3;
        mScene.setReadoutPixel(0, y);
        unsigned int lastX = 0;
        const uint32_t *pixel = mScene.getPixelElectrons();
        for (unsigned int outX = 0; outX < width; outX++) {
            uint32_t rCount, gCount, bCount;
            unsigned int x = outX * DivW >> 10;
            if (x - lastX > 0) {
                for (unsigned int k = 0; k < (x-lastX); k++) {
                    pixel = mScene.getPixelElectrons();
                }
            }
            lastX = x;
            // TODO: Perfect demosaicing is a cheat
            rCount = (pixel[Scene::R]+(outX+outY)%64)  * scale64x;
            gCount = (pixel[Scene::Gr]+(outX+outY)%64) * scale64x;
            bCount = (pixel[Scene::B]+(outX+outY)%64)  * scale64x;

            *px++ = rCount < 255*64 ? rCount / 64 : 255;
            *px++ = gCount < 255*64 ? gCount / 64 : 255;
            *px++ = bCount < 255*64 ? bCount / 64 : 255;
        }
    }
    ALOGVV("RGB sensor image captured");
}

void Sensor::captureNV21(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) {
    float totalGain = gain/100.0 * kBaseGainFactor;
    // Using fixed-point math with 6 bits of fractional precision.
    // In fixed-point math, calculate total scaling from electrons to 8bpp
    const int scale64x = 64 * totalGain * 255 / kMaxRawValue;
    // In fixed-point math, saturation point of sensor after gain
    const int saturationPoint = 64 * 255;
    // Fixed-point coefficients for RGB-YUV transform
    // Based on JFIF RGB->YUV transform.
    // Cb/Cr offset scaled by 64x twice since they're applied post-multiply
    const int rgbToY[]  = {19, 37, 7};
    const int rgbToCb[] = {-10, -21, 32, 524288};
    const int rgbToCr[] = {32, -26, -5, 524288};
    // Scale back to 8bpp non-fixed-point
    const int scaleOut = 64;
    const int scaleOutSq = scaleOut * scaleOut; // after multiplies
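    // As a sanity check, dividing the fixed-point coefficients by scaleOut
    // recovers approximately the JFIF weights: 19/64 ~= 0.297, 37/64 ~= 0.578,
    // 7/64 ~= 0.109 (vs. 0.299/0.587/0.114 exactly), and the chroma offset
    // 524288 / scaleOutSq = 128.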

    unsigned int DivH = (float)mResolution[1]/height * (0x1 << 10);
    unsigned int DivW = (float)mResolution[0]/width * (0x1 << 10);
    for (unsigned int outY = 0; outY < height; outY++) {
        unsigned int y = outY * DivH >> 10;
        uint8_t *pxY = img + outY * width;
        uint8_t *pxVU = img + (height + outY / 2) * width;
        mScene.setReadoutPixel(0, y);
        unsigned int lastX = 0;
        const uint32_t *pixel = mScene.getPixelElectrons();
        for (unsigned int outX = 0; outX < width; outX++) {
            int32_t rCount, gCount, bCount;
            unsigned int x = outX * DivW >> 10;
            if (x - lastX > 0) {
                for (unsigned int k = 0; k < (x-lastX); k++) {
                    pixel = mScene.getPixelElectrons();
                }
            }
            lastX = x;
            // Use a slightly different color for each pixel of the same
            // scene; the resulting larger JPEG image size is required by the
            // CTS test android.provider.cts.MediaStoreUiTest#testImageCapture
            rCount = (pixel[Scene::R]+(outX+outY)%64)  * scale64x;
            rCount = rCount < saturationPoint ? rCount : saturationPoint;
            gCount = (pixel[Scene::Gr]+(outX+outY)%64) * scale64x;
            gCount = gCount < saturationPoint ? gCount : saturationPoint;
            bCount = (pixel[Scene::B]+(outX+outY)%64)  * scale64x;
            bCount = bCount < saturationPoint ? bCount : saturationPoint;
            *pxY++ = (rgbToY[0] * rCount +
                    rgbToY[1] * gCount +
                    rgbToY[2] * bCount) / scaleOutSq;
            if (outY % 2 == 0 && outX % 2 == 0) {
                *pxVU++ = (rgbToCr[0] * rCount +
                        rgbToCr[1] * gCount +
                        rgbToCr[2] * bCount +
                        rgbToCr[3]) / scaleOutSq;
                *pxVU++ = (rgbToCb[0] * rCount +
                        rgbToCb[1] * gCount +
                        rgbToCb[2] * bCount +
                        rgbToCb[3]) / scaleOutSq;
            }
        }
    }
    ALOGVV("NV21 sensor image captured");
}

void Sensor::captureDepth(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) {
    float totalGain = gain/100.0 * kBaseGainFactor;
    // In fixed-point math, calculate scaling factor to 13bpp millimeters
    int scale64x = 64 * totalGain * 8191 / kMaxRawValue;
    unsigned int DivH = (float)mResolution[1]/height * (0x1 << 10);
    unsigned int DivW = (float)mResolution[0]/width * (0x1 << 10);

    for (unsigned int outY = 0; outY < height; outY++) {
        unsigned int y = outY * DivH >> 10;
        uint16_t *px = ((uint16_t*)img) + outY * width;
        mScene.setReadoutPixel(0, y);
        unsigned int lastX = 0;
        const uint32_t *pixel = mScene.getPixelElectrons();
        for (unsigned int outX = 0; outX < width; outX++) {
            uint32_t depthCount;
            unsigned int x = outX * DivW >> 10;
            if (x - lastX > 0) {
                for (unsigned int k = 0; k < (x-lastX); k++) {
                    pixel = mScene.getPixelElectrons();
                }
            }
            lastX = x;
            depthCount = pixel[Scene::Gr] * scale64x;
            *px++ = depthCount < 8191*64 ? depthCount / 64 : 0;
        }
        // TODO: Handle this better
        //simulatedTime += mRowReadoutTime;
    }
    ALOGVV("Depth sensor image captured");
}

void Sensor::captureDepthCloud(uint8_t *img) {
    android_depth_points *cloud = reinterpret_cast<android_depth_points*>(img);

    cloud->num_points = 16;

    // TODO: Create point cloud values that match RGB scene
    const int FLOATS_PER_POINT = 4;
    const float JITTER_STDDEV = 0.1f;
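    // Each point in xyzc_points is FLOATS_PER_POINT floats: X, Y, Z, then a
    // confidence value, per the android_depth_points layout. The loop below
    // fills a jittered 4x4 grid of points roughly 3 units in front of the
    // sensor, each with a fixed confidence of 0.8.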
    for (size_t y = 0, i = 0; y < 4; y++) {
        for (size_t x = 0; x < 4; x++, i++) {
            float randSampleX = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
            randSampleX *= JITTER_STDDEV;

            float randSampleY = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
            randSampleY *= JITTER_STDDEV;

            float randSampleZ = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
            randSampleZ *= JITTER_STDDEV;

            cloud->xyzc_points[i * FLOATS_PER_POINT + 0] = x - 1.5f + randSampleX;
            cloud->xyzc_points[i * FLOATS_PER_POINT + 1] = y - 1.5f + randSampleY;
            cloud->xyzc_points[i * FLOATS_PER_POINT + 2] = 3.f + randSampleZ;
            cloud->xyzc_points[i * FLOATS_PER_POINT + 3] = 0.8f;
        }
    }

    ALOGVV("Depth point cloud captured");
}

} // namespace android