/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
//#define LOG_NNDEBUG 0
#define LOG_TAG "EmulatedCamera2_Sensor"

#ifdef LOG_NNDEBUG
#define ALOGVV(...) ALOGV(__VA_ARGS__)
#else
#define ALOGVV(...) ((void)0)
#endif

#include <utils/Log.h>

#include "../EmulatedFakeCamera2.h"
#include "Sensor.h"
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "system/camera_metadata.h"

namespace android {

const unsigned int Sensor::kResolution[2] = {640, 480};

const nsecs_t Sensor::kExposureTimeRange[2] =
        {1000L, 30000000000L}; // 1 us - 30 sec
const nsecs_t Sensor::kFrameDurationRange[2] =
        {33331760L, 30000000000L}; // ~1/30 s - 30 sec
const nsecs_t Sensor::kMinVerticalBlank = 10000L;

const uint8_t Sensor::kColorFilterArrangement =
        ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGGB;

// Output image data characteristics
const uint32_t Sensor::kMaxRawValue = 4000;
const uint32_t Sensor::kBlackLevel = 1000;

// Sensor sensitivity
const float Sensor::kSaturationVoltage = 0.520f;
const uint32_t Sensor::kSaturationElectrons = 2000;
const float Sensor::kVoltsPerLuxSecond = 0.100f;

// 2000 e- / 0.520 V * 0.100 V/(lux-s) ~= 385 electrons per lux-second
const float Sensor::kElectronsPerLuxSecond =
        Sensor::kSaturationElectrons / Sensor::kSaturationVoltage
        * Sensor::kVoltsPerLuxSecond;

const float Sensor::kBaseGainFactor = (float)Sensor::kMaxRawValue /
        Sensor::kSaturationElectrons;

const float Sensor::kReadNoiseStddevBeforeGain = 1.177; // in electrons
const float Sensor::kReadNoiseStddevAfterGain = 2.100;  // in digital counts
const float Sensor::kReadNoiseVarBeforeGain =
        Sensor::kReadNoiseStddevBeforeGain *
        Sensor::kReadNoiseStddevBeforeGain;
const float Sensor::kReadNoiseVarAfterGain =
        Sensor::kReadNoiseStddevAfterGain *
        Sensor::kReadNoiseStddevAfterGain;

// While each row has to read out, reset, and then expose, the (reset +
// expose) sequence can be overlapped by other row readouts, so the final
// minimum frame duration is purely a function of row readout time, at least
// if there's a reasonable number of rows.
// Here: 33331760 ns / 480 rows ~= 69.4 us per row.
const nsecs_t Sensor::kRowReadoutTime =
        Sensor::kFrameDurationRange[0] / Sensor::kResolution[1];

const int32_t Sensor::kSensitivityRange[2] = {100, 1600};
const uint32_t Sensor::kDefaultSensitivity = 100;

/** A few utility functions for math, normal distributions */

// Take advantage of the IEEE floating-point format to calculate an
// approximate square root. Accurate to within +-3.6%.
float sqrtf_approx(float r) {
    // The modifier is based on the IEEE floating-point representation; the
    // manipulations boil down to finding the approximate log2, dividing it
    // by two, and then inverting the log2. A bias is added to make the
    // relative error symmetric about the real answer.
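    // In IEEE-754 terms, the bits of a positive float read as an integer
    // come out to roughly 2^23 * (log2(r) + 127), so halving the integer
    // representation halves log2(r). As a quick illustrative check,
    // sqrtf_approx(4.0f) should land within the stated +-3.6% of 2.0f.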
    const int32_t modifier = 0x1FBB4000;

    // Type-pun through memcpy rather than a pointer cast, to avoid
    // strict-aliasing undefined behavior.
    int32_t r_i;
    memcpy(&r_i, &r, sizeof(r_i));
    r_i = (r_i >> 1) + modifier;
    memcpy(&r, &r_i, sizeof(r));

    return r;
}

Sensor::Sensor():
        Thread(false),
        mGotVSync(false),
        mExposureTime(kFrameDurationRange[0] - kMinVerticalBlank),
        mFrameDuration(kFrameDurationRange[0]),
        mGainFactor(kDefaultSensitivity),
        mNextBuffers(NULL),
        mFrameNumber(0),
        mCapturedBuffers(NULL),
        mListener(NULL),
        mScene(kResolution[0], kResolution[1], kElectronsPerLuxSecond)
{
}

Sensor::~Sensor() {
    shutDown();
}

status_t Sensor::startUp() {
    ALOGV("%s: E", __FUNCTION__);

    int res;
    mCapturedBuffers = NULL;
    res = run("EmulatedFakeCamera2::Sensor",
            ANDROID_PRIORITY_URGENT_DISPLAY);

    if (res != OK) {
        ALOGE("Unable to start up sensor capture thread: %d", res);
    }
    return res;
}

status_t Sensor::shutDown() {
    ALOGV("%s: E", __FUNCTION__);

    int res;
    res = requestExitAndWait();
    if (res != OK) {
        ALOGE("Unable to shut down sensor capture thread: %d", res);
    }
    return res;
}

Scene &Sensor::getScene() {
    return mScene;
}

void Sensor::setExposureTime(uint64_t ns) {
    Mutex::Autolock lock(mControlMutex);
    ALOGVV("Exposure set to %f ms", ns / 1000000.f);
    mExposureTime = ns;
}

void Sensor::setFrameDuration(uint64_t ns) {
    Mutex::Autolock lock(mControlMutex);
    ALOGVV("Frame duration set to %f ms", ns / 1000000.f);
    mFrameDuration = ns;
}

void Sensor::setSensitivity(uint32_t gain) {
    Mutex::Autolock lock(mControlMutex);
    ALOGVV("Gain set to %u", gain);
    mGainFactor = gain;
}

void Sensor::setDestinationBuffers(Buffers *buffers) {
    Mutex::Autolock lock(mControlMutex);
    mNextBuffers = buffers;
}

void Sensor::setFrameNumber(uint32_t frameNumber) {
    Mutex::Autolock lock(mControlMutex);
    mFrameNumber = frameNumber;
}

bool Sensor::waitForVSync(nsecs_t reltime) {
    int res;
    Mutex::Autolock lock(mControlMutex);

    mGotVSync = false;
    res = mVSync.waitRelative(mControlMutex, reltime);
    if (res != OK && res != TIMED_OUT) {
        ALOGE("%s: Error waiting for VSync signal: %d", __FUNCTION__, res);
        return false;
    }
    return mGotVSync;
}

bool Sensor::waitForNewFrame(nsecs_t reltime,
        nsecs_t *captureTime) {
    Mutex::Autolock lock(mReadoutMutex);
    if (mCapturedBuffers == NULL) {
        int res;
        res = mReadoutAvailable.waitRelative(mReadoutMutex, reltime);
        if (res == TIMED_OUT) {
            return false;
        } else if (res != OK || mCapturedBuffers == NULL) {
            ALOGE("Error waiting for sensor readout signal: %d", res);
            return false;
        }
    } else {
        mReadoutComplete.signal();
    }

    *captureTime = mCaptureTime;
    mCapturedBuffers = NULL;
    return true;
}

Sensor::SensorListener::~SensorListener() {
}

void Sensor::setSensorListener(SensorListener *listener) {
    Mutex::Autolock lock(mControlMutex);
    mListener = listener;
}

status_t Sensor::readyToRun() {
    ALOGV("Starting up sensor thread");
    mStartupTime = systemTime();
    mNextCaptureTime = 0;
    mNextCapturedBuffers = NULL;
    return OK;
}

bool Sensor::threadLoop() {
    /**
     * Sensor capture operation main loop.
     *
     * Stages are out-of-order relative to a single frame's processing, but
     * in-order in time.
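     *
     * Concretely, one pass below latches the controls for frame N (Stage 1),
     * hands off the image captured on the previous pass (Stage 3), and then
     * begins the exposure of frame N (Stage 2), so a single frame moves
     * through stages 1 -> 2 -> 3 across consecutive loop iterations.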
     */

    /**
     * Stage 1: Read in latest control parameters
     */
    uint64_t exposureDuration;
    uint64_t frameDuration;
    uint32_t gain;
    Buffers *nextBuffers;
    uint32_t frameNumber;
    SensorListener *listener = NULL;
    {
        Mutex::Autolock lock(mControlMutex);
        exposureDuration = mExposureTime;
        frameDuration = mFrameDuration;
        gain = mGainFactor;
        nextBuffers = mNextBuffers;
        frameNumber = mFrameNumber;
        listener = mListener;
        // Don't reuse a buffer set
        mNextBuffers = NULL;

        // Signal VSync for start of readout
        ALOGVV("Sensor VSync");
        mGotVSync = true;
        mVSync.signal();
    }

    /**
     * Stage 3: Read out latest captured image
     */

    Buffers *capturedBuffers = NULL;
    nsecs_t captureTime = 0;

    nsecs_t startRealTime = systemTime();
    // Stagefright cares about system time for timestamps, so base simulated
    // time on that.
    nsecs_t simulatedTime = startRealTime;
    nsecs_t frameEndRealTime = startRealTime + frameDuration;
    nsecs_t frameReadoutEndRealTime = startRealTime +
            kRowReadoutTime * kResolution[1];

    if (mNextCapturedBuffers != NULL) {
        ALOGVV("Sensor starting readout");
        // Pretend we're doing readout now; will signal once enough time
        // has elapsed
        capturedBuffers = mNextCapturedBuffers;
        captureTime = mNextCaptureTime;
    }
    simulatedTime += kRowReadoutTime + kMinVerticalBlank;

    // TODO: Move this signal to another thread to simulate readout
    // time properly
    if (capturedBuffers != NULL) {
        ALOGVV("Sensor readout complete");
        Mutex::Autolock lock(mReadoutMutex);
        if (mCapturedBuffers != NULL) {
            ALOGV("Waiting for readout thread to catch up!");
            mReadoutComplete.wait(mReadoutMutex);
        }

        mCapturedBuffers = capturedBuffers;
        mCaptureTime = captureTime;
        mReadoutAvailable.signal();
        capturedBuffers = NULL;
    }

    /**
     * Stage 2: Capture new image
     */
    mNextCaptureTime = simulatedTime;
    mNextCapturedBuffers = nextBuffers;

    if (mNextCapturedBuffers != NULL) {
        if (listener != NULL) {
            listener->onSensorEvent(frameNumber, SensorListener::EXPOSURE_START,
                    mNextCaptureTime);
        }
        ALOGVV("Starting next capture: Exposure: %f ms, gain: %d",
                (float)exposureDuration / 1e6, gain);
        mScene.setExposureDuration((float)exposureDuration / 1e9);
        mScene.calculateScene(mNextCaptureTime);

        // Might be adding more buffers, so size isn't constant
        for (size_t i = 0; i < mNextCapturedBuffers->size(); i++) {
            const StreamBuffer &b = (*mNextCapturedBuffers)[i];
            ALOGVV("Sensor capturing buffer %zu: stream %d,"
                    " %d x %d, format %x, stride %d, buf %p, img %p",
                    i, b.streamId, b.width, b.height, b.format, b.stride,
                    b.buffer, b.img);
            switch (b.format) {
                case HAL_PIXEL_FORMAT_RAW_SENSOR:
                    captureRaw(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_RGB_888:
                    captureRGB(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_RGBA_8888:
                    captureRGBA(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_BLOB:
                    // Add an auxiliary buffer of the right size
                    // Assumes only one BLOB (JPEG) buffer in
                    // mNextCapturedBuffers
                    StreamBuffer bAux;
                    bAux.streamId = 0;
                    bAux.width = b.width;
                    bAux.height = b.height;
                    bAux.format = HAL_PIXEL_FORMAT_RGB_888;
                    bAux.stride = b.width;
                    bAux.buffer = NULL;
                    // TODO: Reuse these
                    bAux.img = new uint8_t[b.width * b.height * 3];
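                    // Note: push_back below may reallocate the buffer
                    // vector and invalidate the reference b, so b must not
                    // be used again within this loop iteration.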
                    mNextCapturedBuffers->push_back(bAux);
                    break;
                case HAL_PIXEL_FORMAT_YCrCb_420_SP:
                    captureNV21(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_YV12:
                    // TODO:
                    ALOGE("%s: Format %x is TODO", __FUNCTION__, b.format);
                    break;
                default:
                    ALOGE("%s: Unknown format %x, no output", __FUNCTION__,
                            b.format);
                    break;
            }
        }
    }

    ALOGVV("Sensor vertical blanking interval");
    nsecs_t workDoneRealTime = systemTime();
    const nsecs_t timeAccuracy = 2000000L; // 2 ms of imprecision is ok
    if (workDoneRealTime < frameEndRealTime - timeAccuracy) {
        timespec t;
        t.tv_sec = (frameEndRealTime - workDoneRealTime) / 1000000000L;
        t.tv_nsec = (frameEndRealTime - workDoneRealTime) % 1000000000L;

        int ret;
        do {
            // Sleep out the remainder of the frame; restart the sleep if
            // interrupted by a signal.
            ret = nanosleep(&t, &t);
        } while (ret != 0);
    }
    nsecs_t endRealTime = systemTime();
    ALOGVV("Frame cycle took %d ms, target %d ms",
            (int)((endRealTime - startRealTime) / 1000000),
            (int)(frameDuration / 1000000));
    return true;
}

void Sensor::captureRaw(uint8_t *img, uint32_t gain, uint32_t stride) {
    float totalGain = gain / 100.0 * kBaseGainFactor;
    float noiseVarGain = totalGain * totalGain;
    float readNoiseVar = kReadNoiseVarBeforeGain * noiseVarGain
            + kReadNoiseVarAfterGain;

    int bayerSelect[4] = {Scene::R, Scene::Gr, Scene::Gb, Scene::B}; // RGGB
    mScene.setReadoutPixel(0, 0);
    for (unsigned int y = 0; y < kResolution[1]; y++) {
        int *bayerRow = bayerSelect + (y & 0x1) * 2;
        uint16_t *px = (uint16_t*)img + y * stride;
        for (unsigned int x = 0; x < kResolution[0]; x++) {
            uint32_t electronCount;
            electronCount = mScene.getPixelElectrons()[bayerRow[x & 0x1]];

            // TODO: Better pixel saturation curve?
            electronCount = (electronCount < kSaturationElectrons) ?
                    electronCount : kSaturationElectrons;

            // TODO: Better A/D saturation curve?
            uint16_t rawCount = electronCount * totalGain;
            rawCount = (rawCount < kMaxRawValue) ? rawCount : kMaxRawValue;

            // Calculate noise value
            // TODO: Use more-correct Gaussian instead of uniform noise
            float photonNoiseVar = electronCount * noiseVarGain;
            float noiseStddev = sqrtf_approx(readNoiseVar + photonNoiseVar);
            // Scaled to roughly match gaussian/uniform noise stddev; a
            // uniform sample on [-1.25, 1.25) has a stddev of about 0.72
            float noiseSample = std::rand() * (2.5 / (1.0 + RAND_MAX)) - 1.25;

            rawCount += kBlackLevel;
            rawCount += noiseStddev * noiseSample;

            *px++ = rawCount;
        }
        // TODO: Handle this better
        //simulatedTime += kRowReadoutTime;
    }
    ALOGVV("Raw sensor image captured");
}
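// Worked example of the gain math above, with illustrative numbers: at the
// default sensitivity (gain = 100), totalGain equals kBaseGainFactor =
// 4000 / 2000 = 2.0 counts per electron, so a saturated pixel of 2000
// electrons digitizes to kMaxRawValue = 4000 counts before the black level
// of 1000 counts is added on top.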
void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t stride) {
    float totalGain = gain / 100.0 * kBaseGainFactor;
    // In fixed-point math, calculate total scaling from electrons to 8bpp
    int scale64x = 64 * totalGain * 255 / kMaxRawValue;
    // Subsampling step needed to fit the sensor width into `stride`
    // output pixels
    uint32_t inc = kResolution[0] / stride;

    for (unsigned int y = 0, outY = 0; y < kResolution[1]; y += inc, outY++) {
        uint8_t *px = img + outY * stride * 4;
        mScene.setReadoutPixel(0, y);
        for (unsigned int x = 0; x < kResolution[0]; x += inc) {
            uint32_t rCount, gCount, bCount;
            // TODO: Perfect demosaicing is a cheat
            const uint32_t *pixel = mScene.getPixelElectrons();
            rCount = pixel[Scene::R] * scale64x;
            gCount = pixel[Scene::Gr] * scale64x;
            bCount = pixel[Scene::B] * scale64x;

            *px++ = rCount < 255 * 64 ? rCount / 64 : 255;
            *px++ = gCount < 255 * 64 ? gCount / 64 : 255;
            *px++ = bCount < 255 * 64 ? bCount / 64 : 255;
            *px++ = 255;
            for (unsigned int j = 1; j < inc; j++)
                mScene.getPixelElectrons();
        }
        // TODO: Handle this better
        //simulatedTime += kRowReadoutTime;
    }
    ALOGVV("RGBA sensor image captured");
}

void Sensor::captureRGB(uint8_t *img, uint32_t gain, uint32_t stride) {
    float totalGain = gain / 100.0 * kBaseGainFactor;
    // In fixed-point math, calculate total scaling from electrons to 8bpp
    int scale64x = 64 * totalGain * 255 / kMaxRawValue;
    uint32_t inc = kResolution[0] / stride;

    for (unsigned int y = 0, outY = 0; y < kResolution[1]; y += inc, outY++) {
        mScene.setReadoutPixel(0, y);
        uint8_t *px = img + outY * stride * 3;
        for (unsigned int x = 0; x < kResolution[0]; x += inc) {
            uint32_t rCount, gCount, bCount;
            // TODO: Perfect demosaicing is a cheat
            const uint32_t *pixel = mScene.getPixelElectrons();
            rCount = pixel[Scene::R] * scale64x;
            gCount = pixel[Scene::Gr] * scale64x;
            bCount = pixel[Scene::B] * scale64x;

            *px++ = rCount < 255 * 64 ? rCount / 64 : 255;
            *px++ = gCount < 255 * 64 ? gCount / 64 : 255;
            *px++ = bCount < 255 * 64 ? bCount / 64 : 255;
            for (unsigned int j = 1; j < inc; j++)
                mScene.getPixelElectrons();
        }
        // TODO: Handle this better
        //simulatedTime += kRowReadoutTime;
    }
    ALOGVV("RGB sensor image captured");
}

void Sensor::captureNV21(uint8_t *img, uint32_t gain, uint32_t stride) {
    float totalGain = gain / 100.0 * kBaseGainFactor;
    // Using fixed-point math with 6 bits of fractional precision.
    // In fixed-point math, calculate total scaling from electrons to 8bpp
    const int scale64x = 64 * totalGain * 255 / kMaxRawValue;
    // In fixed-point math, saturation point of sensor after gain
    const int saturationPoint = 64 * 255;
    // Fixed-point coefficients for the RGB-to-YUV transform,
    // based on the JFIF RGB->YUV transform.
    // The Cb/Cr offset is scaled by 64x twice since it's applied
    // post-multiply.
    const int rgbToY[]  = {19, 37, 7};
    const int rgbToCb[] = {-10, -21, 32, 524288};
    const int rgbToCr[] = {32, -26, -5, 524288};
    // Scale back to 8bpp non-fixed-point
    const int scaleOut = 64;
    const int scaleOutSq = scaleOut * scaleOut; // after multiplies
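    // Illustrative derivation of the coefficients above, from the JFIF
    // transform Y = 0.299 R + 0.587 G + 0.114 B: each weight is scaled by
    // 64 and truncated toward zero (e.g. 0.299 * 64 = 19.1 -> 19), and the
    // chroma offset of 128 becomes 128 * 64 * 64 = 524288 since it is
    // added after the two 64x-scaled factors have been multiplied.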
    uint32_t inc = kResolution[0] / stride;
    uint32_t outH = kResolution[1] / inc;
    for (unsigned int y = 0, outY = 0;
         y < kResolution[1]; y += inc, outY++) {
        uint8_t *pxY = img + outY * stride;
        // The interleaved V/U plane starts after the outH rows of the Y
        // plane, with one V/U row for every two Y rows
        uint8_t *pxVU = img + (outH + outY / 2) * stride;
        mScene.setReadoutPixel(0, y);
        for (unsigned int outX = 0; outX < stride; outX++) {
            int32_t rCount, gCount, bCount;
            // TODO: Perfect demosaicing is a cheat
            const uint32_t *pixel = mScene.getPixelElectrons();
            rCount = pixel[Scene::R] * scale64x;
            rCount = rCount < saturationPoint ? rCount : saturationPoint;
            gCount = pixel[Scene::Gr] * scale64x;
            gCount = gCount < saturationPoint ? gCount : saturationPoint;
            bCount = pixel[Scene::B] * scale64x;
            bCount = bCount < saturationPoint ? bCount : saturationPoint;

            *pxY++ = (rgbToY[0] * rCount +
                      rgbToY[1] * gCount +
                      rgbToY[2] * bCount) / scaleOutSq;
            // NV21 stores V (Cr) before U (Cb); chroma is subsampled 2x2
            if (outY % 2 == 0 && outX % 2 == 0) {
                *pxVU++ = (rgbToCr[0] * rCount +
                           rgbToCr[1] * gCount +
                           rgbToCr[2] * bCount +
                           rgbToCr[3]) / scaleOutSq;
                *pxVU++ = (rgbToCb[0] * rCount +
                           rgbToCb[1] * gCount +
                           rgbToCb[2] * bCount +
                           rgbToCb[3]) / scaleOutSq;
            }
            for (unsigned int j = 1; j < inc; j++)
                mScene.getPixelElectrons();
        }
    }
    ALOGVV("NV21 sensor image captured");
}

} // namespace android