1 /* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #define LOG_TAG "Camera2-JpegProcessor" 18 #define ATRACE_TAG ATRACE_TAG_CAMERA 19 //#define LOG_NDEBUG 0 20 21 #include <netinet/in.h> 22 23 #include <binder/MemoryBase.h> 24 #include <binder/MemoryHeapBase.h> 25 #include <utils/Log.h> 26 #include <utils/Trace.h> 27 #include <gui/Surface.h> 28 29 #include "common/CameraDeviceBase.h" 30 #include "api1/Camera2Client.h" 31 #include "api1/client2/Camera2Heap.h" 32 #include "api1/client2/CaptureSequencer.h" 33 #include "api1/client2/JpegProcessor.h" 34 35 namespace android { 36 namespace camera2 { 37 38 JpegProcessor::JpegProcessor( 39 sp<Camera2Client> client, 40 wp<CaptureSequencer> sequencer): 41 Thread(false), 42 mDevice(client->getCameraDevice()), 43 mSequencer(sequencer), 44 mId(client->getCameraId()), 45 mCaptureDone(false), 46 mCaptureSuccess(false), 47 mCaptureStreamId(NO_STREAM) { 48 } 49 50 JpegProcessor::~JpegProcessor() { 51 ALOGV("%s: Exit", __FUNCTION__); 52 deleteStream(); 53 } 54 55 void JpegProcessor::onFrameAvailable(const BufferItem& /*item*/) { 56 Mutex::Autolock l(mInputMutex); 57 ALOGV("%s", __FUNCTION__); 58 if (!mCaptureDone) { 59 mCaptureDone = true; 60 mCaptureSuccess = true; 61 mCaptureDoneSignal.signal(); 62 } 63 } 64 65 void JpegProcessor::onBufferAcquired(const BufferInfo& /*bufferInfo*/) { 66 // Intentionally left empty 67 } 68 69 void 
JpegProcessor::onBufferReleased(const BufferInfo& bufferInfo) { 70 ALOGV("%s", __FUNCTION__); 71 if (bufferInfo.mError) { 72 // Only lock in case of error, since we get one of these for each 73 // onFrameAvailable as well, and scheduling may delay this call late 74 // enough to run into later preview restart operations, for non-error 75 // cases. 76 // b/29524651 77 ALOGV("%s: JPEG buffer lost", __FUNCTION__); 78 Mutex::Autolock l(mInputMutex); 79 mCaptureDone = true; 80 mCaptureSuccess = false; 81 mCaptureDoneSignal.signal(); 82 } 83 } 84 85 status_t JpegProcessor::updateStream(const Parameters ¶ms) { 86 ATRACE_CALL(); 87 ALOGV("%s", __FUNCTION__); 88 status_t res; 89 90 Mutex::Autolock l(mInputMutex); 91 92 sp<CameraDeviceBase> device = mDevice.promote(); 93 if (device == 0) { 94 ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); 95 return INVALID_OPERATION; 96 } 97 98 // Find out buffer size for JPEG 99 ssize_t maxJpegSize = device->getJpegBufferSize(params.pictureWidth, params.pictureHeight); 100 if (maxJpegSize <= 0) { 101 ALOGE("%s: Camera %d: Jpeg buffer size (%zu) is invalid ", 102 __FUNCTION__, mId, maxJpegSize); 103 return INVALID_OPERATION; 104 } 105 106 if (mCaptureConsumer == 0) { 107 // Create CPU buffer queue endpoint 108 sp<IGraphicBufferProducer> producer; 109 sp<IGraphicBufferConsumer> consumer; 110 BufferQueue::createBufferQueue(&producer, &consumer); 111 mCaptureConsumer = new CpuConsumer(consumer, 1); 112 mCaptureConsumer->setFrameAvailableListener(this); 113 mCaptureConsumer->setName(String8("Camera2-JpegConsumer")); 114 mCaptureWindow = new Surface(producer); 115 } 116 117 // Since ashmem heaps are rounded up to page size, don't reallocate if 118 // the capture heap isn't exactly the same size as the required JPEG buffer 119 const size_t HEAP_SLACK_FACTOR = 2; 120 if (mCaptureHeap == 0 || 121 (mCaptureHeap->getSize() < static_cast<size_t>(maxJpegSize)) || 122 (mCaptureHeap->getSize() > 123 static_cast<size_t>(maxJpegSize) * 
HEAP_SLACK_FACTOR) ) { 124 // Create memory for API consumption 125 mCaptureHeap.clear(); 126 mCaptureHeap = 127 new MemoryHeapBase(maxJpegSize, 0, "Camera2Client::CaptureHeap"); 128 if (mCaptureHeap->getSize() == 0) { 129 ALOGE("%s: Camera %d: Unable to allocate memory for capture", 130 __FUNCTION__, mId); 131 return NO_MEMORY; 132 } 133 } 134 ALOGV("%s: Camera %d: JPEG capture heap now %zu bytes; requested %zd bytes", 135 __FUNCTION__, mId, mCaptureHeap->getSize(), maxJpegSize); 136 137 if (mCaptureStreamId != NO_STREAM) { 138 // Check if stream parameters have to change 139 CameraDeviceBase::StreamInfo streamInfo; 140 res = device->getStreamInfo(mCaptureStreamId, &streamInfo); 141 if (res != OK) { 142 ALOGE("%s: Camera %d: Error querying capture output stream info: " 143 "%s (%d)", __FUNCTION__, 144 mId, strerror(-res), res); 145 return res; 146 } 147 if (streamInfo.width != (uint32_t)params.pictureWidth || 148 streamInfo.height != (uint32_t)params.pictureHeight) { 149 ALOGV("%s: Camera %d: Deleting stream %d since the buffer dimensions changed", 150 __FUNCTION__, mId, mCaptureStreamId); 151 res = device->deleteStream(mCaptureStreamId); 152 if (res == -EBUSY) { 153 ALOGV("%s: Camera %d: Device is busy, call updateStream again " 154 " after it becomes idle", __FUNCTION__, mId); 155 return res; 156 } else if (res != OK) { 157 ALOGE("%s: Camera %d: Unable to delete old output stream " 158 "for capture: %s (%d)", __FUNCTION__, 159 mId, strerror(-res), res); 160 return res; 161 } 162 mCaptureStreamId = NO_STREAM; 163 } 164 } 165 166 if (mCaptureStreamId == NO_STREAM) { 167 // Create stream for HAL production 168 res = device->createStream(mCaptureWindow, 169 params.pictureWidth, params.pictureHeight, 170 HAL_PIXEL_FORMAT_BLOB, HAL_DATASPACE_V0_JFIF, 171 CAMERA3_STREAM_ROTATION_0, &mCaptureStreamId); 172 if (res != OK) { 173 ALOGE("%s: Camera %d: Can't create output stream for capture: " 174 "%s (%d)", __FUNCTION__, mId, 175 strerror(-res), res); 176 return res; 177 } 
178 179 res = device->addBufferListenerForStream(mCaptureStreamId, this); 180 if (res != OK) { 181 ALOGE("%s: Camera %d: Can't add buffer listeneri: %s (%d)", 182 __FUNCTION__, mId, strerror(-res), res); 183 return res; 184 } 185 } 186 return OK; 187 } 188 189 status_t JpegProcessor::deleteStream() { 190 ATRACE_CALL(); 191 192 Mutex::Autolock l(mInputMutex); 193 194 if (mCaptureStreamId != NO_STREAM) { 195 sp<CameraDeviceBase> device = mDevice.promote(); 196 if (device == 0) { 197 ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId); 198 return INVALID_OPERATION; 199 } 200 201 device->deleteStream(mCaptureStreamId); 202 203 mCaptureHeap.clear(); 204 mCaptureWindow.clear(); 205 mCaptureConsumer.clear(); 206 207 mCaptureStreamId = NO_STREAM; 208 } 209 return OK; 210 } 211 212 int JpegProcessor::getStreamId() const { 213 Mutex::Autolock l(mInputMutex); 214 return mCaptureStreamId; 215 } 216 217 void JpegProcessor::dump(int /*fd*/, const Vector<String16>& /*args*/) const { 218 } 219 220 bool JpegProcessor::threadLoop() { 221 status_t res; 222 223 bool captureSuccess = false; 224 { 225 Mutex::Autolock l(mInputMutex); 226 227 while (!mCaptureDone) { 228 res = mCaptureDoneSignal.waitRelative(mInputMutex, 229 kWaitDuration); 230 if (res == TIMED_OUT) return true; 231 } 232 233 captureSuccess = mCaptureSuccess; 234 mCaptureDone = false; 235 } 236 237 res = processNewCapture(captureSuccess); 238 239 return true; 240 } 241 242 status_t JpegProcessor::processNewCapture(bool captureSuccess) { 243 ATRACE_CALL(); 244 status_t res; 245 sp<Camera2Heap> captureHeap; 246 sp<MemoryBase> captureBuffer; 247 248 CpuConsumer::LockedBuffer imgBuffer; 249 250 if (captureSuccess) { 251 Mutex::Autolock l(mInputMutex); 252 if (mCaptureStreamId == NO_STREAM) { 253 ALOGW("%s: Camera %d: No stream is available", __FUNCTION__, mId); 254 return INVALID_OPERATION; 255 } 256 257 res = mCaptureConsumer->lockNextBuffer(&imgBuffer); 258 if (res != OK) { 259 if (res != BAD_VALUE) { 260 
ALOGE("%s: Camera %d: Error receiving still image buffer: " 261 "%s (%d)", __FUNCTION__, 262 mId, strerror(-res), res); 263 } 264 return res; 265 } 266 267 ALOGV("%s: Camera %d: Still capture available", __FUNCTION__, 268 mId); 269 270 if (imgBuffer.format != HAL_PIXEL_FORMAT_BLOB) { 271 ALOGE("%s: Camera %d: Unexpected format for still image: " 272 "%x, expected %x", __FUNCTION__, mId, 273 imgBuffer.format, 274 HAL_PIXEL_FORMAT_BLOB); 275 mCaptureConsumer->unlockBuffer(imgBuffer); 276 return OK; 277 } 278 279 // Find size of JPEG image 280 size_t jpegSize = findJpegSize(imgBuffer.data, imgBuffer.width); 281 if (jpegSize == 0) { // failed to find size, default to whole buffer 282 jpegSize = imgBuffer.width; 283 } 284 size_t heapSize = mCaptureHeap->getSize(); 285 if (jpegSize > heapSize) { 286 ALOGW("%s: JPEG image is larger than expected, truncating " 287 "(got %zu, expected at most %zu bytes)", 288 __FUNCTION__, jpegSize, heapSize); 289 jpegSize = heapSize; 290 } 291 292 // TODO: Optimize this to avoid memcopy 293 captureBuffer = new MemoryBase(mCaptureHeap, 0, jpegSize); 294 void* captureMemory = mCaptureHeap->getBase(); 295 memcpy(captureMemory, imgBuffer.data, jpegSize); 296 297 mCaptureConsumer->unlockBuffer(imgBuffer); 298 } 299 300 sp<CaptureSequencer> sequencer = mSequencer.promote(); 301 if (sequencer != 0) { 302 sequencer->onCaptureAvailable(imgBuffer.timestamp, captureBuffer, !captureSuccess); 303 } 304 305 return OK; 306 } 307 308 /* 309 * JPEG FILE FORMAT OVERVIEW. 310 * http://www.jpeg.org/public/jfif.pdf 311 * (JPEG is the image compression algorithm, actual file format is called JFIF) 312 * 313 * "Markers" are 2-byte patterns used to distinguish parts of JFIF files. The 314 * first byte is always 0xFF, and the second byte is between 0x01 and 0xFE 315 * (inclusive). Because every marker begins with the same byte, they are 316 * referred to by the second byte's value. 
/*
 * JPEG FILE FORMAT OVERVIEW.
 * http://www.jpeg.org/public/jfif.pdf
 * (JPEG is the image compression algorithm; the actual file format is JFIF.)
 *
 * "Markers" are 2-byte patterns that delimit the parts of a JFIF file. The
 * first byte is always 0xFF and the second lies between 0x01 and 0xFE
 * (inclusive); since every marker starts with the same byte, markers are
 * referred to by their second byte's value.
 *
 * Every JFIF file starts with the Start of Image (SOI) marker, 0xD8. After
 * it come "segment" sections: a marker, then a 2-byte length in network
 * byte order, then the segment data.
 *
 * For our purposes the data itself is ignored; the length is only used to
 * skip to the next segment. This matters because segment data may legally
 * contain the End of Image byte pair (0xFF 0xD9), which defeats a naive
 * scan to the end.
 *
 * Once all segments are consumed, the compressed image stream follows. It
 * can be treated as opaque with one guarantee: every 0xFF byte inside it is
 * followed by 0x00, so no image data can be mistaken for a segment marker.
 * The single exception is the End of Image (EOI) marker terminating the
 * stream: 0xFF followed by the non-zero byte 0xD9.
 */

const uint8_t MARK = 0xFF;       // First byte of marker
const uint8_t SOI = 0xD8;        // Start of Image
const uint8_t EOI = 0xD9;        // End of Image
const size_t MARKER_LENGTH = 2;  // length of a marker

// Wire layout of a JFIF segment header: 2-byte marker followed by a 2-byte
// big-endian length. Packed so it can be overlaid directly on the buffer.
#pragma pack(push)
#pragma pack(1)
typedef struct segment {
    uint8_t marker[MARKER_LENGTH];
    uint16_t length;
} segment_t;
#pragma pack(pop)

/* HELPER FUNCTIONS */

// Does buf point at a Start of Image marker (0xFF 0xD8)?
bool checkJpegStart(uint8_t* buf) {
    if (buf[0] != MARK) {
        return false;
    }
    return buf[1] == SOI;
}
// Does buf point at an End of Image marker (0xFF 0xD9)?
bool checkJpegEnd(uint8_t *buf) {
    if (buf[0] != MARK) {
        return false;
    }
    return buf[1] == EOI;
}
Note: 0x00 is not a valid marker type 360 uint8_t checkJpegMarker(uint8_t *buf) { 361 if (buf[0] == MARK && buf[1] > 0 && buf[1] < 0xFF) { 362 return buf[1]; 363 } 364 return 0; 365 } 366 367 // Return the size of the JPEG, 0 indicates failure 368 size_t JpegProcessor::findJpegSize(uint8_t* jpegBuffer, size_t maxSize) { 369 size_t size; 370 371 // First check for JPEG transport header at the end of the buffer 372 uint8_t *header = jpegBuffer + (maxSize - sizeof(struct camera2_jpeg_blob)); 373 struct camera2_jpeg_blob *blob = (struct camera2_jpeg_blob*)(header); 374 if (blob->jpeg_blob_id == CAMERA2_JPEG_BLOB_ID) { 375 size = blob->jpeg_size; 376 if (size > 0 && size <= maxSize - sizeof(struct camera2_jpeg_blob)) { 377 // Verify SOI and EOI markers 378 size_t offset = size - MARKER_LENGTH; 379 uint8_t *end = jpegBuffer + offset; 380 if (checkJpegStart(jpegBuffer) && checkJpegEnd(end)) { 381 ALOGV("Found JPEG transport header, img size %zu", size); 382 return size; 383 } else { 384 ALOGW("Found JPEG transport header with bad Image Start/End"); 385 } 386 } else { 387 ALOGW("Found JPEG transport header with bad size %zu", size); 388 } 389 } 390 391 // Check Start of Image 392 if ( !checkJpegStart(jpegBuffer) ) { 393 ALOGE("Could not find start of JPEG marker"); 394 return 0; 395 } 396 397 // Read JFIF segment markers, skip over segment data 398 size = 0; 399 while (size <= maxSize - MARKER_LENGTH) { 400 segment_t *segment = (segment_t*)(jpegBuffer + size); 401 uint8_t type = checkJpegMarker(segment->marker); 402 if (type == 0) { // invalid marker, no more segments, begin JPEG data 403 ALOGV("JPEG stream found beginning at offset %zu", size); 404 break; 405 } 406 if (type == EOI || size > maxSize - sizeof(segment_t)) { 407 ALOGE("Got premature End before JPEG data, offset %zu", size); 408 return 0; 409 } 410 size_t length = ntohs(segment->length); 411 ALOGV("JFIF Segment, type %x length %zx", type, length); 412 size += length + MARKER_LENGTH; 413 } 414 415 // Find End 
of Image 416 // Scan JPEG buffer until End of Image (EOI) 417 bool foundEnd = false; 418 for ( ; size <= maxSize - MARKER_LENGTH; size++) { 419 if ( checkJpegEnd(jpegBuffer + size) ) { 420 foundEnd = true; 421 size += MARKER_LENGTH; 422 break; 423 } 424 } 425 if (!foundEnd) { 426 ALOGE("Could not find end of JPEG marker"); 427 return 0; 428 } 429 430 if (size > maxSize) { 431 ALOGW("JPEG size %zu too large, reducing to maxSize %zu", size, maxSize); 432 size = maxSize; 433 } 434 ALOGV("Final JPEG size %zu", size); 435 return size; 436 } 437 438 }; // namespace camera2 439 }; // namespace android 440