Home | History | Annotate | Download | only in OMXCameraAdapter
      1 /*
      2  * Copyright (C) Texas Instruments - http://www.ti.com/
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 /**
     18 * @file OMXFD.cpp
     19 *
     20 * This file contains functionality for handling face detection.
     21 *
     22 */
     23 
     24 #include "CameraHal.h"
     25 #include "OMXCameraAdapter.h"
     26 
     27 namespace Ti {
     28 namespace Camera {
     29 
// Minimum hardware-reported confidence score for a detected face to be
// forwarded to the application. Faces scoring at or below this value are
// filtered out in encodeFaceCoordinates() to suppress false positives.
const uint32_t OMXCameraAdapter::FACE_DETECTION_THRESHOLD = 80;
     31 
     32 status_t OMXCameraAdapter::setParametersFD(const android::CameraParameters &params,
     33                                            BaseCameraAdapter::AdapterState state)
     34 {
     35     status_t ret = NO_ERROR;
     36 
     37     LOG_FUNCTION_NAME;
     38 
     39     LOG_FUNCTION_NAME_EXIT;
     40 
     41     return ret;
     42 }
     43 
     44 status_t OMXCameraAdapter::startFaceDetection()
     45 {
     46     status_t ret = NO_ERROR;
     47 
     48     android::AutoMutex lock(mFaceDetectionLock);
     49 
     50     ret = setFaceDetection(true, mFaceOrientation);
     51     if (ret != NO_ERROR) {
     52         goto out;
     53     }
     54 
     55     if ( mFaceDetectionRunning ) {
     56         mFDSwitchAlgoPriority = true;
     57     }
     58 
     59     // Note: White balance will not be face prioritized, since
     60     // the algorithm needs full frame statistics, and not face
     61     // regions alone.
     62 
     63     faceDetectionNumFacesLastOutput = 0;
     64  out:
     65     return ret;
     66 }
     67 
     68 status_t OMXCameraAdapter::stopFaceDetection()
     69 {
     70     status_t ret = NO_ERROR;
     71     const char *str = NULL;
     72     BaseCameraAdapter::AdapterState state;
     73     BaseCameraAdapter::getState(state);
     74 
     75     android::AutoMutex lock(mFaceDetectionLock);
     76 
     77     ret = setFaceDetection(false, mFaceOrientation);
     78     if (ret != NO_ERROR) {
     79         goto out;
     80     }
     81 
     82     if ( mFaceDetectionRunning ) {
     83         //Enable region priority and disable face priority for AF
     84         setAlgoPriority(REGION_PRIORITY, FOCUS_ALGO, true);
     85         setAlgoPriority(FACE_PRIORITY, FOCUS_ALGO , false);
     86 
     87         //Enable Region priority and disable Face priority
     88         setAlgoPriority(REGION_PRIORITY, EXPOSURE_ALGO, true);
     89         setAlgoPriority(FACE_PRIORITY, EXPOSURE_ALGO, false);
     90     }
     91 
     92     if (mPending3Asettings) {
     93         apply3Asettings(mParameters3A);
     94     }
     95 
     96     faceDetectionNumFacesLastOutput = 0;
     97  out:
     98     return ret;
     99 }
    100 
    101 void OMXCameraAdapter::pauseFaceDetection(bool pause)
    102 {
    103     android::AutoMutex lock(mFaceDetectionLock);
    104     // pausing will only take affect if fd is already running
    105     if (mFaceDetectionRunning) {
    106         mFaceDetectionPaused = pause;
    107         faceDetectionNumFacesLastOutput = 0;
    108     }
    109 }
    110 
    111 status_t OMXCameraAdapter::setFaceDetectionOrientation(OMX_U32 orientation)
    112 {
    113     status_t ret = NO_ERROR;
    114 
    115     android::AutoMutex lock(mFaceDetectionLock);
    116 
    117     mFaceOrientation = orientation;
    118 
    119     if (mFaceDetectionRunning) {
    120         // restart face detection with new rotation
    121         setFaceDetection(true, orientation);
    122     }
    123 
    124     return ret;
    125 }
    126 
    127 status_t OMXCameraAdapter::setFaceDetection(bool enable, OMX_U32 orientation)
    128 {
    129     status_t ret = NO_ERROR;
    130     OMX_ERRORTYPE eError = OMX_ErrorNone;
    131     OMX_CONFIG_OBJDETECTIONTYPE objDetection;
    132 
    133     LOG_FUNCTION_NAME;
    134 
    135     if ( OMX_StateInvalid == mComponentState )
    136         {
    137         CAMHAL_LOGEA("OMX component is in invalid state");
    138         ret = -EINVAL;
    139         }
    140 
    141     if ( NO_ERROR == ret )
    142         {
    143         if ( orientation > 270 ) {
    144             orientation = 0;
    145         }
    146 
    147         OMX_INIT_STRUCT_PTR (&objDetection, OMX_CONFIG_OBJDETECTIONTYPE);
    148         objDetection.nPortIndex = mCameraAdapterParameters.mPrevPortIndex;
    149         objDetection.nDeviceOrientation = orientation;
    150         if  ( enable )
    151             {
    152             objDetection.bEnable = OMX_TRUE;
    153             }
    154         else
    155             {
    156             objDetection.bEnable = OMX_FALSE;
    157             }
    158 
    159         eError =  OMX_SetConfig(mCameraAdapterParameters.mHandleComp,
    160                                 ( OMX_INDEXTYPE ) OMX_IndexConfigImageFaceDetection,
    161                                 &objDetection);
    162         if ( OMX_ErrorNone != eError )
    163             {
    164             CAMHAL_LOGEB("Error while configuring face detection 0x%x", eError);
    165             ret = -1;
    166             }
    167         else
    168             {
    169             CAMHAL_LOGDA("Face detection configured successfully");
    170             }
    171         }
    172 
    173     if ( NO_ERROR == ret )
    174         {
    175         // TODO(XXX): Should enable/disable FD extra data separately
    176         // on each port.
    177         ret = setExtraData(enable, OMX_ALL, OMX_FaceDetection);
    178 
    179         if ( NO_ERROR != ret )
    180             {
    181             CAMHAL_LOGEA("Error while configuring face detection extra data");
    182             }
    183         else
    184             {
    185             CAMHAL_LOGDA("Face detection extra data configured successfully");
    186             }
    187         }
    188 
    189     if ( NO_ERROR == ret )
    190         {
    191         mFaceDetectionRunning = enable;
    192         mFaceDetectionPaused = !enable;
    193         }
    194 
    195     LOG_FUNCTION_NAME_EXIT;
    196 
    197     return ret;
    198 }
    199 
/**
 * Builds the metadata result (face coordinates plus frame metadata) for a
 * received preview buffer.
 *
 * When face detection is running and not paused, the FD extra data is
 * extracted from the buffer's platform-private area and validated; the face
 * coordinates and the general preview metadata are then encoded into a newly
 * allocated CameraMetadataResult. If neither encoder produced an update
 * (both report NOT_ENOUGH_DATA) the result is cleared so the caller can skip
 * a redundant callback.
 *
 * @param pBuffHeader   OMX buffer header of the preview frame; must be
 *                      non-NULL. Its pPlatformPrivate is queried for the
 *                      OMX_FaceDetection extra data.
 * @param result        Out: set to a new CameraMetadataResult on success,
 *                      cleared on error or when nothing changed.
 * @param previewWidth  Preview frame width used to scale face coordinates.
 * @param previewHeight Preview frame height used to scale face coordinates.
 * @return NO_ERROR on success, NO_INIT if the component is not executing,
 *         -EINVAL on invalid input or malformed extra data, NO_MEMORY on
 *         allocation failure, or a 'real' error from either encoder.
 */
status_t OMXCameraAdapter::createPreviewMetadata(OMX_BUFFERHEADERTYPE* pBuffHeader,
                                          android::sp<CameraMetadataResult> &result,
                                          size_t previewWidth,
                                          size_t previewHeight)
{
    status_t ret = NO_ERROR;
    status_t faceRet = NO_ERROR;
    status_t metaRet = NO_ERROR;
    OMX_FACEDETECTIONTYPE *faceData = NULL;

    LOG_FUNCTION_NAME;

    if ( OMX_StateExecuting != mComponentState ) {
        CAMHAL_LOGEA("OMX component is not in executing state");
        return NO_INIT;
    }

    if ( NULL == pBuffHeader ) {
        CAMHAL_LOGEA("Invalid Buffer header");
        // NOTE(review): missing space after 'return' — still tokenizes as
        // 'return -EINVAL;'.
        return-EINVAL;
    }

    // Only fetch and validate FD extra data while FD is active and unpaused;
    // otherwise faceData stays NULL and encodeFaceCoordinates() clears the
    // face list.
    if ( mFaceDetectionRunning && !mFaceDetectionPaused ) {
        OMX_OTHER_EXTRADATATYPE *extraData;

        extraData = getExtradata(pBuffHeader->pPlatformPrivate,
                                 (OMX_EXTRADATATYPE)OMX_FaceDetection);

        if ( NULL != extraData ) {
            CAMHAL_LOGVB("Size = %d, sizeof = %d, eType = 0x%x, nDataSize= %d, nPortIndex = 0x%x, nVersion = 0x%x",
                         extraData->nSize,
                         sizeof(OMX_OTHER_EXTRADATATYPE),
                         extraData->eType,
                         extraData->nDataSize,
                         extraData->nPortIndex,
                         extraData->nVersion);
        } else {
            CAMHAL_LOGD("FD extra data not found!");
            return -EINVAL;
        }

        faceData = ( OMX_FACEDETECTIONTYPE * ) extraData->data;
        if ( NULL != faceData ) {
            // Sanity check: the struct size must match or the layout is from
            // a mismatched component version.
            if ( sizeof(OMX_FACEDETECTIONTYPE) == faceData->nSize ) {
                // NOTE(review): format string has one '%d' but six arguments;
                // extras are ignored by printf-style logging — consider
                // extending the format or dropping the arguments.
                CAMHAL_LOGVB("Faces detected %d",
                             faceData->ulFaceCount,
                             faceData->nSize,
                             sizeof(OMX_FACEDETECTIONTYPE),
                             faceData->eCameraView,
                             faceData->nPortIndex,
                             faceData->nVersion);
            } else {
                CAMHAL_LOGEB("OMX_FACEDETECTIONTYPE size mismatch: expected = %d, received = %d",
                             ( unsigned int ) sizeof(OMX_FACEDETECTIONTYPE),
                             ( unsigned int ) faceData->nSize);
                return -EINVAL;
            }
        } else {
            CAMHAL_LOGEA("Invalid OMX_FACEDETECTIONTYPE");
            return -EINVAL;
        }
    }

    result = new (std::nothrow) CameraMetadataResult;
    if(NULL == result.get()) {
        ret = NO_MEMORY;
        return ret;
    }

    //Encode face coordinates
    faceRet = encodeFaceCoordinates(faceData, result->getMetadataResult()
                                            , previewWidth, previewHeight);
    if ((NO_ERROR == faceRet) || (NOT_ENOUGH_DATA == faceRet)) {
        // Ignore harmless errors (no error and no update) and go ahead and encode
        // the preview meta data
        metaRet = encodePreviewMetadata(result->getMetadataResult()
                                        , pBuffHeader->pPlatformPrivate);
        if ( (NO_ERROR != metaRet) && (NOT_ENOUGH_DATA != metaRet) )  {
           // Some 'real' error occurred during preview meta data encod, clear metadata
           // result and return correct error code
           result.clear();
           ret = metaRet;
        }
    } else {
        //Some real error occurred during face encoding, clear metadata result
        // and return correct error code
        result.clear();
        ret = faceRet;
    }

    // Neither faces nor preview metadata changed — drop the result so no
    // callback is sent for an identical frame.
    if((NOT_ENOUGH_DATA == faceRet) && (NOT_ENOUGH_DATA == metaRet)) {
        //No point sending the callback if nothing is changed
        result.clear();
        ret = faceRet;
    }

    LOG_FUNCTION_NAME_EXIT;

    return ret;
}
    300 
/**
 * Translates OMX face detection output into Android camera_face_t
 * rectangles in the standard metadata coordinate space, and detects whether
 * the face array changed since the previous frame.
 *
 * Faces with a confidence score at or below FACE_DETECTION_THRESHOLD are
 * dropped. When mFaceOrientation is 180 the rectangle corners are swapped
 * and mirrored to match what Android expects (see diagram below). The
 * previous frame's output is kept in faceDetectionLastOutput /
 * faceDetectionNumFacesLastOutput; when neither face centers, sizes, nor the
 * face count changed, NOT_ENOUGH_DATA is returned so the caller can skip the
 * callback.
 *
 * @param faceData       FD results from the OMX extra data; NULL or a zero
 *                       face count clears the face list in metadataResult.
 * @param metadataResult Destination; any face array already attached to it
 *                       is freed first to avoid leaking on reuse.
 * @param previewWidth   Preview width used to normalize X coordinates.
 * @param previewHeight  Preview height used to normalize Y coordinates.
 * @return NO_ERROR on success, NO_MEMORY if the face array allocation
 *         fails, or NOT_ENOUGH_DATA when nothing changed.
 */
status_t OMXCameraAdapter::encodeFaceCoordinates(const OMX_FACEDETECTIONTYPE *faceData,
                                                 camera_frame_metadata_t *metadataResult,
                                                 size_t previewWidth,
                                                 size_t previewHeight)
{
    status_t ret = NO_ERROR;
    camera_face_t *faces;
    size_t hRange, vRange;
    double tmp;
    bool faceArrayChanged = false;

    LOG_FUNCTION_NAME;

    // Output spans of the Android face coordinate space (LEFT/RIGHT and
    // TOP/BOTTOM constants, e.g. [-1000, 1000] per the camera metadata
    // convention — see CameraMetadataResult).
    hRange = CameraMetadataResult::RIGHT - CameraMetadataResult::LEFT;
    vRange = CameraMetadataResult::BOTTOM - CameraMetadataResult::TOP;

    android::AutoMutex lock(mFaceDetectionLock);

    // Avoid memory leak if called twice on same CameraMetadataResult
    if ( (0 < metadataResult->number_of_faces) && (NULL != metadataResult->faces) ) {
        free(metadataResult->faces);
        metadataResult->number_of_faces = 0;
        metadataResult->faces = NULL;
    }

    if ( (NULL != faceData) && (0 < faceData->ulFaceCount) ) {
        int orient_mult;
        int trans_left, trans_top, trans_right, trans_bot;

        faces = ( camera_face_t * ) malloc(sizeof(camera_face_t)*faceData->ulFaceCount);
        if ( NULL == faces ) {
            ret = NO_MEMORY;
            goto out;
        }

        /*
         * When the device is oriented 180 degrees relative to the sensor,
         * the output from Ducati must be translated to what Android expects.
         * Ducati always reports face coordinates in this form, irrespective
         * of rotation, i.e. (l,t) always represents the point towards the
         * left eye and top of hair:
         *
         * (l, t)
         *   ---------------
         *   -   ,,,,,,,   -
         *   -  |       |  -
         *   -  |<a   <a|  -
         *   - (|   ^   |) -
         *   -  |  -=-  |  -
         *   -   \_____/   -
         *   ---------------
         *               (r, b)
         *
         * However, Android expects the coordinates to be relative to what
         * the sensor is viewing, i.e. Android expects the sensor to see this
         * with (l,t) and (r,b) like so:
         *
         * (l, t)
         *   ---------------
         *   -    _____    -
         *   -   /     \   -
         *   -  |  -=-  |  -
         *   - (|   ^   |) -
         *   -  |a>   a>|  -
         *   -  |       |  -
         *   -   ,,,,,,,   -
         *   ---------------
         *               (r, b)
         */

        // trans_* hold the rect[] indices for each edge; at 180 degrees the
        // edges swap sides and widths/heights are applied with a -1 sign.
        if (mFaceOrientation == 180) {
            orient_mult = -1;
            trans_left = 2; // right is now left
            trans_top = 3; // bottom is now top
            trans_right = 0; // left is now right
            trans_bot = 1; // top is now bottom
        } else {
            orient_mult = 1;
            trans_left = 0; // left
            trans_top = 1; // top
            trans_right = 2; // right
            trans_bot = 3; // bottom
        }

        // j walks the raw detections, i counts the faces that pass the
        // score filter.
        // NOTE(review): j is int vs ulFaceCount (unsigned) — signed/unsigned
        // comparison; harmless for realistic face counts but worth tidying.
        int j = 0, i = 0;
        for ( ; j < faceData->ulFaceCount ; j++)
            {
             OMX_S32 nLeft = 0;
             OMX_S32 nTop = 0;
             //Face filtering
             //For real faces, it is seen that the h/w passes a score >=80
             //For false faces, we seem to get even a score of 70 sometimes.
             //In order to avoid any issue at application level, we filter
             //<=70 score here.
            if(faceData->tFacePosition[j].nScore <= FACE_DETECTION_THRESHOLD)
             continue;

            if (mFaceOrientation == 180) {
                // from sensor pov, the left pos is the right corner of the face in pov of frame
                nLeft = faceData->tFacePosition[j].nLeft + faceData->tFacePosition[j].nWidth;
                nTop =  faceData->tFacePosition[j].nTop + faceData->tFacePosition[j].nHeight;
            } else {
                nLeft = faceData->tFacePosition[j].nLeft;
                nTop =  faceData->tFacePosition[j].nTop;
            }

            // Scale pixel coordinates into the metadata range and center the
            // origin (range midpoint maps to 0).
            tmp = ( double ) nLeft / ( double ) previewWidth;
            tmp *= hRange;
            tmp -= hRange/2;
            faces[i].rect[trans_left] = tmp;

            tmp = ( double ) nTop / ( double )previewHeight;
            tmp *= vRange;
            tmp -= vRange/2;
            faces[i].rect[trans_top] = tmp;

            // Width/height are applied with orient_mult so that at 180
            // degrees the "right"/"bottom" edges extend in the mirrored
            // direction.
            tmp = ( double ) faceData->tFacePosition[j].nWidth / ( double ) previewWidth;
            tmp *= hRange;
            tmp *= orient_mult;
            faces[i].rect[trans_right] = faces[i].rect[trans_left] + tmp;

            tmp = ( double ) faceData->tFacePosition[j].nHeight / ( double ) previewHeight;
            tmp *= vRange;
            tmp *= orient_mult;
            faces[i].rect[trans_bot] = faces[i].rect[trans_top] + tmp;

            faces[i].score = faceData->tFacePosition[j].nScore;
            faces[i].id = 0;
            // Eye/mouth landmarks are not provided by this FD path.
            faces[i].left_eye[0] = CameraMetadataResult::INVALID_DATA;
            faces[i].left_eye[1] = CameraMetadataResult::INVALID_DATA;
            faces[i].right_eye[0] = CameraMetadataResult::INVALID_DATA;
            faces[i].right_eye[1] = CameraMetadataResult::INVALID_DATA;
            faces[i].mouth[0] = CameraMetadataResult::INVALID_DATA;
            faces[i].mouth[1] = CameraMetadataResult::INVALID_DATA;
            i++;
            }

        // Ownership of 'faces' transfers to metadataResult; freed on the
        // next call (see leak guard above) or by the consumer.
        metadataResult->number_of_faces = i;
        metadataResult->faces = faces;

        // Compare each new face (by center and size) against last frame's
        // output to decide whether anything moved or resized.
        // NOTE(review): these loop variables shadow the outer i/j above.
        for (int i = 0; i  < metadataResult->number_of_faces; i++)
        {
            bool faceChanged = true;
            int centerX = (faces[i].rect[trans_left] + faces[i].rect[trans_right] ) / 2;
            int centerY = (faces[i].rect[trans_top] + faces[i].rect[trans_bot] ) / 2;

            int sizeX = (faces[i].rect[trans_right] - faces[i].rect[trans_left] ) ;
            int sizeY = (faces[i].rect[trans_bot] - faces[i].rect[trans_top] ) ;

            for (int j = 0; j < faceDetectionNumFacesLastOutput; j++)
            {
                int tempCenterX = (faceDetectionLastOutput[j].rect[trans_left] +
                                  faceDetectionLastOutput[j].rect[trans_right] ) / 2;
                int tempCenterY = (faceDetectionLastOutput[j].rect[trans_top] +
                                  faceDetectionLastOutput[j].rect[trans_bot] ) / 2;
                int tempSizeX = (faceDetectionLastOutput[j].rect[trans_right] -
                                faceDetectionLastOutput[j].rect[trans_left] ) ;
                int tempSizeY = (faceDetectionLastOutput[j].rect[trans_bot] -
                                faceDetectionLastOutput[j].rect[trans_top] ) ;

                if ( ( tempCenterX == centerX) &&
                     ( tempCenterY == centerY) ) {
                    // Found Face.
                    // Now check size of rectangle
                    // compare to last output.
                    if ( ( tempSizeX == sizeX ) &&
                         ( tempSizeY == sizeY ) ) {
                        faceChanged = false;
                    }
                }
            }
            // Send face detection data after some face coordinate changes
            if (faceChanged) {
                faceArrayChanged = true;
            }
        }

        // Save this output for next iteration
        for (int i = 0; i  < metadataResult->number_of_faces; i++)
        {
            faceDetectionLastOutput[i] = faces[i];
        }
    } else {
        // No input faces: report an empty face list.
        metadataResult->number_of_faces = 0;
        metadataResult->faces = NULL;
    }

    // Send face detection data after face count changes
    if (faceDetectionNumFacesLastOutput != metadataResult->number_of_faces) {
        faceArrayChanged = true;
    }
    faceDetectionNumFacesLastOutput = metadataResult->number_of_faces;

    // Unchanged array → signal the caller to skip the redundant callback.
    if ( !faceArrayChanged ) {
        ret = NOT_ENOUGH_DATA;
    }

    // NOTE(review): the goto-out path (NO_MEMORY) bypasses this exit log;
    // only the fall-through path logs it.
    LOG_FUNCTION_NAME_EXIT;

out:

    return ret;
}
    502 
    503 } // namespace Camera
    504 } // namespace Ti
    505