# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import os.path
import cv2
import its.caps
import its.device
import its.image
import its.objects
import numpy as np

FMT_ATOL = 0.01  # Absolute tolerance on format ratio
AR_CHECKED = ["4:3", "16:9", "18:9"]  # Aspect ratios checked
FOV_PERCENT_RTOL = 0.15  # Relative tolerance on circle FoV % to expected
LARGE_SIZE = 2000   # Define the size of a large image
NAME = os.path.basename(__file__).split(".")[0]
NUM_DISTORT_PARAMS = 5
THRESH_L_AR = 0.02  # aspect ratio test threshold of large images
THRESH_XS_AR = 0.075  # aspect ratio test threshold of mini images
THRESH_L_CP = 0.02  # Crop test threshold of large images
THRESH_XS_CP = 0.075  # Crop test threshold of mini images
THRESH_MIN_PIXEL = 4  # Crop test allowed offset
PREVIEW_SIZE = (1920, 1080)  # preview size


def convert_ar_to_float(ar_string):
    """Convert aspect ratio string into float.

    Args:
        ar_string:  "4:3" or "16:9"
    Returns:
        float(ar_string)
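        e.g. convert_ar_to_float("4:3") -> 4.0 / 3.0 ~= 1.333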
    """
    ar_list = [float(x) for x in ar_string.split(":")]
    return ar_list[0] / ar_list[1]


def determine_sensor_aspect_ratio(props):
    """Determine the aspect ratio of the sensor.

    Args:
        props:      camera properties
    Returns:
        matched entry in AR_CHECKED
    """
    match_ar = None
    sensor_size = props["android.sensor.info.preCorrectionActiveArraySize"]
    sensor_ar = (float(abs(sensor_size["right"] - sensor_size["left"])) /
                 abs(sensor_size["bottom"] - sensor_size["top"]))
    for ar_string in AR_CHECKED:
        if np.isclose(sensor_ar, convert_ar_to_float(ar_string), atol=FMT_ATOL):
            match_ar = ar_string
    if not match_ar:
        print "Warning! RAW aspect ratio not in:", AR_CHECKED
    return match_ar


def aspect_ratio_scale_factors(ref_ar_string, props):
    """Determine scale factors for each aspect ratio to correct cropping.

    Args:
        ref_ar_string:      camera aspect ratio that is the reference
        props:              camera properties
    Returns:
        dict of correction ratios with AR_CHECKED values as keys
    """
    ref_ar = convert_ar_to_float(ref_ar_string)

    # find sensor area
    height_max = 0
    width_max = 0
    for ar_string in AR_CHECKED:
        match_ar = [float(x) for x in ar_string.split(":")]
        try:
            f = its.objects.get_largest_yuv_format(props, match_ar=match_ar)
            if f["height"] > height_max:
                height_max = f["height"]
            if f["width"] > width_max:
                width_max = f["width"]
        except IndexError:
            continue
    sensor_ar = float(width_max) / height_max

    # apply scaling
    ar_scaling = {}
    for ar_string in AR_CHECKED:
        target_ar = convert_ar_to_float(ar_string)
        # scale the reference FoV% down to the full sensor, which has
        # greater (or equal) dims
        if ref_ar >= sensor_ar:
            scaling = sensor_ar / ref_ar
        else:
            scaling = ref_ar / sensor_ar

        # then scale up for the crop from the sensor to the target format
        if target_ar >= sensor_ar:
            scaling = scaling * target_ar / sensor_ar
        else:
            scaling = scaling * sensor_ar / target_ar

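        # Example (illustrative, assuming a 4:3 sensor and a 4:3 reference):
        # the first step gives scaling = 1, and a 16:9 target then gives
        # scaling = (16/9)/(4/3) ~= 1.33, i.e. a 16:9 crop of the sensor is
        # expected to report about 4/3 of the reference circle coverage.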
        ar_scaling[ar_string] = scaling
    return ar_scaling


def find_yuv_fov_reference(cam, req, props):
    """Determine the circle coverage of the image in the YUV reference image.

    Args:
        cam:        camera object
        req:        camera request
        props:      camera properties

    Returns:
        ref_fov:    dict with keys "fmt", "percent", "w", "h"
    """
    ref_fov = {}
    fmt_dict = {}

    # find number of pixels in different formats
    for ar in AR_CHECKED:
        match_ar = [float(x) for x in ar.split(":")]
        try:
            f = its.objects.get_largest_yuv_format(props, match_ar=match_ar)
            fmt_dict[f["height"]*f["width"]] = {"fmt": f, "ar": ar}
        except IndexError:
            continue

    # use the format with the most pixels as the reference
    ar_max_pixels = max(fmt_dict, key=int)

    # capture and determine circle area in image
    cap = cam.do_capture(req, fmt_dict[ar_max_pixels]["fmt"])
    w = cap["width"]
    h = cap["height"]
    img = its.image.convert_capture_to_rgb_image(cap, props=props)
    print "Captured %s %dx%d" % ("yuv", w, h)
    img_name = "%s_%s_w%d_h%d.png" % (NAME, "yuv", w, h)
    _, _, circle_size = measure_aspect_ratio(img, False, img_name, True)
    fov_percent = calc_circle_image_ratio(circle_size[1], circle_size[0], w, h)
    ref_fov["fmt"] = fmt_dict[ar_max_pixels]["ar"]
    ref_fov["percent"] = fov_percent
    ref_fov["w"] = w
    ref_fov["h"] = h
    print "Using YUV reference:", ref_fov
    return ref_fov


def calc_circle_image_ratio(circle_w, circle_h, image_w, image_h):
    """Calculate the circle coverage of the image.

    Args:
        circle_w (int):      width of circle
        circle_h (int):      height of circle
        image_w (int):       width of image
        image_h (int):       height of image
    Returns:
        fov_percent (float): % of image covered by circle
    """
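    # Worked example (illustrative numbers): a circle ~1000 pixels in diameter
    # in a 4000x3000 image covers 100*pi*500^2/(4000*3000) ~= 6.5%.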
    circle_area = math.pi * math.pow(np.mean([circle_w, circle_h])/2.0, 2)
    image_area = image_w * image_h
    fov_percent = 100*circle_area/image_area
    return fov_percent


def main():
    """Test aspect ratio & check if images are cropped correctly for each fmt.

    The aspect ratio test runs on level3, full and limited devices. The crop
    test only runs on full and level3 devices.
    The test image is a black circle inside a black square. When raw capture is
    available, the height vs. width ratio of the circle in the full-frame raw
    is used as ground truth and compared against images captured with request
    combinations of different formats ("jpeg" and "yuv") and sizes.
    If raw capture is unavailable, take the picture with the camera placed
    directly in front of the test image to eliminate shooting-angle effects;
    the height vs. width ratio of the circle should then be close to 1.
    Allowing for shooting-position error, an aspect ratio greater than
    1+THRESH_*_AR or less than 1-THRESH_*_AR will FAIL.
    """
    aspect_ratio_gt = 1  # ground truth
    failed_ar = []  # streams that failed the aspect ratio test
    failed_crop = []  # streams that failed the crop test
    failed_fov = []  # streams that failed the FoV test
    format_list = []  # format list for multiple capture objects.
    # Do multi-capture of "iter" and "cmpr". Iterate through all the
    # available sizes of "iter", and only use the size specified for "cmpr".
    # Do single-capture to cover untouched sizes in multi-capture when needed.
    format_list.append({"iter": "yuv", "iter_max": None,
                        "cmpr": "yuv", "cmpr_size": PREVIEW_SIZE})
    format_list.append({"iter": "yuv", "iter_max": PREVIEW_SIZE,
                        "cmpr": "jpeg", "cmpr_size": None})
    format_list.append({"iter": "yuv", "iter_max": PREVIEW_SIZE,
                        "cmpr": "raw", "cmpr_size": None})
    format_list.append({"iter": "jpeg", "iter_max": None,
                        "cmpr": "raw", "cmpr_size": None})
    format_list.append({"iter": "jpeg", "iter_max": None,
                        "cmpr": "yuv", "cmpr_size": PREVIEW_SIZE})
    ref_fov = {}
    with its.device.ItsSession() as cam:
        props = cam.get_camera_properties()
        props = cam.override_with_hidden_physical_camera_props(props)
        its.caps.skip_unless(its.caps.read_3a(props))
        full_device = its.caps.full_or_better(props)
        limited_device = its.caps.limited(props)
        its.caps.skip_unless(full_device or limited_device)
        level3_device = its.caps.level3(props)
        raw_avlb = its.caps.raw16(props)
        mono_camera = its.caps.mono_camera(props)
        run_crop_test = (level3_device or full_device) and raw_avlb
        if not run_crop_test:
            print "Crop test skipped"
        debug = its.caps.debug_mode()
        # Converge 3A and get the estimates.
        sens, exp, gains, xform, focus = cam.do_3a(get_results=True,
                                                   lock_ae=True, lock_awb=True,
                                                   mono_camera=mono_camera)
        print "AE sensitivity %d, exposure %dms" % (sens, exp / 1000000.0)
        print "AWB gains", gains
        print "AWB transform", xform
        print "AF distance", focus
        req = its.objects.manual_capture_request(
                sens, exp, focus, True, props)
        xform_rat = its.objects.float_to_rational(xform)
        req["android.colorCorrection.gains"] = gains
        req["android.colorCorrection.transform"] = xform_rat

        # If raw capture is available, use it as ground truth.
        if raw_avlb:
            # Capture full-frame raw. Use its aspect ratio and circle center
            # location as ground truth for the other jpeg or yuv images.
            print "Creating references for fov_coverage from RAW"
            out_surface = {"format": "raw"}
            cap_raw = cam.do_capture(req, out_surface)
            print "Captured %s %dx%d" % ("raw", cap_raw["width"],
                                         cap_raw["height"])
            img_raw = its.image.convert_capture_to_rgb_image(cap_raw,
                                                             props=props)
            if its.caps.distortion_correction(props):
                # The intrinsics and distortion coefficients are meant for full
                # size RAW. Resize back to full size here.
                img_raw = cv2.resize(img_raw, (0, 0), fx=2.0, fy=2.0)
                # Intrinsic cal is of format: [f_x, f_y, c_x, c_y, s]
                # [f_x, f_y] is the horizontal and vertical focal lengths,
                # [c_x, c_y] is the position of the optical axis,
                # and s is the skew of the sensor plane vs the lens plane.
                print "Applying intrinsic calibration and distortion params"
                ical = np.array(props["android.lens.intrinsicCalibration"])
                msg = "Cannot include lens distortion without intrinsic cal!"
                assert len(ical) == 5, msg
                sensor_h = props["android.sensor.info.physicalSize"]["height"]
                sensor_w = props["android.sensor.info.physicalSize"]["width"]
                pixel_h = props["android.sensor.info.pixelArraySize"]["height"]
                pixel_w = props["android.sensor.info.pixelArraySize"]["width"]
                fd = float(cap_raw["metadata"]["android.lens.focalLength"])
                fd_w_pix = pixel_w * fd / sensor_w
                fd_h_pix = pixel_h * fd / sensor_h
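                # Sanity-check example (illustrative values): a 4000-pixel-wide,
                # 6.0 mm-wide sensor with a 4.5 mm focal length gives
                # fd_w_pix = 4000 * 4.5 / 6.0 = 3000 pixels, which should be
                # within ~20% of the reported f_x.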
                # transformation matrix
                # k = [[f_x, s, c_x],
                #      [0, f_y, c_y],
                #      [0,   0,   1]]
                k = np.array([[ical[0], ical[4], ical[2]],
                              [0, ical[1], ical[3]],
                              [0, 0, 1]])
                print "k:", k
                e_msg = "fd_w(pixels): %.2f\tcal[0](pixels): %.2f\tTOL=20%%" % (
                        fd_w_pix, ical[0])
                assert np.isclose(fd_w_pix, ical[0], rtol=0.20), e_msg
                e_msg = "fd_h(pixels): %.2f\tcal[1](pixels): %.2f\tTOL=20%%" % (
                        fd_h_pix, ical[1])
                assert np.isclose(fd_h_pix, ical[1], rtol=0.20), e_msg

                # distortion
                rad_dist = props["android.lens.distortion"]
                print "android.lens.distortion:", rad_dist
                e_msg = "%d param(s) found. %d expected." % (len(rad_dist),
                                                             NUM_DISTORT_PARAMS)
                assert len(rad_dist) == NUM_DISTORT_PARAMS, e_msg
                opencv_dist = np.array([rad_dist[0], rad_dist[1],
                                        rad_dist[3], rad_dist[4],
                                        rad_dist[2]])
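                # Android reports distortion as [k1, k2, k3, p1, p2] (radial
                # then tangential), while cv2.undistort() expects
                # (k1, k2, p1, p2, k3); the reordering above accounts for that.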
                print "dist:", opencv_dist
                img_raw = cv2.undistort(img_raw, k, opencv_dist)
            size_raw = img_raw.shape
            w_raw = size_raw[1]
            h_raw = size_raw[0]
            img_name = "%s_%s_w%d_h%d.png" % (NAME, "raw", w_raw, h_raw)
            aspect_ratio_gt, cc_ct_gt, circle_size_raw = measure_aspect_ratio(
                    img_raw, raw_avlb, img_name, debug)
            raw_fov_percent = calc_circle_image_ratio(
                    circle_size_raw[1], circle_size_raw[0], w_raw, h_raw)
            # Normalize the circle size to 1/4 of the image size, so that the
            # circle size won't affect the crop test result
            factor_cp_thres = (min(size_raw[0:2])/4.0) / max(circle_size_raw)
            thres_l_cp_test = THRESH_L_CP * factor_cp_thres
            thres_xs_cp_test = THRESH_XS_CP * factor_cp_thres
            # If the RAW aspect ratio is in AR_CHECKED, use it as the reference
            ref_fov["fmt"] = determine_sensor_aspect_ratio(props)
            if ref_fov["fmt"]:
                ref_fov["percent"] = raw_fov_percent
                ref_fov["w"] = w_raw
                ref_fov["h"] = h_raw
                print "Using RAW reference:", ref_fov
            else:
                ref_fov = find_yuv_fov_reference(cam, req, props)
        else:
            ref_fov = find_yuv_fov_reference(cam, req, props)

        # Determine scaling factors for AR calculations
        ar_scaling = aspect_ratio_scale_factors(ref_fov["fmt"], props)

        # Take pictures with each setting using all the available image sizes.
        for fmt in format_list:
            fmt_iter = fmt["iter"]
            fmt_cmpr = fmt["cmpr"]
            dual_target = fmt_cmpr != "none"
            # Get the size of "cmpr"
            if dual_target:
                sizes = its.objects.get_available_output_sizes(
                        fmt_cmpr, props, fmt["cmpr_size"])
                if not sizes:  # device might not support RAW
                    continue
                size_cmpr = sizes[0]
            for size_iter in its.objects.get_available_output_sizes(
                    fmt_iter, props, fmt["iter_max"]):
                w_iter = size_iter[0]
                h_iter = size_iter[1]
                # Skip testing same format/size combination
                # ITS does not handle that properly now
                if (dual_target
                            and w_iter*h_iter == size_cmpr[0]*size_cmpr[1]
                            and fmt_iter == fmt_cmpr):
                    continue
                out_surface = [{"width": w_iter,
                                "height": h_iter,
                                "format": fmt_iter}]
                if dual_target:
                    out_surface.append({"width": size_cmpr[0],
                                        "height": size_cmpr[1],
                                        "format": fmt_cmpr})
                cap = cam.do_capture(req, out_surface)
                if dual_target:
                    frm_iter = cap[0]
                else:
                    frm_iter = cap
                assert frm_iter["format"] == fmt_iter
                assert frm_iter["width"] == w_iter
                assert frm_iter["height"] == h_iter
                print "Captured %s with %s %dx%d. Compared size: %dx%d" % (
                        fmt_iter, fmt_cmpr, w_iter, h_iter, size_cmpr[0],
                        size_cmpr[1])
                img = its.image.convert_capture_to_rgb_image(frm_iter)
                if its.caps.distortion_correction(props) and raw_avlb:
                    w_scale = float(w_iter)/w_raw
                    h_scale = float(h_iter)/h_raw
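                    # Scale the full-size RAW intrinsics to this output
                    # resolution: f_x and c_x scale with width, f_y and c_y
                    # with height (skew is left unscaled), so the same
                    # distortion coefficients can be reused.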
                    k_scale = np.array([[ical[0]*w_scale, ical[4],
                                         ical[2]*w_scale],
                                        [0, ical[1]*h_scale, ical[3]*h_scale],
                                        [0, 0, 1]])
                    print "k_scale:", k_scale
                    img = cv2.undistort(img, k_scale, opencv_dist)
                img_name = "%s_%s_with_%s_w%d_h%d.png" % (NAME,
                                                          fmt_iter, fmt_cmpr,
                                                          w_iter, h_iter)
                aspect_ratio, cc_ct, (cc_w, cc_h) = measure_aspect_ratio(
                        img, raw_avlb, img_name, debug)
                # check fov coverage for all fmts in AR_CHECKED
                fov_percent = calc_circle_image_ratio(
                        cc_w, cc_h, w_iter, h_iter)
                for ar_check in AR_CHECKED:
                    match_ar_list = [float(x) for x in ar_check.split(":")]
                    match_ar = match_ar_list[0] / match_ar_list[1]
                    if np.isclose(float(w_iter)/h_iter, match_ar,
                                  atol=FMT_ATOL):
                        # scale check value based on aspect ratio
                        chk_percent = ref_fov["percent"] * ar_scaling[ar_check]
                        if not np.isclose(fov_percent, chk_percent,
                                          rtol=FOV_PERCENT_RTOL):
                            msg = "FoV %%: %.2f, Ref FoV %%: %.2f, " % (
                                    fov_percent, chk_percent)
                            msg += "TOL=%.f%%, img: %dx%d, ref: %dx%d" % (
                                    FOV_PERCENT_RTOL*100, w_iter, h_iter,
                                    ref_fov["w"], ref_fov["h"])
                            failed_fov.append(msg)
                            its.image.write_image(img/255, img_name, True)
                # check pass/fail for aspect ratio
                # image size >= LARGE_SIZE: use THRESH_L_AR
                # image size == 0 (extreme case): THRESH_XS_AR
                # 0 < image size < LARGE_SIZE: scale between THRESH_XS_AR
                # and THRESH_L_AR
                thres_ar_test = max(
                        THRESH_L_AR, THRESH_XS_AR + max(w_iter, h_iter) *
                        (THRESH_L_AR-THRESH_XS_AR)/LARGE_SIZE)
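                # e.g. max(w_iter, h_iter) = 1000 gives
                # 0.075 + 1000*(0.02-0.075)/2000 = 0.0475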
                thres_range_ar = (aspect_ratio_gt-thres_ar_test,
                                  aspect_ratio_gt+thres_ar_test)
                if (aspect_ratio < thres_range_ar[0] or
                            aspect_ratio > thres_range_ar[1]):
                    failed_ar.append({"fmt_iter": fmt_iter,
                                      "fmt_cmpr": fmt_cmpr,
                                      "w": w_iter, "h": h_iter,
                                      "ar": aspect_ratio,
                                      "valid_range": thres_range_ar})
                    its.image.write_image(img/255, img_name, True)

                # check pass/fail for crop
                if run_crop_test:
                    # image size >= LARGE_SIZE: use thres_l_cp_test
                    # image size == 0 (extreme case): thres_xs_cp_test
                    # 0 < image size < LARGE_SIZE: scale between
                    # thres_xs_cp_test and thres_l_cp_test
                    # Also, allow at least THRESH_MIN_PIXEL of offset to
                    # prevent the threshold from being too tight for a very
                    # small circle
                    thres_hori_cp_test = max(
                            thres_l_cp_test, thres_xs_cp_test + w_iter *
                            (thres_l_cp_test-thres_xs_cp_test)/LARGE_SIZE)
                    min_threshold_h = THRESH_MIN_PIXEL / cc_w
                    thres_hori_cp_test = max(thres_hori_cp_test,
                                             min_threshold_h)
                    thres_range_h_cp = (cc_ct_gt["hori"]-thres_hori_cp_test,
                                        cc_ct_gt["hori"]+thres_hori_cp_test)
                    thres_vert_cp_test = max(
                            thres_l_cp_test, thres_xs_cp_test + h_iter *
                            (thres_l_cp_test-thres_xs_cp_test)/LARGE_SIZE)
                    min_threshold_v = THRESH_MIN_PIXEL / cc_h
                    thres_vert_cp_test = max(thres_vert_cp_test,
                                             min_threshold_v)
                    thres_range_v_cp = (cc_ct_gt["vert"]-thres_vert_cp_test,
                                        cc_ct_gt["vert"]+thres_vert_cp_test)
                    if (cc_ct["hori"] < thres_range_h_cp[0]
                                or cc_ct["hori"] > thres_range_h_cp[1]
                                or cc_ct["vert"] < thres_range_v_cp[0]
                                or cc_ct["vert"] > thres_range_v_cp[1]):
                        failed_crop.append({"fmt_iter": fmt_iter,
                                            "fmt_cmpr": fmt_cmpr,
                                            "w": w_iter, "h": h_iter,
                                            "ct_hori": cc_ct["hori"],
                                            "ct_vert": cc_ct["vert"],
                                            "valid_range_h": thres_range_h_cp,
                                            "valid_range_v": thres_range_v_cp})
                        its.image.write_image(img/255, img_name, True)

        # Print aspect ratio test results
        failed_image_number_for_aspect_ratio_test = len(failed_ar)
        if failed_image_number_for_aspect_ratio_test > 0:
            print "\nAspect ratio test summary"
            print "Images failed in the aspect ratio test:"
            print "Aspect ratio value: width / height"
            for fa in failed_ar:
                print "%s with %s %dx%d: %.3f;" % (
                        fa["fmt_iter"], fa["fmt_cmpr"],
                        fa["w"], fa["h"], fa["ar"]),
                print "valid range: %.3f ~ %.3f" % (
                        fa["valid_range"][0], fa["valid_range"][1])

        # Print FoV test results
        failed_image_number_for_fov_test = len(failed_fov)
        if failed_image_number_for_fov_test > 0:
            print "\nFoV test summary"
            print "Images failed in the FoV test:"
            for fov in failed_fov:
                print fov

        # Print crop test results
        failed_image_number_for_crop_test = len(failed_crop)
        if failed_image_number_for_crop_test > 0:
            print "\nCrop test summary"
            print "Images failed in the crop test:"
            print "The circle center position (horizontal x vertical) listed",
            print "below is relative to the image center."
            for fc in failed_crop:
                print "%s with %s %dx%d: %.3f x %.3f;" % (
                        fc["fmt_iter"], fc["fmt_cmpr"], fc["w"], fc["h"],
                        fc["ct_hori"], fc["ct_vert"]),
                print "valid horizontal range: %.3f ~ %.3f;" % (
                        fc["valid_range_h"][0], fc["valid_range_h"][1]),
                print "valid vertical range: %.3f ~ %.3f" % (
                        fc["valid_range_v"][0], fc["valid_range_v"][1])

        assert failed_image_number_for_aspect_ratio_test == 0
        assert failed_image_number_for_fov_test == 0
        if level3_device:
            assert failed_image_number_for_crop_test == 0


def measure_aspect_ratio(img, raw_avlb, img_name, debug):
    """Measure the aspect ratio of the black circle in the test image.

    Args:
        img: Numpy float image array in RGB, with pixel values in [0,1].
        raw_avlb: True: raw capture is available; False: raw capture is not
             available.
        img_name: string with image info of format and size.
        debug: boolean for whether in debug mode.
    Returns:
        aspect_ratio: aspect ratio number in float.
        cc_ct: circle center position relative to the center of the image.
        (circle_w, circle_h): tuple of the circle size
    """
    size = img.shape
    img *= 255
    # Gray image
    img_gray = 0.299*img[:, :, 2] + 0.587*img[:, :, 1] + 0.114*img[:, :, 0]
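    # Luma-style weighting (0.299/0.587/0.114); the exact channel weighting is
    # not critical here since the chart is black-and-white and Otsu
    # thresholding follows.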

    # otsu threshold to binarize the image
    _, img_bw = cv2.threshold(np.uint8(img_gray), 0, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # connected component
    cv2_version = cv2.__version__
    if cv2_version.startswith("2.4."):
        contours, hierarchy = cv2.findContours(255-img_bw, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
    elif cv2_version.startswith("3.2."):
        _, contours, hierarchy = cv2.findContours(255-img_bw, cv2.RETR_TREE,
                                                  cv2.CHAIN_APPROX_SIMPLE)
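    # OpenCV 2.4.x returns (contours, hierarchy) while 3.2.x returns
    # (image, contours, hierarchy), hence the two call signatures above.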

    # Check each component and find the black circle
    min_cmpt = size[0] * size[1] * 0.005
    max_cmpt = size[0] * size[1] * 0.35
    num_circle = 0
    aspect_ratio = 0
    for ct, hrch in zip(contours, hierarchy[0]):
        # The radius of the circle is 1/3 of the length of the square, meaning
        # around 1/3 of the area of the square.
        # A parent component should exist and the area should be acceptable.
        # The contour of a circle should have enough points (at least 15 here).
        child_area = cv2.contourArea(ct)
        if (hrch[3] == -1 or child_area < min_cmpt or child_area > max_cmpt
                    or len(ct) < 15):
            continue
        # Check the shapes of the current component and its parent
        child_shape = component_shape(ct)
        parent = hrch[3]
        prt_shape = component_shape(contours[parent])
        prt_area = cv2.contourArea(contours[parent])
        dist_x = abs(child_shape["ctx"]-prt_shape["ctx"])
        dist_y = abs(child_shape["cty"]-prt_shape["cty"])
        # 1. 0.56*Parent's width < Child's width < 0.76*Parent's width.
        # 2. 0.56*Parent's height < Child's height < 0.76*Parent's height.
        # 3. Child's width > 0.1*Image width
        # 4. Child's height > 0.1*Image height
        # 5. 0.30*Parent's area < Child's area < 0.50*Parent's area
        # 6. Child is black, and Parent is white
        # 7. Center of Child and center of Parent should overlap
        if (prt_shape["width"] * 0.56 < child_shape["width"]
                    < prt_shape["width"] * 0.76
                    and prt_shape["height"] * 0.56 < child_shape["height"]
                    < prt_shape["height"] * 0.76
                    and child_shape["width"] > 0.1 * size[1]
                    and child_shape["height"] > 0.1 * size[0]
                    and 0.30 * prt_area < child_area < 0.50 * prt_area
                    and img_bw[child_shape["cty"]][child_shape["ctx"]] == 0
                    and img_bw[child_shape["top"]][child_shape["left"]] == 255
                    and dist_x < 0.1 * child_shape["width"]
                    and dist_y < 0.1 * child_shape["height"]):
            # If raw capture is not available, check the camera is placed right
            # in front of the test page:
            # 1. Distances between parent and child horizontally on both sides,
            #    dist_left and dist_right, should be close.
            # 2. Distances between parent and child vertically on both sides,
            #    dist_top and dist_bottom, should be close.
            if not raw_avlb:
                dist_left = child_shape["left"] - prt_shape["left"]
                dist_right = prt_shape["right"] - child_shape["right"]
                dist_top = child_shape["top"] - prt_shape["top"]
                dist_bottom = prt_shape["bottom"] - child_shape["bottom"]
                if (abs(dist_left-dist_right) > 0.05 * child_shape["width"]
                            or abs(dist_top-dist_bottom) > 0.05 * child_shape["height"]):
                    continue
            # Calculate aspect ratio
            aspect_ratio = float(child_shape["width"]) / child_shape["height"]
            circle_ctx = child_shape["ctx"]
            circle_cty = child_shape["cty"]
            circle_w = float(child_shape["width"])
            circle_h = float(child_shape["height"])
            cc_ct = {"hori": float(child_shape["ctx"]-size[1]/2) / circle_w,
                     "vert": float(child_shape["cty"]-size[0]/2) / circle_h}
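            # The offsets are normalized by the circle size so the crop check
            # is resolution independent; e.g. a circle center 50 pixels right
            # of the image center with a 500-pixel-wide circle gives
            # hori = 0.1.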
            num_circle += 1
            # If more than one circle found, break
            if num_circle == 2:
                break

    if num_circle == 0:
        its.image.write_image(img/255, img_name, True)
        print "No black circle was detected. Please take pictures according",
        print "to the instructions carefully!\n"
        assert num_circle == 1

    if num_circle > 1:
        its.image.write_image(img/255, img_name, True)
        print "More than one black circle was detected. The background of the",
        print "scene may be too complex.\n"
        assert num_circle == 1

    # draw circle center and image center, and save the image
    line_width = max(1, max(size)/500)
    move_text_dist = line_width * 3
    cv2.line(img, (circle_ctx, circle_cty), (size[1]/2, size[0]/2),
             (255, 0, 0), line_width)
    if circle_cty > size[0]/2:
        move_text_down_circle = 4
        move_text_down_image = -1
    else:
        move_text_down_circle = -1
        move_text_down_image = 4
    if circle_ctx > size[1]/2:
        move_text_right_circle = 2
        move_text_right_image = -1
    else:
        move_text_right_circle = -1
        move_text_right_image = 2
    # circle center
    text_circle_x = move_text_dist * move_text_right_circle + circle_ctx
    text_circle_y = move_text_dist * move_text_down_circle + circle_cty
    cv2.circle(img, (circle_ctx, circle_cty), line_width*2, (255, 0, 0), -1)
    cv2.putText(img, "circle center", (text_circle_x, text_circle_y),
                cv2.FONT_HERSHEY_SIMPLEX, line_width/2.0, (255, 0, 0),
                line_width)
    # image center
    text_imgct_x = move_text_dist * move_text_right_image + size[1]/2
    text_imgct_y = move_text_dist * move_text_down_image + size[0]/2
    cv2.circle(img, (size[1]/2, size[0]/2), line_width*2, (255, 0, 0), -1)
    cv2.putText(img, "image center", (text_imgct_x, text_imgct_y),
                cv2.FONT_HERSHEY_SIMPLEX, line_width/2.0, (255, 0, 0),
                line_width)
    if debug:
        its.image.write_image(img/255, img_name, True)

    print "Aspect ratio: %.3f" % aspect_ratio
    print "Circle center position w.r.t. the image center:",
    print "%.3fx%.3f" % (cc_ct["vert"], cc_ct["hori"])
    return aspect_ratio, cc_ct, (circle_w, circle_h)


def component_shape(contour):
    """Measure the shape for a connected component in the aspect ratio test.

    Args:
        contour: return from cv2.findContours. A list of pixel coordinates of
        the contour.

    Returns:
        The left-most, right-most, top-most, and bottom-most pixel locations,
        the width and height, and the center pixel location of the contour.
    """
    shape = {"left": np.inf, "right": 0, "top": np.inf, "bottom": 0,
             "width": 0, "height": 0, "ctx": 0, "cty": 0}
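    # Each contour element is nested as [[x, y]], so pt[0][0] is the x
    # coordinate and pt[0][1] is the y coordinate.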
    for pt in contour:
        if pt[0][0] < shape["left"]:
            shape["left"] = pt[0][0]
        if pt[0][0] > shape["right"]:
            shape["right"] = pt[0][0]
        if pt[0][1] < shape["top"]:
            shape["top"] = pt[0][1]
        if pt[0][1] > shape["bottom"]:
            shape["bottom"] = pt[0][1]
    shape["width"] = shape["right"] - shape["left"] + 1
    shape["height"] = shape["bottom"] - shape["top"] + 1
    shape["ctx"] = (shape["left"]+shape["right"])/2
    shape["cty"] = (shape["top"]+shape["bottom"])/2
    return shape


if __name__ == "__main__":
    main()