/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.filterpacks.videoproc;

import android.filterfw.core.Filter;
import android.filterfw.core.FilterContext;
import android.filterfw.core.GenerateFieldPort;
import android.filterfw.core.GenerateFinalPort;
import android.filterfw.core.Frame;
import android.filterfw.core.GLFrame;
import android.filterfw.core.FrameFormat;
import android.filterfw.core.MutableFrameFormat;
import android.filterfw.core.Program;
import android.filterfw.core.ShaderProgram;
import android.filterfw.format.ImageFormat;
import android.opengl.GLES20;
import android.os.SystemClock;
import android.os.SystemProperties;
import android.util.Log;

import java.lang.ArrayIndexOutOfBoundsException;
import java.lang.Math;
import java.util.Arrays;
import java.nio.ByteBuffer;

/**
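 * Background replacement filter: over an initial learning period it builds a per-pixel
 * statistical model (mean and variance, in YCbCr space) of the scene background, then
 * replaces pixels that match that model with the corresponding pixels of a supplied
 * background video stream.
 *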
 * @hide
 */
public class BackDropperFilter extends Filter {
    /** User-visible parameters */

    private final int BACKGROUND_STRETCH   = 0;
    private final int BACKGROUND_FIT       = 1;
    private final int BACKGROUND_FILL_CROP = 2;

    @GenerateFieldPort(name = "backgroundFitMode", hasDefault = true)
    private int mBackgroundFitMode = BACKGROUND_FILL_CROP;
    @GenerateFieldPort(name = "learningDuration", hasDefault = true)
    private int mLearningDuration = DEFAULT_LEARNING_DURATION;
    @GenerateFieldPort(name = "learningVerifyDuration", hasDefault = true)
    private int mLearningVerifyDuration = DEFAULT_LEARNING_VERIFY_DURATION;
    @GenerateFieldPort(name = "acceptStddev", hasDefault = true)
    private float mAcceptStddev = DEFAULT_ACCEPT_STDDEV;
    @GenerateFieldPort(name = "hierLrgScale", hasDefault = true)
    private float mHierarchyLrgScale = DEFAULT_HIER_LRG_SCALE;
    @GenerateFieldPort(name = "hierMidScale", hasDefault = true)
    private float mHierarchyMidScale = DEFAULT_HIER_MID_SCALE;
    @GenerateFieldPort(name = "hierSmlScale", hasDefault = true)
    private float mHierarchySmlScale = DEFAULT_HIER_SML_SCALE;

    // Dimensions of the foreground / background mask, expressed as power-of-two exponents. The
    // optimum value depends only on the image content, NOT on the dimensions of the input video
    // stream.
    @GenerateFieldPort(name = "maskWidthExp", hasDefault = true)
    private int mMaskWidthExp = DEFAULT_MASK_WIDTH_EXPONENT;
    @GenerateFieldPort(name = "maskHeightExp", hasDefault = true)
    private int mMaskHeightExp = DEFAULT_MASK_HEIGHT_EXPONENT;

    // Levels at which to compute the foreground / background decision. Think of them as deltas
    // SUBTRACTED from maskWidthExp and maskHeightExp.
    @GenerateFieldPort(name = "hierLrgExp", hasDefault = true)
    private int mHierarchyLrgExp = DEFAULT_HIER_LRG_EXPONENT;
    @GenerateFieldPort(name = "hierMidExp", hasDefault = true)
    private int mHierarchyMidExp = DEFAULT_HIER_MID_EXPONENT;
    @GenerateFieldPort(name = "hierSmlExp", hasDefault = true)
    private int mHierarchySmlExp = DEFAULT_HIER_SML_EXPONENT;

    @GenerateFieldPort(name = "lumScale", hasDefault = true)
    private float mLumScale = DEFAULT_Y_SCALE_FACTOR;
    @GenerateFieldPort(name = "chromaScale", hasDefault = true)
    private float mChromaScale = DEFAULT_UV_SCALE_FACTOR;
    @GenerateFieldPort(name = "maskBg", hasDefault = true)
    private float mMaskBg = DEFAULT_MASK_BLEND_BG;
    @GenerateFieldPort(name = "maskFg", hasDefault = true)
    private float mMaskFg = DEFAULT_MASK_BLEND_FG;
    @GenerateFieldPort(name = "exposureChange", hasDefault = true)
    private float mExposureChange = DEFAULT_EXPOSURE_CHANGE;
    @GenerateFieldPort(name = "whitebalanceredChange", hasDefault = true)
    private float mWhiteBalanceRedChange = DEFAULT_WHITE_BALANCE_RED_CHANGE;
    @GenerateFieldPort(name = "whitebalanceblueChange", hasDefault = true)
    private float mWhiteBalanceBlueChange = DEFAULT_WHITE_BALANCE_BLUE_CHANGE;
    @GenerateFieldPort(name = "autowbToggle", hasDefault = true)
    private int mAutoWBToggle = DEFAULT_WHITE_BALANCE_TOGGLE;

    // TODO: These are not updatable:
    @GenerateFieldPort(name = "learningAdaptRate", hasDefault = true)
    private float mAdaptRateLearning = DEFAULT_LEARNING_ADAPT_RATE;
    @GenerateFieldPort(name = "adaptRateBg", hasDefault = true)
    private float mAdaptRateBg = DEFAULT_ADAPT_RATE_BG;
    @GenerateFieldPort(name = "adaptRateFg", hasDefault = true)
    private float mAdaptRateFg = DEFAULT_ADAPT_RATE_FG;
    @GenerateFieldPort(name = "maskVerifyRate", hasDefault = true)
    private float mVerifyRate = DEFAULT_MASK_VERIFY_RATE;
    @GenerateFieldPort(name = "learningDoneListener", hasDefault = true)
    private LearningDoneListener mLearningDoneListener = null;

    @GenerateFieldPort(name = "useTheForce", hasDefault = true)
    private boolean mUseTheForce = false;

    @GenerateFinalPort(name = "provideDebugOutputs", hasDefault = true)
    private boolean mProvideDebugOutputs = false;

    // Whether to mirror the background or not. For example, the Camera app
    // mirrors the preview for the front camera.
    @GenerateFieldPort(name = "mirrorBg", hasDefault = true)
    private boolean mMirrorBg = false;

    // The orientation of the display. This determines which axis is flipped
    // when the background is mirrored.
    @GenerateFieldPort(name = "orientation", hasDefault = true)
    private int mOrientation = 0;

    /** Default algorithm parameter values, for non-shader use */

    // Frame count for learning bg model
    private static final int DEFAULT_LEARNING_DURATION = 40;
    // Frame count for learning verification
    private static final int DEFAULT_LEARNING_VERIFY_DURATION = 10;
    // Maximum distance (in standard deviations) for considering a pixel as background
    private static final float DEFAULT_ACCEPT_STDDEV = 0.85f;
    // Variance threshold scale factor for large scale of hierarchy
    private static final float DEFAULT_HIER_LRG_SCALE = 0.7f;
    // Variance threshold scale factor for medium scale of hierarchy
    private static final float DEFAULT_HIER_MID_SCALE = 0.6f;
    // Variance threshold scale factor for small scale of hierarchy
    private static final float DEFAULT_HIER_SML_SCALE = 0.5f;
    // Width of foreground / background mask, as a power-of-two exponent (2^8 = 256 pixels).
    private static final int DEFAULT_MASK_WIDTH_EXPONENT = 8;
    // Height of foreground / background mask, as a power-of-two exponent (2^8 = 256 pixels).
    private static final int DEFAULT_MASK_HEIGHT_EXPONENT = 8;
    // Area over which to average for large scale (length in pixels = 2^HIERARCHY_*_EXPONENT)
    private static final int DEFAULT_HIER_LRG_EXPONENT = 3;
    // Area over which to average for medium scale
    private static final int DEFAULT_HIER_MID_EXPONENT = 2;
    // Area over which to average for small scale
    private static final int DEFAULT_HIER_SML_EXPONENT = 0;
    // Scale factor for luminance channel in distance calculations (larger = more significant)
    private static final float DEFAULT_Y_SCALE_FACTOR = 0.40f;
    // Scale factor for chroma channels in distance calculations
    private static final float DEFAULT_UV_SCALE_FACTOR = 1.35f;
    // Mask value to start blending away from background
    private static final float DEFAULT_MASK_BLEND_BG = 0.65f;
    // Mask value to start blending away from foreground
    private static final float DEFAULT_MASK_BLEND_FG = 0.95f;
    // Exposure stop number to change the brightness of foreground
    private static final float DEFAULT_EXPOSURE_CHANGE = 1.0f;
    // White balance change in Red channel for foreground
    private static final float DEFAULT_WHITE_BALANCE_RED_CHANGE = 0.0f;
    // White balance change in Blue channel for foreground
    private static final float DEFAULT_WHITE_BALANCE_BLUE_CHANGE = 0.0f;
    // Variable to control automatic white balance effect
    // 0.f -> Auto WB is off; 1.f -> Auto WB is on
    private static final int DEFAULT_WHITE_BALANCE_TOGGLE = 0;

    // Default rate at which to learn bg model during learning period
    private static final float DEFAULT_LEARNING_ADAPT_RATE = 0.2f;
    // Default rate at which to learn bg model from new background pixels
    private static final float DEFAULT_ADAPT_RATE_BG = 0.0f;
    // Default rate at which to learn bg model from new foreground pixels
    private static final float DEFAULT_ADAPT_RATE_FG = 0.0f;
    // Default rate at which to verify whether background is stable
    private static final float DEFAULT_MASK_VERIFY_RATE = 0.25f;
    // Default threshold on the average verification-mask value (0-255); at or above it, learning restarts
    private static final int   DEFAULT_LEARNING_DONE_THRESHOLD = 20;

    // Default 3x3 matrix, column major, for fitting background 1:1
    private static final float[] DEFAULT_BG_FIT_TRANSFORM = new float[] {
        1.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f,
        0.0f, 0.0f, 1.0f
    };

    /** Default algorithm parameter values, for shader use */

    // Area over which to blur binary mask values (length in pixels = 2^MASK_SMOOTH_EXPONENT)
    private static final String MASK_SMOOTH_EXPONENT = "2.0";
    // Scale value for mapping variance distance to fit nicely to 0-1, 8-bit
    private static final String DISTANCE_STORAGE_SCALE = "0.6";
    // Scale value for mapping variance to fit nicely to 0-1, 8-bit
    private static final String VARIANCE_STORAGE_SCALE = "5.0";
    // Default scale of auto white balance parameters
    private static final String DEFAULT_AUTO_WB_SCALE = "0.25";
    // Minimum variance (0-255 scale)
    private static final String MIN_VARIANCE = "3.0";
    // Column-major array for 4x4 matrix converting RGB to YCbCr, JPEG definition (no pedestal)
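    //   Note: with this column-major layout, coeff_yuv * vec4(r, g, b, 1) yields
    //   (Y, Cb + 0.5, Cr + 0.5, 1) using full-range BT.601 (JPEG) coefficients, so the chroma
    //   channels are already offset into the 0-1 range of the 8-bit model textures.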
    private static final String RGB_TO_YUV_MATRIX = "0.299, -0.168736,  0.5,      0.000, " +
                                                    "0.587, -0.331264, -0.418688, 0.000, " +
                                                    "0.114,  0.5,      -0.081312, 0.000, " +
                                                    "0.000,  0.5,       0.5,      1.000 ";
    /** Stream names */

    private static final String[] mInputNames = {"video",
                                                 "background"};

    private static final String[] mOutputNames = {"video"};

    private static final String[] mDebugOutputNames = {"debug1",
                                                       "debug2"};

    /** Other private variables */

    private FrameFormat mOutputFormat;
    private MutableFrameFormat mMemoryFormat;
    private MutableFrameFormat mMaskFormat;
    private MutableFrameFormat mAverageFormat;

    private final boolean mLogVerbose;
    private static final String TAG = "BackDropperFilter";

    /** Shader source code */

    // Shared uniforms and utility functions
    private static String mSharedUtilShader =
            "precision mediump float;\n" +
            "uniform float fg_adapt_rate;\n" +
            "uniform float bg_adapt_rate;\n" +
            "const mat4 coeff_yuv = mat4(" + RGB_TO_YUV_MATRIX + ");\n" +
            "const float dist_scale = " + DISTANCE_STORAGE_SCALE + ";\n" +
            "const float inv_dist_scale = 1. / dist_scale;\n" +
            "const float var_scale=" + VARIANCE_STORAGE_SCALE + ";\n" +
            "const float inv_var_scale = 1. / var_scale;\n" +
            "const float min_variance = inv_var_scale *" + MIN_VARIANCE + "/ 256.;\n" +
            "const float auto_wb_scale = " + DEFAULT_AUTO_WB_SCALE + ";\n" +
            "\n" +
            // Variance distance in luminance between current pixel and background model
            "float gauss_dist_y(float y, float mean, float variance) {\n" +
            "  float dist = (y - mean) * (y - mean) / variance;\n" +
            "  return dist;\n" +
            "}\n" +
            // Sum of variance distances in chroma between current pixel and background
            // model
            "float gauss_dist_uv(vec2 uv, vec2 mean, vec2 variance) {\n" +
            "  vec2 dist = (uv - mean) * (uv - mean) / variance;\n" +
            "  return dist.r + dist.g;\n" +
            "}\n" +
            // Select learning rate for pixel based on smoothed decision mask alpha
            "float local_adapt_rate(float alpha) {\n" +
            "  return mix(bg_adapt_rate, fg_adapt_rate, alpha);\n" +
            "}\n" +
            "\n";

    // Distance calculation shader. Calculates a distance metric between the foreground and the
    //   current background model, in both luminance and in chroma (yuv space).  Distance is
    //   measured in variances from the mean background value. For chroma, the distance is the sum
    //   of the two individual color channel distances. The distances are output on the b and alpha
    //   channels, r and g are for debug information.
    // Inputs:
    //   tex_sampler_0: Mip-map for foreground (live) video frame.
    //   tex_sampler_1: Background mean mask.
    //   tex_sampler_2: Background variance mask.
    //   subsample_level: Level on foreground frame's mip-map.
    private static final String mBgDistanceShader =
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform sampler2D tex_sampler_1;\n" +
            "uniform sampler2D tex_sampler_2;\n" +
            "uniform float subsample_level;\n" +
            "varying vec2 v_texcoord;\n" +
            "void main() {\n" +
            "  vec4 fg_rgb = texture2D(tex_sampler_0, v_texcoord, subsample_level);\n" +
            "  vec4 fg = coeff_yuv * vec4(fg_rgb.rgb, 1.);\n" +
            "  vec4 mean = texture2D(tex_sampler_1, v_texcoord);\n" +
            "  vec4 variance = inv_var_scale * texture2D(tex_sampler_2, v_texcoord);\n" +
            "\n" +
            "  float dist_y = gauss_dist_y(fg.r, mean.r, variance.r);\n" +
            "  float dist_uv = gauss_dist_uv(fg.gb, mean.gb, variance.gb);\n" +
            "  gl_FragColor = vec4(0.5*fg.rg, dist_scale*dist_y, dist_scale*dist_uv);\n" +
            "}\n";

    // Foreground/background mask decision shader. Decides whether each pixel is foreground or
    //   background using a hierarchical threshold on the distance. The binary foreground/background
    //   mask is placed in the alpha channel; the RGB channels contain debug information.
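    //   A pixel is flagged as foreground if it is flagged at any of the three scales: coarser
    //   scales are OR'ed into the finer ones, and the red channel encodes which scales fired
    //   (0.2 for large, +0.3 for medium, +0.5 for small) for debug visualization.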
    private static final String mBgMaskShader =
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform float accept_variance;\n" +
            "uniform vec2 yuv_weights;\n" +
            "uniform float scale_lrg;\n" +
            "uniform float scale_mid;\n" +
            "uniform float scale_sml;\n" +
            "uniform float exp_lrg;\n" +
            "uniform float exp_mid;\n" +
            "uniform float exp_sml;\n" +
            "varying vec2 v_texcoord;\n" +
            // Decide whether pixel is foreground or background based on Y and UV
            //   distance and maximum acceptable variance.
            // yuv_weights.x is smaller than yuv_weights.y to discount the influence of shadow
            "bool is_fg(vec2 dist_yc, float accept_variance) {\n" +
            "  return ( dot(yuv_weights, dist_yc) >= accept_variance );\n" +
            "}\n" +
            "void main() {\n" +
            "  vec4 dist_lrg_sc = texture2D(tex_sampler_0, v_texcoord, exp_lrg);\n" +
            "  vec4 dist_mid_sc = texture2D(tex_sampler_0, v_texcoord, exp_mid);\n" +
            "  vec4 dist_sml_sc = texture2D(tex_sampler_0, v_texcoord, exp_sml);\n" +
            "  vec2 dist_lrg = inv_dist_scale * dist_lrg_sc.ba;\n" +
            "  vec2 dist_mid = inv_dist_scale * dist_mid_sc.ba;\n" +
            "  vec2 dist_sml = inv_dist_scale * dist_sml_sc.ba;\n" +
            "  vec2 norm_dist = 0.75 * dist_sml / accept_variance;\n" + // For debug viz
            "  bool is_fg_lrg = is_fg(dist_lrg, accept_variance * scale_lrg);\n" +
            "  bool is_fg_mid = is_fg_lrg || is_fg(dist_mid, accept_variance * scale_mid);\n" +
            "  float is_fg_sml =\n" +
            "      float(is_fg_mid || is_fg(dist_sml, accept_variance * scale_sml));\n" +
            "  float alpha = 0.5 * is_fg_sml + 0.3 * float(is_fg_mid) + 0.2 * float(is_fg_lrg);\n" +
            "  gl_FragColor = vec4(alpha, norm_dist, is_fg_sml);\n" +
            "}\n";

    // Automatic White Balance parameter decision shader.
    // Uses the Gray World assumption: in a white-balance-corrected image, the averages of the
    //   R, G, B channels converge to a common gray value.
    // To match the white balance of the foreground and the background, the R, G, B channel
    //   averages of the two videos should therefore match.
    // Inputs:
    //   tex_sampler_0: Mip-map for foreground (live) video frame.
    //   tex_sampler_1: Mip-map for background (playback) video frame.
    //   pyramid_depth: Depth of input frames' mip-maps.
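    //   Concretely, the red gain applied to the foreground is
    //   (mean_bg.r / mean_video.r) * (mean_video.g / mean_bg.g), so the average R/G ratio of the
    //   corrected foreground matches that of the background (and similarly for blue); the result
    //   is pre-multiplied by auto_wb_scale so it fits comfortably in an 8-bit texture.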
    private static final String mAutomaticWhiteBalance =
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform sampler2D tex_sampler_1;\n" +
            "uniform float pyramid_depth;\n" +
            "uniform bool autowb_toggle;\n" +
            "varying vec2 v_texcoord;\n" +
            "void main() {\n" +
            "   vec4 mean_video = texture2D(tex_sampler_0, v_texcoord, pyramid_depth);\n"+
            "   vec4 mean_bg = texture2D(tex_sampler_1, v_texcoord, pyramid_depth);\n" +
            // If Auto WB is toggled off, the output is a single-color texture holding auto_wb_scale
            //   (an effective gain of 1 once the subtract shader divides by auto_wb_scale).
            // If Auto WB is toggled on, the output is a single-color texture with the adjustment
            //   gains for the R and B channels stored in the corresponding channels (also pre-scaled).
    339             "   float green_normalizer = mean_video.g / mean_bg.g;\n"+
    340             "   vec4 adjusted_value = vec4(mean_bg.r / mean_video.r * green_normalizer, 1., \n" +
    341             "                         mean_bg.b / mean_video.b * green_normalizer, 1.) * auto_wb_scale; \n" +
    342             "   gl_FragColor = autowb_toggle ? adjusted_value : vec4(auto_wb_scale);\n" +
    343             "}\n";
    344 
    345 
    346     // Background subtraction shader. Uses a mipmap of the binary mask map to blend smoothly between
    347     //   foreground and background
    348     // Inputs:
    349     //   tex_sampler_0: Foreground (live) video frame.
    350     //   tex_sampler_1: Background (playback) video frame.
    351     //   tex_sampler_2: Foreground/background mask.
    352     //   tex_sampler_3: Auto white-balance factors.
    353     private static final String mBgSubtractShader =
    354             "uniform mat3 bg_fit_transform;\n" +
    355             "uniform float mask_blend_bg;\n" +
    356             "uniform float mask_blend_fg;\n" +
    357             "uniform float exposure_change;\n" +
    358             "uniform float whitebalancered_change;\n" +
    359             "uniform float whitebalanceblue_change;\n" +
    360             "uniform sampler2D tex_sampler_0;\n" +
    361             "uniform sampler2D tex_sampler_1;\n" +
    362             "uniform sampler2D tex_sampler_2;\n" +
    363             "uniform sampler2D tex_sampler_3;\n" +
    364             "varying vec2 v_texcoord;\n" +
    365             "void main() {\n" +
    366             "  vec2 bg_texcoord = (bg_fit_transform * vec3(v_texcoord, 1.)).xy;\n" +
    367             "  vec4 bg_rgb = texture2D(tex_sampler_1, bg_texcoord);\n" +
            // The foreground texture is multiplied by the manual and auto white-balance gains in the
            //   R and B channels, and by the exposure change in all of the R, G, B channels.
    370             "  vec4 wb_auto_scale = texture2D(tex_sampler_3, v_texcoord) * exposure_change / auto_wb_scale;\n" +
    371             "  vec4 wb_manual_scale = vec4(1. + whitebalancered_change, 1., 1. + whitebalanceblue_change, 1.);\n" +
    372             "  vec4 fg_rgb = texture2D(tex_sampler_0, v_texcoord);\n" +
    373             "  vec4 fg_adjusted = fg_rgb * wb_manual_scale * wb_auto_scale;\n"+
    374             "  vec4 mask = texture2D(tex_sampler_2, v_texcoord, \n" +
    375             "                      " + MASK_SMOOTH_EXPONENT + ");\n" +
    376             "  float alpha = smoothstep(mask_blend_bg, mask_blend_fg, mask.a);\n" +
    377             "  gl_FragColor = mix(bg_rgb, fg_adjusted, alpha);\n";
    378 
    379     // May the Force... Makes the foreground object translucent blue, with a bright
    380     // blue-white outline
    381     private static final String mBgSubtractForceShader =
    382             "  vec4 ghost_rgb = (fg_adjusted * 0.7 + vec4(0.3,0.3,0.4,0.))*0.65 + \n" +
    383             "                   0.35*bg_rgb;\n" +
    384             "  float glow_start = 0.75 * mask_blend_bg; \n"+
    385             "  float glow_max   = mask_blend_bg; \n"+
    386             "  gl_FragColor = mask.a < glow_start ? bg_rgb : \n" +
    387             "                 mask.a < glow_max ? mix(bg_rgb, vec4(0.9,0.9,1.0,1.0), \n" +
    388             "                                     (mask.a - glow_start) / (glow_max - glow_start) ) : \n" +
    389             "                 mask.a < mask_blend_fg ? mix(vec4(0.9,0.9,1.0,1.0), ghost_rgb, \n" +
    390             "                                    (mask.a - glow_max) / (mask_blend_fg - glow_max) ) : \n" +
    391             "                 ghost_rgb;\n" +
    392             "}\n";
    393 
    // Background model mean update shader. Skews the current model mean toward the most recently
    //   observed value at each pixel, weighted by the learning rate and by whether the pixel is
    //   classified as foreground or background.
    // Inputs:
    //   tex_sampler_0: Mip-map for foreground (live) video frame.
    //   tex_sampler_1: Background mean mask.
    //   tex_sampler_2: Foreground/background mask.
    //   subsample_level: Level on foreground frame's mip-map.
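    //   The update is an exponential moving average: new_mean = mix(mean, observed, rate), i.e.
    //   (1 - rate) * mean + rate * observed, with rate chosen per pixel by local_adapt_rate().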
    private static final String mUpdateBgModelMeanShader =
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform sampler2D tex_sampler_1;\n" +
            "uniform sampler2D tex_sampler_2;\n" +
            "uniform float subsample_level;\n" +
            "varying vec2 v_texcoord;\n" +
            "void main() {\n" +
            "  vec4 fg_rgb = texture2D(tex_sampler_0, v_texcoord, subsample_level);\n" +
            "  vec4 fg = coeff_yuv * vec4(fg_rgb.rgb, 1.);\n" +
            "  vec4 mean = texture2D(tex_sampler_1, v_texcoord);\n" +
            "  vec4 mask = texture2D(tex_sampler_2, v_texcoord, \n" +
            "                      " + MASK_SMOOTH_EXPONENT + ");\n" +
            "\n" +
            "  float alpha = local_adapt_rate(mask.a);\n" +
            "  vec4 new_mean = mix(mean, fg, alpha);\n" +
            "  gl_FragColor = new_mean;\n" +
            "}\n";

    // Background model variance update shader. Skews the current model variance toward the most
    //   recent variance for the pixel, weighted by the learning rate and by whether the pixel is
    //   classified as foreground or background.
    // Inputs:
    //   tex_sampler_0: Mip-map for foreground (live) video frame.
    //   tex_sampler_1: Background mean mask.
    //   tex_sampler_2: Background variance mask.
    //   tex_sampler_3: Foreground/background mask.
    //   subsample_level: Level on foreground frame's mip-map.
    // TODO: to improve efficiency, use a single mask for mean + variance, then merge this into
    // mUpdateBgModelMeanShader.
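    //   Note: the stored variance is scaled by var_scale to use the full 8-bit range, and is
    //   floored at min_variance so the distance shader never divides by a near-zero variance.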
    private static final String mUpdateBgModelVarianceShader =
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform sampler2D tex_sampler_1;\n" +
            "uniform sampler2D tex_sampler_2;\n" +
            "uniform sampler2D tex_sampler_3;\n" +
            "uniform float subsample_level;\n" +
            "varying vec2 v_texcoord;\n" +
            "void main() {\n" +
            "  vec4 fg_rgb = texture2D(tex_sampler_0, v_texcoord, subsample_level);\n" +
            "  vec4 fg = coeff_yuv * vec4(fg_rgb.rgb, 1.);\n" +
            "  vec4 mean = texture2D(tex_sampler_1, v_texcoord);\n" +
            "  vec4 variance = inv_var_scale * texture2D(tex_sampler_2, v_texcoord);\n" +
            "  vec4 mask = texture2D(tex_sampler_3, v_texcoord, \n" +
            "                      " + MASK_SMOOTH_EXPONENT + ");\n" +
            "\n" +
            "  float alpha = local_adapt_rate(mask.a);\n" +
            "  vec4 cur_variance = (fg-mean)*(fg-mean);\n" +
            "  vec4 new_variance = mix(variance, cur_variance, alpha);\n" +
            "  new_variance = max(new_variance, vec4(min_variance));\n" +
            "  gl_FragColor = var_scale * new_variance;\n" +
            "}\n";

    // Background verification shader. Skews the current background verification mask toward the
    //   mask of the most recent frame, weighted by the verification rate (verify_rate).
    private static final String mMaskVerifyShader =
            "uniform sampler2D tex_sampler_0;\n" +
            "uniform sampler2D tex_sampler_1;\n" +
            "uniform float verify_rate;\n" +
            "varying vec2 v_texcoord;\n" +
            "void main() {\n" +
            "  vec4 lastmask = texture2D(tex_sampler_0, v_texcoord);\n" +
            "  vec4 mask = texture2D(tex_sampler_1, v_texcoord);\n" +
            "  float newmask = mix(lastmask.a, mask.a, verify_rate);\n" +
            "  gl_FragColor = vec4(0., 0., 0., newmask);\n" +
            "}\n";

    /** Shader program objects */

    private ShaderProgram mBgDistProgram;
    private ShaderProgram mBgMaskProgram;
    private ShaderProgram mBgSubtractProgram;
    private ShaderProgram mBgUpdateMeanProgram;
    private ShaderProgram mBgUpdateVarianceProgram;
    private ShaderProgram mCopyOutProgram;
    private ShaderProgram mAutomaticWhiteBalanceProgram;
    private ShaderProgram mMaskVerifyProgram;
    private ShaderProgram copyShaderProgram;

    /** Background model storage */

    private boolean mPingPong;
    private GLFrame mBgMean[];
    private GLFrame mBgVariance[];
    private GLFrame mMaskVerify[];
    private GLFrame mDistance;
    private GLFrame mAutoWB;
    private GLFrame mMask;
    private GLFrame mVideoInput;
    private GLFrame mBgInput;
    private GLFrame mMaskAverage;

    /** Overall filter state */

    private boolean isOpen;
    private int mFrameCount;
    private boolean mStartLearning;
    private boolean mBackgroundFitModeChanged;
    private float mRelativeAspect;
    private int mPyramidDepth;
    private int mSubsampleLevel;

    /** Learning listener object */

    public interface LearningDoneListener {
        public void onLearningDone(BackDropperFilter filter);
    }
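
    // A rough usage sketch (assuming the listener is attached through the "learningDoneListener"
    // field port declared above; exactly how that port is set depends on how the filter graph is
    // built):
    //
    //   backDropper.setInputValue("learningDoneListener", new LearningDoneListener() {
    //       @Override
    //       public void onLearningDone(BackDropperFilter filter) {
    //           // Safe to switch the UI out of its "hold still, learning background" state.
    //       }
    //   });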

    /** Public Filter methods */

    public BackDropperFilter(String name) {
        super(name);

        mLogVerbose = Log.isLoggable(TAG, Log.VERBOSE);

        String adjStr = SystemProperties.get("ro.media.effect.bgdropper.adj");
        if (adjStr.length() > 0) {
            try {
                mAcceptStddev += Float.parseFloat(adjStr);
                if (mLogVerbose) {
                    Log.v(TAG, "Adjusting accept threshold by " + adjStr +
                            ", now " + mAcceptStddev);
                }
            } catch (NumberFormatException e) {
                Log.e(TAG,
                        "Badly formatted property ro.media.effect.bgdropper.adj: " + adjStr);
            }
        }
    }

    @Override
    public void setupPorts() {
        // Inputs.
        // TODO: Target should be GPU, but relaxed for now.
        FrameFormat imageFormat = ImageFormat.create(ImageFormat.COLORSPACE_RGBA,
                                                     FrameFormat.TARGET_UNSPECIFIED);
        for (String inputName : mInputNames) {
            addMaskedInputPort(inputName, imageFormat);
        }
        // Normal outputs
        for (String outputName : mOutputNames) {
            addOutputBasedOnInput(outputName, "video");
        }

        // Debug outputs
        if (mProvideDebugOutputs) {
            for (String outputName : mDebugOutputNames) {
                addOutputBasedOnInput(outputName, "video");
            }
        }
    }

    @Override
    public FrameFormat getOutputFormat(String portName, FrameFormat inputFormat) {
        // Create the output format based on the video input format.
        MutableFrameFormat format = inputFormat.mutableCopy();
        // Is this a debug output port? If so, leave dimensions unspecified.
        if (!Arrays.asList(mOutputNames).contains(portName)) {
            format.setDimensions(FrameFormat.SIZE_UNSPECIFIED, FrameFormat.SIZE_UNSPECIFIED);
        }
        return format;
    }

    private boolean createMemoryFormat(FrameFormat inputFormat) {
        // We can't resize because that would require re-learning.
        if (mMemoryFormat != null) {
            return false;
        }

        if (inputFormat.getWidth() == FrameFormat.SIZE_UNSPECIFIED ||
            inputFormat.getHeight() == FrameFormat.SIZE_UNSPECIFIED) {
            throw new RuntimeException("Attempting to process input frame with unknown size");
        }

        mMaskFormat = inputFormat.mutableCopy();
        int maskWidth = (int)Math.pow(2, mMaskWidthExp);
        int maskHeight = (int)Math.pow(2, mMaskHeightExp);
        mMaskFormat.setDimensions(maskWidth, maskHeight);

        mPyramidDepth = Math.max(mMaskWidthExp, mMaskHeightExp);
        mMemoryFormat = mMaskFormat.mutableCopy();
        int widthExp = Math.max(mMaskWidthExp, pyramidLevel(inputFormat.getWidth()));
        int heightExp = Math.max(mMaskHeightExp, pyramidLevel(inputFormat.getHeight()));
        mPyramidDepth = Math.max(widthExp, heightExp);
        int memWidth = Math.max(maskWidth, (int)Math.pow(2, widthExp));
        int memHeight = Math.max(maskHeight, (int)Math.pow(2, heightExp));
        mMemoryFormat.setDimensions(memWidth, memHeight);
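        // mSubsampleLevel is the number of mip-map levels between the memory-frame size and the
        // mask size; sampling the video mip-map at this level yields data at mask resolution.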
        mSubsampleLevel = mPyramidDepth - Math.max(mMaskWidthExp, mMaskHeightExp);

        if (mLogVerbose) {
            Log.v(TAG, "Mask frames size " + maskWidth + " x " + maskHeight);
            Log.v(TAG, "Pyramid levels " + widthExp + " x " + heightExp);
            Log.v(TAG, "Memory frames size " + memWidth + " x " + memHeight);
        }

        mAverageFormat = inputFormat.mutableCopy();
        mAverageFormat.setDimensions(1,1);
        return true;
    }

    public void prepare(FilterContext context) {
        if (mLogVerbose) Log.v(TAG, "Preparing BackDropperFilter!");

        mBgMean = new GLFrame[2];
        mBgVariance = new GLFrame[2];
        mMaskVerify = new GLFrame[2];
        copyShaderProgram = ShaderProgram.createIdentity(context);
    }

    private void allocateFrames(FrameFormat inputFormat, FilterContext context) {
        if (!createMemoryFormat(inputFormat)) {
            return;  // All set.
        }
        if (mLogVerbose) Log.v(TAG, "Allocating BackDropperFilter frames");

        // Create initial background model values
        int numBytes = mMaskFormat.getSize();
        byte[] initialBgMean = new byte[numBytes];
        byte[] initialBgVariance = new byte[numBytes];
        byte[] initialMaskVerify = new byte[numBytes];
        for (int i = 0; i < numBytes; i++) {
            initialBgMean[i] = (byte)128;
            initialBgVariance[i] = (byte)10;
            initialMaskVerify[i] = (byte)0;
        }

        // Get frames to store background model in
        for (int i = 0; i < 2; i++) {
            mBgMean[i] = (GLFrame)context.getFrameManager().newFrame(mMaskFormat);
            mBgMean[i].setData(initialBgMean, 0, numBytes);

            mBgVariance[i] = (GLFrame)context.getFrameManager().newFrame(mMaskFormat);
            mBgVariance[i].setData(initialBgVariance, 0, numBytes);

            mMaskVerify[i] = (GLFrame)context.getFrameManager().newFrame(mMaskFormat);
            mMaskVerify[i].setData(initialMaskVerify, 0, numBytes);
        }

        // Get frames to store other textures in
        if (mLogVerbose) Log.v(TAG, "Done allocating texture for Mean and Variance objects!");

        mDistance = (GLFrame)context.getFrameManager().newFrame(mMaskFormat);
        mMask = (GLFrame)context.getFrameManager().newFrame(mMaskFormat);
        mAutoWB = (GLFrame)context.getFrameManager().newFrame(mAverageFormat);
        mVideoInput = (GLFrame)context.getFrameManager().newFrame(mMemoryFormat);
        mBgInput = (GLFrame)context.getFrameManager().newFrame(mMemoryFormat);
        mMaskAverage = (GLFrame)context.getFrameManager().newFrame(mAverageFormat);

        // Create shader programs
        mBgDistProgram = new ShaderProgram(context, mSharedUtilShader + mBgDistanceShader);
        mBgDistProgram.setHostValue("subsample_level", (float)mSubsampleLevel);

        mBgMaskProgram = new ShaderProgram(context, mSharedUtilShader + mBgMaskShader);
        mBgMaskProgram.setHostValue("accept_variance", mAcceptStddev * mAcceptStddev);
        float[] yuvWeights = { mLumScale, mChromaScale };
        mBgMaskProgram.setHostValue("yuv_weights", yuvWeights);
        mBgMaskProgram.setHostValue("scale_lrg", mHierarchyLrgScale);
        mBgMaskProgram.setHostValue("scale_mid", mHierarchyMidScale);
        mBgMaskProgram.setHostValue("scale_sml", mHierarchySmlScale);
        mBgMaskProgram.setHostValue("exp_lrg", (float)(mSubsampleLevel + mHierarchyLrgExp));
        mBgMaskProgram.setHostValue("exp_mid", (float)(mSubsampleLevel + mHierarchyMidExp));
        mBgMaskProgram.setHostValue("exp_sml", (float)(mSubsampleLevel + mHierarchySmlExp));

        if (mUseTheForce) {
            mBgSubtractProgram = new ShaderProgram(context, mSharedUtilShader + mBgSubtractShader + mBgSubtractForceShader);
        } else {
            mBgSubtractProgram = new ShaderProgram(context, mSharedUtilShader + mBgSubtractShader + "}\n");
        }
        mBgSubtractProgram.setHostValue("bg_fit_transform", DEFAULT_BG_FIT_TRANSFORM);
        mBgSubtractProgram.setHostValue("mask_blend_bg", mMaskBg);
        mBgSubtractProgram.setHostValue("mask_blend_fg", mMaskFg);
        mBgSubtractProgram.setHostValue("exposure_change", mExposureChange);
        mBgSubtractProgram.setHostValue("whitebalanceblue_change", mWhiteBalanceBlueChange);
        mBgSubtractProgram.setHostValue("whitebalancered_change", mWhiteBalanceRedChange);


        mBgUpdateMeanProgram = new ShaderProgram(context, mSharedUtilShader + mUpdateBgModelMeanShader);
        mBgUpdateMeanProgram.setHostValue("subsample_level", (float)mSubsampleLevel);

        mBgUpdateVarianceProgram = new ShaderProgram(context, mSharedUtilShader + mUpdateBgModelVarianceShader);
        mBgUpdateVarianceProgram.setHostValue("subsample_level", (float)mSubsampleLevel);

        mCopyOutProgram = ShaderProgram.createIdentity(context);

        mAutomaticWhiteBalanceProgram = new ShaderProgram(context, mSharedUtilShader + mAutomaticWhiteBalance);
        mAutomaticWhiteBalanceProgram.setHostValue("pyramid_depth", (float)mPyramidDepth);
        mAutomaticWhiteBalanceProgram.setHostValue("autowb_toggle", mAutoWBToggle);

        mMaskVerifyProgram = new ShaderProgram(context, mSharedUtilShader + mMaskVerifyShader);
        mMaskVerifyProgram.setHostValue("verify_rate", mVerifyRate);

        if (mLogVerbose) Log.v(TAG, "Shader width set to " + mMemoryFormat.getWidth());

        mRelativeAspect = 1.f;

        mFrameCount = 0;
        mStartLearning = true;
    }

    public void process(FilterContext context) {
        // Grab inputs and ready intermediate frames and outputs.
        Frame video = pullInput("video");
        Frame background = pullInput("background");
        allocateFrames(video.getFormat(), context);

        // When (re)starting learning, use the fast learning-phase adaptation rate and reset the frame count
        if (mStartLearning) {
            if (mLogVerbose) Log.v(TAG, "Starting learning");
            mBgUpdateMeanProgram.setHostValue("bg_adapt_rate", mAdaptRateLearning);
            mBgUpdateMeanProgram.setHostValue("fg_adapt_rate", mAdaptRateLearning);
            mBgUpdateVarianceProgram.setHostValue("bg_adapt_rate", mAdaptRateLearning);
            mBgUpdateVarianceProgram.setHostValue("fg_adapt_rate", mAdaptRateLearning);
            mFrameCount = 0;
        }

        // Select correct pingpong buffers
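        // (The background model is double-buffered because a GL texture cannot be sampled while it
        //  is also the render target: each frame reads the model at inputIndex and writes the
        //  updated model at outputIndex.)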
        int inputIndex = mPingPong ? 0 : 1;
        int outputIndex = mPingPong ? 1 : 0;
        mPingPong = !mPingPong;

        // Check relative aspect ratios
        updateBgScaling(video, background, mBackgroundFitModeChanged);
        mBackgroundFitModeChanged = false;

        // Copy the input frames into GLFrames

        copyShaderProgram.process(video, mVideoInput);
        copyShaderProgram.process(background, mBgInput);

        mVideoInput.generateMipMap();
        mVideoInput.setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                        GLES20.GL_LINEAR_MIPMAP_NEAREST);

        mBgInput.generateMipMap();
        mBgInput.setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                     GLES20.GL_LINEAR_MIPMAP_NEAREST);

        if (mStartLearning) {
            copyShaderProgram.process(mVideoInput, mBgMean[inputIndex]);
            mStartLearning = false;
        }

        // Process shaders
        Frame[] distInputs = { mVideoInput, mBgMean[inputIndex], mBgVariance[inputIndex] };
        mBgDistProgram.process(distInputs, mDistance);
        mDistance.generateMipMap();
        mDistance.setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                      GLES20.GL_LINEAR_MIPMAP_NEAREST);

        mBgMaskProgram.process(mDistance, mMask);
        mMask.generateMipMap();
        mMask.setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                  GLES20.GL_LINEAR_MIPMAP_NEAREST);

        Frame[] autoWBInputs = { mVideoInput, mBgInput };
        mAutomaticWhiteBalanceProgram.process(autoWBInputs, mAutoWB);

        if (mFrameCount <= mLearningDuration) {
            // During learning
            pushOutput("video", video);

            if (mFrameCount == mLearningDuration - mLearningVerifyDuration) {
                copyShaderProgram.process(mMask, mMaskVerify[outputIndex]);

                mBgUpdateMeanProgram.setHostValue("bg_adapt_rate", mAdaptRateBg);
                mBgUpdateMeanProgram.setHostValue("fg_adapt_rate", mAdaptRateFg);
                mBgUpdateVarianceProgram.setHostValue("bg_adapt_rate", mAdaptRateBg);
                mBgUpdateVarianceProgram.setHostValue("fg_adapt_rate", mAdaptRateFg);


            } else if (mFrameCount > mLearningDuration - mLearningVerifyDuration) {
                // In the learning verification stage, compute background masks and accumulate a
                //   running average whose weights grow exponentially with time
                Frame[] maskVerifyInputs = {mMaskVerify[inputIndex], mMask};
                mMaskVerifyProgram.process(maskVerifyInputs, mMaskVerify[outputIndex]);
                mMaskVerify[outputIndex].generateMipMap();
                mMaskVerify[outputIndex].setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                                             GLES20.GL_LINEAR_MIPMAP_NEAREST);
            }

            if (mFrameCount == mLearningDuration) {
                // On the last verification frame, check whether the verification mask is almost blank.
                // If it is not, restart learning.
                copyShaderProgram.process(mMaskVerify[outputIndex], mMaskAverage);
                ByteBuffer mMaskAverageByteBuffer = mMaskAverage.getData();
                byte[] mask_average = mMaskAverageByteBuffer.array();
                int bi = (int)(mask_average[3] & 0xFF);
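                // (mMaskAverage is a 1x1 RGBA frame holding the averaged verification mask;
                //  byte 3 is its alpha channel, i.e. the average mask value on a 0-255 scale.)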

                if (mLogVerbose) {
                    Log.v(TAG,
                            String.format("Mask_average is %d, threshold is %d",
                                    bi, DEFAULT_LEARNING_DONE_THRESHOLD));
                }

                if (bi >= DEFAULT_LEARNING_DONE_THRESHOLD) {
                    mStartLearning = true;                                      // Restart learning
                } else {
                    if (mLogVerbose) Log.v(TAG, "Learning done");
                    if (mLearningDoneListener != null) {
                        mLearningDoneListener.onLearningDone(this);
                    }
                }
            }
        } else {
            Frame output = context.getFrameManager().newFrame(video.getFormat());
            Frame[] subtractInputs = { video, background, mMask, mAutoWB };
            mBgSubtractProgram.process(subtractInputs, output);
            pushOutput("video", output);
            output.release();
        }

        // Compute mean and variance of the background
        if (mFrameCount < mLearningDuration - mLearningVerifyDuration ||
            mAdaptRateBg > 0.0 || mAdaptRateFg > 0.0) {
            Frame[] meanUpdateInputs = { mVideoInput, mBgMean[inputIndex], mMask };
            mBgUpdateMeanProgram.process(meanUpdateInputs, mBgMean[outputIndex]);
            mBgMean[outputIndex].generateMipMap();
            mBgMean[outputIndex].setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                                     GLES20.GL_LINEAR_MIPMAP_NEAREST);

            Frame[] varianceUpdateInputs = {
              mVideoInput, mBgMean[inputIndex], mBgVariance[inputIndex], mMask
            };
            mBgUpdateVarianceProgram.process(varianceUpdateInputs, mBgVariance[outputIndex]);
            mBgVariance[outputIndex].generateMipMap();
            mBgVariance[outputIndex].setTextureParameter(GLES20.GL_TEXTURE_MIN_FILTER,
                                                         GLES20.GL_LINEAR_MIPMAP_NEAREST);
        }

        // Provide debug output to two smaller viewers
        if (mProvideDebugOutputs) {
            Frame dbg1 = context.getFrameManager().newFrame(video.getFormat());
            mCopyOutProgram.process(video, dbg1);
            pushOutput("debug1", dbg1);
            dbg1.release();

            Frame dbg2 = context.getFrameManager().newFrame(mMemoryFormat);
            mCopyOutProgram.process(mMask, dbg2);
            pushOutput("debug2", dbg2);
            dbg2.release();
        }

        mFrameCount++;

        if (mLogVerbose) {
            if (mFrameCount % 30 == 0) {
                if (startTime == -1) {
                    context.getGLEnvironment().activate();
                    GLES20.glFinish();
                    startTime = SystemClock.elapsedRealtime();
                } else {
                    context.getGLEnvironment().activate();
                    GLES20.glFinish();
                    long endTime = SystemClock.elapsedRealtime();
                    Log.v(TAG, "Avg. frame duration: " + String.format("%.2f",(endTime-startTime)/30.) +
                          " ms. Avg. fps: " + String.format("%.2f", 1000./((endTime-startTime)/30.)) );
                    startTime = endTime;
                }
            }
        }
    }

    private long startTime = -1;

    public void close(FilterContext context) {
        if (mMemoryFormat == null) {
            return;
        }

        if (mLogVerbose) Log.v(TAG, "Filter Closing!");
        for (int i = 0; i < 2; i++) {
            mBgMean[i].release();
            mBgVariance[i].release();
            mMaskVerify[i].release();
        }
        mDistance.release();
        mMask.release();
        mAutoWB.release();
        mVideoInput.release();
        mBgInput.release();
        mMaskAverage.release();

        mMemoryFormat = null;
    }

    // Relearn background model
    synchronized public void relearn() {
        // Let the processing thread know about learning restart
        mStartLearning = true;
    }

    @Override
    public void fieldPortValueUpdated(String name, FilterContext context) {
        // TODO: Many of these can be made ProgramPorts!
        if (name.equals("backgroundFitMode")) {
            mBackgroundFitModeChanged = true;
        } else if (name.equals("acceptStddev")) {
            mBgMaskProgram.setHostValue("accept_variance", mAcceptStddev * mAcceptStddev);
        } else if (name.equals("hierLrgScale")) {
            mBgMaskProgram.setHostValue("scale_lrg", mHierarchyLrgScale);
        } else if (name.equals("hierMidScale")) {
            mBgMaskProgram.setHostValue("scale_mid", mHierarchyMidScale);
        } else if (name.equals("hierSmlScale")) {
            mBgMaskProgram.setHostValue("scale_sml", mHierarchySmlScale);
        } else if (name.equals("hierLrgExp")) {
            mBgMaskProgram.setHostValue("exp_lrg", (float)(mSubsampleLevel + mHierarchyLrgExp));
        } else if (name.equals("hierMidExp")) {
            mBgMaskProgram.setHostValue("exp_mid", (float)(mSubsampleLevel + mHierarchyMidExp));
        } else if (name.equals("hierSmlExp")) {
            mBgMaskProgram.setHostValue("exp_sml", (float)(mSubsampleLevel + mHierarchySmlExp));
        } else if (name.equals("lumScale") || name.equals("chromaScale")) {
            float[] yuvWeights = { mLumScale, mChromaScale };
            mBgMaskProgram.setHostValue("yuv_weights", yuvWeights);
        } else if (name.equals("maskBg")) {
            mBgSubtractProgram.setHostValue("mask_blend_bg", mMaskBg);
        } else if (name.equals("maskFg")) {
            mBgSubtractProgram.setHostValue("mask_blend_fg", mMaskFg);
        } else if (name.equals("exposureChange")) {
            mBgSubtractProgram.setHostValue("exposure_change", mExposureChange);
        } else if (name.equals("whitebalanceredChange")) {
            mBgSubtractProgram.setHostValue("whitebalancered_change", mWhiteBalanceRedChange);
        } else if (name.equals("whitebalanceblueChange")) {
            mBgSubtractProgram.setHostValue("whitebalanceblue_change", mWhiteBalanceBlueChange);
        } else if (name.equals("autowbToggle")) {
            mAutomaticWhiteBalanceProgram.setHostValue("autowb_toggle", mAutoWBToggle);
        }
    }

    private void updateBgScaling(Frame video, Frame background, boolean fitModeChanged) {
        float foregroundAspect = (float)video.getFormat().getWidth() / video.getFormat().getHeight();
        float backgroundAspect = (float)background.getFormat().getWidth() / background.getFormat().getHeight();
        float currentRelativeAspect = foregroundAspect/backgroundAspect;
        if (currentRelativeAspect != mRelativeAspect || fitModeChanged) {
            mRelativeAspect = currentRelativeAspect;
            float xMin = 0.f, xWidth = 1.f, yMin = 0.f, yWidth = 1.f;
            switch (mBackgroundFitMode) {
                case BACKGROUND_STRETCH:
                    // Just map 1:1
                    break;
                case BACKGROUND_FIT:
                    if (mRelativeAspect > 1.0f) {
                        // Foreground is wider than background, scale down
                        // background in X
                        xMin = 0.5f - 0.5f * mRelativeAspect;
                        xWidth = 1.f * mRelativeAspect;
                    } else {
                        // Foreground is taller than background, scale down
                        // background in Y
                        yMin = 0.5f - 0.5f / mRelativeAspect;
                        yWidth = 1 / mRelativeAspect;
                    }
                    break;
                case BACKGROUND_FILL_CROP:
                    if (mRelativeAspect > 1.0f) {
                        // Foreground is wider than background, crop
                        // background in Y
                        yMin = 0.5f - 0.5f / mRelativeAspect;
                        yWidth = 1.f / mRelativeAspect;
                    } else {
                        // Foreground is taller than background, crop
                        // background in X
                        xMin = 0.5f - 0.5f * mRelativeAspect;
                        xWidth = mRelativeAspect;
                    }
                    break;
            }
            // If mirroring is required (for example, the camera mirrors the preview
            // for the front camera).
            // TODO: Backdropper does not attempt to apply any transformation other
            // than flipping. However, in the current state, its "x-axis" is always aligned
            // with the Camera's width. Hence, we need to define the mirroring based on the camera
            // orientation. In the future, a cleaner design would be to handle all the rotation
            // in a separate place.
            if (mMirrorBg) {
                if (mLogVerbose) Log.v(TAG, "Mirroring the background!");
                // Mirroring in portrait
                if (mOrientation == 0 || mOrientation == 180) {
                    xWidth = -xWidth;
                    xMin = 1.0f - xMin;
                } else {
                    // Mirroring in landscape
                    yWidth = -yWidth;
                    yMin = 1.0f - yMin;
                }
            }
            if (mLogVerbose) Log.v(TAG, "bgTransform: xMin, yMin, xWidth, yWidth : " +
                    xMin + ", " + yMin + ", " + xWidth + ", " + yWidth +
                    ", mRelAspRatio = " + mRelativeAspect);
            // The following matrix is the transpose of the actual matrix
            float[] bgTransform = {xWidth, 0.f, 0.f,
                                   0.f, yWidth, 0.f,
                                   xMin, yMin,  1.f};
            mBgSubtractProgram.setHostValue("bg_fit_transform", bgTransform);
        }
    }

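    // One less than floor(log2(size)): e.g. for size = 640, log2(640) ~= 9.32,
    // floor -> 9, minus 1 -> 8, giving a pyramid level of 2^8 = 256 pixels.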
    private int pyramidLevel(int size) {
        return (int)Math.floor(Math.log10(size) / Math.log10(2)) - 1;
    }

}