      1 /*
      2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
      3  *
      4  *  Use of this source code is governed by a BSD-style license
      5  *  that can be found in the LICENSE file in the root of the source
      6  *  tree. An additional intellectual property rights grant can be found
      7  *  in the file PATENTS.  All contributing project authors may
      8  *  be found in the AUTHORS file in the root of the source tree.
      9  *
     10  * This file contains the WEBRTC VP8 wrapper implementation
     11  *
     12  */
     13 
     14 #include "webrtc/modules/video_coding/codecs/vp8/vp8_impl.h"
     15 
     16 #include <stdlib.h>
     17 #include <string.h>
     18 #include <time.h>
     19 #include <vector>
     20 
     21 #include "vpx/vpx_encoder.h"
     22 #include "vpx/vpx_decoder.h"
     23 #include "vpx/vp8cx.h"
     24 #include "vpx/vp8dx.h"
     25 
     26 #include "webrtc/common.h"
     27 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
     28 #include "webrtc/modules/interface/module_common_types.h"
     29 #include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
     30 #include "webrtc/modules/video_coding/codecs/vp8/reference_picture_selection.h"
     31 #include "webrtc/system_wrappers/interface/tick_util.h"
     32 #include "webrtc/system_wrappers/interface/trace_event.h"
     33 
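// Descriptive note (derived from the decoder code below): maximum number of
// frames that may be decoded after a detected loss before Decode() starts
// returning an error, so the receiver can request a new key frame (see the
// propagation_cnt_ handling in VP8DecoderImpl::Decode()).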
     34 enum { kVp8ErrorPropagationTh = 30 };
     35 
     36 namespace webrtc {
     37 
     38 VP8EncoderImpl::VP8EncoderImpl()
     39     : encoded_image_(),
     40       encoded_complete_callback_(NULL),
     41       inited_(false),
     42       timestamp_(0),
     43       picture_id_(0),
     44       feedback_mode_(false),
     45       cpu_speed_(-6),  // default value
     46       rc_max_intra_target_(0),
     47       token_partitions_(VP8_ONE_TOKENPARTITION),
     48       rps_(new ReferencePictureSelection),
     49       temporal_layers_(NULL),
     50       encoder_(NULL),
     51       config_(NULL),
     52       raw_(NULL) {
     53   memset(&codec_, 0, sizeof(codec_));
     54   uint32_t seed = static_cast<uint32_t>(TickTime::MillisecondTimestamp());
     55   srand(seed);
     56 }
     57 
     58 VP8EncoderImpl::~VP8EncoderImpl() {
     59   Release();
     60   delete rps_;
     61 }
     62 
     63 int VP8EncoderImpl::Release() {
     64   if (encoded_image_._buffer != NULL) {
     65     delete [] encoded_image_._buffer;
     66     encoded_image_._buffer = NULL;
     67   }
     68   if (encoder_ != NULL) {
     69     if (vpx_codec_destroy(encoder_)) {
     70       return WEBRTC_VIDEO_CODEC_MEMORY;
     71     }
     72     delete encoder_;
     73     encoder_ = NULL;
     74   }
     75   if (config_ != NULL) {
     76     delete config_;
     77     config_ = NULL;
     78   }
     79   if (raw_ != NULL) {
     80     vpx_img_free(raw_);
     81     raw_ = NULL;
     82   }
     83   delete temporal_layers_;
     84   temporal_layers_ = NULL;
     85   inited_ = false;
     86   return WEBRTC_VIDEO_CODEC_OK;
     87 }
     88 
     89 int VP8EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
     90                              uint32_t new_framerate) {
     91   if (!inited_) {
     92     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
     93   }
     94   if (encoder_->err) {
     95     return WEBRTC_VIDEO_CODEC_ERROR;
     96   }
     97   if (new_framerate < 1) {
     98     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
     99   }
    100   // update bit rate
    101   if (codec_.maxBitrate > 0 && new_bitrate_kbit > codec_.maxBitrate) {
    102     new_bitrate_kbit = codec_.maxBitrate;
    103   }
    104   config_->rc_target_bitrate = new_bitrate_kbit;  // in kbit/s
    105   temporal_layers_->ConfigureBitrates(new_bitrate_kbit, codec_.maxBitrate,
    106                                       new_framerate, config_);
    107   codec_.maxFramerate = new_framerate;
    108 
    109   // update encoder context
    110   if (vpx_codec_enc_config_set(encoder_, config_)) {
    111     return WEBRTC_VIDEO_CODEC_ERROR;
    112   }
    113   return WEBRTC_VIDEO_CODEC_OK;
    114 }
    115 
    116 int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
    117                                int number_of_cores,
    118                                uint32_t /*max_payload_size*/) {
    119   if (inst == NULL) {
    120     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    121   }
    122   if (inst->maxFramerate < 1) {
    123     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    124   }
    125   // allow zero to represent an unspecified maxBitRate
    126   if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
    127     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    128   }
    129   if (inst->width < 1 || inst->height < 1) {
    130     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    131   }
    132   if (number_of_cores < 1) {
    133     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    134   }
    135   feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
    136 
    137   int retVal = Release();
    138   if (retVal < 0) {
    139     return retVal;
    140   }
    141   if (encoder_ == NULL) {
    142     encoder_ = new vpx_codec_ctx_t;
    143   }
    144   if (config_ == NULL) {
    145     config_ = new vpx_codec_enc_cfg_t;
    146   }
    147   timestamp_ = 0;
    148 
    149   if (&codec_ != inst) {
    150     codec_ = *inst;
    151   }
    152 
    153   // TODO(andresp): assert(inst->extra_options) and cleanup.
    154   Config default_options;
    155   const Config& options =
    156       inst->extra_options ? *inst->extra_options : default_options;
    157 
    158   int num_temporal_layers = inst->codecSpecific.VP8.numberOfTemporalLayers > 1 ?
    159       inst->codecSpecific.VP8.numberOfTemporalLayers : 1;
    160   assert(temporal_layers_ == NULL);
    161   temporal_layers_ = options.Get<TemporalLayers::Factory>()
    162                          .Create(num_temporal_layers, rand());
  // Random start; 15 bits is enough (the picture ID wraps at 0x7FFF).
  picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;
    165 
    166   // allocate memory for encoded image
    167   if (encoded_image_._buffer != NULL) {
    168     delete [] encoded_image_._buffer;
    169   }
    170   encoded_image_._size = CalcBufferSize(kI420, codec_.width, codec_.height);
    171   encoded_image_._buffer = new uint8_t[encoded_image_._size];
    172   encoded_image_._completeFrame = true;
    173 
    174   // Creating a wrapper to the image - setting image data to NULL. Actual
    175   // pointer will be set in encode. Setting align to 1, as it is meaningless
    176   // (actual memory is not allocated).
    177   raw_ = vpx_img_wrap(NULL, IMG_FMT_I420, codec_.width, codec_.height,
    178                       1, NULL);
    179   // populate encoder configuration with default values
    180   if (vpx_codec_enc_config_default(vpx_codec_vp8_cx(), config_, 0)) {
    181     return WEBRTC_VIDEO_CODEC_ERROR;
    182   }
    183   config_->g_w = codec_.width;
    184   config_->g_h = codec_.height;
    185   config_->rc_target_bitrate = inst->startBitrate;  // in kbit/s
    186   temporal_layers_->ConfigureBitrates(inst->startBitrate, inst->maxBitrate,
    187                                       inst->maxFramerate, config_);
    188   // setting the time base of the codec
    189   config_->g_timebase.num = 1;
    190   config_->g_timebase.den = 90000;
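  // For example, with this 90 kHz timebase a 30 fps stream advances the
  // timestamp by 90000 / 30 = 3000 ticks per frame (see the duration
  // computation in Encode()).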
    191 
    192   // Set the error resilience mode according to user settings.
    193   switch (inst->codecSpecific.VP8.resilience) {
    194     case kResilienceOff:
    195       config_->g_error_resilient = 0;
    196       if (num_temporal_layers > 1) {
    197         // Must be on for temporal layers (i.e., |num_temporal_layers| > 1).
    198         config_->g_error_resilient = 1;
    199       }
    200       break;
    201     case kResilientStream:
      // TODO(holmer): Replace with VPX_ERROR_RESILIENT_DEFAULT when we drop
      // support for libvpx 9.6.0.
      config_->g_error_resilient = 1;
    205       break;
    206     case kResilientFrames:
    207 #ifdef INDEPENDENT_PARTITIONS
      config_->g_error_resilient =
          VPX_ERROR_RESILIENT_DEFAULT | VPX_ERROR_RESILIENT_PARTITIONS;
    210       break;
    211 #else
    212       return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;  // Not supported
    213 #endif
    214   }
  config_->g_lag_in_frames = 0;  // 0 - no frame lagging.
    216 
    217   if (codec_.width * codec_.height >= 1920 * 1080 && number_of_cores > 8) {
    218     config_->g_threads = 8;  // 8 threads for 1080p on high perf machines.
    219   } else if (codec_.width * codec_.height > 1280 * 960 &&
    220       number_of_cores >= 6) {
    221     config_->g_threads = 3;  // 3 threads for 1080p.
    222   } else if (codec_.width * codec_.height > 640 * 480 && number_of_cores >= 3) {
    223     config_->g_threads = 2;  // 2 threads for qHD/HD.
    224   } else {
    225     config_->g_threads = 1;  // 1 thread for VGA or less
    226   }
    227 
    228   // rate control settings
    229   config_->rc_dropframe_thresh = inst->codecSpecific.VP8.frameDroppingOn ?
    230       30 : 0;
    231   config_->rc_end_usage = VPX_CBR;
    232   config_->g_pass = VPX_RC_ONE_PASS;
    233   config_->rc_resize_allowed = inst->codecSpecific.VP8.automaticResizeOn ?
    234       1 : 0;
    235   config_->rc_min_quantizer = 2;
    236   config_->rc_max_quantizer = inst->qpMax;
    237   config_->rc_undershoot_pct = 100;
    238   config_->rc_overshoot_pct = 15;
    239   config_->rc_buf_initial_sz = 500;
    240   config_->rc_buf_optimal_sz = 600;
    241   config_->rc_buf_sz = 1000;
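  // Note: the rc_buf_* sizes above are expressed in milliseconds of buffering
  // at the target bitrate, and the *_pct fields are percentages relative to
  // the target bitrate.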
    242   // set the maximum target size of any key-frame.
    243   rc_max_intra_target_ = MaxIntraTarget(config_->rc_buf_optimal_sz);
    244 
    245   if (feedback_mode_) {
    246     // Disable periodic key frames if we get feedback from the decoder
    247     // through SLI and RPSI.
    248     config_->kf_mode = VPX_KF_DISABLED;
  } else if (inst->codecSpecific.VP8.keyFrameInterval > 0) {
    250     config_->kf_mode = VPX_KF_AUTO;
    251     config_->kf_max_dist = inst->codecSpecific.VP8.keyFrameInterval;
    252   } else {
    253     config_->kf_mode = VPX_KF_DISABLED;
    254   }
    255   switch (inst->codecSpecific.VP8.complexity) {
    256     case kComplexityHigh:
    257       cpu_speed_ = -5;
    258       break;
    259     case kComplexityHigher:
    260       cpu_speed_ = -4;
    261       break;
    262     case kComplexityMax:
    263       cpu_speed_ = -3;
    264       break;
    265     default:
    266       cpu_speed_ = -6;
    267       break;
    268   }
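  // cpu_speed_ is handed to VP8E_SET_CPUUSED in InitAndSetControlSettings();
  // settings of smaller magnitude make the encoder slower but higher quality,
  // which is why the higher complexity modes above move towards zero.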
    269 #if defined(WEBRTC_ARCH_ARM)
  // On mobile platforms, always set to -12 to trade off CPU usage
  // against video quality.
    272   cpu_speed_ = -12;
    273 #endif
    274   rps_->Init();
    275   return InitAndSetControlSettings(inst);
    276 }
    277 
    278 int VP8EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) {
    279   vpx_codec_flags_t flags = 0;
    280   // TODO(holmer): We should make a smarter decision on the number of
    281   // partitions. Eight is probably not the optimal number for low resolution
    282   // video.
    283   flags |= VPX_CODEC_USE_OUTPUT_PARTITION;
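  // In output-partition mode the encoder returns every VP8 partition as a
  // separate VPX_CODEC_CX_FRAME_PKT; GetEncodedPartitions() copies them back
  // to back and records their offsets in the RTPFragmentationHeader.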
    284   if (vpx_codec_enc_init(encoder_, vpx_codec_vp8_cx(), config_, flags)) {
    285     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    286   }
    287   vpx_codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
    288   vpx_codec_control(encoder_, VP8E_SET_CPUUSED, cpu_speed_);
    289   vpx_codec_control(encoder_, VP8E_SET_TOKEN_PARTITIONS,
    290                     static_cast<vp8e_token_partitions>(token_partitions_));
    291 #if !defined(WEBRTC_ARCH_ARM)
    292   // TODO(fbarchard): Enable Noise reduction for ARM once optimized.
    293   vpx_codec_control(encoder_, VP8E_SET_NOISE_SENSITIVITY,
    294                     inst->codecSpecific.VP8.denoisingOn ? 1 : 0);
    295 #endif
    296   vpx_codec_control(encoder_, VP8E_SET_MAX_INTRA_BITRATE_PCT,
    297                     rc_max_intra_target_);
    298   inited_ = true;
    299   return WEBRTC_VIDEO_CODEC_OK;
    300 }
    301 
    302 uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) {
    303   // Set max to the optimal buffer level (normalized by target BR),
    304   // and scaled by a scalePar.
    305   // Max target size = scalePar * optimalBufferSize * targetBR[Kbps].
  // This value is expressed as a percentage of perFrameBw:
    307   // perFrameBw = targetBR[Kbps] * 1000 / frameRate.
    308   // The target in % is as follows:
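  // Example (illustrative numbers): with rc_buf_optimal_sz = 600 and
  // maxFramerate = 30, targetPct = 600 * 0.5 * 30 / 10 = 900, i.e. a key
  // frame may use up to 9x the average per-frame bandwidth.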
    309 
    310   float scalePar = 0.5;
    311   uint32_t targetPct = optimalBuffersize * scalePar * codec_.maxFramerate / 10;
    312 
    313   // Don't go below 3 times the per frame bandwidth.
    314   const uint32_t minIntraTh = 300;
  return (targetPct < minIntraTh) ? minIntraTh : targetPct;
    316 }
    317 
    318 int VP8EncoderImpl::Encode(const I420VideoFrame& input_image,
    319                            const CodecSpecificInfo* codec_specific_info,
    320                            const std::vector<VideoFrameType>* frame_types) {
    321   TRACE_EVENT1("webrtc", "VP8::Encode", "timestamp", input_image.timestamp());
    322 
    323   if (!inited_) {
    324     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    325   }
    326   if (input_image.IsZeroSize()) {
    327     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    328   }
    329   if (encoded_complete_callback_ == NULL) {
    330     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    331   }
    332 
    333   VideoFrameType frame_type = kDeltaFrame;
    334   // We only support one stream at the moment.
    335   if (frame_types && frame_types->size() > 0) {
    336     frame_type = (*frame_types)[0];
    337   }
    338 
    339   // Check for change in frame size.
    340   if (input_image.width() != codec_.width ||
    341       input_image.height() != codec_.height) {
    342     int ret = UpdateCodecFrameSize(input_image);
    343     if (ret < 0) {
    344       return ret;
    345     }
    346   }
    347   // Image in vpx_image_t format.
    348   // Input image is const. VP8's raw image is not defined as const.
    349   raw_->planes[PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane));
    350   raw_->planes[PLANE_U] = const_cast<uint8_t*>(input_image.buffer(kUPlane));
    351   raw_->planes[PLANE_V] = const_cast<uint8_t*>(input_image.buffer(kVPlane));
    352   // TODO(mikhal): Stride should be set in initialization.
    353   raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
    354   raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane);
    355   raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
    356 
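  // Ask the temporal layers implementation which reference frames this frame
  // may reference and/or update; the result is a bitmask of VP8 encode flags
  // such as VP8_EFLAG_NO_REF_GF.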
    357   int flags = temporal_layers_->EncodeFlags(input_image.timestamp());
    358 
    359   bool send_keyframe = (frame_type == kKeyFrame);
    360   if (send_keyframe) {
    361     // Key frame request from caller.
    362     // Will update both golden and alt-ref.
    363     flags = VPX_EFLAG_FORCE_KF;
    364   } else if (feedback_mode_ && codec_specific_info) {
    365     // Handle RPSI and SLI messages and set up the appropriate encode flags.
    366     bool sendRefresh = false;
    367     if (codec_specific_info->codecType == kVideoCodecVP8) {
    368       if (codec_specific_info->codecSpecific.VP8.hasReceivedRPSI) {
    369         rps_->ReceivedRPSI(
    370             codec_specific_info->codecSpecific.VP8.pictureIdRPSI);
    371       }
    372       if (codec_specific_info->codecSpecific.VP8.hasReceivedSLI) {
    373         sendRefresh = rps_->ReceivedSLI(input_image.timestamp());
    374       }
    375     }
    376     flags = rps_->EncodeFlags(picture_id_, sendRefresh,
    377                               input_image.timestamp());
    378   }
    379 
    380   // TODO(holmer): Ideally the duration should be the timestamp diff of this
    381   // frame and the next frame to be encoded, which we don't have. Instead we
    382   // would like to use the duration of the previous frame. Unfortunately the
    383   // rate control seems to be off with that setup. Using the average input
    384   // frame rate to calculate an average duration for now.
    385   assert(codec_.maxFramerate > 0);
    386   uint32_t duration = 90000 / codec_.maxFramerate;
    387   if (vpx_codec_encode(encoder_, raw_, timestamp_, duration, flags,
    388                        VPX_DL_REALTIME)) {
    389     return WEBRTC_VIDEO_CODEC_ERROR;
    390   }
    391   timestamp_ += duration;
    392 
    393   return GetEncodedPartitions(input_image);
    394 }
    395 
    396 int VP8EncoderImpl::UpdateCodecFrameSize(const I420VideoFrame& input_image) {
    397   codec_.width = input_image.width();
    398   codec_.height = input_image.height();
    399   raw_->w = codec_.width;
    400   raw_->h = codec_.height;
    401   raw_->d_w = codec_.width;
    402   raw_->d_h = codec_.height;
    403 
    404   raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
    405   raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane);
    406   raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
    407   vpx_img_set_rect(raw_, 0, 0, codec_.width, codec_.height);
    408 
    409   // Update encoder context for new frame size.
    410   // Change of frame size will automatically trigger a key frame.
    411   config_->g_w = codec_.width;
    412   config_->g_h = codec_.height;
    413   if (vpx_codec_enc_config_set(encoder_, config_)) {
    414     return WEBRTC_VIDEO_CODEC_ERROR;
    415   }
    416   return WEBRTC_VIDEO_CODEC_OK;
    417 }
    418 
void VP8EncoderImpl::PopulateCodecSpecific(
    CodecSpecificInfo* codec_specific,
    const vpx_codec_cx_pkt& pkt,
    uint32_t timestamp) {
    422   assert(codec_specific != NULL);
    423   codec_specific->codecType = kVideoCodecVP8;
    424   CodecSpecificInfoVP8 *vp8Info = &(codec_specific->codecSpecific.VP8);
    425   vp8Info->pictureId = picture_id_;
    426   vp8Info->simulcastIdx = 0;
    427   vp8Info->keyIdx = kNoKeyIdx;  // TODO(hlundin) populate this
    428   vp8Info->nonReference = (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) != 0;
  temporal_layers_->PopulateCodecSpecific(
      (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? true : false, vp8Info,
      timestamp);
    432   picture_id_ = (picture_id_ + 1) & 0x7FFF;  // prepare next
    433 }
    434 
    435 int VP8EncoderImpl::GetEncodedPartitions(const I420VideoFrame& input_image) {
    436   vpx_codec_iter_t iter = NULL;
    437   int part_idx = 0;
    438   encoded_image_._length = 0;
    439   encoded_image_._frameType = kDeltaFrame;
    440   RTPFragmentationHeader frag_info;
    441   frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_) + 1);
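  // token_partitions_ is VP8_ONE_TOKENPARTITION (= 0), so room is reserved
  // for (1 << 0) + 1 = 2 fragments: the first partition (modes and motion
  // vectors) plus a single token partition.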
    442   CodecSpecificInfo codec_specific;
    443 
    444   const vpx_codec_cx_pkt_t *pkt = NULL;
    445   while ((pkt = vpx_codec_get_cx_data(encoder_, &iter)) != NULL) {
    446     switch (pkt->kind) {
    447       case VPX_CODEC_CX_FRAME_PKT: {
    448         memcpy(&encoded_image_._buffer[encoded_image_._length],
    449                pkt->data.frame.buf,
    450                pkt->data.frame.sz);
    451         frag_info.fragmentationOffset[part_idx] = encoded_image_._length;
        frag_info.fragmentationLength[part_idx] = pkt->data.frame.sz;
    453         frag_info.fragmentationPlType[part_idx] = 0;  // not known here
    454         frag_info.fragmentationTimeDiff[part_idx] = 0;
    455         encoded_image_._length += pkt->data.frame.sz;
    456         assert(encoded_image_._length <= encoded_image_._size);
    457         ++part_idx;
    458         break;
    459       }
    460       default: {
    461         break;
    462       }
    463     }
    464     // End of frame
    465     if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
    466       // check if encoded frame is a key frame
      if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
        encoded_image_._frameType = kKeyFrame;
        rps_->EncodedKeyFrame(picture_id_);
      }
    471       PopulateCodecSpecific(&codec_specific, *pkt, input_image.timestamp());
    472       break;
    473     }
    474   }
    475   if (encoded_image_._length > 0) {
    476     TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
    477     encoded_image_._timeStamp = input_image.timestamp();
    478     encoded_image_.capture_time_ms_ = input_image.render_time_ms();
    479     encoded_image_._encodedHeight = codec_.height;
    480     encoded_image_._encodedWidth = codec_.width;
    encoded_complete_callback_->Encoded(
        encoded_image_, &codec_specific, &frag_info);
    483   }
    484   return WEBRTC_VIDEO_CODEC_OK;
    485 }
    486 
    487 int VP8EncoderImpl::SetChannelParameters(uint32_t /*packet_loss*/, int rtt) {
    488   rps_->SetRtt(rtt);
    489   return WEBRTC_VIDEO_CODEC_OK;
    490 }
    491 
    492 int VP8EncoderImpl::RegisterEncodeCompleteCallback(
    493     EncodedImageCallback* callback) {
    494   encoded_complete_callback_ = callback;
    495   return WEBRTC_VIDEO_CODEC_OK;
    496 }
    497 
    498 VP8DecoderImpl::VP8DecoderImpl()
    499     : decode_complete_callback_(NULL),
    500       inited_(false),
    501       feedback_mode_(false),
    502       decoder_(NULL),
    503       last_keyframe_(),
    504       image_format_(VPX_IMG_FMT_NONE),
    505       ref_frame_(NULL),
    506       propagation_cnt_(-1),
    507       mfqe_enabled_(false),
    508       key_frame_required_(true) {
    509   memset(&codec_, 0, sizeof(codec_));
    510 }
    511 
    512 VP8DecoderImpl::~VP8DecoderImpl() {
  inited_ = true;  // Pretend we are initialized so that Release() runs.
    514   Release();
    515 }
    516 
    517 int VP8DecoderImpl::Reset() {
    518   if (!inited_) {
    519     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    520   }
    521   InitDecode(&codec_, 1);
    522   propagation_cnt_ = -1;
    523   mfqe_enabled_ = false;
    524   return WEBRTC_VIDEO_CODEC_OK;
    525 }
    526 
    527 int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
    528   if (inst == NULL) {
    529     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    530   }
    531   int ret_val = Release();
    532   if (ret_val < 0) {
    533     return ret_val;
    534   }
    535   if (decoder_ == NULL) {
    536     decoder_ = new vpx_dec_ctx_t;
    537   }
    538   if (inst->codecType == kVideoCodecVP8) {
    539     feedback_mode_ = inst->codecSpecific.VP8.feedbackModeOn;
    540   }
  vpx_codec_dec_cfg_t cfg;
    542   // Setting number of threads to a constant value (1)
    543   cfg.threads = 1;
    544   cfg.h = cfg.w = 0;  // set after decode
    545 
    546   vpx_codec_flags_t flags = 0;
    547 #ifndef WEBRTC_ARCH_ARM
    548   flags = VPX_CODEC_USE_POSTPROC;
    549   if (inst->codecSpecific.VP8.errorConcealmentOn) {
    550     flags |= VPX_CODEC_USE_ERROR_CONCEALMENT;
    551   }
    552 #ifdef INDEPENDENT_PARTITIONS
    553   flags |= VPX_CODEC_USE_INPUT_PARTITION;
    554 #endif
    555 #endif
    556 
    557   if (vpx_codec_dec_init(decoder_, vpx_codec_vp8_dx(), &cfg, flags)) {
    558     return WEBRTC_VIDEO_CODEC_MEMORY;
    559   }
    560 
    561 #ifndef WEBRTC_ARCH_ARM
  vp8_postproc_cfg_t ppcfg;
  ppcfg.post_proc_flag = VP8_DEMACROBLOCK | VP8_DEBLOCK;
  // Strength of deblocking filter. Valid range: [0, 16].
    565   ppcfg.deblocking_level = 3;
    566   vpx_codec_control(decoder_, VP8_SET_POSTPROC, &ppcfg);
    567 #endif
    568 
    569   if (&codec_ != inst) {
    570     // Save VideoCodec instance for later; mainly for duplicating the decoder.
    571     codec_ = *inst;
    572   }
    573 
    574   propagation_cnt_ = -1;
    575 
    576   inited_ = true;
    577 
    578   // Always start with a complete key frame.
    579   key_frame_required_ = true;
    580 
    581   return WEBRTC_VIDEO_CODEC_OK;
    582 }
    583 
    584 int VP8DecoderImpl::Decode(const EncodedImage& input_image,
    585                            bool missing_frames,
    586                            const RTPFragmentationHeader* fragmentation,
    587                            const CodecSpecificInfo* codec_specific_info,
    588                            int64_t /*render_time_ms*/) {
    589   if (!inited_) {
    590     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    591   }
    592   if (decode_complete_callback_ == NULL) {
    593     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
    594   }
    595   if (input_image._buffer == NULL && input_image._length > 0) {
    596     // Reset to avoid requesting key frames too often.
    597     if (propagation_cnt_ > 0)
    598       propagation_cnt_ = 0;
    599     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    600   }
    601 
    602 #ifdef INDEPENDENT_PARTITIONS
    603   if (fragmentation == NULL) {
    604     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    605   }
    606 #endif
    607 
    608 #ifndef WEBRTC_ARCH_ARM
    609   if (!mfqe_enabled_ && codec_specific_info &&
    610       codec_specific_info->codecSpecific.VP8.temporalIdx > 0) {
    611     // Enable MFQE if we are receiving layers.
    612     // temporalIdx is set in the jitter buffer according to what the RTP
    613     // header says.
    614     mfqe_enabled_ = true;
    vp8_postproc_cfg_t ppcfg;
    616     ppcfg.post_proc_flag = VP8_MFQE | VP8_DEMACROBLOCK | VP8_DEBLOCK;
    617     ppcfg.deblocking_level = 3;
    618     vpx_codec_control(decoder_, VP8_SET_POSTPROC, &ppcfg);
    619   }
    620 #endif
    621 
    622 
    623   // Always start with a complete key frame.
    624   if (key_frame_required_) {
    625     if (input_image._frameType != kKeyFrame)
    626       return WEBRTC_VIDEO_CODEC_ERROR;
    627     // We have a key frame - is it complete?
    628     if (input_image._completeFrame) {
    629       key_frame_required_ = false;
    630     } else {
    631       return WEBRTC_VIDEO_CODEC_ERROR;
    632     }
    633   }
    634   // Restrict error propagation using key frame requests. Disabled when
    635   // the feedback mode is enabled (RPS).
    636   // Reset on a key frame refresh.
    637   if (!feedback_mode_) {
    638     if (input_image._frameType == kKeyFrame && input_image._completeFrame)
    639       propagation_cnt_ = -1;
    640     // Start count on first loss.
    641     else if ((!input_image._completeFrame || missing_frames) &&
    642         propagation_cnt_ == -1)
    643       propagation_cnt_ = 0;
    644     if (propagation_cnt_ >= 0)
    645       propagation_cnt_++;
    646   }
    647 
    648   vpx_codec_iter_t iter = NULL;
    649   vpx_image_t* img;
    650   int ret;
    651 
    652   // Check for missing frames.
    653   if (missing_frames) {
    654     // Call decoder with zero data length to signal missing frames.
    655     if (vpx_codec_decode(decoder_, NULL, 0, 0, VPX_DL_REALTIME)) {
    656       // Reset to avoid requesting key frames too often.
    657       if (propagation_cnt_ > 0)
    658         propagation_cnt_ = 0;
    659       return WEBRTC_VIDEO_CODEC_ERROR;
    660     }
    661     // We don't render this frame.
    662     vpx_codec_get_frame(decoder_, &iter);
    663     iter = NULL;
    664   }
    665 
    666 #ifdef INDEPENDENT_PARTITIONS
  if (DecodePartitions(input_image, fragmentation)) {
    668     // Reset to avoid requesting key frames too often.
    669     if (propagation_cnt_ > 0) {
    670       propagation_cnt_ = 0;
    671     }
    672     return WEBRTC_VIDEO_CODEC_ERROR;
    673   }
    674 #else
    675   uint8_t* buffer = input_image._buffer;
    676   if (input_image._length == 0) {
    677     buffer = NULL;  // Triggers full frame concealment.
    678   }
    679   if (vpx_codec_decode(decoder_,
    680                        buffer,
    681                        input_image._length,
    682                        0,
    683                        VPX_DL_REALTIME)) {
    684     // Reset to avoid requesting key frames too often.
    685     if (propagation_cnt_ > 0)
    686       propagation_cnt_ = 0;
    687     return WEBRTC_VIDEO_CODEC_ERROR;
    688   }
    689 #endif
    690 
    691   // Store encoded frame if key frame. (Used in Copy method.)
    692   if (input_image._frameType == kKeyFrame && input_image._buffer != NULL) {
    693     const uint32_t bytes_to_copy = input_image._length;
    694     if (last_keyframe_._size < bytes_to_copy) {
    695       delete [] last_keyframe_._buffer;
    696       last_keyframe_._buffer = NULL;
    697       last_keyframe_._size = 0;
    698     }
    699 
    700     uint8_t* temp_buffer = last_keyframe_._buffer;  // Save buffer ptr.
    701     uint32_t temp_size = last_keyframe_._size;  // Save size.
    702     last_keyframe_ = input_image;  // Shallow copy.
    703     last_keyframe_._buffer = temp_buffer;  // Restore buffer ptr.
    704     last_keyframe_._size = temp_size;  // Restore buffer size.
    705     if (!last_keyframe_._buffer) {
    706       // Allocate memory.
    707       last_keyframe_._size = bytes_to_copy;
    708       last_keyframe_._buffer = new uint8_t[last_keyframe_._size];
    709     }
    710     // Copy encoded frame.
    711     memcpy(last_keyframe_._buffer, input_image._buffer, bytes_to_copy);
    712     last_keyframe_._length = bytes_to_copy;
    713   }
    714 
    715   img = vpx_codec_get_frame(decoder_, &iter);
    716   ret = ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_);
    717   if (ret != 0) {
    718     // Reset to avoid requesting key frames too often.
    719     if (ret < 0 && propagation_cnt_ > 0)
    720       propagation_cnt_ = 0;
    721     return ret;
    722   }
    723   if (feedback_mode_) {
    // Whenever we receive an incomplete key frame all reference buffers will
    // be corrupt. If that happens we must request new key frames until we
    // decode a complete key frame.
    727     if (input_image._frameType == kKeyFrame && !input_image._completeFrame)
    728       return WEBRTC_VIDEO_CODEC_ERROR;
    729 
    730     // Check for reference updates and last reference buffer corruption and
    731     // signal successful reference propagation or frame corruption to the
    732     // encoder.
    733     int reference_updates = 0;
    734     if (vpx_codec_control(decoder_, VP8D_GET_LAST_REF_UPDATES,
    735                           &reference_updates)) {
    736       // Reset to avoid requesting key frames too often.
    737       if (propagation_cnt_ > 0)
    738         propagation_cnt_ = 0;
    739       return WEBRTC_VIDEO_CODEC_ERROR;
    740     }
    741     int corrupted = 0;
    742     if (vpx_codec_control(decoder_, VP8D_GET_FRAME_CORRUPTED, &corrupted)) {
    743       // Reset to avoid requesting key frames too often.
    744       if (propagation_cnt_ > 0)
    745         propagation_cnt_ = 0;
    746       return WEBRTC_VIDEO_CODEC_ERROR;
    747     }
    748     int16_t picture_id = -1;
    749     if (codec_specific_info) {
    750       picture_id = codec_specific_info->codecSpecific.VP8.pictureId;
    751     }
    752     if (picture_id > -1) {
    753       if (((reference_updates & VP8_GOLD_FRAME) ||
    754           (reference_updates & VP8_ALTR_FRAME)) && !corrupted) {
    755         decode_complete_callback_->ReceivedDecodedReferenceFrame(picture_id);
    756       }
    757       decode_complete_callback_->ReceivedDecodedFrame(picture_id);
    758     }
    759     if (corrupted) {
      // We can decode, but the frame may contain artifacts.
    761       return WEBRTC_VIDEO_CODEC_REQUEST_SLI;
    762     }
    763   }
  // Check against the error propagation threshold.
    765   if (propagation_cnt_ > kVp8ErrorPropagationTh) {
    766     // Reset to avoid requesting key frames too often.
    767     propagation_cnt_ = 0;
    768     return WEBRTC_VIDEO_CODEC_ERROR;
    769   }
    770   return WEBRTC_VIDEO_CODEC_OK;
    771 }
    772 
    773 int VP8DecoderImpl::DecodePartitions(
    774     const EncodedImage& input_image,
    775     const RTPFragmentationHeader* fragmentation) {
    776   for (int i = 0; i < fragmentation->fragmentationVectorSize; ++i) {
    777     const uint8_t* partition = input_image._buffer +
    778         fragmentation->fragmentationOffset[i];
    779     const uint32_t partition_length =
    780         fragmentation->fragmentationLength[i];
    781     if (vpx_codec_decode(decoder_,
    782                          partition,
    783                          partition_length,
    784                          0,
    785                          VPX_DL_REALTIME)) {
    786       return WEBRTC_VIDEO_CODEC_ERROR;
    787     }
    788   }
    789   // Signal end of frame data. If there was no frame data this will trigger
    790   // a full frame concealment.
    791   if (vpx_codec_decode(decoder_, NULL, 0, 0, VPX_DL_REALTIME))
    792     return WEBRTC_VIDEO_CODEC_ERROR;
    793   return WEBRTC_VIDEO_CODEC_OK;
    794 }
    795 
    796 int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
    797                                 uint32_t timestamp,
    798                                 int64_t ntp_time_ms) {
    799   if (img == NULL) {
    800     // Decoder OK and NULL image => No show frame
    801     return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
    802   }
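  // I420 chroma planes are subsampled by two vertically, so round the half
  // height up to also cover the last row of odd-height frames.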
    803   int half_height = (img->d_h + 1) / 2;
    804   int size_y = img->stride[VPX_PLANE_Y] * img->d_h;
    805   int size_u = img->stride[VPX_PLANE_U] * half_height;
    806   int size_v = img->stride[VPX_PLANE_V] * half_height;
  // TODO(mikhal): This does a copy - need to SwapBuffers.
    808   decoded_image_.CreateFrame(size_y, img->planes[VPX_PLANE_Y],
    809                              size_u, img->planes[VPX_PLANE_U],
    810                              size_v, img->planes[VPX_PLANE_V],
    811                              img->d_w, img->d_h,
    812                              img->stride[VPX_PLANE_Y],
    813                              img->stride[VPX_PLANE_U],
    814                              img->stride[VPX_PLANE_V]);
    815   decoded_image_.set_timestamp(timestamp);
    816   decoded_image_.set_ntp_time_ms(ntp_time_ms);
    817   int ret = decode_complete_callback_->Decoded(decoded_image_);
    818   if (ret != 0)
    819     return ret;
    820 
    821   // Remember image format for later
    822   image_format_ = img->fmt;
    823   return WEBRTC_VIDEO_CODEC_OK;
    824 }
    825 
    826 int VP8DecoderImpl::RegisterDecodeCompleteCallback(
    827     DecodedImageCallback* callback) {
    828   decode_complete_callback_ = callback;
    829   return WEBRTC_VIDEO_CODEC_OK;
    830 }
    831 
    832 int VP8DecoderImpl::Release() {
    833   if (last_keyframe_._buffer != NULL) {
    834     delete [] last_keyframe_._buffer;
    835     last_keyframe_._buffer = NULL;
    836   }
    837   if (decoder_ != NULL) {
    838     if (vpx_codec_destroy(decoder_)) {
    839       return WEBRTC_VIDEO_CODEC_MEMORY;
    840     }
    841     delete decoder_;
    842     decoder_ = NULL;
    843   }
    844   if (ref_frame_ != NULL) {
    845     vpx_img_free(&ref_frame_->img);
    846     delete ref_frame_;
    847     ref_frame_ = NULL;
    848   }
    849   inited_ = false;
    850   return WEBRTC_VIDEO_CODEC_OK;
    851 }
    852 
    853 VideoDecoder* VP8DecoderImpl::Copy() {
    854   // Sanity checks.
    855   if (!inited_) {
    856     // Not initialized.
    857     assert(false);
    858     return NULL;
    859   }
    860   if (decoded_image_.IsZeroSize()) {
    861     // Nothing has been decoded before; cannot clone.
    862     return NULL;
    863   }
    864   if (last_keyframe_._buffer == NULL) {
    865     // Cannot clone if we have no key frame to start with.
    866     return NULL;
    867   }
    868   // Create a new VideoDecoder object
    869   VP8DecoderImpl *copy = new VP8DecoderImpl;
    870 
    871   // Initialize the new decoder
    872   if (copy->InitDecode(&codec_, 1) != WEBRTC_VIDEO_CODEC_OK) {
    873     delete copy;
    874     return NULL;
    875   }
    876   // Inject last key frame into new decoder.
    877   if (vpx_codec_decode(copy->decoder_, last_keyframe_._buffer,
    878                        last_keyframe_._length, NULL, VPX_DL_REALTIME)) {
    879     delete copy;
    880     return NULL;
    881   }
    882   // Allocate memory for reference image copy
    883   assert(decoded_image_.width() > 0);
    884   assert(decoded_image_.height() > 0);
    885   assert(image_format_ > VPX_IMG_FMT_NONE);
    886   // Check if frame format has changed.
    887   if (ref_frame_ &&
    888       (decoded_image_.width() != static_cast<int>(ref_frame_->img.d_w) ||
    889           decoded_image_.height() != static_cast<int>(ref_frame_->img.d_h) ||
    890           image_format_ != ref_frame_->img.fmt)) {
    891     vpx_img_free(&ref_frame_->img);
    892     delete ref_frame_;
    893     ref_frame_ = NULL;
    894   }
    895 
    896 
    897   if (!ref_frame_) {
    898     ref_frame_ = new vpx_ref_frame_t;
    899 
    900     unsigned int align = 16;
    901     if (!vpx_img_alloc(&ref_frame_->img,
    902                        static_cast<vpx_img_fmt_t>(image_format_),
    903                        decoded_image_.width(), decoded_image_.height(),
    904                        align)) {
    905       assert(false);
    906       delete copy;
    907       return NULL;
    908     }
    909   }
    910   const vpx_ref_frame_type_t type_vec[] = { VP8_LAST_FRAME, VP8_GOLD_FRAME,
    911       VP8_ALTR_FRAME };
    912   for (uint32_t ix = 0;
    913       ix < sizeof(type_vec) / sizeof(vpx_ref_frame_type_t); ++ix) {
    914     ref_frame_->frame_type = type_vec[ix];
    915     if (CopyReference(copy) < 0) {
    916       delete copy;
    917       return NULL;
    918     }
    919   }
    920   // Copy all member variables (that are not set in initialization).
    921   copy->feedback_mode_ = feedback_mode_;
    922   copy->image_format_ = image_format_;
    923   copy->last_keyframe_ = last_keyframe_;  // Shallow copy.
    924   // Allocate memory. (Discard copied _buffer pointer.)
    925   copy->last_keyframe_._buffer = new uint8_t[last_keyframe_._size];
    926   memcpy(copy->last_keyframe_._buffer, last_keyframe_._buffer,
    927          last_keyframe_._length);
    928 
    929   return static_cast<VideoDecoder*>(copy);
    930 }
    931 
    932 int VP8DecoderImpl::CopyReference(VP8Decoder* copyTo) {
    933   // The type of frame to copy should be set in ref_frame_->frame_type
    934   // before the call to this function.
    935   if (vpx_codec_control(decoder_, VP8_COPY_REFERENCE, ref_frame_)
    936       != VPX_CODEC_OK) {
    937     return -1;
    938   }
    939   if (vpx_codec_control(static_cast<VP8DecoderImpl*>(copyTo)->decoder_,
    940                         VP8_SET_REFERENCE, ref_frame_) != VPX_CODEC_OK) {
    941     return -1;
    942   }
    943   return 0;
    944 }
    945 
    946 }  // namespace webrtc
    947