// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/filters/ffmpeg_demuxer.h"

#include <algorithm>
#include <string>

#include "base/base64.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/sparse_histogram.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_byteorder.h"
#include "base/task_runner_util.h"
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/limits.h"
#include "media/base/media_log.h"
#include "media/base/video_decoder_config.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
#include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h"
#include "media/filters/webvtt_util.h"
#include "media/formats/webm/webm_crypto_helpers.h"

namespace media {

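// Returns the timeline offset for WebM/Matroska containers by parsing the
// "creation_time" metadata entry; returns a null base::Time for all other
// container types or when no usable entry is present.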
static base::Time ExtractTimelineOffset(AVFormatContext* format_context) {
  if (strstr(format_context->iformat->name, "webm") ||
      strstr(format_context->iformat->name, "matroska")) {
    const AVDictionaryEntry* entry =
        av_dict_get(format_context->metadata, "creation_time", NULL, 0);

    base::Time timeline_offset;
    if (entry != NULL && entry->value != NULL &&
        FFmpegUTCDateToTime(entry->value, &timeline_offset)) {
      return timeline_offset;
    }
  }

  return base::Time();
}

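// Converts an audio frame count at |sample_rate| into a media timestamp.
// Used below to translate FFmpeg skip-sample side data into discard padding.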
static base::TimeDelta FramesToTimeDelta(int frames, double sample_rate) {
  return base::TimeDelta::FromMicroseconds(
      frames * base::Time::kMicrosecondsPerSecond / sample_rate);
}

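// Returns the earliest known presentation timestamp for |stream|: the lowest
// of FFmpeg's |start_time|, the first buffered pts, and the packet-derived
// |start_time_estimate| computed during avformat_find_stream_info().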
     58 
     59 static base::TimeDelta ExtractStartTime(AVStream* stream,
     60                                         base::TimeDelta start_time_estimate) {
     61   DCHECK(start_time_estimate != kNoTimestamp());
     62   if (stream->start_time == static_cast<int64_t>(AV_NOPTS_VALUE)) {
     63     return start_time_estimate == kInfiniteDuration() ? kNoTimestamp()
     64                                                       : start_time_estimate;
     65   }
     66 
     67   // First try the lower of the estimate and the |start_time| value.
     68   base::TimeDelta start_time =
     69       std::min(ConvertFromTimeBase(stream->time_base, stream->start_time),
     70                start_time_estimate);
     71 
     72   // Next see if the first buffered pts value is usable.
     73   if (stream->pts_buffer[0] != static_cast<int64_t>(AV_NOPTS_VALUE)) {
     74     const base::TimeDelta buffered_pts =
     75         ConvertFromTimeBase(stream->time_base, stream->pts_buffer[0]);
     76     if (buffered_pts < start_time)
     77       start_time = buffered_pts;
     78   }
     79 
     80   // NOTE: Do not use AVStream->first_dts since |start_time| should be a
     81   // presentation timestamp.
     82   return start_time;
     83 }
     84 
     85 //
     86 // FFmpegDemuxerStream
     87 //
     88 FFmpegDemuxerStream::FFmpegDemuxerStream(FFmpegDemuxer* demuxer,
     89                                          AVStream* stream)
     90     : demuxer_(demuxer),
     91       task_runner_(base::MessageLoopProxy::current()),
     92       stream_(stream),
     93       type_(UNKNOWN),
     94       end_of_stream_(false),
     95       last_packet_timestamp_(kNoTimestamp()),
     96       last_packet_duration_(kNoTimestamp()),
     97       video_rotation_(VIDEO_ROTATION_0),
     98       bitstream_converter_enabled_(false),
     99       fixup_negative_ogg_timestamps_(false) {
    100   DCHECK(demuxer_);
    101 
    102   bool is_encrypted = false;
    103   int rotation = 0;
    104   AVDictionaryEntry* rotation_entry = NULL;
    105 
    106   // Determine our media format.
    107   switch (stream->codec->codec_type) {
    108     case AVMEDIA_TYPE_AUDIO:
    109       type_ = AUDIO;
    110       AVStreamToAudioDecoderConfig(stream, &audio_config_, true);
    111       is_encrypted = audio_config_.is_encrypted();
    112       break;
    113     case AVMEDIA_TYPE_VIDEO:
    114       type_ = VIDEO;
    115       AVStreamToVideoDecoderConfig(stream, &video_config_, true);
    116       is_encrypted = video_config_.is_encrypted();
    117 
    118       rotation_entry = av_dict_get(stream->metadata, "rotate", NULL, 0);
    119       if (rotation_entry && rotation_entry->value && rotation_entry->value[0])
    120         base::StringToInt(rotation_entry->value, &rotation);
    121 
    122       switch (rotation) {
    123         case 0:
    124           break;
    125         case 90:
    126           video_rotation_ = VIDEO_ROTATION_90;
    127           break;
    128         case 180:
    129           video_rotation_ = VIDEO_ROTATION_180;
    130           break;
    131         case 270:
    132           video_rotation_ = VIDEO_ROTATION_270;
    133           break;
    134         default:
    135           LOG(ERROR) << "Unsupported video rotation metadata: " << rotation;
    136           break;
    137       }
    138 
    139       break;
    140     case AVMEDIA_TYPE_SUBTITLE:
    141       type_ = TEXT;
    142       break;
    143     default:
    144       NOTREACHED();
    145       break;
    146   }
    147 
    148   // Calculate the duration.
    149   duration_ = ConvertStreamTimestamp(stream->time_base, stream->duration);
    150 
    151 #if defined(USE_PROPRIETARY_CODECS)
    152   if (stream_->codec->codec_id == AV_CODEC_ID_H264) {
    153     bitstream_converter_.reset(
    154         new FFmpegH264ToAnnexBBitstreamConverter(stream_->codec));
    155   }
    156 #endif
    157 
    158   if (is_encrypted) {
    159     AVDictionaryEntry* key = av_dict_get(stream->metadata, "enc_key_id", NULL,
    160                                          0);
    161     DCHECK(key);
    162     DCHECK(key->value);
    163     if (!key || !key->value)
    164       return;
    165     base::StringPiece base64_key_id(key->value);
    166     std::string enc_key_id;
    167     base::Base64Decode(base64_key_id, &enc_key_id);
    168     DCHECK(!enc_key_id.empty());
    169     if (enc_key_id.empty())
    170       return;
    171 
    172     encryption_key_id_.assign(enc_key_id);
    173     demuxer_->FireNeedKey(kWebMEncryptInitDataType, enc_key_id);
    174   }
    175 }
    176 
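// Accepts a demuxed packet from FFmpegDemuxer: applies bitstream conversion,
// splits out side data (alpha, skip samples, encryption), rebases timestamps
// against the demuxer start time, and queues the resulting DecoderBuffer.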
void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
  DCHECK(task_runner_->BelongsToCurrentThread());

  if (!demuxer_ || end_of_stream_) {
    NOTREACHED() << "Attempted to enqueue packet on a stopped stream";
    return;
  }

#if defined(USE_PROPRIETARY_CODECS)
  // Convert the packet if there is a bitstream filter.
  if (packet->data && bitstream_converter_enabled_ &&
      !bitstream_converter_->ConvertPacket(packet.get())) {
    LOG(ERROR) << "Format conversion failed.";
  }
#endif

  // Get side data if any. For now, the only type of side data is VP8 alpha;
  // keep this generic so that future side data types can be handled the same
  // way.
  av_packet_split_side_data(packet.get());

  scoped_refptr<DecoderBuffer> buffer;

  if (type() == DemuxerStream::TEXT) {
    int id_size = 0;
    uint8* id_data = av_packet_get_side_data(
        packet.get(),
        AV_PKT_DATA_WEBVTT_IDENTIFIER,
        &id_size);

    int settings_size = 0;
    uint8* settings_data = av_packet_get_side_data(
        packet.get(),
        AV_PKT_DATA_WEBVTT_SETTINGS,
        &settings_size);

    std::vector<uint8> side_data;
    MakeSideData(id_data, id_data + id_size,
                 settings_data, settings_data + settings_size,
                 &side_data);

    buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size,
                                     side_data.data(), side_data.size());
  } else {
    int side_data_size = 0;
    uint8* side_data = av_packet_get_side_data(
        packet.get(),
        AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
        &side_data_size);

    scoped_ptr<DecryptConfig> decrypt_config;
    int data_offset = 0;
    if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) ||
        (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) {
      if (!WebMCreateDecryptConfig(
          packet->data, packet->size,
          reinterpret_cast<const uint8*>(encryption_key_id_.data()),
          encryption_key_id_.size(),
          &decrypt_config,
          &data_offset)) {
        LOG(ERROR) << "Creation of DecryptConfig failed.";
      }
    }

    // If a packet is returned by FFmpeg's av_parser_parse2(), the packet will
    // reference FFmpeg's internal memory.  As such, we should transfer the
    // packet into memory we control.
    if (side_data_size > 0) {
      buffer = DecoderBuffer::CopyFrom(packet.get()->data + data_offset,
                                       packet.get()->size - data_offset,
                                       side_data, side_data_size);
    } else {
      buffer = DecoderBuffer::CopyFrom(packet.get()->data + data_offset,
                                       packet.get()->size - data_offset);
    }

    int skip_samples_size = 0;
    const uint32* skip_samples_ptr =
        reinterpret_cast<const uint32*>(av_packet_get_side_data(
            packet.get(), AV_PKT_DATA_SKIP_SAMPLES, &skip_samples_size));
    const int kSkipSamplesValidSize = 10;
    const int kSkipEndSamplesOffset = 1;
    if (skip_samples_size >= kSkipSamplesValidSize) {
      // Because FFmpeg rolls codec delay and skip samples into one we can only
      // allow front discard padding on the first buffer.  Otherwise the discard
      // helper can't figure out which data to discard.  See AudioDiscardHelper.
      int discard_front_samples = base::ByteSwapToLE32(*skip_samples_ptr);
      if (last_packet_timestamp_ != kNoTimestamp()) {
        DLOG(ERROR) << "Skip samples are only allowed for the first packet.";
        discard_front_samples = 0;
      }

      const int discard_end_samples =
          base::ByteSwapToLE32(*(skip_samples_ptr + kSkipEndSamplesOffset));
      const int samples_per_second =
          audio_decoder_config().samples_per_second();
      buffer->set_discard_padding(std::make_pair(
          FramesToTimeDelta(discard_front_samples, samples_per_second),
          FramesToTimeDelta(discard_end_samples, samples_per_second)));
    }

    if (decrypt_config)
      buffer->set_decrypt_config(decrypt_config.Pass());
  }

  if (packet->duration >= 0) {
    buffer->set_duration(
        ConvertStreamTimestamp(stream_->time_base, packet->duration));
  } else {
    // TODO(wolenetz): Remove when FFmpeg stops returning negative durations.
    // https://crbug.com/394418
    DVLOG(1) << "FFmpeg returned a buffer with a negative duration! "
             << packet->duration;
    buffer->set_duration(kNoTimestamp());
  }

  // Note: If pts is AV_NOPTS_VALUE, stream_timestamp will be kNoTimestamp().
  const base::TimeDelta stream_timestamp =
      ConvertStreamTimestamp(stream_->time_base, packet->pts);

  if (stream_timestamp != kNoTimestamp()) {
    const bool is_audio = type() == AUDIO;

    // If this is an Ogg file with negative timestamps, don't rebase any other
    // stream types against the negative starting time.
    base::TimeDelta start_time = demuxer_->start_time();
    if (fixup_negative_ogg_timestamps_ && !is_audio &&
        start_time < base::TimeDelta()) {
      start_time = base::TimeDelta();
    }

    // Don't rebase timestamps for positive start times; the HTML Media Spec
    // details this in section "4.8.10.6 Offsets into the media resource." We
    // will still need to rebase timestamps before seeking with FFmpeg, though.
    if (start_time > base::TimeDelta())
      start_time = base::TimeDelta();

    buffer->set_timestamp(stream_timestamp - start_time);

    // If enabled, mark audio packets with negative timestamps for post-decode
    // discard.
    if (fixup_negative_ogg_timestamps_ && is_audio &&
        stream_timestamp < base::TimeDelta() &&
        buffer->duration() != kNoTimestamp()) {
      if (stream_timestamp + buffer->duration() < base::TimeDelta()) {
        // Discard the entire packet if it's entirely before zero.
        buffer->set_discard_padding(
            std::make_pair(kInfiniteDuration(), base::TimeDelta()));
      } else {
        // Only discard part of the frame if it overlaps zero.
        buffer->set_discard_padding(
            std::make_pair(-stream_timestamp, base::TimeDelta()));
      }
    }
  } else {
    // If this happens on the first packet, decoders will throw an error.
    buffer->set_timestamp(kNoTimestamp());
  }

  if (last_packet_timestamp_ != kNoTimestamp()) {
    // FFmpeg doesn't support chained ogg correctly.  Instead of guaranteeing
    // continuity across links in the chain it uses the timestamp information
    // from each link directly.  Doing so can lead to timestamps which appear to
    // go backwards in time.
    //
    // If the new link starts with a negative timestamp or a timestamp less than
    // the original (positive) |start_time|, we will get a negative timestamp
    // here.  It's also possible FFmpeg returns kNoTimestamp() here if it's not
    // able to work out a timestamp using the previous link and the next.
    //
    // Fixing chained ogg is non-trivial, so for now just reuse the last good
    // timestamp.  The decoder will rewrite the timestamps to be sample accurate
    // later.  See http://crbug.com/396864.
    if (fixup_negative_ogg_timestamps_ &&
        (buffer->timestamp() == kNoTimestamp() ||
         buffer->timestamp() < last_packet_timestamp_)) {
      buffer->set_timestamp(last_packet_timestamp_ +
                            (last_packet_duration_ != kNoTimestamp()
                                 ? last_packet_duration_
                                 : base::TimeDelta::FromMicroseconds(1)));
    }

    // The demuxer should always output positive timestamps.
    DCHECK(buffer->timestamp() >= base::TimeDelta());
    DCHECK(buffer->timestamp() != kNoTimestamp());

    if (last_packet_timestamp_ < buffer->timestamp()) {
      buffered_ranges_.Add(last_packet_timestamp_, buffer->timestamp());
      demuxer_->NotifyBufferingChanged();
    }
  }

  last_packet_timestamp_ = buffer->timestamp();
  last_packet_duration_ = buffer->duration();

  buffer_queue_.Push(buffer);
  SatisfyPendingRead();
}

void FFmpegDemuxerStream::SetEndOfStream() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  end_of_stream_ = true;
  SatisfyPendingRead();
}

void FFmpegDemuxerStream::FlushBuffers() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(read_cb_.is_null()) << "There should be no pending read";
  buffer_queue_.Clear();
  end_of_stream_ = false;
  last_packet_timestamp_ = kNoTimestamp();
  last_packet_duration_ = kNoTimestamp();
}

void FFmpegDemuxerStream::Stop() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  buffer_queue_.Clear();
  if (!read_cb_.is_null()) {
    base::ResetAndReturn(&read_cb_).Run(
        DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer());
  }
  demuxer_ = NULL;
  stream_ = NULL;
  end_of_stream_ = true;
}

DemuxerStream::Type FFmpegDemuxerStream::type() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  return type_;
}

void FFmpegDemuxerStream::Read(const ReadCB& read_cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  CHECK(read_cb_.is_null()) << "Overlapping reads are not supported";
  read_cb_ = BindToCurrentLoop(read_cb);

  // Don't accept any additional reads if we've been told to stop.
  // The |demuxer_| may have been destroyed in the pipeline thread.
  //
  // TODO(scherkus): it would be cleaner to reply with an error message.
  if (!demuxer_) {
    base::ResetAndReturn(&read_cb_).Run(
        DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer());
    return;
  }

  SatisfyPendingRead();
}

void FFmpegDemuxerStream::EnableBitstreamConverter() {
  DCHECK(task_runner_->BelongsToCurrentThread());

#if defined(USE_PROPRIETARY_CODECS)
  CHECK(bitstream_converter_.get());
  bitstream_converter_enabled_ = true;
#else
  NOTREACHED() << "Proprietary codecs not enabled.";
#endif
}

bool FFmpegDemuxerStream::SupportsConfigChanges() { return false; }

AudioDecoderConfig FFmpegDemuxerStream::audio_decoder_config() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  CHECK_EQ(type_, AUDIO);
  return audio_config_;
}

VideoDecoderConfig FFmpegDemuxerStream::video_decoder_config() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  CHECK_EQ(type_, VIDEO);
  return video_config_;
}

VideoRotation FFmpegDemuxerStream::video_rotation() {
  return video_rotation_;
}

FFmpegDemuxerStream::~FFmpegDemuxerStream() {
  DCHECK(!demuxer_);
  DCHECK(read_cb_.is_null());
  DCHECK(buffer_queue_.IsEmpty());
}

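// Returns the stream's current decode timestamp (|cur_dts|) as a TimeDelta.
// FFmpegDemuxer uses this to estimate the duration when the container does
// not declare one.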
base::TimeDelta FFmpegDemuxerStream::GetElapsedTime() const {
  return ConvertStreamTimestamp(stream_->time_base, stream_->cur_dts);
}

Ranges<base::TimeDelta> FFmpegDemuxerStream::GetBufferedRanges() const {
  return buffered_ranges_;
}

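// Completes a pending Read() with the next queued buffer, or with an
// end-of-stream buffer once |end_of_stream_| is set; then asks the demuxer
// for more data if this stream still has capacity.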
void FFmpegDemuxerStream::SatisfyPendingRead() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  if (!read_cb_.is_null()) {
    if (!buffer_queue_.IsEmpty()) {
      base::ResetAndReturn(&read_cb_).Run(
          DemuxerStream::kOk, buffer_queue_.Pop());
    } else if (end_of_stream_) {
      base::ResetAndReturn(&read_cb_).Run(
          DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer());
    }
  }

  // Have capacity? Ask for more!
  if (HasAvailableCapacity() && !end_of_stream_) {
    demuxer_->NotifyCapacityAvailable();
  }
}

bool FFmpegDemuxerStream::HasAvailableCapacity() {
  // TODO(scherkus): Remove this return and reenable time-based capacity
  // after our data sources support canceling/concurrent reads, see
  // http://crbug.com/165762 for details.
#if 1
  return !read_cb_.is_null();
#else
  // Try to have one second's worth of encoded data per stream.
  const base::TimeDelta kCapacity = base::TimeDelta::FromSeconds(1);
  return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity;
#endif
}

size_t FFmpegDemuxerStream::MemoryUsage() const {
  return buffer_queue_.data_size();
}

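// Maps the stream's FFmpeg disposition flags onto a TextKind, defaulting to
// subtitles when no captions/descriptions/metadata disposition is set.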
TextKind FFmpegDemuxerStream::GetTextKind() const {
  DCHECK_EQ(type_, DemuxerStream::TEXT);

  if (stream_->disposition & AV_DISPOSITION_CAPTIONS)
    return kTextCaptions;

  if (stream_->disposition & AV_DISPOSITION_DESCRIPTIONS)
    return kTextDescriptions;

  if (stream_->disposition & AV_DISPOSITION_METADATA)
    return kTextMetadata;

  return kTextSubtitles;
}

std::string FFmpegDemuxerStream::GetMetadata(const char* key) const {
  const AVDictionaryEntry* entry =
      av_dict_get(stream_->metadata, key, NULL, 0);
  return (entry == NULL || entry->value == NULL) ? "" : entry->value;
}

// static
base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp(
    const AVRational& time_base, int64 timestamp) {
  if (timestamp == static_cast<int64>(AV_NOPTS_VALUE))
    return kNoTimestamp();

  return ConvertFromTimeBase(time_base, timestamp);
}

//
// FFmpegDemuxer
//
FFmpegDemuxer::FFmpegDemuxer(
    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
    DataSource* data_source,
    const NeedKeyCB& need_key_cb,
    const scoped_refptr<MediaLog>& media_log)
    : host_(NULL),
      task_runner_(task_runner),
      blocking_thread_("FFmpegDemuxer"),
      pending_read_(false),
      pending_seek_(false),
      data_source_(data_source),
      media_log_(media_log),
      bitrate_(0),
      start_time_(kNoTimestamp()),
      preferred_stream_for_seeking_(-1, kNoTimestamp()),
      fallback_stream_for_seeking_(-1, kNoTimestamp()),
      liveness_(LIVENESS_UNKNOWN),
      text_enabled_(false),
      duration_known_(false),
      need_key_cb_(need_key_cb),
      weak_factory_(this) {
  DCHECK(task_runner_.get());
  DCHECK(data_source_);
}

FFmpegDemuxer::~FFmpegDemuxer() {}

void FFmpegDemuxer::Stop() {
  DCHECK(task_runner_->BelongsToCurrentThread());

  // The order of Stop() and Abort() is important here.  If Abort() is called
  // first, control may pass into FFmpeg where it can destruct buffers that are
  // in the process of being fulfilled by the DataSource.
  data_source_->Stop();
  url_protocol_->Abort();

  // This will block until all tasks complete. Note that after this returns it's
  // possible for reply tasks (e.g., OnReadFrameDone()) to be queued on this
  // thread. Each of the reply task methods must check whether we've stopped the
  // thread and drop their results on the floor.
  blocking_thread_.Stop();

  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter)
      (*iter)->Stop();
  }

  data_source_ = NULL;
}

void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  CHECK(!pending_seek_);

  // TODO(scherkus): Inspect |pending_read_| and cancel IO via |blocking_url_|,
  // otherwise we can end up waiting for a pre-seek read to complete even though
  // we know we're going to drop it on the floor.

  // FFmpeg requires seeks to be adjusted according to the lowest starting time.
  // Since EnqueuePacket() rebased negative timestamps by the start time, we
  // must correct the shift here.
  //
  // Additionally, to work around limitations in how we expose seekable ranges
  // to Blink (http://crbug.com/137275), we also want to clamp seeks before the
  // start time to the start time.
  const base::TimeDelta seek_time =
      start_time_ < base::TimeDelta() ? time + start_time_
                                      : time < start_time_ ? start_time_ : time;

  // Choose the seeking stream based on whether it contains the seek time; if
  // no match can be found, prefer the preferred stream.
  //
  // TODO(dalecurtis): Currently FFmpeg does not ensure that all streams in a
  // given container will demux all packets after the seek point.  Instead it
  // only guarantees that all packets after the file position of the seek will
  // be demuxed.  It's an open question whether FFmpeg should fix this:
  // http://lists.ffmpeg.org/pipermail/ffmpeg-devel/2014-June/159212.html
  // Tracked by http://crbug.com/387996.
  DCHECK(preferred_stream_for_seeking_.second != kNoTimestamp());
  const int stream_index =
      seek_time < preferred_stream_for_seeking_.second &&
              seek_time >= fallback_stream_for_seeking_.second
          ? fallback_stream_for_seeking_.first
          : preferred_stream_for_seeking_.first;
  DCHECK_NE(stream_index, -1);

  const AVStream* seeking_stream =
      glue_->format_context()->streams[stream_index];

  pending_seek_ = true;
  base::PostTaskAndReplyWithResult(
      blocking_thread_.message_loop_proxy().get(),
      FROM_HERE,
      base::Bind(&av_seek_frame,
                 glue_->format_context(),
                 seeking_stream->index,
                 ConvertToTimeBase(seeking_stream->time_base, seek_time),
                 // Always seek to a timestamp <= to the desired timestamp.
                 AVSEEK_FLAG_BACKWARD),
      base::Bind(
          &FFmpegDemuxer::OnSeekFrameDone, weak_factory_.GetWeakPtr(), cb));
}

void FFmpegDemuxer::Initialize(DemuxerHost* host,
                               const PipelineStatusCB& status_cb,
                               bool enable_text_tracks) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  host_ = host;
  text_enabled_ = enable_text_tracks;

  url_protocol_.reset(new BlockingUrlProtocol(data_source_, BindToCurrentLoop(
      base::Bind(&FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))));
  glue_.reset(new FFmpegGlue(url_protocol_.get()));
  AVFormatContext* format_context = glue_->format_context();

  // Disable ID3v1 tag reading to avoid costly seeks to end of file for data we
  // don't use.  FFmpeg will only read ID3v1 tags if no other metadata is
  // available, so add a metadata entry to ensure some is always present.
  av_dict_set(&format_context->metadata, "skip_id3v1_tags", "", 0);

  // Open the AVFormatContext using our glue layer.
  CHECK(blocking_thread_.Start());
  base::PostTaskAndReplyWithResult(
      blocking_thread_.message_loop_proxy().get(),
      FROM_HERE,
      base::Bind(&FFmpegGlue::OpenContext, base::Unretained(glue_.get())),
      base::Bind(&FFmpegDemuxer::OnOpenContextDone,
                 weak_factory_.GetWeakPtr(),
                 status_cb));
}

base::Time FFmpegDemuxer::GetTimelineOffset() const {
  return timeline_offset_;
}

DemuxerStream* FFmpegDemuxer::GetStream(DemuxerStream::Type type) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  return GetFFmpegStream(type);
}

FFmpegDemuxerStream* FFmpegDemuxer::GetFFmpegStream(
    DemuxerStream::Type type) const {
  StreamVector::const_iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter && (*iter)->type() == type) {
      return *iter;
    }
  }
  return NULL;
}

base::TimeDelta FFmpegDemuxer::GetStartTime() const {
  return std::max(start_time_, base::TimeDelta());
}

Demuxer::Liveness FFmpegDemuxer::GetLiveness() const {
  DCHECK(task_runner_->BelongsToCurrentThread());
  return liveness_;
}

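// Registers every demuxed TEXT stream with the host, forwarding the kind,
// title, and language gleaned from the stream metadata.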
void FFmpegDemuxer::AddTextStreams() {
  DCHECK(task_runner_->BelongsToCurrentThread());

  for (StreamVector::size_type idx = 0; idx < streams_.size(); ++idx) {
    FFmpegDemuxerStream* stream = streams_[idx];
    if (stream == NULL || stream->type() != DemuxerStream::TEXT)
      continue;

    TextKind kind = stream->GetTextKind();
    std::string title = stream->GetMetadata("title");
    std::string language = stream->GetMetadata("language");

    // TODO: Implement "id" metadata in FFmpeg.
    // See: http://crbug.com/323183
    host_->AddTextStream(stream, TextTrackConfig(kind, title, language,
        std::string()));
  }
}

// Helper for calculating the bitrate of the media based on information stored
// in |format_context| or, failing that, the size and duration of the media.
//
// Returns 0 if a bitrate could not be determined.
static int CalculateBitrate(
    AVFormatContext* format_context,
    const base::TimeDelta& duration,
    int64 filesize_in_bytes) {
  // If there is a bitrate set on the container, use it.
  if (format_context->bit_rate > 0)
    return format_context->bit_rate;

  // Then try to sum the bitrates individually per stream.
  int bitrate = 0;
  for (size_t i = 0; i < format_context->nb_streams; ++i) {
    AVCodecContext* codec_context = format_context->streams[i]->codec;
    bitrate += codec_context->bit_rate;
  }
  if (bitrate > 0)
    return bitrate;

  // See if we can approximate the bitrate as long as we have a filesize and
  // valid duration.
  if (duration.InMicroseconds() <= 0 ||
      duration == kInfiniteDuration() ||
      filesize_in_bytes == 0) {
    return 0;
  }

  // Do math in floating point as we'd overflow an int64 if the filesize was
  // larger than ~1073GB.
  double bytes = filesize_in_bytes;
  double duration_us = duration.InMicroseconds();
  return bytes * 8000000.0 / duration_us;
}

void FFmpegDemuxer::OnOpenContextDone(const PipelineStatusCB& status_cb,
                                      bool result) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  if (!blocking_thread_.IsRunning()) {
    status_cb.Run(PIPELINE_ERROR_ABORT);
    return;
  }

  if (!result) {
    status_cb.Run(DEMUXER_ERROR_COULD_NOT_OPEN);
    return;
  }

  // Fully initialize AVFormatContext by parsing the stream a little.
  base::PostTaskAndReplyWithResult(
      blocking_thread_.message_loop_proxy().get(),
      FROM_HERE,
      base::Bind(&avformat_find_stream_info,
                 glue_->format_context(),
                 static_cast<AVDictionary**>(NULL)),
      base::Bind(&FFmpegDemuxer::OnFindStreamInfoDone,
                 weak_factory_.GetWeakPtr(),
                 status_cb));
}

void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
                                         int result) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  if (!blocking_thread_.IsRunning() || !data_source_) {
    status_cb.Run(PIPELINE_ERROR_ABORT);
    return;
  }

  if (result < 0) {
    status_cb.Run(DEMUXER_ERROR_COULD_NOT_PARSE);
    return;
  }

  // Create demuxer stream entries for each possible AVStream. Each stream
  // is examined to determine if it is supported or not (is the codec enabled
  // for it in this release?). Unsupported streams are skipped, allowing for
  // partial playback. At least one audio or video stream must be playable.
  AVFormatContext* format_context = glue_->format_context();
  streams_.resize(format_context->nb_streams);

  // Estimate the start time for each stream by looking through the packets
  // buffered during avformat_find_stream_info().  These values will be
  // considered later when determining the actual stream start time.
  //
  // These packets haven't been completely processed yet, so only look through
  // these values if the AVFormatContext has a valid start time.
  //
  // If no estimate is found, the stream entry will be kInfiniteDuration().
  std::vector<base::TimeDelta> start_time_estimates(format_context->nb_streams,
                                                    kInfiniteDuration());
  if (format_context->packet_buffer &&
      format_context->start_time != static_cast<int64>(AV_NOPTS_VALUE)) {
    struct AVPacketList* packet_buffer = format_context->packet_buffer;
    while (packet_buffer != format_context->packet_buffer_end) {
      DCHECK_LT(static_cast<size_t>(packet_buffer->pkt.stream_index),
                start_time_estimates.size());
      const AVStream* stream =
          format_context->streams[packet_buffer->pkt.stream_index];
      if (packet_buffer->pkt.pts != static_cast<int64>(AV_NOPTS_VALUE)) {
        const base::TimeDelta packet_pts =
            ConvertFromTimeBase(stream->time_base, packet_buffer->pkt.pts);
        if (packet_pts < start_time_estimates[stream->index])
          start_time_estimates[stream->index] = packet_pts;
      }
      packet_buffer = packet_buffer->next;
    }
  }

  AVStream* audio_stream = NULL;
  AudioDecoderConfig audio_config;

  AVStream* video_stream = NULL;
  VideoDecoderConfig video_config;

  // If available, |start_time_| will be set to the lowest stream start time.
  start_time_ = kInfiniteDuration();

  base::TimeDelta max_duration;
  for (size_t i = 0; i < format_context->nb_streams; ++i) {
    AVStream* stream = format_context->streams[i];
    const AVCodecContext* codec_context = stream->codec;
    const AVMediaType codec_type = codec_context->codec_type;

    if (codec_type == AVMEDIA_TYPE_AUDIO) {
      if (audio_stream)
        continue;

      // Log the codec detected, whether it is supported or not.
      UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodec",
                                  codec_context->codec_id);
      // Ensure the codec is supported. IsValidConfig() also checks that the
      // channel layout and sample format are valid.
      AVStreamToAudioDecoderConfig(stream, &audio_config, false);
      if (!audio_config.IsValidConfig())
        continue;
      audio_stream = stream;
    } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
      if (video_stream)
        continue;

      // Log the codec detected, whether it is supported or not.
      UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec",
                                  codec_context->codec_id);
      // Ensure the codec is supported. IsValidConfig() also checks that the
      // frame size and visible size are valid.
      AVStreamToVideoDecoderConfig(stream, &video_config, false);

      if (!video_config.IsValidConfig())
        continue;
      video_stream = stream;
    } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
      if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) {
        continue;
      }
    } else {
      continue;
    }

    streams_[i] = new FFmpegDemuxerStream(this, stream);
    max_duration = std::max(max_duration, streams_[i]->duration());

    const base::TimeDelta start_time =
        ExtractStartTime(stream, start_time_estimates[i]);
    const bool has_start_time = start_time != kNoTimestamp();

    // Always prefer the video stream for seeking.  If none exists, we'll swap
    // the fallback stream with the preferred stream below.
    if (codec_type == AVMEDIA_TYPE_VIDEO) {
      preferred_stream_for_seeking_ =
          StreamSeekInfo(i, has_start_time ? start_time : base::TimeDelta());
    }

    if (!has_start_time)
      continue;

    if (start_time < start_time_) {
      start_time_ = start_time;

      // Choose the stream with the lowest starting time as the fallback stream
      // for seeking.  Video should always be preferred.
      fallback_stream_for_seeking_ = StreamSeekInfo(i, start_time);
    }
  }

  if (!audio_stream && !video_stream) {
    status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
    return;
  }

  if (text_enabled_)
    AddTextStreams();

  if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) {
    // If there is a duration value in the container use that to find the
    // maximum between it and the duration from A/V streams.
    const AVRational av_time_base = {1, AV_TIME_BASE};
    max_duration =
        std::max(max_duration,
                 ConvertFromTimeBase(av_time_base, format_context->duration));
  } else {
    // The duration is unknown, in which case this is likely a live stream.
    max_duration = kInfiniteDuration();
  }

  // Ogg has some peculiarities around negative timestamps, so use this flag to
  // set up the FFmpegDemuxerStreams appropriately.
  //
  // Post-decode frame dropping for packets with negative timestamps is outlined
  // in section A.2 in the Ogg Vorbis spec:
  // http://xiph.org/vorbis/doc/Vorbis_I_spec.html
  if (strcmp(format_context->iformat->name, "ogg") == 0 && audio_stream &&
      audio_stream->codec->codec_id == AV_CODEC_ID_VORBIS) {
    for (size_t i = 0; i < streams_.size(); ++i) {
      if (streams_[i])
        streams_[i]->enable_negative_timestamp_fixups_for_ogg();
    }

    // Fixup the seeking information to avoid selecting the audio stream simply
    // because it has a lower starting time.
    if (fallback_stream_for_seeking_.first == audio_stream->index &&
        fallback_stream_for_seeking_.second < base::TimeDelta()) {
      fallback_stream_for_seeking_.second = base::TimeDelta();
    }
  }

  // If no start time could be determined, default to zero and prefer the video
  // stream over the audio stream for seeking; e.g., the WAV demuxer does not
  // put timestamps on its frames.
  if (start_time_ == kInfiniteDuration()) {
    start_time_ = base::TimeDelta();
    preferred_stream_for_seeking_ = StreamSeekInfo(
        video_stream ? video_stream->index : audio_stream->index, start_time_);
  } else if (!video_stream) {
    // If no video stream exists, use the audio or text stream found above.
    preferred_stream_for_seeking_ = fallback_stream_for_seeking_;
  }

  // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS
  // generation so we always get timestamps; see http://crbug.com/169570.
  if (strcmp(format_context->iformat->name, "avi") == 0)
    format_context->flags |= AVFMT_FLAG_GENPTS;

  // For testing purposes, don't overwrite the timeline offset if set already.
  if (timeline_offset_.is_null())
    timeline_offset_ = ExtractTimelineOffset(format_context);

  // Since we're shifting the externally visible start time to zero, we need to
  // adjust the timeline offset to compensate.
  if (!timeline_offset_.is_null() && start_time_ < base::TimeDelta())
    timeline_offset_ += start_time_;

  if (max_duration == kInfiniteDuration() && !timeline_offset_.is_null()) {
    liveness_ = LIVENESS_LIVE;
  } else if (max_duration != kInfiniteDuration()) {
    liveness_ = LIVENESS_RECORDED;
  } else {
    liveness_ = LIVENESS_UNKNOWN;
  }

  // Good to go: set the duration and bitrate and notify we're done
  // initializing.
  host_->SetDuration(max_duration);
  duration_known_ = (max_duration != kInfiniteDuration());

  int64 filesize_in_bytes = 0;
  url_protocol_->GetSize(&filesize_in_bytes);
  bitrate_ = CalculateBitrate(format_context, max_duration, filesize_in_bytes);
  if (bitrate_ > 0)
    data_source_->SetBitrate(bitrate_);

  // Audio logging
  if (audio_stream) {
    AVCodecContext* audio_codec = audio_stream->codec;
    media_log_->SetBooleanProperty("found_audio_stream", true);

    SampleFormat sample_format = audio_config.sample_format();
    std::string sample_name = SampleFormatToString(sample_format);

    media_log_->SetStringProperty("audio_sample_format", sample_name);

    AVCodec* codec = avcodec_find_decoder(audio_codec->codec_id);
    if (codec) {
      media_log_->SetStringProperty("audio_codec_name", codec->name);
    }

    media_log_->SetIntegerProperty("audio_channels_count",
                                   audio_codec->channels);
    media_log_->SetIntegerProperty("audio_samples_per_second",
                                   audio_config.samples_per_second());
  } else {
    media_log_->SetBooleanProperty("found_audio_stream", false);
  }

  // Video logging
  if (video_stream) {
    AVCodecContext* video_codec = video_stream->codec;
    media_log_->SetBooleanProperty("found_video_stream", true);

    AVCodec* codec = avcodec_find_decoder(video_codec->codec_id);
    if (codec) {
      media_log_->SetStringProperty("video_codec_name", codec->name);
    }

    media_log_->SetIntegerProperty("width", video_codec->width);
    media_log_->SetIntegerProperty("height", video_codec->height);
    media_log_->SetIntegerProperty("coded_width",
                                   video_codec->coded_width);
    media_log_->SetIntegerProperty("coded_height",
                                   video_codec->coded_height);
    media_log_->SetStringProperty(
        "time_base",
        base::StringPrintf("%d/%d",
                           video_codec->time_base.num,
                           video_codec->time_base.den));
    media_log_->SetStringProperty(
        "video_format", VideoFrame::FormatToString(video_config.format()));
    media_log_->SetBooleanProperty("video_is_encrypted",
                                   video_config.is_encrypted());
  } else {
    media_log_->SetBooleanProperty("found_video_stream", false);
  }

  media_log_->SetTimeProperty("max_duration", max_duration);
  media_log_->SetTimeProperty("start_time", start_time_);
  media_log_->SetIntegerProperty("bitrate", bitrate_);

  status_cb.Run(PIPELINE_OK);
}

void FFmpegDemuxer::OnSeekFrameDone(const PipelineStatusCB& cb, int result) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  CHECK(pending_seek_);
  pending_seek_ = false;

  if (!blocking_thread_.IsRunning()) {
    cb.Run(PIPELINE_ERROR_ABORT);
    return;
  }

  if (result < 0) {
    // Use VLOG(1) instead of NOTIMPLEMENTED() to prevent the message from
    // being captured from stdout and contaminating test output.
    // TODO(scherkus): Implement this properly and signal error (BUG=23447).
    VLOG(1) << "Not implemented";
  }

  // Tell streams to flush buffers due to seeking.
  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter)
      (*iter)->FlushBuffers();
  }

  // Resume reading until capacity.
  ReadFrameIfNeeded();

  // Notify we're finished seeking.
  cb.Run(PIPELINE_OK);
}

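// Posts a single av_read_frame() call to the blocking thread, but only when
// at least one stream wants data and no read or seek is already in flight.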
void FFmpegDemuxer::ReadFrameIfNeeded() {
  DCHECK(task_runner_->BelongsToCurrentThread());

  // Make sure we have work to do before reading.
  if (!blocking_thread_.IsRunning() || !StreamsHaveAvailableCapacity() ||
      pending_read_ || pending_seek_) {
    return;
  }

  // Allocate and read an AVPacket from the media. Save |packet_ptr| since
  // evaluation order of packet.get() and base::Passed(&packet) is
  // undefined.
  ScopedAVPacket packet(new AVPacket());
  AVPacket* packet_ptr = packet.get();

  pending_read_ = true;
  base::PostTaskAndReplyWithResult(
      blocking_thread_.message_loop_proxy().get(),
      FROM_HERE,
      base::Bind(&av_read_frame, glue_->format_context(), packet_ptr),
      base::Bind(&FFmpegDemuxer::OnReadFrameDone,
                 weak_factory_.GetWeakPtr(),
                 base::Passed(&packet)));
}

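// Reply half of ReadFrameIfNeeded(): routes the demuxed packet to the right
// stream (fixing up empty packets and Ogg Opus pre-skip), or ends all streams
// when FFmpeg errors out or the memory cap is hit.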
void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(pending_read_);
  pending_read_ = false;

  if (!blocking_thread_.IsRunning() || pending_seek_) {
    return;
  }

  // Consider the stream ended if:
  // - the underlying FFmpeg call returned an error, or
  // - FFmpegDemuxer reached the maximum allowed memory usage.
  if (result < 0 || IsMaxMemoryUsageReached()) {
    // Update the duration based on the highest elapsed time across all streams
    // if it was previously unknown.
    if (!duration_known_) {
      base::TimeDelta max_duration;

      for (StreamVector::iterator iter = streams_.begin();
           iter != streams_.end();
           ++iter) {
        if (!*iter)
          continue;

        base::TimeDelta duration = (*iter)->GetElapsedTime();
        if (duration != kNoTimestamp() && duration > max_duration)
          max_duration = duration;
      }

      if (max_duration > base::TimeDelta()) {
        host_->SetDuration(max_duration);
        duration_known_ = true;
      }
    }
    // If we have reached the end of stream, tell the downstream filters about
    // the event.
    StreamHasEnded();
    return;
  }

  // Queue the packet with the appropriate stream.
  DCHECK_GE(packet->stream_index, 0);
  DCHECK_LT(packet->stream_index, static_cast<int>(streams_.size()));

  // Defend against ffmpeg giving us a bad stream index.
  if (packet->stream_index >= 0 &&
      packet->stream_index < static_cast<int>(streams_.size()) &&
      streams_[packet->stream_index]) {
    // TODO(scherkus): Fix demuxing upstream to never return packets w/o data
    // when av_read_frame() returns success code. See bug comment for ideas:
    //
    // https://code.google.com/p/chromium/issues/detail?id=169133#c10
    if (!packet->data) {
      ScopedAVPacket new_packet(new AVPacket());
      av_new_packet(new_packet.get(), 0);
      av_packet_copy_props(new_packet.get(), packet.get());
      packet.swap(new_packet);
    }

    // Special case for opus in ogg.  FFmpeg is pre-trimming the codec delay
    // from the packet timestamp.  Chrome expects to handle this itself inside
    // the decoder, so shift timestamps by the delay in this case.
    // TODO(dalecurtis): Try to get fixed upstream.  See http://crbug.com/328207
    if (strcmp(glue_->format_context()->iformat->name, "ogg") == 0) {
      const AVCodecContext* codec_context =
          glue_->format_context()->streams[packet->stream_index]->codec;
      if (codec_context->codec_id == AV_CODEC_ID_OPUS &&
          codec_context->delay > 0) {
        packet->pts += codec_context->delay;
      }
    }

    FFmpegDemuxerStream* demuxer_stream = streams_[packet->stream_index];
    demuxer_stream->EnqueuePacket(packet.Pass());
  }

  // Keep reading until we've reached capacity.
  ReadFrameIfNeeded();
}

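// Returns true if any stream can accept more packets; used as the gate for
// issuing further av_read_frame() calls.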
bool FFmpegDemuxer::StreamsHaveAvailableCapacity() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter && (*iter)->HasAvailableCapacity()) {
      return true;
    }
  }
  return false;
}

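// Sums MemoryUsage() across all streams and reports whether the combined
// queue size has crossed kDemuxerMemoryLimit.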
bool FFmpegDemuxer::IsMaxMemoryUsageReached() const {
  DCHECK(task_runner_->BelongsToCurrentThread());

  // Max allowed memory usage, all streams combined.
  const size_t kDemuxerMemoryLimit = 150 * 1024 * 1024;

  size_t memory_left = kDemuxerMemoryLimit;
  for (StreamVector::const_iterator iter = streams_.begin();
       iter != streams_.end(); ++iter) {
    if (!(*iter))
      continue;

    size_t stream_memory_usage = (*iter)->MemoryUsage();
    if (stream_memory_usage > memory_left)
      return true;
    memory_left -= stream_memory_usage;
  }
  return false;
}

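// Marks every stream as ended so outstanding and future reads complete with
// end-of-stream buffers.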
void FFmpegDemuxer::StreamHasEnded() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (!*iter)
      continue;
    (*iter)->SetEndOfStream();
  }
}

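// Forwards an encrypted stream's key ID to the embedder-supplied need-key
// callback as a raw byte vector.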
void FFmpegDemuxer::FireNeedKey(const std::string& init_data_type,
                                const std::string& encryption_key_id) {
  std::vector<uint8> key_id_local(encryption_key_id.begin(),
                                  encryption_key_id.end());
  need_key_cb_.Run(init_data_type, key_id_local);
}

void FFmpegDemuxer::NotifyCapacityAvailable() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  ReadFrameIfNeeded();
}

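// Recomputes the buffered range as the intersection of the audio and video
// ranges (or whichever exists) and reports it to the host.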
void FFmpegDemuxer::NotifyBufferingChanged() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  Ranges<base::TimeDelta> buffered;
  FFmpegDemuxerStream* audio = GetFFmpegStream(DemuxerStream::AUDIO);
  FFmpegDemuxerStream* video = GetFFmpegStream(DemuxerStream::VIDEO);
  if (audio && video) {
    buffered = audio->GetBufferedRanges().IntersectionWith(
        video->GetBufferedRanges());
  } else if (audio) {
    buffered = audio->GetBufferedRanges();
  } else if (video) {
    buffered = video->GetBufferedRanges();
  }
  for (size_t i = 0; i < buffered.size(); ++i)
    host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i));
}

void FFmpegDemuxer::OnDataSourceError() {
  host_->OnDemuxerError(PIPELINE_ERROR_READ);
}

}  // namespace media