// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/filters/frame_processor.h"

#include <cstdlib>

#include "base/stl_util.h"
#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"

namespace media {

// Helper class to capture per-track details needed by a frame processor. Some
// of this information may be duplicated in the short term in the associated
// ChunkDemuxerStream and SourceBufferStream for a track.
// This parallels each of a SourceBuffer's Track Buffers described in the MSE
// spec at http://www.w3.org/TR/media-source/#track-buffers.
class MseTrackBuffer {
 public:
  explicit MseTrackBuffer(ChunkDemuxerStream* stream);
  ~MseTrackBuffer();

  // Get/set |last_decode_timestamp_|.
  DecodeTimestamp last_decode_timestamp() const {
    return last_decode_timestamp_;
  }
  void set_last_decode_timestamp(DecodeTimestamp timestamp) {
    last_decode_timestamp_ = timestamp;
  }

  // Get/set |last_frame_duration_|.
  base::TimeDelta last_frame_duration() const {
    return last_frame_duration_;
  }
  void set_last_frame_duration(base::TimeDelta duration) {
    last_frame_duration_ = duration;
  }

  // Gets |highest_presentation_timestamp_|.
  base::TimeDelta highest_presentation_timestamp() const {
    return highest_presentation_timestamp_;
  }

  // Get/set |needs_random_access_point_|.
  bool needs_random_access_point() const {
    return needs_random_access_point_;
  }
  void set_needs_random_access_point(bool needs_random_access_point) {
    needs_random_access_point_ = needs_random_access_point;
  }

  // Gets a pointer to this track's ChunkDemuxerStream.
  ChunkDemuxerStream* stream() const { return stream_; }

  // Unsets |last_decode_timestamp_|, unsets |last_frame_duration_|,
  // unsets |highest_presentation_timestamp_|, and sets
  // |needs_random_access_point_| to true.
  void Reset();

  // If |highest_presentation_timestamp_| is unset or |timestamp| is greater
  // than |highest_presentation_timestamp_|, sets
  // |highest_presentation_timestamp_| to |timestamp|. Note that bidirectional
  // prediction between coded frames can cause |timestamp| to not be
  // monotonically increasing even though the decode timestamps are
  // monotonically increasing.
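  // For example (illustrative values), successive |timestamp| values of 0 ms,
  // 100 ms, 33 ms, and 66 ms leave |highest_presentation_timestamp_| at
  // 100 ms; the later, lower timestamps from reordered B-frames do not
  // decrease it.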
  void SetHighestPresentationTimestampIfIncreased(base::TimeDelta timestamp);

  // Adds |frame| to the end of |processed_frames_|.
  void EnqueueProcessedFrame(const scoped_refptr<StreamParserBuffer>& frame);

  // Appends |processed_frames_|, if not empty, to |stream_| and clears
  // |processed_frames_|. Returns false if append failed, true otherwise.
  // |processed_frames_| is cleared in both cases.
  bool FlushProcessedFrames();

 private:
  // The decode timestamp of the last coded frame appended in the current coded
  // frame group. Initially kNoDecodeTimestamp(), meaning "unset".
  DecodeTimestamp last_decode_timestamp_;

  // The coded frame duration of the last coded frame appended in the current
  // coded frame group. Initially kNoTimestamp(), meaning "unset".
  base::TimeDelta last_frame_duration_;

  // The highest presentation timestamp encountered in a coded frame appended
  // in the current coded frame group. Initially kNoTimestamp(), meaning
  // "unset".
  base::TimeDelta highest_presentation_timestamp_;

  // Keeps track of whether the track buffer is waiting for a random access
  // point coded frame. Initially set to true to indicate that a random access
  // point coded frame is needed before anything can be added to the track
  // buffer.
  bool needs_random_access_point_;

  // Pointer to the stream associated with this track. The stream is not owned
  // by |this|.
  ChunkDemuxerStream* const stream_;

  // Queue of processed frames that have not yet been appended to |stream_|.
  // EnqueueProcessedFrame() adds to this queue, and FlushProcessedFrames()
  // clears it.
  StreamParser::BufferQueue processed_frames_;

  DISALLOW_COPY_AND_ASSIGN(MseTrackBuffer);
};

MseTrackBuffer::MseTrackBuffer(ChunkDemuxerStream* stream)
    : last_decode_timestamp_(kNoDecodeTimestamp()),
      last_frame_duration_(kNoTimestamp()),
      highest_presentation_timestamp_(kNoTimestamp()),
      needs_random_access_point_(true),
      stream_(stream) {
  DCHECK(stream_);
}

MseTrackBuffer::~MseTrackBuffer() {
  DVLOG(2) << __FUNCTION__ << "()";
}

void MseTrackBuffer::Reset() {
  DVLOG(2) << __FUNCTION__ << "()";

  last_decode_timestamp_ = kNoDecodeTimestamp();
  last_frame_duration_ = kNoTimestamp();
  highest_presentation_timestamp_ = kNoTimestamp();
  needs_random_access_point_ = true;
}

void MseTrackBuffer::SetHighestPresentationTimestampIfIncreased(
    base::TimeDelta timestamp) {
  if (highest_presentation_timestamp_ == kNoTimestamp() ||
      timestamp > highest_presentation_timestamp_) {
    highest_presentation_timestamp_ = timestamp;
  }
}

void MseTrackBuffer::EnqueueProcessedFrame(
    const scoped_refptr<StreamParserBuffer>& frame) {
  processed_frames_.push_back(frame);
}

bool MseTrackBuffer::FlushProcessedFrames() {
  if (processed_frames_.empty())
    return true;

  bool result = stream_->Append(processed_frames_);
  processed_frames_.clear();
  DVLOG_IF(3, !result) << __FUNCTION__
                       << "(): Failure appending processed frames to stream";

  return result;
}

FrameProcessor::FrameProcessor(const UpdateDurationCB& update_duration_cb)
    : sequence_mode_(false),
      group_start_timestamp_(kNoTimestamp()),
      update_duration_cb_(update_duration_cb) {
  DVLOG(2) << __FUNCTION__ << "()";
  DCHECK(!update_duration_cb.is_null());
}

FrameProcessor::~FrameProcessor() {
  DVLOG(2) << __FUNCTION__ << "()";
  STLDeleteValues(&track_buffers_);
}

void FrameProcessor::SetSequenceMode(bool sequence_mode) {
  DVLOG(2) << __FUNCTION__ << "(" << sequence_mode << ")";

  // Per April 1, 2014 MSE spec editor's draft:
  // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/media-source.html#widl-SourceBuffer-mode
  // Step 7: If the new mode equals "sequence", then set the group start
  // timestamp to the group end timestamp.
  if (sequence_mode) {
    DCHECK(kNoTimestamp() != group_end_timestamp_);
    group_start_timestamp_ = group_end_timestamp_;
  }

  // Step 8: Update the attribute to new mode.
  sequence_mode_ = sequence_mode;
}

bool FrameProcessor::ProcessFrames(
    const StreamParser::BufferQueue& audio_buffers,
    const StreamParser::BufferQueue& video_buffers,
    const StreamParser::TextBufferQueueMap& text_map,
    base::TimeDelta append_window_start,
    base::TimeDelta append_window_end,
    bool* new_media_segment,
    base::TimeDelta* timestamp_offset) {
  StreamParser::BufferQueue frames;
  if (!MergeBufferQueues(audio_buffers, video_buffers, text_map, &frames)) {
    DVLOG(2) << "Parse error discovered while merging parser's buffers";
    return false;
  }

  DCHECK(!frames.empty());

  // Implements the coded frame processing algorithm's outer loop for step 1.
  // Note that ProcessFrame() implements an inner loop for a single frame that
  // handles "jump to the Loop Top step to restart processing of the current
  // coded frame" per April 1, 2014 MSE spec editor's draft:
  // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
  //     media-source.html#sourcebuffer-coded-frame-processing
  // 1. For each coded frame in the media segment run the following steps:
  for (StreamParser::BufferQueue::const_iterator frames_itr = frames.begin();
       frames_itr != frames.end(); ++frames_itr) {
    if (!ProcessFrame(*frames_itr, append_window_start, append_window_end,
                      timestamp_offset, new_media_segment)) {
      FlushProcessedFrames();
      return false;
    }
  }

  if (!FlushProcessedFrames())
    return false;

  // 2. - 4. Are handled by the WebMediaPlayer / Pipeline / Media Element.

  // Step 5:
  update_duration_cb_.Run(group_end_timestamp_);

  return true;
}

void FrameProcessor::SetGroupStartTimestampIfInSequenceMode(
    base::TimeDelta timestamp_offset) {
  DVLOG(2) << __FUNCTION__ << "(" << timestamp_offset.InSecondsF() << ")";
  DCHECK(kNoTimestamp() != timestamp_offset);
  if (sequence_mode_)
    group_start_timestamp_ = timestamp_offset;

  // Changes to timestampOffset should invalidate the preroll buffer.
  audio_preroll_buffer_ = NULL;
}

bool FrameProcessor::AddTrack(StreamParser::TrackId id,
                              ChunkDemuxerStream* stream) {
  DVLOG(2) << __FUNCTION__ << "(): id=" << id;

  MseTrackBuffer* existing_track = FindTrack(id);
  DCHECK(!existing_track);
  if (existing_track)
    return false;

  track_buffers_[id] = new MseTrackBuffer(stream);
  return true;
}

bool FrameProcessor::UpdateTrack(StreamParser::TrackId old_id,
                                 StreamParser::TrackId new_id) {
  DVLOG(2) << __FUNCTION__ << "() : old_id=" << old_id << ", new_id=" << new_id;

  if (old_id == new_id || !FindTrack(old_id) || FindTrack(new_id))
    return false;

  track_buffers_[new_id] = track_buffers_[old_id];
  CHECK_EQ(1u, track_buffers_.erase(old_id));
  return true;
}

void FrameProcessor::SetAllTrackBuffersNeedRandomAccessPoint() {
  for (TrackBufferMap::iterator itr = track_buffers_.begin();
       itr != track_buffers_.end();
       ++itr) {
    itr->second->set_needs_random_access_point(true);
  }
}

void FrameProcessor::Reset() {
  DVLOG(2) << __FUNCTION__ << "()";
  for (TrackBufferMap::iterator itr = track_buffers_.begin();
       itr != track_buffers_.end(); ++itr) {
    itr->second->Reset();
  }
}

void FrameProcessor::OnPossibleAudioConfigUpdate(
    const AudioDecoderConfig& config) {
  DCHECK(config.IsValidConfig());

  // Always clear the preroll buffer when a config update is received.
  audio_preroll_buffer_ = NULL;

  if (config.Matches(current_audio_config_))
    return;

  current_audio_config_ = config;
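  // For example (illustrative value), a 48 kHz audio config yields a
  // |sample_duration_| of 1/48000 seconds, roughly 20.8 microseconds.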
  sample_duration_ = base::TimeDelta::FromSecondsD(
      1.0 / current_audio_config_.samples_per_second());
}

MseTrackBuffer* FrameProcessor::FindTrack(StreamParser::TrackId id) {
  TrackBufferMap::iterator itr = track_buffers_.find(id);
  if (itr == track_buffers_.end())
    return NULL;

  return itr->second;
}

void FrameProcessor::NotifyNewMediaSegmentStarting(
    DecodeTimestamp segment_timestamp) {
  DVLOG(2) << __FUNCTION__ << "(" << segment_timestamp.InSecondsF() << ")";

  for (TrackBufferMap::iterator itr = track_buffers_.begin();
       itr != track_buffers_.end();
       ++itr) {
    itr->second->stream()->OnNewMediaSegment(segment_timestamp);
  }
}

bool FrameProcessor::FlushProcessedFrames() {
  DVLOG(2) << __FUNCTION__ << "()";

  bool result = true;
  for (TrackBufferMap::iterator itr = track_buffers_.begin();
       itr != track_buffers_.end();
       ++itr) {
    if (!itr->second->FlushProcessedFrames())
      result = false;
  }

  return result;
}

bool FrameProcessor::HandlePartialAppendWindowTrimming(
    base::TimeDelta append_window_start,
    base::TimeDelta append_window_end,
    const scoped_refptr<StreamParserBuffer>& buffer) {
  DCHECK(buffer->duration() > base::TimeDelta());
  DCHECK_EQ(DemuxerStream::AUDIO, buffer->type());
  DCHECK(buffer->IsKeyframe());

  const base::TimeDelta frame_end_timestamp =
      buffer->timestamp() + buffer->duration();

  // If the buffer is entirely before |append_window_start|, save it as preroll
  // for the first buffer which overlaps |append_window_start|.
  if (buffer->timestamp() < append_window_start &&
      frame_end_timestamp <= append_window_start) {
    audio_preroll_buffer_ = buffer;
    return false;
  }

  // If the buffer is entirely after |append_window_end| there's nothing to do.
  if (buffer->timestamp() >= append_window_end)
    return false;

  DCHECK(buffer->timestamp() >= append_window_start ||
         frame_end_timestamp > append_window_start);

  bool processed_buffer = false;

  // If we have a preroll buffer see if we can attach it to the first buffer
  // overlapping or after |append_window_start|.
  if (audio_preroll_buffer_.get()) {
    // We only want to use the preroll buffer if it directly precedes (less
    // than one sample apart) the current buffer.
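    // For example (illustrative values), with 44.1 kHz audio the sample
    // duration is roughly 22.7 microseconds, so only a preroll buffer ending
    // within that distance of |buffer|'s timestamp is attached; otherwise the
    // preroll buffer is dropped unused below.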
    const int64 delta = std::abs((audio_preroll_buffer_->timestamp() +
                                  audio_preroll_buffer_->duration() -
                                  buffer->timestamp()).InMicroseconds());
    if (delta < sample_duration_.InMicroseconds()) {
      DVLOG(1) << "Attaching audio preroll buffer ["
               << audio_preroll_buffer_->timestamp().InSecondsF() << ", "
               << (audio_preroll_buffer_->timestamp() +
                   audio_preroll_buffer_->duration()).InSecondsF() << ") to "
               << buffer->timestamp().InSecondsF();
      buffer->SetPrerollBuffer(audio_preroll_buffer_);
      processed_buffer = true;
    } else {
      // TODO(dalecurtis): Add a MEDIA_LOG() for when this is dropped unused.
    }
    audio_preroll_buffer_ = NULL;
  }

  // See if a partial discard can be done around |append_window_start|.
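  // Illustrative example (hypothetical values): a buffer spanning
  // [90 ms, 110 ms) with |append_window_start| at 100 ms gets its first 10 ms
  // marked for discard, its timestamp moved up to 100 ms, and its duration
  // reduced to 10 ms.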
  if (buffer->timestamp() < append_window_start) {
    DVLOG(1) << "Truncating buffer which overlaps append window start."
             << " presentation_timestamp " << buffer->timestamp().InSecondsF()
             << " frame_end_timestamp " << frame_end_timestamp.InSecondsF()
             << " append_window_start " << append_window_start.InSecondsF();

    // Mark the overlapping portion of the buffer for discard.
    buffer->set_discard_padding(std::make_pair(
        append_window_start - buffer->timestamp(), base::TimeDelta()));

    // Adjust the timestamp of this buffer forward to |append_window_start| and
    // decrease the duration to compensate.
    buffer->set_timestamp(append_window_start);
    buffer->SetDecodeTimestamp(
        DecodeTimestamp::FromPresentationTime(append_window_start));
    buffer->set_duration(frame_end_timestamp - append_window_start);
    processed_buffer = true;
  }

  // See if a partial discard can be done around |append_window_end|.
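  // Illustrative example (hypothetical values): a buffer spanning
  // [95 ms, 115 ms) with |append_window_end| at 105 ms gets its final 10 ms
  // marked for discard and its duration reduced to 10 ms; its timestamp is
  // left unchanged.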
  if (frame_end_timestamp > append_window_end) {
    DVLOG(1) << "Truncating buffer which overlaps append window end."
             << " presentation_timestamp " << buffer->timestamp().InSecondsF()
             << " frame_end_timestamp " << frame_end_timestamp.InSecondsF()
             << " append_window_end " << append_window_end.InSecondsF();

    // Mark the overlapping portion of the buffer for discard.
    buffer->set_discard_padding(
        std::make_pair(buffer->discard_padding().first,
                       frame_end_timestamp - append_window_end));

    // Decrease the duration of the buffer to remove the discarded portion.
    buffer->set_duration(append_window_end - buffer->timestamp());
    processed_buffer = true;
  }

  return processed_buffer;
}

bool FrameProcessor::ProcessFrame(
    const scoped_refptr<StreamParserBuffer>& frame,
    base::TimeDelta append_window_start,
    base::TimeDelta append_window_end,
    base::TimeDelta* timestamp_offset,
    bool* new_media_segment) {
  // Implements the loop within step 1 of the coded frame processing algorithm
  // for a single input frame per April 1, 2014 MSE spec editor's draft:
  // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
  //     media-source.html#sourcebuffer-coded-frame-processing

  while (true) {
    // 1. Loop Top: Let presentation timestamp be a double precision floating
    //    point representation of the coded frame's presentation timestamp in
    //    seconds.
    // 2. Let decode timestamp be a double precision floating point
    //    representation of the coded frame's decode timestamp in seconds.
    // 3. Let frame duration be a double precision floating point representation
    //    of the coded frame's duration in seconds.
    // We use base::TimeDelta and DecodeTimestamp instead of double.
    base::TimeDelta presentation_timestamp = frame->timestamp();
    DecodeTimestamp decode_timestamp = frame->GetDecodeTimestamp();
    base::TimeDelta frame_duration = frame->duration();

    DVLOG(3) << __FUNCTION__ << ": Processing frame "
             << "Type=" << frame->type()
             << ", TrackID=" << frame->track_id()
             << ", PTS=" << presentation_timestamp.InSecondsF()
             << ", DTS=" << decode_timestamp.InSecondsF()
             << ", DUR=" << frame_duration.InSecondsF()
             << ", RAP=" << frame->IsKeyframe();

    // Sanity check the timestamps.
    if (presentation_timestamp == kNoTimestamp()) {
      DVLOG(2) << __FUNCTION__ << ": Unknown frame PTS";
      return false;
    }
    if (decode_timestamp == kNoDecodeTimestamp()) {
      DVLOG(2) << __FUNCTION__ << ": Unknown frame DTS";
      return false;
    }
    if (decode_timestamp.ToPresentationTime() > presentation_timestamp) {
      // TODO(wolenetz): Determine whether DTS>PTS should really be allowed. See
      // http://crbug.com/354518.
      DVLOG(2) << __FUNCTION__ << ": WARNING: Frame DTS("
               << decode_timestamp.InSecondsF() << ") > PTS("
               << presentation_timestamp.InSecondsF() << ")";
    }

    // TODO(acolwell/wolenetz): All stream parsers must emit valid (positive)
    // frame durations. For now, we allow non-negative frame duration.
    // See http://crbug.com/351166.
    if (frame_duration == kNoTimestamp()) {
      DVLOG(2) << __FUNCTION__ << ": Frame missing duration (kNoTimestamp())";
      return false;
    }
    if (frame_duration < base::TimeDelta()) {
      DVLOG(2) << __FUNCTION__ << ": Frame duration negative: "
               << frame_duration.InSecondsF();
      return false;
    }

    // 4. If mode equals "sequence" and group start timestamp is set, then run
    //    the following steps:
    if (sequence_mode_ && group_start_timestamp_ != kNoTimestamp()) {
      // 4.1. Set timestampOffset equal to group start timestamp -
      //      presentation timestamp.
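      // For example (illustrative values), a group start timestamp of 10 s
      // and a frame PTS of 2 s yield a timestampOffset of 8 s, so after step
      // 5 below this frame's PTS becomes 10 s, the start of the new coded
      // frame group.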
      *timestamp_offset = group_start_timestamp_ - presentation_timestamp;

      DVLOG(3) << __FUNCTION__ << ": updated timestampOffset is now "
               << timestamp_offset->InSecondsF();

      // 4.2. Set group end timestamp equal to group start timestamp.
      group_end_timestamp_ = group_start_timestamp_;

      // 4.3. Set the need random access point flag on all track buffers to
      //      true.
      SetAllTrackBuffersNeedRandomAccessPoint();

      // 4.4. Unset group start timestamp.
      group_start_timestamp_ = kNoTimestamp();
    }

    // 5. If timestampOffset is not 0, then run the following steps:
    if (*timestamp_offset != base::TimeDelta()) {
      // 5.1. Add timestampOffset to the presentation timestamp.
      // Note: |frame| PTS is only updated if it survives discontinuity
      // processing.
      presentation_timestamp += *timestamp_offset;

      // 5.2. Add timestampOffset to the decode timestamp.
      // Frame DTS is only updated if it survives discontinuity processing.
      decode_timestamp += *timestamp_offset;
    }

    // 6. Let track buffer equal the track buffer that the coded frame will be
    //    added to.

    // Remap audio and video track types to their special singleton identifiers.
    StreamParser::TrackId track_id = kAudioTrackId;
    switch (frame->type()) {
      case DemuxerStream::AUDIO:
        break;
      case DemuxerStream::VIDEO:
        track_id = kVideoTrackId;
        break;
      case DemuxerStream::TEXT:
        track_id = frame->track_id();
        break;
      case DemuxerStream::UNKNOWN:
      case DemuxerStream::NUM_TYPES:
        DCHECK(false) << ": Invalid frame type " << frame->type();
        return false;
    }

    MseTrackBuffer* track_buffer = FindTrack(track_id);
    if (!track_buffer) {
      DVLOG(2) << __FUNCTION__ << ": Unknown track: type=" << frame->type()
               << ", frame processor track id=" << track_id
               << ", parser track id=" << frame->track_id();
      return false;
    }

    // 7. If last decode timestamp for track buffer is set and decode timestamp
    //    is less than last decode timestamp
    //    OR
    //    If last decode timestamp for track buffer is set and the difference
    //    between decode timestamp and last decode timestamp is greater than 2
    //    times last frame duration:
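    // For example (illustrative values), with a last frame duration of 20 ms,
    // a decode timestamp that moves backwards or that jumps more than 40 ms
    // past the last decode timestamp is treated as a discontinuity.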
    DecodeTimestamp last_decode_timestamp =
        track_buffer->last_decode_timestamp();
    if (last_decode_timestamp != kNoDecodeTimestamp()) {
      base::TimeDelta dts_delta = decode_timestamp - last_decode_timestamp;
      if (dts_delta < base::TimeDelta() ||
          dts_delta > 2 * track_buffer->last_frame_duration()) {
        // 7.1. If mode equals "segments": Set group end timestamp to
        //      presentation timestamp.
        //      If mode equals "sequence": Set group start timestamp equal to
        //      the group end timestamp.
        if (!sequence_mode_) {
          group_end_timestamp_ = presentation_timestamp;
          // This triggers a discontinuity so we need to treat the next frames
          // appended within the append window as if they were the beginning of
          // a new segment.
          *new_media_segment = true;
        } else {
          DVLOG(3) << __FUNCTION__ << " : Sequence mode discontinuity, GETS: "
                   << group_end_timestamp_.InSecondsF();
          DCHECK(kNoTimestamp() != group_end_timestamp_);
          group_start_timestamp_ = group_end_timestamp_;
        }

        // 7.2. - 7.5.:
        Reset();

        // 7.6. Jump to the Loop Top step above to restart processing of the
        //      current coded frame.
        DVLOG(3) << __FUNCTION__ << ": Discontinuity: reprocessing frame";
        continue;
      }
    }

    // 9. Let frame end timestamp equal the sum of presentation timestamp and
    //    frame duration.
    base::TimeDelta frame_end_timestamp =
        presentation_timestamp + frame_duration;

    // 10.  If presentation timestamp is less than appendWindowStart, then set
    //      the need random access point flag to true, drop the coded frame, and
    //      jump to the top of the loop to start processing the next coded
    //      frame.
    // Note: We keep the result of partial discard of a buffer that overlaps
    //      |append_window_start| and does not end after |append_window_end|.
    // 11. If frame end timestamp is greater than appendWindowEnd, then set the
    //     need random access point flag to true, drop the coded frame, and jump
    //     to the top of the loop to start processing the next coded frame.
    frame->set_timestamp(presentation_timestamp);
    frame->SetDecodeTimestamp(decode_timestamp);
    if (track_buffer->stream()->supports_partial_append_window_trimming() &&
        HandlePartialAppendWindowTrimming(append_window_start,
                                          append_window_end,
                                          frame)) {
      // |frame| has been partially trimmed or had preroll added.  Though
      // |frame|'s duration may have changed, do not update |frame_duration|
      // here, so |track_buffer|'s last frame duration update uses original
      // frame duration and reduces spurious discontinuity detection.
      decode_timestamp = frame->GetDecodeTimestamp();
      presentation_timestamp = frame->timestamp();
      frame_end_timestamp = frame->timestamp() + frame->duration();
    }

    if (presentation_timestamp < append_window_start ||
        frame_end_timestamp > append_window_end) {
      track_buffer->set_needs_random_access_point(true);
      DVLOG(3) << "Dropping frame that is outside append window.";
      return true;
    }

    // Note: This step is relocated, versus April 1 spec, to allow append window
    // processing to first filter coded frames shifted by |timestamp_offset_| in
    // such a way that their PTS is negative.
    // 8. If the presentation timestamp or decode timestamp is less than the
    // presentation start time, then run the end of stream algorithm with the
    // error parameter set to "decode", and abort these steps.
    DCHECK(presentation_timestamp >= base::TimeDelta());
    if (decode_timestamp < DecodeTimestamp()) {
      // B-frames may still result in negative DTS here after being shifted by
      // |timestamp_offset_|.
      DVLOG(2) << __FUNCTION__
               << ": frame PTS=" << presentation_timestamp.InSecondsF()
               << " has negative DTS=" << decode_timestamp.InSecondsF()
               << " after applying timestampOffset, handling any discontinuity,"
               << " and filtering against append window";
      return false;
    }

    // 12. If the need random access point flag on track buffer equals true,
    //     then run the following steps:
    if (track_buffer->needs_random_access_point()) {
      // 12.1. If the coded frame is not a random access point, then drop the
      //       coded frame and jump to the top of the loop to start processing
      //       the next coded frame.
      if (!frame->IsKeyframe()) {
        DVLOG(3) << __FUNCTION__
                 << ": Dropping frame that is not a random access point";
        return true;
      }

      // 12.2. Set the need random access point flag on track buffer to false.
      track_buffer->set_needs_random_access_point(false);
    }

    // We now have a processed buffer to append to the track buffer's stream.
    // If it is the first in a new media segment or following a discontinuity,
    // notify all the track buffers' streams that a new segment is beginning.
    if (*new_media_segment) {
      // First, complete the append to track buffer streams of previous media
      // segment's frames, if any.
      if (!FlushProcessedFrames())
        return false;

      *new_media_segment = false;

      // TODO(acolwell/wolenetz): This should be changed to a presentation
      // timestamp. See http://crbug.com/402502
      NotifyNewMediaSegmentStarting(decode_timestamp);
    }

    DVLOG(3) << __FUNCTION__ << ": Sending processed frame to stream, "
             << "PTS=" << presentation_timestamp.InSecondsF()
             << ", DTS=" << decode_timestamp.InSecondsF();

    // Steps 13-18: Note, we optimize by appending groups of contiguous
    // processed frames for each track buffer at end of ProcessFrames() or prior
    // to NotifyNewMediaSegmentStarting().
    // TODO(wolenetz): Refactor SourceBufferStream to conform to spec GC timing.
    // See http://crbug.com/371197.
    track_buffer->EnqueueProcessedFrame(frame);

    // 19. Set last decode timestamp for track buffer to decode timestamp.
    track_buffer->set_last_decode_timestamp(decode_timestamp);

    // 20. Set last frame duration for track buffer to frame duration.
    track_buffer->set_last_frame_duration(frame_duration);

    // 21. If highest presentation timestamp for track buffer is unset or frame
    //     end timestamp is greater than highest presentation timestamp, then
    //     set highest presentation timestamp for track buffer to frame end
    //     timestamp.
    track_buffer->SetHighestPresentationTimestampIfIncreased(
        frame_end_timestamp);

    // 22. If frame end timestamp is greater than group end timestamp, then set
    //     group end timestamp equal to frame end timestamp.
    if (frame_end_timestamp > group_end_timestamp_)
      group_end_timestamp_ = frame_end_timestamp;
    DCHECK(group_end_timestamp_ >= base::TimeDelta());

    return true;
  }

  NOTREACHED();
  return false;
}

}  // namespace media