Home | History | Annotate | Download | only in source
      1 /*
      2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
      3  *
      4  *  Use of this source code is governed by a BSD-style license
      5  *  that can be found in the LICENSE file in the root of the source
      6  *  tree. An additional intellectual property rights grant can be found
      7  *  in the file PATENTS.  All contributing project authors may
      8  *  be found in the AUTHORS file in the root of the source tree.
      9  */
     10 
     11 #include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h"
     12 #include "webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h"
     13 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
     14 #include "webrtc/modules/audio_processing/include/audio_processing.h"
     15 #include "webrtc/modules/utility/interface/audio_frame_operations.h"
     16 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
     17 #include "webrtc/system_wrappers/interface/trace.h"
     18 
     19 namespace webrtc {
     20 namespace {
     21 
// Pairs a MixerParticipant with the AudioFrame it produced for the current
// mixing round, so the passive bookkeeping lists can map a frame back to its
// owner.
struct ParticipantFramePair {
  MixerParticipant* participant;
  AudioFrame* audioFrame;
};

// Owning list of heap-allocated pairs; entries are deleted by the code that
// drains the list.
typedef std::list<ParticipantFramePair*> ParticipantFramePairList;
     28 
     29 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
     30 // These effects are applied to |frame| itself prior to mixing. Assumes that
     31 // |mixed_frame| always has at least as many channels as |frame|. Supports
     32 // stereo at most.
     33 //
     34 // TODO(andrew): consider not modifying |frame| here.
     35 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
     36   assert(mixed_frame->num_channels_ >= frame->num_channels_);
     37   if (use_limiter) {
     38     // Divide by two to avoid saturation in the mixing.
     39     // This is only meaningful if the limiter will be used.
     40     *frame >>= 1;
     41   }
     42   if (mixed_frame->num_channels_ > frame->num_channels_) {
     43     // We only support mono-to-stereo.
     44     assert(mixed_frame->num_channels_ == 2 &&
     45            frame->num_channels_ == 1);
     46     AudioFrameOperations::MonoToStereo(frame);
     47   }
     48 
     49   *mixed_frame += *frame;
     50 }
     51 
     52 // Return the max number of channels from a |list| composed of AudioFrames.
     53 int MaxNumChannels(const AudioFrameList* list) {
     54   int max_num_channels = 1;
     55   for (AudioFrameList::const_iterator iter = list->begin();
     56        iter != list->end();
     57        ++iter) {
     58     max_num_channels = std::max(max_num_channels, (*iter)->num_channels_);
     59   }
     60   return max_num_channels;
     61 }
     62 
// Fill |stats| from |frame|: record the id of the participant that produced
// the frame. The audio level field is intentionally left at zero for now.
void SetParticipantStatistics(ParticipantStatistics* stats,
                              const AudioFrame& frame) {
    stats->participant = frame.id_;
    stats->level = 0;  // TODO(andrew): to what should this be set?
}
     68 
     69 }  // namespace
     70 
// Every participant owns a MixHistory that records whether it was included
// in the previous mixing round.
MixerParticipant::MixerParticipant()
    : _mixHistory(new MixHistory()) {
}
     74 
// _mixHistory is exclusively owned by this object; release it here.
MixerParticipant::~MixerParticipant() {
    delete _mixHistory;
}
     78 
// Report via |mixed| whether this participant was mixed in the last round.
// Always returns 0.
int32_t MixerParticipant::IsMixed(bool& mixed) const {
    return _mixHistory->IsMixed(mixed);
}
     82 
     83 MixHistory::MixHistory()
     84     : _isMixed(0) {
     85 }
     86 
// MixHistory holds no resources; nothing to release.
MixHistory::~MixHistory() {
}
     89 
// Copy the current mixed flag into |mixed|. Always returns 0.
int32_t MixHistory::IsMixed(bool& mixed) const {
    mixed = _isMixed;
    return 0;
}
     94 
// Report whether the participant was mixed in the previous round.
int32_t MixHistory::WasMixed(bool& wasMixed) const {
    // Was mixed is the same as is mixed depending on perspective. This function
    // is for the perspective of AudioConferenceMixerImpl.
    return IsMixed(wasMixed);
}
    100 
// Record whether the participant was included in the current mix.
// Always returns 0.
int32_t MixHistory::SetIsMixed(const bool mixed) {
    _isMixed = mixed;
    return 0;
}
    105 
// Clear the mixed flag, e.g. when the participant leaves the mix.
void MixHistory::ResetMixedStatus() {
    _isMixed = false;
}
    109 
    110 AudioConferenceMixer* AudioConferenceMixer::Create(int id) {
    111     AudioConferenceMixerImpl* mixer = new AudioConferenceMixerImpl(id);
    112     if(!mixer->Init()) {
    113         delete mixer;
    114         return NULL;
    115     }
    116     return mixer;
    117 }
    118 
// Only default/zero state is set here; the heavyweight setup (critical
// sections, limiter, audio-frame memory pool, output frequency) happens in
// Init(), which the Create() factory calls before handing the mixer out.
// The initializer order must follow the member declaration order in the
// header.
AudioConferenceMixerImpl::AudioConferenceMixerImpl(int id)
    : _scratchParticipantsToMixAmount(0),
      _scratchMixedParticipants(),
      _scratchVadPositiveParticipantsAmount(0),
      _scratchVadPositiveParticipants(),
      _id(id),
      _minimumMixingFreq(kLowestPossible),
      _mixReceiver(NULL),
      _mixerStatusCallback(NULL),
      _amountOf10MsBetweenCallbacks(1),
      _amountOf10MsUntilNextCallback(0),
      _mixerStatusCb(false),
      _outputFrequency(kDefaultFrequency),
      _sampleSize(0),
      _audioFramePool(NULL),
      _participantList(),
      _additionalParticipantList(),
      _numMixedParticipants(0),
      use_limiter_(true),
      _timeStamp(0),
      _timeScheduler(kProcessPeriodicityInMs),
      _mixedAudioLevel(),
      _processCalls(0) {}
    142 
// Second-phase initialization. Creates the two critical sections, the
// AudioProcessing instance used as a limiter, and the AudioFrame memory
// pool, then configures the limiter. Returns false on the first failure;
// the destructor cleans up whatever was created.
bool AudioConferenceMixerImpl::Init() {
    _crit.reset(CriticalSectionWrapper::CreateCriticalSection());
    if (_crit.get() == NULL)
        return false;

    _cbCrit.reset(CriticalSectionWrapper::CreateCriticalSection());
    if(_cbCrit.get() == NULL)
        return false;

    // The AGC experiment is explicitly disabled; only the fixed-digital gain
    // control below is used, as a limiter.
    Config config;
    config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
    _limiter.reset(AudioProcessing::Create(config));
    if(!_limiter.get())
        return false;

    MemoryPool<AudioFrame>::CreateMemoryPool(_audioFramePool,
                                             DEFAULT_AUDIO_FRAME_POOLSIZE);
    if(_audioFramePool == NULL)
        return false;

    if(SetOutputFrequency(kDefaultFrequency) == -1)
        return false;

    if(_limiter->gain_control()->set_mode(GainControl::kFixedDigital) !=
        _limiter->kNoError)
        return false;

    // We smoothly limit the mixed frame to -7 dbFS. -6 would correspond to the
    // divide-by-2 but -7 is used instead to give a bit of headroom since the
    // AGC is not a hard limiter.
    if(_limiter->gain_control()->set_target_level_dbfs(7) != _limiter->kNoError)
        return false;

    // No additional gain: the limiter should only prevent clipping.
    if(_limiter->gain_control()->set_compression_gain_db(0)
        != _limiter->kNoError)
        return false;

    if(_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError)
        return false;

    if(_limiter->gain_control()->Enable(true) != _limiter->kNoError)
        return false;

    return true;
}
    188 
// Release the frame pool; DeleteMemoryPool is expected to NULL the pointer,
// which the assert verifies.
AudioConferenceMixerImpl::~AudioConferenceMixerImpl() {
    MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
    assert(_audioFramePool == NULL);
}
    193 
// Replace the trace/module id. Always returns 0.
// NOTE(review): _id is written here without holding _crit while other
// methods read it under the lock — confirm callers serialize this with
// Process().
int32_t AudioConferenceMixerImpl::ChangeUniqueId(const int32_t id) {
    _id = id;
    return 0;
}
    198 
    199 // Process should be called every kProcessPeriodicityInMs ms
    200 int32_t AudioConferenceMixerImpl::TimeUntilNextProcess() {
    201     int32_t timeUntilNextProcess = 0;
    202     CriticalSectionScoped cs(_crit.get());
    203     if(_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) {
    204         WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
    205                      "failed in TimeToNextUpdate() call");
    206         // Sanity check
    207         assert(false);
    208         return -1;
    209     }
    210     return timeUntilNextProcess;
    211 }
    212 
// One 10 ms mixing iteration. Selects the output frequency from the
// participants' needs, gathers frames to mix, mixes them into a pooled
// AudioFrame, optionally limits the result, and delivers it to the
// registered receiver and status callbacks. Returns 0 on success, -1 on
// failure. Must not be called reentrantly (guarded by the _processCalls
// assert). Note the lock discipline: _cbCrit and _crit are taken in
// separate, non-overlapping scopes except where _crit is nested inside a
// _cbCrit section.
int32_t AudioConferenceMixerImpl::Process() {
    size_t remainingParticipantsAllowedToMix =
        kMaximumAmountOfMixedParticipants;
    {
        CriticalSectionScoped cs(_crit.get());
        assert(_processCalls == 0);
        _processCalls++;

        // Let the scheduler know that we are running one iteration.
        _timeScheduler.UpdateScheduler();
    }

    // Frames selected for mixing, frames being faded out, and frames from
    // anonymous (additional) participants, plus a map from frame id to the
    // participant that produced it.
    AudioFrameList mixList;
    AudioFrameList rampOutList;
    AudioFrameList additionalFramesList;
    std::map<int, MixerParticipant*> mixedParticipantsMap;
    {
        CriticalSectionScoped cs(_cbCrit.get());

        int32_t lowFreq = GetLowestMixingFrequency();
        // SILK can run in 12 kHz and 24 kHz. These frequencies are not
        // supported so use the closest higher frequency to not lose any
        // information.
        // TODO(henrike): this is probably more appropriate to do in
        //                GetLowestMixingFrequency().
        if (lowFreq == 12000) {
            lowFreq = 16000;
        } else if (lowFreq == 24000) {
            lowFreq = 32000;
        }
        if(lowFreq <= 0) {
            // Nothing sensible to mix at; undo the reentrancy guard and bail.
            CriticalSectionScoped cs(_crit.get());
            _processCalls--;
            return 0;
        } else {
            // Switch the output frequency only when it actually changed.
            switch(lowFreq) {
            case 8000:
                if(OutputFrequency() != kNbInHz) {
                    SetOutputFrequency(kNbInHz);
                }
                break;
            case 16000:
                if(OutputFrequency() != kWbInHz) {
                    SetOutputFrequency(kWbInHz);
                }
                break;
            case 32000:
                if(OutputFrequency() != kSwbInHz) {
                    SetOutputFrequency(kSwbInHz);
                }
                break;
            case 48000:
                if(OutputFrequency() != kFbInHz) {
                    SetOutputFrequency(kFbInHz);
                }
                break;
            default:
                assert(false);

                CriticalSectionScoped cs(_crit.get());
                _processCalls--;
                return -1;
            }
        }

        // Decide which participants get mixed this round and which get
        // ramped out.
        UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
                    remainingParticipantsAllowedToMix);

        GetAdditionalAudio(&additionalFramesList);
        UpdateMixedStatus(mixedParticipantsMap);
        // Scratch state is only ever written from Process().
        _scratchParticipantsToMixAmount = mixedParticipantsMap.size();
    }

    // Get an AudioFrame for mixing from the memory pool.
    AudioFrame* mixedAudio = NULL;
    if(_audioFramePool->PopMemory(mixedAudio) == -1) {
        WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
                     "failed PopMemory() call");
        assert(false);
        return -1;
    }

    bool timeForMixerCallback = false;
    int retval = 0;
    int32_t audioLevel = 0;
    {
        CriticalSectionScoped cs(_crit.get());

        // TODO(henrike): it might be better to decide the number of channels
        //                with an API instead of dynamically.

        // Find the max channels over all mixing lists.
        const int num_mixed_channels = std::max(MaxNumChannels(&mixList),
            std::max(MaxNumChannels(&additionalFramesList),
                     MaxNumChannels(&rampOutList)));

        mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency,
                                AudioFrame::kNormalSpeech,
                                AudioFrame::kVadPassive, num_mixed_channels);

        _timeStamp += _sampleSize;

        // We only use the limiter if it supports the output sample rate and
        // we're actually mixing multiple streams.
        use_limiter_ = _numMixedParticipants > 1 &&
                       _outputFrequency <= kAudioProcMaxNativeSampleRateHz;

        MixFromList(*mixedAudio, &mixList);
        MixAnonomouslyFromList(*mixedAudio, &additionalFramesList);
        MixAnonomouslyFromList(*mixedAudio, &rampOutList);

        if(mixedAudio->samples_per_channel_ == 0) {
            // Nothing was mixed, set the audio samples to silence.
            mixedAudio->samples_per_channel_ = _sampleSize;
            mixedAudio->Mute();
        } else {
            // Only call the limiter if we have something to mix.
            if(!LimitMixedAudio(*mixedAudio))
                retval = -1;
        }

        _mixedAudioLevel.ComputeLevel(mixedAudio->data_,_sampleSize);
        audioLevel = _mixedAudioLevel.GetLevel();

        if(_mixerStatusCb) {
            _scratchVadPositiveParticipantsAmount = 0;
            UpdateVADPositiveParticipants(&mixList);
            // Fire the status callbacks once every
            // _amountOf10MsBetweenCallbacks iterations.
            if(_amountOf10MsUntilNextCallback-- == 0) {
                _amountOf10MsUntilNextCallback = _amountOf10MsBetweenCallbacks;
                timeForMixerCallback = true;
            }
        }
    }

    {
        CriticalSectionScoped cs(_cbCrit.get());
        if(_mixReceiver != NULL) {
            const AudioFrame** dummy = NULL;
            _mixReceiver->NewMixedAudio(
                _id,
                *mixedAudio,
                dummy,
                0);
        }

        if((_mixerStatusCallback != NULL) &&
            timeForMixerCallback) {
            _mixerStatusCallback->MixedParticipants(
                _id,
                _scratchMixedParticipants,
                static_cast<uint32_t>(_scratchParticipantsToMixAmount));

            _mixerStatusCallback->VADPositiveParticipants(
                _id,
                _scratchVadPositiveParticipants,
                _scratchVadPositiveParticipantsAmount);
            _mixerStatusCallback->MixedAudioLevel(_id,audioLevel);
        }
    }

    // Reclaim all outstanding memory.
    _audioFramePool->PushMemory(mixedAudio);
    ClearAudioFrameList(&mixList);
    ClearAudioFrameList(&rampOutList);
    ClearAudioFrameList(&additionalFramesList);
    {
        CriticalSectionScoped cs(_crit.get());
        _processCalls--;
    }
    return retval;
}
    384 
    385 int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback(
    386     AudioMixerOutputReceiver& mixReceiver) {
    387     CriticalSectionScoped cs(_cbCrit.get());
    388     if(_mixReceiver != NULL) {
    389         return -1;
    390     }
    391     _mixReceiver = &mixReceiver;
    392     return 0;
    393 }
    394 
    395 int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
    396     CriticalSectionScoped cs(_cbCrit.get());
    397     if(_mixReceiver == NULL) {
    398         return -1;
    399     }
    400     _mixReceiver = NULL;
    401     return 0;
    402 }
    403 
// Set the sample rate of the mixed output and derive the per-channel sample
// count for one processing period. Always returns 0.
int32_t AudioConferenceMixerImpl::SetOutputFrequency(
    const Frequency frequency) {
    CriticalSectionScoped cs(_crit.get());

    _outputFrequency = frequency;
    // Samples per channel in one kProcessPeriodicityInMs tick.
    _sampleSize = (_outputFrequency*kProcessPeriodicityInMs) / 1000;

    return 0;
}
    413 
// Return the current output sample rate, under lock.
AudioConferenceMixer::Frequency
AudioConferenceMixerImpl::OutputFrequency() const {
    CriticalSectionScoped cs(_crit.get());
    return _outputFrequency;
}
    419 
    420 int32_t AudioConferenceMixerImpl::RegisterMixerStatusCallback(
    421     AudioMixerStatusReceiver& mixerStatusCallback,
    422     const uint32_t amountOf10MsBetweenCallbacks) {
    423     if(amountOf10MsBetweenCallbacks == 0) {
    424         WEBRTC_TRACE(
    425             kTraceWarning,
    426             kTraceAudioMixerServer,
    427             _id,
    428             "amountOf10MsBetweenCallbacks(%d) needs to be larger than 0");
    429         return -1;
    430     }
    431     {
    432         CriticalSectionScoped cs(_cbCrit.get());
    433         if(_mixerStatusCallback != NULL) {
    434             WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
    435                          "Mixer status callback already registered");
    436             return -1;
    437         }
    438         _mixerStatusCallback = &mixerStatusCallback;
    439     }
    440     {
    441         CriticalSectionScoped cs(_crit.get());
    442         _amountOf10MsBetweenCallbacks  = amountOf10MsBetweenCallbacks;
    443         _amountOf10MsUntilNextCallback = 0;
    444         _mixerStatusCb                 = true;
    445     }
    446     return 0;
    447 }
    448 
    449 int32_t AudioConferenceMixerImpl::UnRegisterMixerStatusCallback() {
    450     {
    451         CriticalSectionScoped cs(_crit.get());
    452         if(!_mixerStatusCb)
    453         {
    454             WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
    455                          "Mixer status callback not registered");
    456             return -1;
    457         }
    458         _mixerStatusCb = false;
    459     }
    460     {
    461         CriticalSectionScoped cs(_cbCrit.get());
    462         _mixerStatusCallback = NULL;
    463     }
    464     return 0;
    465 }
    466 
    467 int32_t AudioConferenceMixerImpl::SetMixabilityStatus(
    468     MixerParticipant& participant,
    469     bool mixable) {
    470     if (!mixable) {
    471         // Anonymous participants are in a separate list. Make sure that the
    472         // participant is in the _participantList if it is being mixed.
    473         SetAnonymousMixabilityStatus(participant, false);
    474     }
    475     size_t numMixedParticipants;
    476     {
    477         CriticalSectionScoped cs(_cbCrit.get());
    478         const bool isMixed =
    479             IsParticipantInList(participant, &_participantList);
    480         // API must be called with a new state.
    481         if(!(mixable ^ isMixed)) {
    482             WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
    483                          "Mixable is aready %s",
    484                          isMixed ? "ON" : "off");
    485             return -1;
    486         }
    487         bool success = false;
    488         if(mixable) {
    489             success = AddParticipantToList(participant, &_participantList);
    490         } else {
    491             success = RemoveParticipantFromList(participant, &_participantList);
    492         }
    493         if(!success) {
    494             WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
    495                          "failed to %s participant",
    496                          mixable ? "add" : "remove");
    497             assert(false);
    498             return -1;
    499         }
    500 
    501         size_t numMixedNonAnonymous = _participantList.size();
    502         if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) {
    503             numMixedNonAnonymous = kMaximumAmountOfMixedParticipants;
    504         }
    505         numMixedParticipants =
    506             numMixedNonAnonymous + _additionalParticipantList.size();
    507     }
    508     // A MixerParticipant was added or removed. Make sure the scratch
    509     // buffer is updated if necessary.
    510     // Note: The scratch buffer may only be updated in Process().
    511     CriticalSectionScoped cs(_crit.get());
    512     _numMixedParticipants = numMixedParticipants;
    513     return 0;
    514 }
    515 
// Report via |mixable| whether |participant| is in the regular mixing list.
// Always returns 0.
int32_t AudioConferenceMixerImpl::MixabilityStatus(
    MixerParticipant& participant,
    bool& mixable) {
    CriticalSectionScoped cs(_cbCrit.get());
    mixable = IsParticipantInList(participant, &_participantList);
    return 0;
}
    523 
// Move |participant| between the regular and the anonymous mixing lists.
// Turning anonymity on requires the participant to already be registered
// (mixable); turning it off moves it back to the regular list. No-op if the
// participant is already in the requested state.
int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
    MixerParticipant& participant, const bool anonymous) {
    CriticalSectionScoped cs(_cbCrit.get());
    if(IsParticipantInList(participant, &_additionalParticipantList)) {
        if(anonymous) {
            // Already anonymous; nothing to do.
            return 0;
        }
        // Demote from anonymous back to the regular list.
        if(!RemoveParticipantFromList(participant,
                                      &_additionalParticipantList)) {
            WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                         "unable to remove participant from anonymous list");
            assert(false);
            return -1;
        }
        return AddParticipantToList(participant, &_participantList) ? 0 : -1;
    }
    if(!anonymous) {
        // Not anonymous and not asked to be; nothing to do.
        return 0;
    }
    // Promote to anonymous: the participant must currently be in the regular
    // list.
    const bool mixable = RemoveParticipantFromList(participant,
                                                   &_participantList);
    if(!mixable) {
        WEBRTC_TRACE(
            kTraceWarning,
            kTraceAudioMixerServer,
            _id,
            "participant must be registered before turning it into anonymous");
        // Setting anonymous status is only possible if MixerParticipant is
        // already registered.
        return -1;
    }
    return AddParticipantToList(participant, &_additionalParticipantList) ?
        0 : -1;
}
    558 
// Report via |mixable| whether |participant| is in the anonymous list.
// Always returns 0.
int32_t AudioConferenceMixerImpl::AnonymousMixabilityStatus(
    MixerParticipant& participant, bool& mixable) {
    CriticalSectionScoped cs(_cbCrit.get());
    mixable = IsParticipantInList(participant,
                                  &_additionalParticipantList);
    return 0;
}
    566 
    567 int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency(
    568     Frequency freq) {
    569     // Make sure that only allowed sampling frequencies are used. Use closest
    570     // higher sampling frequency to avoid losing information.
    571     if (static_cast<int>(freq) == 12000) {
    572          freq = kWbInHz;
    573     } else if (static_cast<int>(freq) == 24000) {
    574         freq = kSwbInHz;
    575     }
    576 
    577     if((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) ||
    578        (freq == kLowestPossible)) {
    579         _minimumMixingFreq=freq;
    580         return 0;
    581     } else {
    582         WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
    583                      "SetMinimumMixingFrequency incorrect frequency: %i",freq);
    584         assert(false);
    585         return -1;
    586     }
    587 }
    588 
    589 // Check all AudioFrames that are to be mixed. The highest sampling frequency
    590 // found is the lowest that can be used without losing information.
    591 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() {
    592     const int participantListFrequency =
    593         GetLowestMixingFrequencyFromList(&_participantList);
    594     const int anonymousListFrequency =
    595         GetLowestMixingFrequencyFromList(&_additionalParticipantList);
    596     const int highestFreq =
    597         (participantListFrequency > anonymousListFrequency) ?
    598             participantListFrequency : anonymousListFrequency;
    599     // Check if the user specified a lowest mixing frequency.
    600     if(_minimumMixingFreq != kLowestPossible) {
    601         if(_minimumMixingFreq > highestFreq) {
    602             return _minimumMixingFreq;
    603         }
    604     }
    605     return highestFreq;
    606 }
    607 
    608 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
    609     MixerParticipantList* mixList) {
    610     int32_t highestFreq = 8000;
    611     for (MixerParticipantList::iterator iter = mixList->begin();
    612          iter != mixList->end();
    613          ++iter) {
    614         const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
    615         if(neededFrequency > highestFreq) {
    616             highestFreq = neededFrequency;
    617         }
    618     }
    619     return highestFreq;
    620 }
    621 
    622 void AudioConferenceMixerImpl::UpdateToMix(
    623     AudioFrameList* mixList,
    624     AudioFrameList* rampOutList,
    625     std::map<int, MixerParticipant*>* mixParticipantList,
    626     size_t& maxAudioFrameCounter) {
    627     WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
    628                  "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)",
    629                  maxAudioFrameCounter);
    630     const size_t mixListStartSize = mixList->size();
    631     AudioFrameList activeList;
    632     // Struct needed by the passive lists to keep track of which AudioFrame
    633     // belongs to which MixerParticipant.
    634     ParticipantFramePairList passiveWasNotMixedList;
    635     ParticipantFramePairList passiveWasMixedList;
    636     for (MixerParticipantList::iterator participant = _participantList.begin();
    637          participant != _participantList.end();
    638          ++participant) {
    639         // Stop keeping track of passive participants if there are already
    640         // enough participants available (they wont be mixed anyway).
    641         bool mustAddToPassiveList = (maxAudioFrameCounter >
    642                                     (activeList.size() +
    643                                      passiveWasMixedList.size() +
    644                                      passiveWasNotMixedList.size()));
    645 
    646         bool wasMixed = false;
    647         (*participant)->_mixHistory->WasMixed(wasMixed);
    648         AudioFrame* audioFrame = NULL;
    649         if(_audioFramePool->PopMemory(audioFrame) == -1) {
    650             WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
    651                          "failed PopMemory() call");
    652             assert(false);
    653             return;
    654         }
    655         audioFrame->sample_rate_hz_ = _outputFrequency;
    656 
    657         if((*participant)->GetAudioFrame(_id,*audioFrame) != 0) {
    658             WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
    659                          "failed to GetAudioFrame() from participant");
    660             _audioFramePool->PushMemory(audioFrame);
    661             continue;
    662         }
    663         if (_participantList.size() != 1) {
    664           // TODO(wu): Issue 3390, add support for multiple participants case.
    665           audioFrame->ntp_time_ms_ = -1;
    666         }
    667 
    668         // TODO(henrike): this assert triggers in some test cases where SRTP is
    669         // used which prevents NetEQ from making a VAD. Temporarily disable this
    670         // assert until the problem is fixed on a higher level.
    671         // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
    672         if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
    673             WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
    674                          "invalid VAD state from participant");
    675         }
    676 
    677         if(audioFrame->vad_activity_ == AudioFrame::kVadActive) {
    678             if(!wasMixed) {
    679                 RampIn(*audioFrame);
    680             }
    681 
    682             if(activeList.size() >= maxAudioFrameCounter) {
    683                 // There are already more active participants than should be
    684                 // mixed. Only keep the ones with the highest energy.
    685                 AudioFrameList::iterator replaceItem;
    686                 CalculateEnergy(*audioFrame);
    687                 uint32_t lowestEnergy = audioFrame->energy_;
    688 
    689                 bool found_replace_item = false;
    690                 for (AudioFrameList::iterator iter = activeList.begin();
    691                      iter != activeList.end();
    692                      ++iter) {
    693                     CalculateEnergy(**iter);
    694                     if((*iter)->energy_ < lowestEnergy) {
    695                         replaceItem = iter;
    696                         lowestEnergy = (*iter)->energy_;
    697                         found_replace_item = true;
    698                     }
    699                 }
    700                 if(found_replace_item) {
    701                     AudioFrame* replaceFrame = *replaceItem;
    702 
    703                     bool replaceWasMixed = false;
    704                     std::map<int, MixerParticipant*>::iterator it =
    705                         mixParticipantList->find(replaceFrame->id_);
    706 
    707                     // When a frame is pushed to |activeList| it is also pushed
    708                     // to mixParticipantList with the frame's id. This means
    709                     // that the Find call above should never fail.
    710                     assert(it != mixParticipantList->end());
    711                     it->second->_mixHistory->WasMixed(replaceWasMixed);
    712 
    713                     mixParticipantList->erase(replaceFrame->id_);
    714                     activeList.erase(replaceItem);
    715 
    716                     activeList.push_front(audioFrame);
    717                     (*mixParticipantList)[audioFrame->id_] = *participant;
    718                     assert(mixParticipantList->size() <=
    719                            kMaximumAmountOfMixedParticipants);
    720 
    721                     if (replaceWasMixed) {
    722                       RampOut(*replaceFrame);
    723                       rampOutList->push_back(replaceFrame);
    724                       assert(rampOutList->size() <=
    725                              kMaximumAmountOfMixedParticipants);
    726                     } else {
    727                       _audioFramePool->PushMemory(replaceFrame);
    728                     }
    729                 } else {
    730                     if(wasMixed) {
    731                         RampOut(*audioFrame);
    732                         rampOutList->push_back(audioFrame);
    733                         assert(rampOutList->size() <=
    734                                kMaximumAmountOfMixedParticipants);
    735                     } else {
    736                         _audioFramePool->PushMemory(audioFrame);
    737                     }
    738                 }
    739             } else {
    740                 activeList.push_front(audioFrame);
    741                 (*mixParticipantList)[audioFrame->id_] = *participant;
    742                 assert(mixParticipantList->size() <=
    743                        kMaximumAmountOfMixedParticipants);
    744             }
    745         } else {
    746             if(wasMixed) {
    747                 ParticipantFramePair* pair = new ParticipantFramePair;
    748                 pair->audioFrame  = audioFrame;
    749                 pair->participant = *participant;
    750                 passiveWasMixedList.push_back(pair);
    751             } else if(mustAddToPassiveList) {
    752                 RampIn(*audioFrame);
    753                 ParticipantFramePair* pair = new ParticipantFramePair;
    754                 pair->audioFrame  = audioFrame;
    755                 pair->participant = *participant;
    756                 passiveWasNotMixedList.push_back(pair);
    757             } else {
    758                 _audioFramePool->PushMemory(audioFrame);
    759             }
    760         }
    761     }
    762     assert(activeList.size() <= maxAudioFrameCounter);
    763     // At this point it is known which participants should be mixed. Transfer
    764     // this information to this functions output parameters.
    765     for (AudioFrameList::iterator iter = activeList.begin();
    766          iter != activeList.end();
    767          ++iter) {
    768         mixList->push_back(*iter);
    769     }
    770     activeList.clear();
    771     // Always mix a constant number of AudioFrames. If there aren't enough
    772     // active participants mix passive ones. Starting with those that was mixed
    773     // last iteration.
    774     for (ParticipantFramePairList::iterator iter = passiveWasMixedList.begin();
    775          iter != passiveWasMixedList.end();
    776          ++iter) {
    777         if(mixList->size() < maxAudioFrameCounter + mixListStartSize) {
    778             mixList->push_back((*iter)->audioFrame);
    779             (*mixParticipantList)[(*iter)->audioFrame->id_] =
    780                 (*iter)->participant;
    781             assert(mixParticipantList->size() <=
    782                    kMaximumAmountOfMixedParticipants);
    783         } else {
    784             _audioFramePool->PushMemory((*iter)->audioFrame);
    785         }
    786         delete *iter;
    787     }
    788     // And finally the ones that have not been mixed for a while.
    789     for (ParticipantFramePairList::iterator iter =
    790              passiveWasNotMixedList.begin();
    791          iter != passiveWasNotMixedList.end();
    792          ++iter) {
    793         if(mixList->size() <  maxAudioFrameCounter + mixListStartSize) {
    794           mixList->push_back((*iter)->audioFrame);
    795             (*mixParticipantList)[(*iter)->audioFrame->id_] =
    796                 (*iter)->participant;
    797             assert(mixParticipantList->size() <=
    798                    kMaximumAmountOfMixedParticipants);
    799         } else {
    800             _audioFramePool->PushMemory((*iter)->audioFrame);
    801         }
    802         delete *iter;
    803     }
    804     assert(maxAudioFrameCounter + mixListStartSize >= mixList->size());
    805     maxAudioFrameCounter += mixListStartSize - mixList->size();
    806 }
    807 
    808 void AudioConferenceMixerImpl::GetAdditionalAudio(
    809     AudioFrameList* additionalFramesList) {
    810     WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
    811                  "GetAdditionalAudio(additionalFramesList)");
    812     // The GetAudioFrame() callback may result in the participant being removed
    813     // from additionalParticipantList_. If that happens it will invalidate any
    814     // iterators. Create a copy of the participants list such that the list of
    815     // participants can be traversed safely.
    816     MixerParticipantList additionalParticipantList;
    817     additionalParticipantList.insert(additionalParticipantList.begin(),
    818                                      _additionalParticipantList.begin(),
    819                                      _additionalParticipantList.end());
    820 
    821     for (MixerParticipantList::iterator participant =
    822              additionalParticipantList.begin();
    823          participant != additionalParticipantList.end();
    824          ++participant) {
    825         AudioFrame* audioFrame = NULL;
    826         if(_audioFramePool->PopMemory(audioFrame) == -1) {
    827             WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
    828                          "failed PopMemory() call");
    829             assert(false);
    830             return;
    831         }
    832         audioFrame->sample_rate_hz_ = _outputFrequency;
    833         if((*participant)->GetAudioFrame(_id, *audioFrame) != 0) {
    834             WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
    835                          "failed to GetAudioFrame() from participant");
    836             _audioFramePool->PushMemory(audioFrame);
    837             continue;
    838         }
    839         if(audioFrame->samples_per_channel_ == 0) {
    840             // Empty frame. Don't use it.
    841             _audioFramePool->PushMemory(audioFrame);
    842             continue;
    843         }
    844         additionalFramesList->push_back(audioFrame);
    845     }
    846 }
    847 
    848 void AudioConferenceMixerImpl::UpdateMixedStatus(
    849     std::map<int, MixerParticipant*>& mixedParticipantsMap) {
    850     WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
    851                  "UpdateMixedStatus(mixedParticipantsMap)");
    852     assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);
    853 
    854     // Loop through all participants. If they are in the mix map they
    855     // were mixed.
    856     for (MixerParticipantList::iterator participant = _participantList.begin();
    857          participant != _participantList.end();
    858          ++participant) {
    859         bool isMixed = false;
    860         for (std::map<int, MixerParticipant*>::iterator it =
    861                  mixedParticipantsMap.begin();
    862              it != mixedParticipantsMap.end();
    863              ++it) {
    864           if (it->second == *participant) {
    865             isMixed = true;
    866             break;
    867           }
    868         }
    869         (*participant)->_mixHistory->SetIsMixed(isMixed);
    870     }
    871 }
    872 
    873 void AudioConferenceMixerImpl::ClearAudioFrameList(
    874     AudioFrameList* audioFrameList) {
    875     WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
    876                  "ClearAudioFrameList(audioFrameList)");
    877     for (AudioFrameList::iterator iter = audioFrameList->begin();
    878          iter != audioFrameList->end();
    879          ++iter) {
    880         _audioFramePool->PushMemory(*iter);
    881     }
    882     audioFrameList->clear();
    883 }
    884 
    885 void AudioConferenceMixerImpl::UpdateVADPositiveParticipants(
    886     AudioFrameList* mixList) {
    887     WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
    888                  "UpdateVADPositiveParticipants(mixList)");
    889 
    890     for (AudioFrameList::iterator iter = mixList->begin();
    891          iter != mixList->end();
    892          ++iter) {
    893         CalculateEnergy(**iter);
    894         if((*iter)->vad_activity_ == AudioFrame::kVadActive) {
    895             _scratchVadPositiveParticipants[
    896                 _scratchVadPositiveParticipantsAmount].participant =
    897                 (*iter)->id_;
    898             // TODO(andrew): to what should this be set?
    899             _scratchVadPositiveParticipants[
    900                 _scratchVadPositiveParticipantsAmount].level = 0;
    901             _scratchVadPositiveParticipantsAmount++;
    902         }
    903     }
    904 }
    905 
    906 bool AudioConferenceMixerImpl::IsParticipantInList(
    907     MixerParticipant& participant,
    908     MixerParticipantList* participantList) const {
    909     WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
    910                  "IsParticipantInList(participant,participantList)");
    911     for (MixerParticipantList::const_iterator iter = participantList->begin();
    912          iter != participantList->end();
    913          ++iter) {
    914         if(&participant == *iter) {
    915             return true;
    916         }
    917     }
    918     return false;
    919 }
    920 
    921 bool AudioConferenceMixerImpl::AddParticipantToList(
    922     MixerParticipant& participant,
    923     MixerParticipantList* participantList) {
    924     WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
    925                  "AddParticipantToList(participant, participantList)");
    926     participantList->push_back(&participant);
    927     // Make sure that the mixed status is correct for new MixerParticipant.
    928     participant._mixHistory->ResetMixedStatus();
    929     return true;
    930 }
    931 
    932 bool AudioConferenceMixerImpl::RemoveParticipantFromList(
    933     MixerParticipant& participant,
    934     MixerParticipantList* participantList) {
    935     WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
    936                  "RemoveParticipantFromList(participant, participantList)");
    937     for (MixerParticipantList::iterator iter = participantList->begin();
    938          iter != participantList->end();
    939          ++iter) {
    940         if(*iter == &participant) {
    941             participantList->erase(iter);
    942             // Participant is no longer mixed, reset to default.
    943             participant._mixHistory->ResetMixedStatus();
    944             return true;
    945         }
    946     }
    947     return false;
    948 }
    949 
    950 int32_t AudioConferenceMixerImpl::MixFromList(
    951     AudioFrame& mixedAudio,
    952     const AudioFrameList* audioFrameList) {
    953     WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
    954                  "MixFromList(mixedAudio, audioFrameList)");
    955     if(audioFrameList->empty()) return 0;
    956 
    957     uint32_t position = 0;
    958 
    959     if (_numMixedParticipants == 1) {
    960       mixedAudio.timestamp_ = audioFrameList->front()->timestamp_;
    961       mixedAudio.elapsed_time_ms_ = audioFrameList->front()->elapsed_time_ms_;
    962     } else {
    963       // TODO(wu): Issue 3390.
    964       // Audio frame timestamp is only supported in one channel case.
    965       mixedAudio.timestamp_ = 0;
    966       mixedAudio.elapsed_time_ms_ = -1;
    967     }
    968 
    969     for (AudioFrameList::const_iterator iter = audioFrameList->begin();
    970          iter != audioFrameList->end();
    971          ++iter) {
    972         if(position >= kMaximumAmountOfMixedParticipants) {
    973             WEBRTC_TRACE(
    974                 kTraceMemory,
    975                 kTraceAudioMixerServer,
    976                 _id,
    977                 "Trying to mix more than max amount of mixed participants:%d!",
    978                 kMaximumAmountOfMixedParticipants);
    979             // Assert and avoid crash
    980             assert(false);
    981             position = 0;
    982         }
    983         MixFrames(&mixedAudio, (*iter), use_limiter_);
    984 
    985         SetParticipantStatistics(&_scratchMixedParticipants[position],
    986                                  **iter);
    987 
    988         position++;
    989     }
    990 
    991     return 0;
    992 }
    993 
    994 // TODO(andrew): consolidate this function with MixFromList.
    995 int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList(
    996     AudioFrame& mixedAudio,
    997     const AudioFrameList* audioFrameList) {
    998     WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
    999                  "MixAnonomouslyFromList(mixedAudio, audioFrameList)");
   1000 
   1001     if(audioFrameList->empty()) return 0;
   1002 
   1003     for (AudioFrameList::const_iterator iter = audioFrameList->begin();
   1004          iter != audioFrameList->end();
   1005          ++iter) {
   1006         MixFrames(&mixedAudio, *iter, use_limiter_);
   1007     }
   1008     return 0;
   1009 }
   1010 
   1011 bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame& mixedAudio) {
   1012     if (!use_limiter_) {
   1013       return true;
   1014     }
   1015 
   1016     // Smoothly limit the mixed frame.
   1017     const int error = _limiter->ProcessStream(&mixedAudio);
   1018 
   1019     // And now we can safely restore the level. This procedure results in
   1020     // some loss of resolution, deemed acceptable.
   1021     //
   1022     // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
   1023     // and compression gain of 6 dB). However, in the transition frame when this
   1024     // is enabled (moving from one to two participants) it has the potential to
   1025     // create discontinuities in the mixed frame.
   1026     //
   1027     // Instead we double the frame (with addition since left-shifting a
   1028     // negative value is undefined).
   1029     mixedAudio += mixedAudio;
   1030 
   1031     if(error != _limiter->kNoError) {
   1032         WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
   1033                      "Error from AudioProcessing: %d", error);
   1034         assert(false);
   1035         return false;
   1036     }
   1037     return true;
   1038 }
   1039 }  // namespace webrtc
   1040