/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/voice_engine/voe_audio_processing_impl.h"

#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"
#include "webrtc/voice_engine/transmit_mixer.h"
#include "webrtc/voice_engine/voice_engine_impl.h"

// Bails out of the calling function with -1 (the VoE error convention) if
// the engine has not been initialized, recording VE_NOT_INITED.
// TODO(andrew): move to a common place.
#define WEBRTC_VOICE_INIT_CHECK()                        \
  do {                                                   \
    if (!_shared->statistics().Initialized()) {          \
      _shared->SetLastError(VE_NOT_INITED, kTraceError); \
      return -1;                                         \
    }                                                    \
  } while (0)

// Variant of WEBRTC_VOICE_INIT_CHECK() for bool-returning functions:
// returns false instead of -1.
#define WEBRTC_VOICE_INIT_CHECK_BOOL()                   \
  do {                                                   \
    if (!_shared->statistics().Initialized()) {          \
      _shared->SetLastError(VE_NOT_INITED, kTraceError); \
      return false;                                      \
    }                                                    \
  } while (0)

namespace webrtc {

// Default echo-control mode: mobile platforms get the low-complexity
// mobile echo controller (AECM); everything else gets the full AEC.
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
static const EcModes kDefaultEcMode = kEcAecm;
#else
static const EcModes kDefaultEcMode = kEcAec;
#endif

// Returns the VoEAudioProcessing sub-API of |voiceEngine| and adds a
// reference to the engine. Returns NULL when the sub-API is compiled out
// or |voiceEngine| is NULL. The caller must balance the AddRef() with a
// Release() on the returned interface.
VoEAudioProcessing* VoEAudioProcessing::GetInterface(VoiceEngine* voiceEngine) {
#ifndef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
  return NULL;
#else
  if (NULL == voiceEngine) {
    return NULL;
  }
  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
  s->AddRef();
  return s;
#endif
}

#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
VoEAudioProcessingImpl::VoEAudioProcessingImpl(voe::SharedData* shared) 62 : _isAecMode(kDefaultEcMode == kEcAec), _shared(shared) { 63 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1), 64 "VoEAudioProcessingImpl::VoEAudioProcessingImpl() - ctor"); 65 } 66 67 VoEAudioProcessingImpl::~VoEAudioProcessingImpl() { 68 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1), 69 "VoEAudioProcessingImpl::~VoEAudioProcessingImpl() - dtor"); 70 } 71 72 int VoEAudioProcessingImpl::SetNsStatus(bool enable, NsModes mode) { 73 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 74 "SetNsStatus(enable=%d, mode=%d)", enable, mode); 75 #ifdef WEBRTC_VOICE_ENGINE_NR 76 if (!_shared->statistics().Initialized()) { 77 _shared->SetLastError(VE_NOT_INITED, kTraceError); 78 return -1; 79 } 80 81 NoiseSuppression::Level nsLevel = kDefaultNsMode; 82 switch (mode) { 83 case kNsDefault: 84 nsLevel = kDefaultNsMode; 85 break; 86 case kNsUnchanged: 87 nsLevel = _shared->audio_processing()->noise_suppression()->level(); 88 break; 89 case kNsConference: 90 nsLevel = NoiseSuppression::kHigh; 91 break; 92 case kNsLowSuppression: 93 nsLevel = NoiseSuppression::kLow; 94 break; 95 case kNsModerateSuppression: 96 nsLevel = NoiseSuppression::kModerate; 97 break; 98 case kNsHighSuppression: 99 nsLevel = NoiseSuppression::kHigh; 100 break; 101 case kNsVeryHighSuppression: 102 nsLevel = NoiseSuppression::kVeryHigh; 103 break; 104 } 105 106 if (_shared->audio_processing()->noise_suppression()->set_level(nsLevel) != 107 0) { 108 _shared->SetLastError(VE_APM_ERROR, kTraceError, 109 "SetNsStatus() failed to set Ns mode"); 110 return -1; 111 } 112 if (_shared->audio_processing()->noise_suppression()->Enable(enable) != 0) { 113 _shared->SetLastError(VE_APM_ERROR, kTraceError, 114 "SetNsStatus() failed to set Ns state"); 115 return -1; 116 } 117 118 return 0; 119 #else 120 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 121 
"SetNsStatus() Ns is not supported"); 122 return -1; 123 #endif 124 } 125 126 int VoEAudioProcessingImpl::GetNsStatus(bool& enabled, NsModes& mode) { 127 #ifdef WEBRTC_VOICE_ENGINE_NR 128 if (!_shared->statistics().Initialized()) { 129 _shared->SetLastError(VE_NOT_INITED, kTraceError); 130 return -1; 131 } 132 133 enabled = _shared->audio_processing()->noise_suppression()->is_enabled(); 134 NoiseSuppression::Level nsLevel = 135 _shared->audio_processing()->noise_suppression()->level(); 136 137 switch (nsLevel) { 138 case NoiseSuppression::kLow: 139 mode = kNsLowSuppression; 140 break; 141 case NoiseSuppression::kModerate: 142 mode = kNsModerateSuppression; 143 break; 144 case NoiseSuppression::kHigh: 145 mode = kNsHighSuppression; 146 break; 147 case NoiseSuppression::kVeryHigh: 148 mode = kNsVeryHighSuppression; 149 break; 150 } 151 return 0; 152 #else 153 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 154 "GetNsStatus() Ns is not supported"); 155 return -1; 156 #endif 157 } 158 159 int VoEAudioProcessingImpl::SetAgcStatus(bool enable, AgcModes mode) { 160 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 161 "SetAgcStatus(enable=%d, mode=%d)", enable, mode); 162 #ifdef WEBRTC_VOICE_ENGINE_AGC 163 if (!_shared->statistics().Initialized()) { 164 _shared->SetLastError(VE_NOT_INITED, kTraceError); 165 return -1; 166 } 167 168 #if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID) 169 if (mode == kAgcAdaptiveAnalog) { 170 _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, 171 "SetAgcStatus() invalid Agc mode for mobile device"); 172 return -1; 173 } 174 #endif 175 176 GainControl::Mode agcMode = kDefaultAgcMode; 177 switch (mode) { 178 case kAgcDefault: 179 agcMode = kDefaultAgcMode; 180 break; 181 case kAgcUnchanged: 182 agcMode = _shared->audio_processing()->gain_control()->mode(); 183 break; 184 case kAgcFixedDigital: 185 agcMode = GainControl::kFixedDigital; 186 break; 187 case kAgcAdaptiveAnalog: 188 agcMode = 
GainControl::kAdaptiveAnalog; 189 break; 190 case kAgcAdaptiveDigital: 191 agcMode = GainControl::kAdaptiveDigital; 192 break; 193 } 194 195 if (_shared->audio_processing()->gain_control()->set_mode(agcMode) != 0) { 196 _shared->SetLastError(VE_APM_ERROR, kTraceError, 197 "SetAgcStatus() failed to set Agc mode"); 198 return -1; 199 } 200 if (_shared->audio_processing()->gain_control()->Enable(enable) != 0) { 201 _shared->SetLastError(VE_APM_ERROR, kTraceError, 202 "SetAgcStatus() failed to set Agc state"); 203 return -1; 204 } 205 206 if (agcMode != GainControl::kFixedDigital) { 207 // Set Agc state in the ADM when adaptive Agc mode has been selected. 208 // Note that we also enable the ADM Agc when Adaptive Digital mode is 209 // used since we want to be able to provide the APM with updated mic 210 // levels when the user modifies the mic level manually. 211 if (_shared->audio_device()->SetAGC(enable) != 0) { 212 _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning, 213 "SetAgcStatus() failed to set Agc mode"); 214 } 215 } 216 217 return 0; 218 #else 219 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 220 "SetAgcStatus() Agc is not supported"); 221 return -1; 222 #endif 223 } 224 225 int VoEAudioProcessingImpl::GetAgcStatus(bool& enabled, AgcModes& mode) { 226 #ifdef WEBRTC_VOICE_ENGINE_AGC 227 if (!_shared->statistics().Initialized()) { 228 _shared->SetLastError(VE_NOT_INITED, kTraceError); 229 return -1; 230 } 231 232 enabled = _shared->audio_processing()->gain_control()->is_enabled(); 233 GainControl::Mode agcMode = 234 _shared->audio_processing()->gain_control()->mode(); 235 236 switch (agcMode) { 237 case GainControl::kFixedDigital: 238 mode = kAgcFixedDigital; 239 break; 240 case GainControl::kAdaptiveAnalog: 241 mode = kAgcAdaptiveAnalog; 242 break; 243 case GainControl::kAdaptiveDigital: 244 mode = kAgcAdaptiveDigital; 245 break; 246 } 247 248 return 0; 249 #else 250 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 251 
"GetAgcStatus() Agc is not supported"); 252 return -1; 253 #endif 254 } 255 256 int VoEAudioProcessingImpl::SetAgcConfig(AgcConfig config) { 257 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 258 "SetAgcConfig()"); 259 #ifdef WEBRTC_VOICE_ENGINE_AGC 260 if (!_shared->statistics().Initialized()) { 261 _shared->SetLastError(VE_NOT_INITED, kTraceError); 262 return -1; 263 } 264 265 if (_shared->audio_processing()->gain_control()->set_target_level_dbfs( 266 config.targetLeveldBOv) != 0) { 267 _shared->SetLastError(VE_APM_ERROR, kTraceError, 268 "SetAgcConfig() failed to set target peak |level|" 269 " (or envelope) of the Agc"); 270 return -1; 271 } 272 if (_shared->audio_processing()->gain_control()->set_compression_gain_db( 273 config.digitalCompressionGaindB) != 0) { 274 _shared->SetLastError(VE_APM_ERROR, kTraceError, 275 "SetAgcConfig() failed to set the range in |gain| " 276 "the digital compression stage may apply"); 277 return -1; 278 } 279 if (_shared->audio_processing()->gain_control()->enable_limiter( 280 config.limiterEnable) != 0) { 281 _shared->SetLastError( 282 VE_APM_ERROR, kTraceError, 283 "SetAgcConfig() failed to set hard limiter to the signal"); 284 return -1; 285 } 286 287 return 0; 288 #else 289 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 290 "SetAgcConfig() EC is not supported"); 291 return -1; 292 #endif 293 } 294 295 int VoEAudioProcessingImpl::GetAgcConfig(AgcConfig& config) { 296 #ifdef WEBRTC_VOICE_ENGINE_AGC 297 if (!_shared->statistics().Initialized()) { 298 _shared->SetLastError(VE_NOT_INITED, kTraceError); 299 return -1; 300 } 301 302 config.targetLeveldBOv = 303 _shared->audio_processing()->gain_control()->target_level_dbfs(); 304 config.digitalCompressionGaindB = 305 _shared->audio_processing()->gain_control()->compression_gain_db(); 306 config.limiterEnable = 307 _shared->audio_processing()->gain_control()->is_limiter_enabled(); 308 309 return 0; 310 #else 311 
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 312 "GetAgcConfig() EC is not supported"); 313 return -1; 314 #endif 315 } 316 317 int VoEAudioProcessingImpl::SetRxNsStatus(int channel, 318 bool enable, 319 NsModes mode) { 320 #ifdef WEBRTC_VOICE_ENGINE_NR 321 if (!_shared->statistics().Initialized()) { 322 _shared->SetLastError(VE_NOT_INITED, kTraceError); 323 return -1; 324 } 325 326 voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); 327 voe::Channel* channelPtr = ch.channel(); 328 if (channelPtr == NULL) { 329 _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, 330 "SetRxNsStatus() failed to locate channel"); 331 return -1; 332 } 333 return channelPtr->SetRxNsStatus(enable, mode); 334 #else 335 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 336 "SetRxNsStatus() NS is not supported"); 337 return -1; 338 #endif 339 } 340 341 int VoEAudioProcessingImpl::GetRxNsStatus(int channel, 342 bool& enabled, 343 NsModes& mode) { 344 #ifdef WEBRTC_VOICE_ENGINE_NR 345 if (!_shared->statistics().Initialized()) { 346 _shared->SetLastError(VE_NOT_INITED, kTraceError); 347 return -1; 348 } 349 350 voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); 351 voe::Channel* channelPtr = ch.channel(); 352 if (channelPtr == NULL) { 353 _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, 354 "GetRxNsStatus() failed to locate channel"); 355 return -1; 356 } 357 return channelPtr->GetRxNsStatus(enabled, mode); 358 #else 359 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 360 "GetRxNsStatus() NS is not supported"); 361 return -1; 362 #endif 363 } 364 365 int VoEAudioProcessingImpl::SetRxAgcStatus(int channel, 366 bool enable, 367 AgcModes mode) { 368 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 369 "SetRxAgcStatus(channel=%d, enable=%d, mode=%d)", channel, 370 (int)enable, (int)mode); 371 #ifdef WEBRTC_VOICE_ENGINE_AGC 372 if (!_shared->statistics().Initialized()) { 373 
_shared->SetLastError(VE_NOT_INITED, kTraceError); 374 return -1; 375 } 376 377 voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); 378 voe::Channel* channelPtr = ch.channel(); 379 if (channelPtr == NULL) { 380 _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, 381 "SetRxAgcStatus() failed to locate channel"); 382 return -1; 383 } 384 return channelPtr->SetRxAgcStatus(enable, mode); 385 #else 386 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 387 "SetRxAgcStatus() Agc is not supported"); 388 return -1; 389 #endif 390 } 391 392 int VoEAudioProcessingImpl::GetRxAgcStatus(int channel, 393 bool& enabled, 394 AgcModes& mode) { 395 #ifdef WEBRTC_VOICE_ENGINE_AGC 396 if (!_shared->statistics().Initialized()) { 397 _shared->SetLastError(VE_NOT_INITED, kTraceError); 398 return -1; 399 } 400 401 voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); 402 voe::Channel* channelPtr = ch.channel(); 403 if (channelPtr == NULL) { 404 _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, 405 "GetRxAgcStatus() failed to locate channel"); 406 return -1; 407 } 408 return channelPtr->GetRxAgcStatus(enabled, mode); 409 #else 410 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 411 "GetRxAgcStatus() Agc is not supported"); 412 return -1; 413 #endif 414 } 415 416 int VoEAudioProcessingImpl::SetRxAgcConfig(int channel, AgcConfig config) { 417 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 418 "SetRxAgcConfig(channel=%d)", channel); 419 #ifdef WEBRTC_VOICE_ENGINE_AGC 420 if (!_shared->statistics().Initialized()) { 421 _shared->SetLastError(VE_NOT_INITED, kTraceError); 422 return -1; 423 } 424 425 voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); 426 voe::Channel* channelPtr = ch.channel(); 427 if (channelPtr == NULL) { 428 _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, 429 "SetRxAgcConfig() failed to locate channel"); 430 return -1; 431 } 432 return 
channelPtr->SetRxAgcConfig(config); 433 #else 434 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 435 "SetRxAgcConfig() Agc is not supported"); 436 return -1; 437 #endif 438 } 439 440 int VoEAudioProcessingImpl::GetRxAgcConfig(int channel, AgcConfig& config) { 441 #ifdef WEBRTC_VOICE_ENGINE_AGC 442 if (!_shared->statistics().Initialized()) { 443 _shared->SetLastError(VE_NOT_INITED, kTraceError); 444 return -1; 445 } 446 447 voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); 448 voe::Channel* channelPtr = ch.channel(); 449 if (channelPtr == NULL) { 450 _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, 451 "GetRxAgcConfig() failed to locate channel"); 452 return -1; 453 } 454 return channelPtr->GetRxAgcConfig(config); 455 #else 456 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 457 "GetRxAgcConfig() Agc is not supported"); 458 return -1; 459 #endif 460 } 461 462 bool VoEAudioProcessing::DriftCompensationSupported() { 463 #if defined(WEBRTC_DRIFT_COMPENSATION_SUPPORTED) 464 return true; 465 #else 466 return false; 467 #endif 468 } 469 470 int VoEAudioProcessingImpl::EnableDriftCompensation(bool enable) { 471 WEBRTC_VOICE_INIT_CHECK(); 472 473 if (!DriftCompensationSupported()) { 474 _shared->SetLastError( 475 VE_APM_ERROR, kTraceWarning, 476 "Drift compensation is not supported on this platform."); 477 return -1; 478 } 479 480 EchoCancellation* aec = _shared->audio_processing()->echo_cancellation(); 481 if (aec->enable_drift_compensation(enable) != 0) { 482 _shared->SetLastError(VE_APM_ERROR, kTraceError, 483 "aec->enable_drift_compensation() failed"); 484 return -1; 485 } 486 return 0; 487 } 488 489 bool VoEAudioProcessingImpl::DriftCompensationEnabled() { 490 WEBRTC_VOICE_INIT_CHECK_BOOL(); 491 492 EchoCancellation* aec = _shared->audio_processing()->echo_cancellation(); 493 return aec->is_drift_compensation_enabled(); 494 } 495 496 int VoEAudioProcessingImpl::SetEcStatus(bool enable, EcModes mode) { 497 
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 498 "SetEcStatus(enable=%d, mode=%d)", enable, mode); 499 #ifdef WEBRTC_VOICE_ENGINE_ECHO 500 if (!_shared->statistics().Initialized()) { 501 _shared->SetLastError(VE_NOT_INITED, kTraceError); 502 return -1; 503 } 504 505 // AEC mode 506 if ((mode == kEcDefault) || (mode == kEcConference) || (mode == kEcAec) || 507 ((mode == kEcUnchanged) && (_isAecMode == true))) { 508 if (enable) { 509 // Disable the AECM before enable the AEC 510 if (_shared->audio_processing()->echo_control_mobile()->is_enabled()) { 511 _shared->SetLastError(VE_APM_ERROR, kTraceWarning, 512 "SetEcStatus() disable AECM before enabling AEC"); 513 if (_shared->audio_processing()->echo_control_mobile()->Enable(false) != 514 0) { 515 _shared->SetLastError(VE_APM_ERROR, kTraceError, 516 "SetEcStatus() failed to disable AECM"); 517 return -1; 518 } 519 } 520 } 521 if (_shared->audio_processing()->echo_cancellation()->Enable(enable) != 0) { 522 _shared->SetLastError(VE_APM_ERROR, kTraceError, 523 "SetEcStatus() failed to set AEC state"); 524 return -1; 525 } 526 if (mode == kEcConference) { 527 if (_shared->audio_processing() 528 ->echo_cancellation() 529 ->set_suppression_level(EchoCancellation::kHighSuppression) != 530 0) { 531 _shared->SetLastError( 532 VE_APM_ERROR, kTraceError, 533 "SetEcStatus() failed to set aggressiveness to high"); 534 return -1; 535 } 536 } else { 537 if (_shared->audio_processing() 538 ->echo_cancellation() 539 ->set_suppression_level(EchoCancellation::kModerateSuppression) != 540 0) { 541 _shared->SetLastError( 542 VE_APM_ERROR, kTraceError, 543 "SetEcStatus() failed to set aggressiveness to moderate"); 544 return -1; 545 } 546 } 547 548 _isAecMode = true; 549 } else if ((mode == kEcAecm) || 550 ((mode == kEcUnchanged) && (_isAecMode == false))) { 551 if (enable) { 552 // Disable the AEC before enable the AECM 553 if (_shared->audio_processing()->echo_cancellation()->is_enabled()) { 554 
_shared->SetLastError(VE_APM_ERROR, kTraceWarning, 555 "SetEcStatus() disable AEC before enabling AECM"); 556 if (_shared->audio_processing()->echo_cancellation()->Enable(false) != 557 0) { 558 _shared->SetLastError(VE_APM_ERROR, kTraceError, 559 "SetEcStatus() failed to disable AEC"); 560 return -1; 561 } 562 } 563 } 564 if (_shared->audio_processing()->echo_control_mobile()->Enable(enable) != 565 0) { 566 _shared->SetLastError(VE_APM_ERROR, kTraceError, 567 "SetEcStatus() failed to set AECM state"); 568 return -1; 569 } 570 _isAecMode = false; 571 } else { 572 _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, 573 "SetEcStatus() invalid EC mode"); 574 return -1; 575 } 576 577 return 0; 578 #else 579 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 580 "SetEcStatus() EC is not supported"); 581 return -1; 582 #endif 583 } 584 585 int VoEAudioProcessingImpl::GetEcStatus(bool& enabled, EcModes& mode) { 586 #ifdef WEBRTC_VOICE_ENGINE_ECHO 587 if (!_shared->statistics().Initialized()) { 588 _shared->SetLastError(VE_NOT_INITED, kTraceError); 589 return -1; 590 } 591 592 if (_isAecMode == true) { 593 mode = kEcAec; 594 enabled = _shared->audio_processing()->echo_cancellation()->is_enabled(); 595 } else { 596 mode = kEcAecm; 597 enabled = _shared->audio_processing()->echo_control_mobile()->is_enabled(); 598 } 599 600 return 0; 601 #else 602 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 603 "GetEcStatus() EC is not supported"); 604 return -1; 605 #endif 606 } 607 608 void VoEAudioProcessingImpl::SetDelayOffsetMs(int offset) { 609 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 610 "SetDelayOffsetMs(offset = %d)", offset); 611 _shared->audio_processing()->set_delay_offset_ms(offset); 612 } 613 614 int VoEAudioProcessingImpl::DelayOffsetMs() { 615 return _shared->audio_processing()->delay_offset_ms(); 616 } 617 618 int VoEAudioProcessingImpl::SetAecmMode(AecmModes mode, bool enableCNG) { 619 WEBRTC_TRACE(kTraceApiCall, 
kTraceVoice, VoEId(_shared->instance_id(), -1), 620 "SetAECMMode(mode = %d)", mode); 621 #ifdef WEBRTC_VOICE_ENGINE_ECHO 622 if (!_shared->statistics().Initialized()) { 623 _shared->SetLastError(VE_NOT_INITED, kTraceError); 624 return -1; 625 } 626 627 EchoControlMobile::RoutingMode aecmMode( 628 EchoControlMobile::kQuietEarpieceOrHeadset); 629 630 switch (mode) { 631 case kAecmQuietEarpieceOrHeadset: 632 aecmMode = EchoControlMobile::kQuietEarpieceOrHeadset; 633 break; 634 case kAecmEarpiece: 635 aecmMode = EchoControlMobile::kEarpiece; 636 break; 637 case kAecmLoudEarpiece: 638 aecmMode = EchoControlMobile::kLoudEarpiece; 639 break; 640 case kAecmSpeakerphone: 641 aecmMode = EchoControlMobile::kSpeakerphone; 642 break; 643 case kAecmLoudSpeakerphone: 644 aecmMode = EchoControlMobile::kLoudSpeakerphone; 645 break; 646 } 647 648 if (_shared->audio_processing()->echo_control_mobile()->set_routing_mode( 649 aecmMode) != 0) { 650 _shared->SetLastError(VE_APM_ERROR, kTraceError, 651 "SetAECMMode() failed to set AECM routing mode"); 652 return -1; 653 } 654 if (_shared->audio_processing()->echo_control_mobile()->enable_comfort_noise( 655 enableCNG) != 0) { 656 _shared->SetLastError( 657 VE_APM_ERROR, kTraceError, 658 "SetAECMMode() failed to set comfort noise state for AECM"); 659 return -1; 660 } 661 662 return 0; 663 #else 664 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 665 "SetAECMMode() EC is not supported"); 666 return -1; 667 #endif 668 } 669 670 int VoEAudioProcessingImpl::GetAecmMode(AecmModes& mode, bool& enabledCNG) { 671 #ifdef WEBRTC_VOICE_ENGINE_ECHO 672 if (!_shared->statistics().Initialized()) { 673 _shared->SetLastError(VE_NOT_INITED, kTraceError); 674 return -1; 675 } 676 677 enabledCNG = false; 678 679 EchoControlMobile::RoutingMode aecmMode = 680 _shared->audio_processing()->echo_control_mobile()->routing_mode(); 681 enabledCNG = _shared->audio_processing() 682 ->echo_control_mobile() 683 ->is_comfort_noise_enabled(); 684 685 switch 
(aecmMode) { 686 case EchoControlMobile::kQuietEarpieceOrHeadset: 687 mode = kAecmQuietEarpieceOrHeadset; 688 break; 689 case EchoControlMobile::kEarpiece: 690 mode = kAecmEarpiece; 691 break; 692 case EchoControlMobile::kLoudEarpiece: 693 mode = kAecmLoudEarpiece; 694 break; 695 case EchoControlMobile::kSpeakerphone: 696 mode = kAecmSpeakerphone; 697 break; 698 case EchoControlMobile::kLoudSpeakerphone: 699 mode = kAecmLoudSpeakerphone; 700 break; 701 } 702 703 return 0; 704 #else 705 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 706 "GetAECMMode() EC is not supported"); 707 return -1; 708 #endif 709 } 710 711 int VoEAudioProcessingImpl::EnableHighPassFilter(bool enable) { 712 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 713 "EnableHighPassFilter(%d)", enable); 714 if (_shared->audio_processing()->high_pass_filter()->Enable(enable) != 715 AudioProcessing::kNoError) { 716 _shared->SetLastError(VE_APM_ERROR, kTraceError, 717 "HighPassFilter::Enable() failed."); 718 return -1; 719 } 720 721 return 0; 722 } 723 724 bool VoEAudioProcessingImpl::IsHighPassFilterEnabled() { 725 return _shared->audio_processing()->high_pass_filter()->is_enabled(); 726 } 727 728 int VoEAudioProcessingImpl::RegisterRxVadObserver(int channel, 729 VoERxVadCallback& observer) { 730 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 731 "RegisterRxVadObserver()"); 732 if (!_shared->statistics().Initialized()) { 733 _shared->SetLastError(VE_NOT_INITED, kTraceError); 734 return -1; 735 } 736 voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); 737 voe::Channel* channelPtr = ch.channel(); 738 if (channelPtr == NULL) { 739 _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, 740 "RegisterRxVadObserver() failed to locate channel"); 741 return -1; 742 } 743 return channelPtr->RegisterRxVadObserver(observer); 744 } 745 746 int VoEAudioProcessingImpl::DeRegisterRxVadObserver(int channel) { 747 
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 748 "DeRegisterRxVadObserver()"); 749 if (!_shared->statistics().Initialized()) { 750 _shared->SetLastError(VE_NOT_INITED, kTraceError); 751 return -1; 752 } 753 voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); 754 voe::Channel* channelPtr = ch.channel(); 755 if (channelPtr == NULL) { 756 _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, 757 "DeRegisterRxVadObserver() failed to locate channel"); 758 return -1; 759 } 760 761 return channelPtr->DeRegisterRxVadObserver(); 762 } 763 764 int VoEAudioProcessingImpl::VoiceActivityIndicator(int channel) { 765 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 766 "VoiceActivityIndicator(channel=%d)", channel); 767 if (!_shared->statistics().Initialized()) { 768 _shared->SetLastError(VE_NOT_INITED, kTraceError); 769 return -1; 770 } 771 772 voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); 773 voe::Channel* channelPtr = ch.channel(); 774 if (channelPtr == NULL) { 775 _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, 776 "DeRegisterRxVadObserver() failed to locate channel"); 777 return -1; 778 } 779 int activity(-1); 780 channelPtr->VoiceActivityIndicator(activity); 781 782 return activity; 783 } 784 785 int VoEAudioProcessingImpl::SetEcMetricsStatus(bool enable) { 786 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 787 "SetEcMetricsStatus(enable=%d)", enable); 788 #ifdef WEBRTC_VOICE_ENGINE_ECHO 789 if (!_shared->statistics().Initialized()) { 790 _shared->SetLastError(VE_NOT_INITED, kTraceError); 791 return -1; 792 } 793 794 if ((_shared->audio_processing()->echo_cancellation()->enable_metrics( 795 enable) != 0) || 796 (_shared->audio_processing()->echo_cancellation()->enable_delay_logging( 797 enable) != 0)) { 798 _shared->SetLastError(VE_APM_ERROR, kTraceError, 799 "SetEcMetricsStatus() unable to set EC metrics mode"); 800 return -1; 801 } 
802 return 0; 803 #else 804 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 805 "SetEcStatus() EC is not supported"); 806 return -1; 807 #endif 808 } 809 810 int VoEAudioProcessingImpl::GetEcMetricsStatus(bool& enabled) { 811 #ifdef WEBRTC_VOICE_ENGINE_ECHO 812 if (!_shared->statistics().Initialized()) { 813 _shared->SetLastError(VE_NOT_INITED, kTraceError); 814 return -1; 815 } 816 817 bool echo_mode = 818 _shared->audio_processing()->echo_cancellation()->are_metrics_enabled(); 819 bool delay_mode = _shared->audio_processing() 820 ->echo_cancellation() 821 ->is_delay_logging_enabled(); 822 823 if (echo_mode != delay_mode) { 824 _shared->SetLastError( 825 VE_APM_ERROR, kTraceError, 826 "GetEcMetricsStatus() delay logging and echo mode are not the same"); 827 return -1; 828 } 829 830 enabled = echo_mode; 831 832 return 0; 833 #else 834 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 835 "SetEcStatus() EC is not supported"); 836 return -1; 837 #endif 838 } 839 840 int VoEAudioProcessingImpl::GetEchoMetrics(int& ERL, 841 int& ERLE, 842 int& RERL, 843 int& A_NLP) { 844 #ifdef WEBRTC_VOICE_ENGINE_ECHO 845 if (!_shared->statistics().Initialized()) { 846 _shared->SetLastError(VE_NOT_INITED, kTraceError); 847 return -1; 848 } 849 if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) { 850 _shared->SetLastError( 851 VE_APM_ERROR, kTraceWarning, 852 "GetEchoMetrics() AudioProcessingModule AEC is not enabled"); 853 return -1; 854 } 855 856 // Get Echo Metrics from Audio Processing Module. 857 EchoCancellation::Metrics echoMetrics; 858 if (_shared->audio_processing()->echo_cancellation()->GetMetrics( 859 &echoMetrics)) { 860 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1), 861 "GetEchoMetrics(), AudioProcessingModule metrics error"); 862 return -1; 863 } 864 865 // Echo quality metrics. 
866 ERL = echoMetrics.echo_return_loss.instant; 867 ERLE = echoMetrics.echo_return_loss_enhancement.instant; 868 RERL = echoMetrics.residual_echo_return_loss.instant; 869 A_NLP = echoMetrics.a_nlp.instant; 870 871 return 0; 872 #else 873 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 874 "SetEcStatus() EC is not supported"); 875 return -1; 876 #endif 877 } 878 879 int VoEAudioProcessingImpl::GetEcDelayMetrics(int& delay_median, 880 int& delay_std, 881 float& fraction_poor_delays) { 882 #ifdef WEBRTC_VOICE_ENGINE_ECHO 883 if (!_shared->statistics().Initialized()) { 884 _shared->SetLastError(VE_NOT_INITED, kTraceError); 885 return -1; 886 } 887 if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) { 888 _shared->SetLastError( 889 VE_APM_ERROR, kTraceWarning, 890 "GetEcDelayMetrics() AudioProcessingModule AEC is not enabled"); 891 return -1; 892 } 893 894 int median = 0; 895 int std = 0; 896 float poor_fraction = 0; 897 // Get delay-logging values from Audio Processing Module. 
898 if (_shared->audio_processing()->echo_cancellation()->GetDelayMetrics( 899 &median, &std, &poor_fraction)) { 900 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1), 901 "GetEcDelayMetrics(), AudioProcessingModule delay-logging " 902 "error"); 903 return -1; 904 } 905 906 // EC delay-logging metrics 907 delay_median = median; 908 delay_std = std; 909 fraction_poor_delays = poor_fraction; 910 911 return 0; 912 #else 913 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 914 "SetEcStatus() EC is not supported"); 915 return -1; 916 #endif 917 } 918 919 int VoEAudioProcessingImpl::StartDebugRecording(const char* fileNameUTF8) { 920 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 921 "StartDebugRecording()"); 922 if (!_shared->statistics().Initialized()) { 923 _shared->SetLastError(VE_NOT_INITED, kTraceError); 924 return -1; 925 } 926 927 return _shared->audio_processing()->StartDebugRecording(fileNameUTF8); 928 } 929 930 int VoEAudioProcessingImpl::StartDebugRecording(FILE* file_handle) { 931 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 932 "StartDebugRecording()"); 933 if (!_shared->statistics().Initialized()) { 934 _shared->SetLastError(VE_NOT_INITED, kTraceError); 935 return -1; 936 } 937 938 return _shared->audio_processing()->StartDebugRecording(file_handle); 939 } 940 941 int VoEAudioProcessingImpl::StopDebugRecording() { 942 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 943 "StopDebugRecording()"); 944 if (!_shared->statistics().Initialized()) { 945 _shared->SetLastError(VE_NOT_INITED, kTraceError); 946 return -1; 947 } 948 949 return _shared->audio_processing()->StopDebugRecording(); 950 } 951 952 int VoEAudioProcessingImpl::SetTypingDetectionStatus(bool enable) { 953 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 954 "SetTypingDetectionStatus()"); 955 #if !defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION) 956 
NOT_SUPPORTED(_shared->statistics()); 957 #else 958 if (!_shared->statistics().Initialized()) { 959 _shared->SetLastError(VE_NOT_INITED, kTraceError); 960 return -1; 961 } 962 963 // Just use the VAD state to determine if we should enable typing detection 964 // or not 965 966 if (_shared->audio_processing()->voice_detection()->Enable(enable)) { 967 _shared->SetLastError(VE_APM_ERROR, kTraceWarning, 968 "SetTypingDetectionStatus() failed to set VAD state"); 969 return -1; 970 } 971 if (_shared->audio_processing()->voice_detection()->set_likelihood( 972 VoiceDetection::kVeryLowLikelihood)) { 973 _shared->SetLastError( 974 VE_APM_ERROR, kTraceWarning, 975 "SetTypingDetectionStatus() failed to set VAD likelihood to low"); 976 return -1; 977 } 978 979 return 0; 980 #endif 981 } 982 983 int VoEAudioProcessingImpl::GetTypingDetectionStatus(bool& enabled) { 984 if (!_shared->statistics().Initialized()) { 985 _shared->SetLastError(VE_NOT_INITED, kTraceError); 986 return -1; 987 } 988 // Just use the VAD state to determine if we should enable typing 989 // detection or not 990 991 enabled = _shared->audio_processing()->voice_detection()->is_enabled(); 992 993 return 0; 994 } 995 996 int VoEAudioProcessingImpl::TimeSinceLastTyping(int& seconds) { 997 #if !defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION) 998 NOT_SUPPORTED(_shared->statistics()); 999 #else 1000 if (!_shared->statistics().Initialized()) { 1001 _shared->SetLastError(VE_NOT_INITED, kTraceError); 1002 return -1; 1003 } 1004 // Check if typing detection is enabled 1005 bool enabled = _shared->audio_processing()->voice_detection()->is_enabled(); 1006 if (enabled) { 1007 _shared->transmit_mixer()->TimeSinceLastTyping(seconds); 1008 return 0; 1009 } else { 1010 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, 1011 "SetTypingDetectionStatus is not enabled"); 1012 return -1; 1013 } 1014 #endif 1015 } 1016 1017 int VoEAudioProcessingImpl::SetTypingDetectionParameters(int timeWindow, 1018 int costPerTyping, 1019 int 
reportingThreshold, 1020 int penaltyDecay, 1021 int typeEventDelay) { 1022 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), 1023 "SetTypingDetectionParameters()"); 1024 #if !defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION) 1025 NOT_SUPPORTED(_shared->statistics()); 1026 #else 1027 if (!_shared->statistics().Initialized()) { 1028 _shared->statistics().SetLastError(VE_NOT_INITED, kTraceError); 1029 return -1; 1030 } 1031 return (_shared->transmit_mixer()->SetTypingDetectionParameters( 1032 timeWindow, costPerTyping, reportingThreshold, penaltyDecay, 1033 typeEventDelay)); 1034 #endif 1035 } 1036 1037 void VoEAudioProcessingImpl::EnableStereoChannelSwapping(bool enable) { 1038 _shared->transmit_mixer()->EnableStereoChannelSwapping(enable); 1039 } 1040 1041 bool VoEAudioProcessingImpl::IsStereoChannelSwappingEnabled() { 1042 return _shared->transmit_mixer()->IsStereoChannelSwappingEnabled(); 1043 } 1044 1045 #endif // #ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API 1046 1047 } // namespace webrtc 1048