/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"

#include <assert.h>
#include <stdlib.h>

#include <vector>

#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/typedefs.h"

namespace webrtc {

namespace acm2 {

// Sentinel value marking the end of a tone event. NOTE(review): not
// referenced in this chunk of the file — presumably used by DTMF handling
// elsewhere; confirm before removing.
enum {
  kACMToneEnd = 999
};

// Maximum number of bytes in one packet (PCM16B, 20 ms packets, stereo).
enum {
  kMaxPacketSize = 2560
};

// Maximum number of payloads that can be packed in one RED packet. For
// regular RED, we only pack two payloads. In case of dual-streaming, in worst
// case we might pack 3 payloads in one RED packet.
enum {
  kNumRedFragmentationVectors = 2,
  kMaxNumFragmentationVectors = 3
};

// If packet N has arrived, all unreceived packets prior to
// N - |kNackThresholdPackets| are considered lost and appear in the NACK
// list.
52 enum { 53 kNackThresholdPackets = 2 54 }; 55 56 namespace { 57 58 // TODO(turajs): the same functionality is used in NetEq. If both classes 59 // need them, make it a static function in ACMCodecDB. 60 bool IsCodecRED(const CodecInst* codec) { 61 return (STR_CASE_CMP(codec->plname, "RED") == 0); 62 } 63 64 bool IsCodecRED(int index) { 65 return (IsCodecRED(&ACMCodecDB::database_[index])); 66 } 67 68 bool IsCodecCN(const CodecInst* codec) { 69 return (STR_CASE_CMP(codec->plname, "CN") == 0); 70 } 71 72 bool IsCodecCN(int index) { 73 return (IsCodecCN(&ACMCodecDB::database_[index])); 74 } 75 76 // Stereo-to-mono can be used as in-place. 77 int DownMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) { 78 if (length_out_buff < frame.samples_per_channel_) { 79 return -1; 80 } 81 for (int n = 0; n < frame.samples_per_channel_; ++n) 82 out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1; 83 return 0; 84 } 85 86 // Mono-to-stereo can be used as in-place. 87 int UpMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) { 88 if (length_out_buff < frame.samples_per_channel_) { 89 return -1; 90 } 91 for (int n = frame.samples_per_channel_ - 1; n >= 0; --n) { 92 out_buff[2 * n + 1] = frame.data_[n]; 93 out_buff[2 * n] = frame.data_[n]; 94 } 95 return 0; 96 } 97 98 // Return 1 if timestamp t1 is less than timestamp t2, while compensating for 99 // wrap-around. 
// Wrap-around-aware "t1 < t2" for 32-bit RTP timestamps: the smaller
// unsigned distance decides ordering. Returns 1 iff t1 precedes t2.
static int TimestampLessThan(uint32_t t1, uint32_t t2) {
  uint32_t kHalfFullRange = static_cast<uint32_t>(0xFFFFFFFF) / 2;
  if (t1 == t2) {
    return 0;
  } else if (t1 < t2) {
    // t1 numerically smaller: treat as "less" unless the gap is so large
    // that t2 must have wrapped around past t1.
    if (t2 - t1 < kHalfFullRange)
      return 1;
    return 0;
  } else {
    // t1 numerically larger: treat as "greater" unless t1 wrapped.
    if (t1 - t2 < kHalfFullRange)
      return 0;
    return 1;
  }
}

}  // namespace

// Constructs the ACM: creates locks, zeroes/invalidates codec state,
// allocates the RED scratch buffer, registers default RED/CNG payload types
// from the codec database, and initializes the receiver side.
AudioCodingModuleImpl::AudioCodingModuleImpl(
    const AudioCodingModule::Config& config)
    : acm_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
      id_(config.id),
      // Arbitrary non-zero starting timestamps; both expected-timestamp
      // trackers start from the same value.
      expected_codec_ts_(0xD87F3F9F),
      expected_in_ts_(0xD87F3F9F),
      send_codec_inst_(),
      // 255 is used as "no payload type registered" for CNG/RED.
      cng_nb_pltype_(255),
      cng_wb_pltype_(255),
      cng_swb_pltype_(255),
      cng_fb_pltype_(255),
      red_pltype_(255),
      vad_enabled_(false),
      dtx_enabled_(false),
      vad_mode_(VADNormal),
      stereo_send_(false),
      current_send_codec_idx_(-1),
      send_codec_registered_(false),
      receiver_(config),
      is_first_red_(true),
      red_enabled_(false),
      last_red_timestamp_(0),
      codec_fec_enabled_(false),
      previous_pltype_(255),
      aux_rtp_header_(NULL),
      receiver_initialized_(false),
      secondary_send_codec_inst_(),
      codec_timestamp_(expected_codec_ts_),
      first_10ms_data_(false),
      callback_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
      packetization_callback_(NULL),
      vad_callback_(NULL) {

  // Nullify send codec memory, set payload type and set codec name to
  // invalid values.
  const char no_name[] = "noCodecRegistered";
  strncpy(send_codec_inst_.plname, no_name, RTP_PAYLOAD_NAME_SIZE - 1);
  send_codec_inst_.pltype = -1;

  strncpy(secondary_send_codec_inst_.plname, no_name,
          RTP_PAYLOAD_NAME_SIZE - 1);
  secondary_send_codec_inst_.pltype = -1;

  // No codec instances exist yet; mirror indices are invalid until a codec
  // is registered.
  for (int i = 0; i < ACMCodecDB::kMaxNumCodecs; i++) {
    codecs_[i] = NULL;
    mirror_codec_idx_[i] = -1;
  }

  // Allocate memory for RED.
  red_buffer_ = new uint8_t[MAX_PAYLOAD_SIZE_BYTE];

  // TODO(turajs): This might not be exactly how this class is supposed to work.
  // The external usage might be that |fragmentationVectorSize| has to match
  // the allocated space for the member-arrays, while here, we allocate
  // according to the maximum number of fragmentations and change
  // |fragmentationVectorSize| on-the-fly based on actual number of
  // fragmentations. However, due to copying to local variable before calling
  // SendData, the RTP module receives a "valid" fragmentation, where allocated
  // space matches |fragmentationVectorSize|, therefore, this should not cause
  // any problem. A better approach is not using RTPFragmentationHeader as
  // member variable, instead, use an ACM-specific structure to hold RED-related
  // data. See module_common_type.h for the definition of
  // RTPFragmentationHeader.
  fragmentation_.VerifyAndAllocateFragmentationHeader(
      kMaxNumFragmentationVectors);

  // Register the default payload type for RED and for CNG at sampling rates of
  // 8, 16, 32 and 48 kHz.
  for (int i = (ACMCodecDB::kNumCodecs - 1); i >= 0; i--) {
    if (IsCodecRED(i)) {
      red_pltype_ = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
    } else if (IsCodecCN(i)) {
      if (ACMCodecDB::database_[i].plfreq == 8000) {
        cng_nb_pltype_ = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
      } else if (ACMCodecDB::database_[i].plfreq == 16000) {
        cng_wb_pltype_ = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
      } else if (ACMCodecDB::database_[i].plfreq == 32000) {
        cng_swb_pltype_ = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
      } else if (ACMCodecDB::database_[i].plfreq == 48000) {
        cng_fb_pltype_ = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
      }
    }
  }

  // Failure is only traced; the object is still constructed.
  if (InitializeReceiverSafe() < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot initialize receiver");
  }
  WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_, "Created");
}

// Destroys the ACM: deletes codec instances (via their mirror index, so a
// shared instance is freed exactly once), the RED buffer, the auxiliary RTP
// header, and finally the locks.
AudioCodingModuleImpl::~AudioCodingModuleImpl() {
  {
    CriticalSectionScoped lock(acm_crit_sect_);
    current_send_codec_idx_ = -1;

    for (int i = 0; i < ACMCodecDB::kMaxNumCodecs; i++) {
      if (codecs_[i] != NULL) {
        // Mirror index holds the address of the codec memory.
        assert(mirror_codec_idx_[i] > -1);
        if (codecs_[mirror_codec_idx_[i]] != NULL) {
          delete codecs_[mirror_codec_idx_[i]];
          codecs_[mirror_codec_idx_[i]] = NULL;
        }

        // Clear the alias slot as well so the instance is not deleted twice.
        codecs_[i] = NULL;
      }
    }

    if (red_buffer_ != NULL) {
      delete[] red_buffer_;
      red_buffer_ = NULL;
    }
  }

  if (aux_rtp_header_ != NULL) {
    delete aux_rtp_header_;
    aux_rtp_header_ = NULL;
  }

  delete callback_crit_sect_;
  callback_crit_sect_ = NULL;

  delete acm_crit_sect_;
  acm_crit_sect_ = NULL;
  WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_,
               "Destroyed");
}

// Propagates a new trace/debug id to this module, every codec instance, and
// the receiver. Always returns 0.
int32_t AudioCodingModuleImpl::ChangeUniqueId(const int32_t id) {
  {
    CriticalSectionScoped lock(acm_crit_sect_);
    id_ = id;

    for (int i = 0; i < ACMCodecDB::kMaxNumCodecs; i++) {
      if (codecs_[i] != NULL) {
        codecs_[i]->SetUniqueID(id);
      }
    }
  }

  // The receiver keeps its own copy of the id; update it outside the lock.
  receiver_.set_id(id_);
  return 0;
}

// Returns the number of milliseconds until the module wants a
// worker thread to call Process.
264 int32_t AudioCodingModuleImpl::TimeUntilNextProcess() { 265 CriticalSectionScoped lock(acm_crit_sect_); 266 267 if (!HaveValidEncoder("TimeUntilNextProcess")) { 268 return -1; 269 } 270 return codecs_[current_send_codec_idx_]->SamplesLeftToEncode() / 271 (send_codec_inst_.plfreq / 1000); 272 } 273 274 int32_t AudioCodingModuleImpl::Process() { 275 bool dual_stream; 276 { 277 CriticalSectionScoped lock(acm_crit_sect_); 278 dual_stream = (secondary_encoder_.get() != NULL); 279 } 280 if (dual_stream) { 281 return ProcessDualStream(); 282 } 283 return ProcessSingleStream(); 284 } 285 286 int AudioCodingModuleImpl::EncodeFragmentation(int fragmentation_index, 287 int payload_type, 288 uint32_t current_timestamp, 289 ACMGenericCodec* encoder, 290 uint8_t* stream) { 291 int16_t len_bytes = MAX_PAYLOAD_SIZE_BYTE; 292 uint32_t rtp_timestamp; 293 WebRtcACMEncodingType encoding_type; 294 if (encoder->Encode(stream, &len_bytes, &rtp_timestamp, &encoding_type) < 0) { 295 return -1; 296 } 297 assert(encoding_type == kActiveNormalEncoded); 298 assert(len_bytes > 0); 299 300 fragmentation_.fragmentationLength[fragmentation_index] = len_bytes; 301 fragmentation_.fragmentationPlType[fragmentation_index] = payload_type; 302 fragmentation_.fragmentationTimeDiff[fragmentation_index] = 303 static_cast<uint16_t>(current_timestamp - rtp_timestamp); 304 fragmentation_.fragmentationVectorSize++; 305 return len_bytes; 306 } 307 308 // Primary payloads are sent immediately, whereas a single secondary payload is 309 // buffered to be combined with "the next payload." 310 // Normally "the next payload" would be a primary payload. In case two 311 // consecutive secondary payloads are generated with no primary payload in 312 // between, then two secondary payloads are packed in one RED. 
int AudioCodingModuleImpl::ProcessDualStream() {
  uint8_t stream[kMaxNumFragmentationVectors * MAX_PAYLOAD_SIZE_BYTE];
  uint32_t current_timestamp;
  int16_t length_bytes = 0;
  RTPFragmentationHeader my_fragmentation;

  uint8_t my_red_payload_type;

  {
    CriticalSectionScoped lock(acm_crit_sect_);
    // Check if there is an encoder before.
    if (!HaveValidEncoder("ProcessDualStream") ||
        secondary_encoder_.get() == NULL) {
      return -1;
    }
    ACMGenericCodec* primary_encoder = codecs_[current_send_codec_idx_];
    // If primary encoder has a full frame of audio to generate payload.
    bool primary_ready_to_encode = primary_encoder->HasFrameToEncode();
    // If the secondary encoder has a frame of audio to generate a payload.
    bool secondary_ready_to_encode = secondary_encoder_->HasFrameToEncode();

    if (!primary_ready_to_encode && !secondary_ready_to_encode) {
      // Nothing to send.
      return 0;
    }
    // Slot 2 of the fragmentation header is (ab)used to buffer a secondary
    // payload that was encoded on an earlier call but not yet sent.
    int len_bytes_previous_secondary = static_cast<int>(
        fragmentation_.fragmentationLength[2]);
    assert(len_bytes_previous_secondary <= MAX_PAYLOAD_SIZE_BYTE);
    bool has_previous_payload = len_bytes_previous_secondary > 0;

    uint32_t primary_timestamp = primary_encoder->EarliestTimestamp();
    uint32_t secondary_timestamp = secondary_encoder_->EarliestTimestamp();

    if (!has_previous_payload && !primary_ready_to_encode &&
        secondary_ready_to_encode) {
      // Secondary payload will be the ONLY bit-stream. Encode by secondary
      // encoder, store the payload, and return. No packet is sent.
      int16_t len_bytes = MAX_PAYLOAD_SIZE_BYTE;
      WebRtcACMEncodingType encoding_type;
      if (secondary_encoder_->Encode(red_buffer_, &len_bytes,
                                     &last_red_timestamp_,
                                     &encoding_type) < 0) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "ProcessDual(): Encoding of secondary encoder Failed");
        return -1;
      }
      assert(len_bytes > 0);
      assert(encoding_type == kActiveNormalEncoded);
      assert(len_bytes <= MAX_PAYLOAD_SIZE_BYTE);
      fragmentation_.fragmentationLength[2] = len_bytes;
      return 0;
    }

    // Initialize with invalid but different values, so later can have sanity
    // check if they are different.
    int index_primary = -1;
    int index_secondary = -2;
    int index_previous_secondary = -3;

    // Each payload's fragment index is the number of other payloads with an
    // earlier timestamp (fragments are ordered oldest-first).
    if (primary_ready_to_encode) {
      index_primary = secondary_ready_to_encode ?
          TimestampLessThan(primary_timestamp, secondary_timestamp) : 0;
      index_primary += has_previous_payload ?
          TimestampLessThan(primary_timestamp, last_red_timestamp_) : 0;
    }

    if (secondary_ready_to_encode) {
      // Timestamp of secondary payload can only be less than primary payload,
      // but is always larger than the timestamp of previous secondary payload.
      index_secondary = primary_ready_to_encode ?
          (1 - TimestampLessThan(primary_timestamp, secondary_timestamp)) : 0;
    }

    if (has_previous_payload) {
      index_previous_secondary = primary_ready_to_encode ?
          (1 - TimestampLessThan(primary_timestamp, last_red_timestamp_)) : 0;
      // If secondary is ready it always has a timestamp larger than the
      // previous secondary. So the index is either 0 or 1.
      index_previous_secondary += secondary_ready_to_encode ? 1 : 0;
    }

    // Indices must not be equal.
    assert(index_primary != index_secondary);
    assert(index_primary != index_previous_secondary);
    assert(index_secondary != index_previous_secondary);

    // One of the payloads has to be at position zero.
    assert(index_primary == 0 || index_secondary == 0 ||
           index_previous_secondary == 0);

    // Timestamp of the RED payload is the timestamp of its oldest fragment.
    if (index_primary == 0) {
      current_timestamp = primary_timestamp;
    } else if (index_secondary == 0) {
      current_timestamp = secondary_timestamp;
    } else {
      current_timestamp = last_red_timestamp_;
    }

    fragmentation_.fragmentationVectorSize = 0;
    if (has_previous_payload) {
      assert(index_previous_secondary >= 0 &&
             index_previous_secondary < kMaxNumFragmentationVectors);
      assert(len_bytes_previous_secondary <= MAX_PAYLOAD_SIZE_BYTE);
      memcpy(&stream[index_previous_secondary * MAX_PAYLOAD_SIZE_BYTE],
             red_buffer_, sizeof(stream[0]) * len_bytes_previous_secondary);
      fragmentation_.fragmentationLength[index_previous_secondary] =
          len_bytes_previous_secondary;
      fragmentation_.fragmentationPlType[index_previous_secondary] =
          secondary_send_codec_inst_.pltype;
      fragmentation_.fragmentationTimeDiff[index_previous_secondary] =
          static_cast<uint16_t>(current_timestamp - last_red_timestamp_);
      fragmentation_.fragmentationVectorSize++;
    }

    if (primary_ready_to_encode) {
      assert(index_primary >= 0 && index_primary < kMaxNumFragmentationVectors);
      int i = index_primary * MAX_PAYLOAD_SIZE_BYTE;
      if (EncodeFragmentation(index_primary, send_codec_inst_.pltype,
                              current_timestamp, primary_encoder,
                              &stream[i]) < 0) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "ProcessDualStream(): Encoding of primary encoder Failed");
        return -1;
      }
    }

    if (secondary_ready_to_encode) {
      assert(index_secondary >= 0 &&
             index_secondary < kMaxNumFragmentationVectors - 1);
      int i = index_secondary * MAX_PAYLOAD_SIZE_BYTE;
      if (EncodeFragmentation(index_secondary,
                              secondary_send_codec_inst_.pltype,
                              current_timestamp, secondary_encoder_.get(),
                              &stream[i]) < 0) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "ProcessDualStream(): Encoding of secondary encoder "
                     "Failed");
        return -1;
      }
    }
    // Copy to local variable, as it will be used outside the ACM lock.
    my_fragmentation.CopyFrom(fragmentation_);
    my_red_payload_type = red_pltype_;
    length_bytes = 0;
    for (int n = 0; n < fragmentation_.fragmentationVectorSize; n++) {
      length_bytes += fragmentation_.fragmentationLength[n];
    }
  }

  {
    CriticalSectionScoped lock(callback_crit_sect_);
    if (packetization_callback_ != NULL) {
      // Callback with payload data, including redundant data (RED).
      if (packetization_callback_->SendData(kAudioFrameSpeech,
                                            my_red_payload_type,
                                            current_timestamp, stream,
                                            length_bytes,
                                            &my_fragmentation) < 0) {
        return -1;
      }
    }
  }

  {
    CriticalSectionScoped lock(acm_crit_sect_);
    // Now that data is sent, clean up fragmentation.
    ResetFragmentation(0);
  }
  return 0;
}

// Encode one frame with the current send codec, optionally pack it together
// with the previous frame's redundant data (RED, RFC 2198), and deliver the
// result via the packetization callback. Returns the number of bytes handed
// to the callback, or -1 on error.
int AudioCodingModuleImpl::ProcessSingleStream() {
  // Make room for 1 RED payload.
  uint8_t stream[2 * MAX_PAYLOAD_SIZE_BYTE];
  // TODO(turajs): |length_bytes| & |red_length_bytes| can be of type int if
  // ACMGenericCodec::Encode() & ACMGenericCodec::GetRedPayload() allows.
  int16_t length_bytes = 2 * MAX_PAYLOAD_SIZE_BYTE;
  int16_t red_length_bytes = length_bytes;
  uint32_t rtp_timestamp;
  int status;
  WebRtcACMEncodingType encoding_type;
  FrameType frame_type = kAudioFrameSpeech;
  uint8_t current_payload_type = 0;
  bool has_data_to_send = false;
  bool red_active = false;
  RTPFragmentationHeader my_fragmentation;

  // Keep the scope of the ACM critical section limited.
  {
    CriticalSectionScoped lock(acm_crit_sect_);
    // Check if there is an encoder before.
    if (!HaveValidEncoder("ProcessSingleStream")) {
      return -1;
    }
    status = codecs_[current_send_codec_idx_]->Encode(stream, &length_bytes,
                                                      &rtp_timestamp,
                                                      &encoding_type);
    if (status < 0) {
      // Encode failed.
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                   "ProcessSingleStream(): Encoding Failed");
      length_bytes = 0;
      return -1;
    } else if (status == 0) {
      // Not enough data.
      return 0;
    } else {
      // Map the encoding type to a frame type and payload type. Any DTX
      // output restarts the RED history (is_first_red_).
      switch (encoding_type) {
        case kNoEncoding: {
          current_payload_type = previous_pltype_;
          frame_type = kFrameEmpty;
          length_bytes = 0;
          break;
        }
        case kActiveNormalEncoded:
        case kPassiveNormalEncoded: {
          current_payload_type = static_cast<uint8_t>(send_codec_inst_.pltype);
          frame_type = kAudioFrameSpeech;
          break;
        }
        case kPassiveDTXNB: {
          current_payload_type = cng_nb_pltype_;
          frame_type = kAudioFrameCN;
          is_first_red_ = true;
          break;
        }
        case kPassiveDTXWB: {
          current_payload_type = cng_wb_pltype_;
          frame_type = kAudioFrameCN;
          is_first_red_ = true;
          break;
        }
        case kPassiveDTXSWB: {
          current_payload_type = cng_swb_pltype_;
          frame_type = kAudioFrameCN;
          is_first_red_ = true;
          break;
        }
        case kPassiveDTXFB: {
          current_payload_type = cng_fb_pltype_;
          frame_type = kAudioFrameCN;
          is_first_red_ = true;
          break;
        }
      }
      has_data_to_send = true;
      previous_pltype_ = current_payload_type;

      // Redundancy encode is done here. The two bitstreams packetized into
      // one RTP packet and the fragmentation points are set.
      // Only apply RED on speech data.
      if ((red_enabled_) &&
          ((encoding_type == kActiveNormalEncoded) ||
           (encoding_type == kPassiveNormalEncoded))) {
        // RED is enabled within this scope.
        //
        // Note that, a special solution exists for iSAC since it is the only
        // codec for which GetRedPayload has a non-empty implementation.
        //
        // Summary of the RED scheme below (use iSAC as example):
        //
        //  1st (is_first_red_ is true) encoded iSAC frame (primary #1) =>
        //      - call GetRedPayload() and store redundancy for packet #1 in
        //        second fragment of RED buffer (old data)
        //      - drop the primary iSAC frame
        //      - don't call SendData
        //  2nd (is_first_red_ is false) encoded iSAC frame (primary #2) =>
        //      - store primary #2 in 1st fragment of RED buffer and send the
        //        combined packet
        //      - the transmitted packet contains primary #2 (new) and
        //        redundancy for packet #1 (old)
        //      - call GetRedPayload() and store redundancy for packet #2 in
        //        second fragment of RED buffer
        //
        //  ...
        //
        //  Nth encoded iSAC frame (primary #N) =>
        //      - store primary #N in 1st fragment of RED buffer and send the
        //        combined packet
        //      - the transmitted packet contains primary #N (new) and
        //        redundancy for packet #(N-1) (old)
        //      - call GetRedPayload() and store redundancy for packet #N in
        //        second fragment of RED buffer
        //
        //  For all other codecs, GetRedPayload does nothing and returns -1 =>
        //  redundant data is only a copy.
        //
        //  First combined packet contains : #2 (new) and #1 (old)
        //  Second combined packet contains: #3 (new) and #2 (old)
        //  Third combined packet contains : #4 (new) and #3 (old)
        //
        //  Hence, even if every second packet is dropped, perfect
        //  reconstruction is possible.
        red_active = true;

        has_data_to_send = false;
        // Skip the following part for the first packet in a RED session.
        if (!is_first_red_) {
          // Rearrange stream such that RED packets are included.
          // Replace stream now that we have stored current stream.
          memcpy(stream + fragmentation_.fragmentationOffset[1], red_buffer_,
                 fragmentation_.fragmentationLength[1]);
          // Update the fragmentation time difference vector, in number of
          // timestamps.
          uint16_t time_since_last = static_cast<uint16_t>(
              rtp_timestamp - last_red_timestamp_);

          // Update fragmentation vectors.
          fragmentation_.fragmentationPlType[1] =
              fragmentation_.fragmentationPlType[0];
          fragmentation_.fragmentationTimeDiff[1] = time_since_last;
          has_data_to_send = true;
        }

        // Insert new packet length.
        fragmentation_.fragmentationLength[0] = length_bytes;

        // Insert new packet payload type.
        fragmentation_.fragmentationPlType[0] = current_payload_type;
        last_red_timestamp_ = rtp_timestamp;

        // Can be modified by the GetRedPayload() call if iSAC is utilized.
        red_length_bytes = length_bytes;

        // A fragmentation header is provided => packetization according to
        // RFC 2198 (RTP Payload for Redundant Audio Data) will be used.
        // First fragment is the current data (new).
        // Second fragment is the previous data (old).
        length_bytes = static_cast<int16_t>(
            fragmentation_.fragmentationLength[0] +
            fragmentation_.fragmentationLength[1]);

        // Get, and store, redundant data from the encoder based on the recently
        // encoded frame.
        // NOTE - only iSAC contains an implementation; all other codecs does
        // nothing and returns -1.
        if (codecs_[current_send_codec_idx_]->GetRedPayload(
            red_buffer_, &red_length_bytes) == -1) {
          // The codec was not iSAC => use current encoder output as redundant
          // data instead (trivial RED scheme).
          memcpy(red_buffer_, stream, red_length_bytes);
        }

        is_first_red_ = false;
        // Update payload type with RED payload type.
        current_payload_type = red_pltype_;
        // We have packed 2 payloads.
        fragmentation_.fragmentationVectorSize = kNumRedFragmentationVectors;

        // Copy to local variable, as it will be used outside ACM lock.
        my_fragmentation.CopyFrom(fragmentation_);
        // Store RED length for the NEXT call; note this is intentionally done
        // AFTER the copy above, so the outgoing header still carries the old
        // length.
        fragmentation_.fragmentationLength[1] = red_length_bytes;
      }
    }
  }

  if (has_data_to_send) {
    CriticalSectionScoped lock(callback_crit_sect_);

    if (packetization_callback_ != NULL) {
      if (red_active) {
        // Callback with payload data, including redundant data (RED).
        packetization_callback_->SendData(frame_type, current_payload_type,
                                          rtp_timestamp, stream, length_bytes,
                                          &my_fragmentation);
      } else {
        // Callback with payload data.
        packetization_callback_->SendData(frame_type, current_payload_type,
                                          rtp_timestamp, stream, length_bytes,
                                          NULL);
      }
    }

    if (vad_callback_ != NULL) {
      // Callback with VAD decision.
      vad_callback_->InFrameType(static_cast<int16_t>(encoding_type));
    }
  }
  return length_bytes;
}

/////////////////////////////////////////
//   Sender
//

// Initialize send codec: invalidates the registered send codec, destroys all
// encoder instances, and resets the RED state. Returns 0.
int AudioCodingModuleImpl::InitializeSender() {
  CriticalSectionScoped lock(acm_crit_sect_);

  // Start with invalid values.
  send_codec_registered_ = false;
  current_send_codec_idx_ = -1;
  send_codec_inst_.plname[0] = '\0';

  // Delete all encoders to start fresh.
  for (int id = 0; id < ACMCodecDB::kMaxNumCodecs; id++) {
    if (codecs_[id] != NULL) {
      codecs_[id]->DestructEncoder();
    }
  }

  // Initialize RED.
  is_first_red_ = true;
  if (red_enabled_ || secondary_encoder_.get() != NULL) {
    if (red_buffer_ != NULL) {
      memset(red_buffer_, 0, MAX_PAYLOAD_SIZE_BYTE);
    }
    if (red_enabled_) {
      ResetFragmentation(kNumRedFragmentationVectors);
    } else {
      ResetFragmentation(0);
    }
  }

  return 0;
}

// Resets the current send codec's encoder state. Returns the codec's own
// status, or -1 if no valid encoder is registered.
int AudioCodingModuleImpl::ResetEncoder() {
  CriticalSectionScoped lock(acm_crit_sect_);
  if (!HaveValidEncoder("ResetEncoder")) {
    return -1;
  }
  return codecs_[current_send_codec_idx_]->ResetEncoder();
}

// Creates a codec instance from the database and wires it to this module's
// id and the receiver's decode lock. Returns NULL on failure (caller owns
// the returned instance).
ACMGenericCodec* AudioCodingModuleImpl::CreateCodec(const CodecInst& codec) {
  ACMGenericCodec* my_codec = NULL;

  my_codec = ACMCodecDB::CreateCodecInstance(codec);
  if (my_codec == NULL) {
    // Error, could not create the codec.
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "ACMCodecDB::CreateCodecInstance() failed in CreateCodec()");
    return my_codec;
  }
  my_codec->SetUniqueID(id_);
  my_codec->SetNetEqDecodeLock(receiver_.DecodeLock());

  return my_codec;
}

// Checks whether the given codec is valid to register as a send codec.
// On success returns the codec's database index and sets |mirror_id|; on
// failure returns -1 (and may set |mirror_id| to -1).
static int IsValidSendCodec(const CodecInst& send_codec,
                            bool is_primary_encoder,
                            int acm_id,
                            int* mirror_id) {
  if ((send_codec.channels != 1) && (send_codec.channels != 2)) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
                 "Wrong number of channels (%d, only mono and stereo are "
                 "supported) for %s encoder", send_codec.channels,
                 is_primary_encoder ? "primary" : "secondary");
    return -1;
  }

  int codec_id = ACMCodecDB::CodecNumber(send_codec, mirror_id);
  if (codec_id < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
                 "Invalid codec setting for the send codec.");
    return -1;
  }

  // TODO(tlegrand): Remove this check. Already taken care of in
  // ACMCodecDB::CodecNumber().
  // Check if the payload-type is valid
  if (!ACMCodecDB::ValidPayloadType(send_codec.pltype)) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
                 "Invalid payload-type %d for %s.", send_codec.pltype,
                 send_codec.plname);
    return -1;
  }

  // Telephone-event cannot be a send codec.
  if (!STR_CASE_CMP(send_codec.plname, "telephone-event")) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
                 "telephone-event cannot be a send codec");
    *mirror_id = -1;
    return -1;
  }

  if (ACMCodecDB::codec_settings_[codec_id].channel_support
      < send_codec.channels) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
                 "%d number of channels not supportedn for %s.",
                 send_codec.channels, send_codec.plname);
    *mirror_id = -1;
    return -1;
  }

  if (!is_primary_encoder) {
    // If registering the secondary encoder, then RED and CN are not valid
    // choices as encoder.
    if (IsCodecRED(&send_codec)) {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
                   "RED cannot be secondary codec");
      *mirror_id = -1;
      return -1;
    }

    if (IsCodecCN(&send_codec)) {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, acm_id,
                   "DTX cannot be secondary codec");
      *mirror_id = -1;
      return -1;
    }
  }
  return codec_id;
}

// Registers |send_codec| as the secondary (dual-stream) encoder. Requires a
// primary codec with the same sampling rate to be registered already. VAD &
// DTX are forced off. Returns 0 on success, -1 on failure.
int AudioCodingModuleImpl::RegisterSecondarySendCodec(
    const CodecInst& send_codec) {
  CriticalSectionScoped lock(acm_crit_sect_);
  if (!send_codec_registered_) {
    return -1;
  }
  // Primary and Secondary codecs should have the same sampling rates.
  if (send_codec.plfreq != send_codec_inst_.plfreq) {
    return -1;
  }
  int mirror_id;
  int codec_id = IsValidSendCodec(send_codec, false, id_, &mirror_id);
  if (codec_id < 0) {
    return -1;
  }
  ACMGenericCodec* encoder = CreateCodec(send_codec);
  WebRtcACMCodecParams codec_params;
  // Initialize the codec before registering. For secondary codec VAD & DTX are
  // disabled.
  memcpy(&(codec_params.codec_inst), &send_codec, sizeof(CodecInst));
  codec_params.enable_vad = false;
  codec_params.enable_dtx = false;
  codec_params.vad_mode = VADNormal;
  // Force initialization.
  if (encoder->InitEncoder(&codec_params, true) < 0) {
    // Could not initialize, therefore cannot be registered.
    delete encoder;
    return -1;
  }
  secondary_encoder_.reset(encoder);
  memcpy(&secondary_send_codec_inst_, &send_codec, sizeof(send_codec));

  // Disable VAD & DTX.
  SetVADSafe(false, false, VADNormal);

  // Cleaning.
  if (red_buffer_) {
    memset(red_buffer_, 0, MAX_PAYLOAD_SIZE_BYTE);
  }
  ResetFragmentation(0);
  return 0;
}

// Drops the secondary encoder (if any) and clears the dual-stream
// fragmentation state.
void AudioCodingModuleImpl::UnregisterSecondarySendCodec() {
  CriticalSectionScoped lock(acm_crit_sect_);
  if (secondary_encoder_.get() == NULL) {
    return;
  }
  secondary_encoder_.reset();
  ResetFragmentation(0);
}

// Copies the registered secondary send codec into |secondary_codec|.
// Returns -1 if no secondary encoder is registered.
int AudioCodingModuleImpl::SecondarySendCodec(
    CodecInst* secondary_codec) const {
  CriticalSectionScoped lock(acm_crit_sect_);
  if (secondary_encoder_.get() == NULL) {
    return -1;
  }
  memcpy(secondary_codec, &secondary_send_codec_inst_,
         sizeof(secondary_send_codec_inst_));
  return 0;
}

// Can be called multiple times for Codec, CNG, RED.
// Registers |send_codec| as the send (encoding) codec.
// RED and CNG are special-cased: only their payload types are recorded and no
// encoder instance is created. For regular codecs, an encoder is created or
// re-initialized as needed. Returns 0 on success, -1 on failure.
int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
  int mirror_id;
  int codec_id = IsValidSendCodec(send_codec, true, id_, &mirror_id);

  CriticalSectionScoped lock(acm_crit_sect_);

  // Check for reported errors from function IsValidSendCodec().
  if (codec_id < 0) {
    if (!send_codec_registered_) {
      // This value has to be -1 if there is no codec registered.
      current_send_codec_idx_ = -1;
    }
    return -1;
  }

  // RED can be registered with other payload type. If not registered a default
  // payload type is used.
  if (IsCodecRED(&send_codec)) {
    // TODO(tlegrand): Remove this check. Already taken care of in
    // ACMCodecDB::CodecNumber().
    // Check if the payload-type is valid.
    if (!ACMCodecDB::ValidPayloadType(send_codec.pltype)) {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                   "Invalid payload-type %d for %s.", send_codec.pltype,
                   send_codec.plname);
      return -1;
    }
    // Set RED payload type.
    red_pltype_ = static_cast<uint8_t>(send_codec.pltype);
    return 0;
  }

  // CNG can be registered with other payload type. If not registered the
  // default payload types from codec database will be used.
  if (IsCodecCN(&send_codec)) {
    // CNG is registered. Store the payload type matching its sample rate.
    switch (send_codec.plfreq) {
      case 8000: {
        cng_nb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
        break;
      }
      case 16000: {
        cng_wb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
        break;
      }
      case 32000: {
        cng_swb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
        break;
      }
      case 48000: {
        cng_fb_pltype_ = static_cast<uint8_t>(send_codec.pltype);
        break;
      }
      default: {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "RegisterSendCodec() failed, invalid frequency for CNG "
                     "registration");
        return -1;
      }
    }
    return 0;
  }

  // Set Stereo, and make sure VAD and DTX is turned off.
  if (send_codec.channels == 2) {
    stereo_send_ = true;
    if (vad_enabled_ || dtx_enabled_) {
      WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
                   "VAD/DTX is turned off, not supported when sending stereo.");
    }
    vad_enabled_ = false;
    dtx_enabled_ = false;
  } else {
    stereo_send_ = false;
  }

  // Check if the codec is already registered as send codec.
  bool is_send_codec;
  if (send_codec_registered_) {
    int send_codec_mirror_id;
    int send_codec_id = ACMCodecDB::CodecNumber(send_codec_inst_,
                                                &send_codec_mirror_id);
    assert(send_codec_id >= 0);
    is_send_codec = (send_codec_id == codec_id) ||
        (mirror_id == send_codec_mirror_id);
  } else {
    is_send_codec = false;
  }

  // If there is secondary codec registered and the new send codec has a
  // sampling rate different than that of secondary codec, then unregister the
  // secondary codec.
  if (secondary_encoder_.get() != NULL &&
      secondary_send_codec_inst_.plfreq != send_codec.plfreq) {
    secondary_encoder_.reset();
    ResetFragmentation(0);
  }

  // If new codec, or new settings, register.
  if (!is_send_codec) {
    if (codecs_[mirror_id] == NULL) {
      codecs_[mirror_id] = CreateCodec(send_codec);
      if (codecs_[mirror_id] == NULL) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Cannot Create the codec");
        return -1;
      }
      mirror_codec_idx_[mirror_id] = mirror_id;
    }

    // Codecs sharing one instance across entries (e.g. iSAC WB/SWB) are
    // stored at |mirror_id|; alias the entry at |codec_id| to it.
    if (mirror_id != codec_id) {
      codecs_[codec_id] = codecs_[mirror_id];
      mirror_codec_idx_[codec_id] = mirror_id;
    }

    ACMGenericCodec* codec_ptr = codecs_[codec_id];
    WebRtcACMCodecParams codec_params;

    memcpy(&(codec_params.codec_inst), &send_codec, sizeof(CodecInst));
    codec_params.enable_vad = vad_enabled_;
    codec_params.enable_dtx = dtx_enabled_;
    codec_params.vad_mode = vad_mode_;
    // Force initialization.
    if (codec_ptr->InitEncoder(&codec_params, true) < 0) {
      // Could not initialize the encoder.

      // Check if already have a registered codec.
      // Depending on that different messages are logged.
      if (!send_codec_registered_) {
        current_send_codec_idx_ = -1;
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Cannot Initialize the encoder No Encoder is registered");
      } else {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Cannot Initialize the encoder, continue encoding with "
                     "the previously registered codec");
      }
      return -1;
    }

    // Update states. InitEncoder() may have adjusted VAD/DTX settings.
    dtx_enabled_ = codec_params.enable_dtx;
    vad_enabled_ = codec_params.enable_vad;
    vad_mode_ = codec_params.vad_mode;

    // Everything is fine so we can replace the previous codec with this one.
    if (send_codec_registered_) {
      // If we change codec we start fresh with RED.
      // This is not strictly required by the standard.
      is_first_red_ = true;
      codec_ptr->SetVAD(&dtx_enabled_, &vad_enabled_, &vad_mode_);

      if (!codec_ptr->HasInternalFEC()) {
        codec_fec_enabled_ = false;
      } else {
        if (codec_ptr->SetFEC(codec_fec_enabled_) < 0) {
          WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                       "Cannot set codec FEC");
          return -1;
        }
      }
    }

    current_send_codec_idx_ = codec_id;
    send_codec_registered_ = true;
    memcpy(&send_codec_inst_, &send_codec, sizeof(CodecInst));
    previous_pltype_ = send_codec_inst_.pltype;
    return 0;
  } else {
    // If codec is the same as already registered check if any parameters
    // has changed compared to the current values.
    // If any parameter is valid then apply it and record.
    bool force_init = false;

    if (mirror_id != codec_id) {
      codecs_[codec_id] = codecs_[mirror_id];
      mirror_codec_idx_[codec_id] = mirror_id;
    }

    // Check the payload type.
    if (send_codec.pltype != send_codec_inst_.pltype) {
      // At this point check if the given payload type is valid.
      // Record it later when the sampling frequency is changed
      // successfully.
      if (!ACMCodecDB::ValidPayloadType(send_codec.pltype)) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Out of range payload type");
        return -1;
      }
    }

    // If there is a codec that ONE instance of codec supports multiple
    // sampling frequencies, then we need to take care of it here.
    // One such a codec is iSAC. Both WB and SWB are encoded and decoded
    // with one iSAC instance. Therefore, we need to update the encoder
    // frequency if required.
    if (send_codec_inst_.plfreq != send_codec.plfreq) {
      force_init = true;

      // If sampling frequency is changed we have to start fresh with RED.
      is_first_red_ = true;
    }

    // If packet size or number of channels has changed, we need to
    // re-initialize the encoder.
    if (send_codec_inst_.pacsize != send_codec.pacsize) {
      force_init = true;
    }
    if (send_codec_inst_.channels != send_codec.channels) {
      force_init = true;
    }

    if (force_init) {
      WebRtcACMCodecParams codec_params;

      memcpy(&(codec_params.codec_inst), &send_codec, sizeof(CodecInst));
      codec_params.enable_vad = vad_enabled_;
      codec_params.enable_dtx = dtx_enabled_;
      codec_params.vad_mode = vad_mode_;

      // Force initialization.
      if (codecs_[current_send_codec_idx_]->InitEncoder(&codec_params,
                                                        true) < 0) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Could not change the codec packet-size.");
        return -1;
      }

      send_codec_inst_.plfreq = send_codec.plfreq;
      send_codec_inst_.pacsize = send_codec.pacsize;
      send_codec_inst_.channels = send_codec.channels;
    }

    // If the change of sampling frequency has been successful then
    // we store the payload-type.
    send_codec_inst_.pltype = send_codec.pltype;

    // Check if a change in Rate is required.
    if (send_codec.rate != send_codec_inst_.rate) {
      if (codecs_[codec_id]->SetBitRate(send_codec.rate) < 0) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Could not change the codec rate.");
        return -1;
      }
      send_codec_inst_.rate = send_codec.rate;
    }

    // Codec-internal FEC stays enabled only if the codec supports it.
    if (!codecs_[codec_id]->HasInternalFEC()) {
      codec_fec_enabled_ = false;
    } else {
      if (codecs_[codec_id]->SetFEC(codec_fec_enabled_) < 0) {
        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                     "Cannot set codec FEC");
        return -1;
      }
    }

    previous_pltype_ = send_codec_inst_.pltype;
    return 0;
  }
}

// Get current send codec.
1151 int AudioCodingModuleImpl::SendCodec( 1152 CodecInst* current_codec) const { 1153 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, 1154 "SendCodec()"); 1155 CriticalSectionScoped lock(acm_crit_sect_); 1156 1157 if (!send_codec_registered_) { 1158 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, 1159 "SendCodec Failed, no codec is registered"); 1160 return -1; 1161 } 1162 WebRtcACMCodecParams encoder_param; 1163 codecs_[current_send_codec_idx_]->EncoderParams(&encoder_param); 1164 encoder_param.codec_inst.pltype = send_codec_inst_.pltype; 1165 memcpy(current_codec, &(encoder_param.codec_inst), sizeof(CodecInst)); 1166 1167 return 0; 1168 } 1169 1170 // Get current send frequency. 1171 int AudioCodingModuleImpl::SendFrequency() const { 1172 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, 1173 "SendFrequency()"); 1174 CriticalSectionScoped lock(acm_crit_sect_); 1175 1176 if (!send_codec_registered_) { 1177 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, 1178 "SendFrequency Failed, no codec is registered"); 1179 return -1; 1180 } 1181 1182 return send_codec_inst_.plfreq; 1183 } 1184 1185 // Get encode bitrate. 1186 // Adaptive rate codecs return their current encode target rate, while other 1187 // codecs return there longterm avarage or their fixed rate. 1188 int AudioCodingModuleImpl::SendBitrate() const { 1189 CriticalSectionScoped lock(acm_crit_sect_); 1190 1191 if (!send_codec_registered_) { 1192 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, 1193 "SendBitrate Failed, no codec is registered"); 1194 return -1; 1195 } 1196 1197 WebRtcACMCodecParams encoder_param; 1198 codecs_[current_send_codec_idx_]->EncoderParams(&encoder_param); 1199 1200 return encoder_param.codec_inst.rate; 1201 } 1202 1203 // Set available bandwidth, inform the encoder about the estimated bandwidth 1204 // received from the remote party. 
int AudioCodingModuleImpl::SetReceivedEstimatedBandwidth(int bw) {
  // NOTE(review): unlike most other encoder accessors, this neither takes
  // |acm_crit_sect_| nor checks HaveValidEncoder() — confirm callers only
  // use this with a registered send codec.
  return codecs_[current_send_codec_idx_]->SetEstimatedBandwidth(bw);
}

// Register a transport callback which will be called to deliver
// the encoded buffers.
int AudioCodingModuleImpl::RegisterTransportCallback(
    AudioPacketizationCallback* transport) {
  CriticalSectionScoped lock(callback_crit_sect_);
  packetization_callback_ = transport;
  return 0;
}

// Add 10MS of raw (PCM) audio data to the encoder.
// The frame must hold exactly 10 ms of audio (samples_per_channel_ ==
// sample_rate_hz_ / 100), at most 48 kHz, mono or stereo.
// Returns 0 on success, -1 on failure.
int AudioCodingModuleImpl::Add10MsData(
    const AudioFrame& audio_frame) {
  if (audio_frame.samples_per_channel_ <= 0) {
    assert(false);
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot Add 10 ms audio, payload length is negative or "
                 "zero");
    return -1;
  }

  if (audio_frame.sample_rate_hz_ > 48000) {
    assert(false);
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot Add 10 ms audio, input frequency not valid");
    return -1;
  }

  // If the length and frequency matches. We currently just support raw PCM.
  if ((audio_frame.sample_rate_hz_ / 100)
      != audio_frame.samples_per_channel_) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot Add 10 ms audio, input frequency and length doesn't"
                 " match");
    return -1;
  }

  if (audio_frame.num_channels_ != 1 && audio_frame.num_channels_ != 2) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Cannot Add 10 ms audio, invalid number of channels.");
    return -1;
  }

  CriticalSectionScoped lock(acm_crit_sect_);
  // Do we have a codec registered?
  if (!HaveValidEncoder("Add10MsData")) {
    return -1;
  }

  const AudioFrame* ptr_frame;
  // Perform a resampling, also down-mix if it is required and can be
  // performed before resampling (a down mix prior to resampling will take
  // place if both primary and secondary encoders are mono and input is in
  // stereo).
  if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) {
    return -1;
  }

  // Check whether we need an up-mix or down-mix?
  bool remix = ptr_frame->num_channels_ != send_codec_inst_.channels;
  if (secondary_encoder_.get() != NULL) {
    remix = remix ||
        (ptr_frame->num_channels_ != secondary_send_codec_inst_.channels);
  }

  // If a re-mix is required (up or down), this buffer will store re-mixed
  // version of the input.
  int16_t buffer[WEBRTC_10MS_PCM_AUDIO];
  if (remix) {
    if (ptr_frame->num_channels_ == 1) {
      if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, buffer) < 0)
        return -1;
    } else {
      if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, buffer) < 0)
        return -1;
    }
  }

  // When adding data to encoders this pointer is pointing to an audio buffer
  // with correct number of channels.
  const int16_t* ptr_audio = ptr_frame->data_;

  // For pushing data to primary, point the |ptr_audio| to correct buffer.
  if (send_codec_inst_.channels != ptr_frame->num_channels_)
    ptr_audio = buffer;

  if (codecs_[current_send_codec_idx_]->Add10MsData(
      ptr_frame->timestamp_, ptr_audio, ptr_frame->samples_per_channel_,
      send_codec_inst_.channels) < 0)
    return -1;

  if (secondary_encoder_.get() != NULL) {
    // For pushing data to secondary, point the |ptr_audio| to correct buffer.
    ptr_audio = ptr_frame->data_;
    if (secondary_send_codec_inst_.channels != ptr_frame->num_channels_)
      ptr_audio = buffer;

    if (secondary_encoder_->Add10MsData(
        ptr_frame->timestamp_, ptr_audio, ptr_frame->samples_per_channel_,
        secondary_send_codec_inst_.channels) < 0)
      return -1;
  }

  return 0;
}

// Perform a resampling and down-mix if required. We down-mix only if
// encoder is mono and input is stereo. In case of dual-streaming, both
// encoders has to be mono for down-mix to take place.
// |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing
// is required, |*ptr_out| points to |in_frame|.
int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
                                               const AudioFrame** ptr_out) {
  // Primary and secondary (if exists) should have the same sampling rate.
  assert((secondary_encoder_.get() != NULL) ?
      secondary_send_codec_inst_.plfreq == send_codec_inst_.plfreq : true);

  bool resample = (in_frame.sample_rate_hz_ != send_codec_inst_.plfreq);

  // This variable is true if primary codec and secondary codec (if exists)
  // are both mono and input is stereo.
  bool down_mix;
  if (secondary_encoder_.get() != NULL) {
    down_mix = (in_frame.num_channels_ == 2) &&
        (send_codec_inst_.channels == 1) &&
        (secondary_send_codec_inst_.channels == 1);
  } else {
    down_mix = (in_frame.num_channels_ == 2) &&
        (send_codec_inst_.channels == 1);
  }

  // Track input vs. codec timestamps so that gaps in the input stream are
  // propagated to the codec timestamps, scaled to the codec sample rate.
  if (!first_10ms_data_) {
    expected_in_ts_ = in_frame.timestamp_;
    expected_codec_ts_ = in_frame.timestamp_;
    first_10ms_data_ = true;
  } else if (in_frame.timestamp_ != expected_in_ts_) {
    // TODO(turajs): Do we need a warning here.
    expected_codec_ts_ += (in_frame.timestamp_ - expected_in_ts_) *
        static_cast<uint32_t>((static_cast<double>(send_codec_inst_.plfreq) /
        static_cast<double>(in_frame.sample_rate_hz_)));
    expected_in_ts_ = in_frame.timestamp_;
  }

  if (!down_mix && !resample) {
    // No pre-processing is required.
    expected_in_ts_ += in_frame.samples_per_channel_;
    expected_codec_ts_ += in_frame.samples_per_channel_;
    *ptr_out = &in_frame;
    return 0;
  }

  *ptr_out = &preprocess_frame_;
  preprocess_frame_.num_channels_ = in_frame.num_channels_;
  int16_t audio[WEBRTC_10MS_PCM_AUDIO];
  const int16_t* src_ptr_audio = in_frame.data_;
  int16_t* dest_ptr_audio = preprocess_frame_.data_;
  if (down_mix) {
    // If a resampling is required the output of a down-mix is written into a
    // local buffer, otherwise, it will be written to the output frame.
    if (resample)
      dest_ptr_audio = audio;
    if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0)
      return -1;
    preprocess_frame_.num_channels_ = 1;
    // Set the input of the resampler to the down-mixed signal.
    src_ptr_audio = audio;
  }

  preprocess_frame_.timestamp_ = expected_codec_ts_;
  preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_;
  preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_;
  // If it is required, we have to do a resampling.
  if (resample) {
    // The result of the resampler is written to output frame.
    dest_ptr_audio = preprocess_frame_.data_;

    preprocess_frame_.samples_per_channel_ =
        resampler_.Resample10Msec(src_ptr_audio,
                                  in_frame.sample_rate_hz_,
                                  send_codec_inst_.plfreq,
                                  preprocess_frame_.num_channels_,
                                  AudioFrame::kMaxDataSizeSamples,
                                  dest_ptr_audio);

    if (preprocess_frame_.samples_per_channel_ < 0) {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                   "Cannot add 10 ms audio, resampling failed");
      return -1;
    }
    preprocess_frame_.sample_rate_hz_ = send_codec_inst_.plfreq;
  }

  expected_codec_ts_ += preprocess_frame_.samples_per_channel_;
  expected_in_ts_ += in_frame.samples_per_channel_;

  return 0;
}

/////////////////////////////////////////
//   (RED) Redundant Coding
//

bool AudioCodingModuleImpl::REDStatus() const {
  CriticalSectionScoped lock(acm_crit_sect_);

  return red_enabled_;
}

// Configure RED status i.e on/off.
int AudioCodingModuleImpl::SetREDStatus(
#ifdef WEBRTC_CODEC_RED
    bool enable_red) {
  CriticalSectionScoped lock(acm_crit_sect_);

  // RED and codec-internal FEC are mutually exclusive.
  if (enable_red == true && codec_fec_enabled_ == true) {
    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
                 "Codec internal FEC and RED cannot be co-enabled.");
    return -1;
  }

  if (red_enabled_ != enable_red) {
    // Reset the RED buffer.
    memset(red_buffer_, 0, MAX_PAYLOAD_SIZE_BYTE);

    // Reset fragmentation buffers.
    ResetFragmentation(kNumRedFragmentationVectors);
    // Set red_enabled_.
    red_enabled_ = enable_red;
  }
  is_first_red_ = true;  // Make sure we restart RED.
  return 0;
#else
    bool /* enable_red */) {
  // RED support was compiled out; force-disable and report failure.
  red_enabled_ = false;
  WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
               " WEBRTC_CODEC_RED is undefined => red_enabled_ = %d",
               red_enabled_);
  return -1;
#endif
}

/////////////////////////////////////////
//   (FEC) Forward Error Correction (codec internal)
//

bool AudioCodingModuleImpl::CodecFEC() const {
  // NOTE(review): read without acquiring |acm_crit_sect_|, unlike
  // REDStatus() — confirm this is intentional.
  return codec_fec_enabled_;
}

int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
  CriticalSectionScoped lock(acm_crit_sect_);

  // Codec-internal FEC and RED are mutually exclusive.
  if (enable_codec_fec == true && red_enabled_ == true) {
    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
                 "Codec internal FEC and RED cannot be co-enabled.");
    return -1;
  }

  // Set codec FEC.
  if (HaveValidEncoder("SetCodecFEC") &&
      codecs_[current_send_codec_idx_]->SetFEC(enable_codec_fec) < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Set codec internal FEC failed.");
    return -1;
  }
  // The flag is recorded even when no encoder is registered yet; it is
  // re-applied when a send codec gets registered.
  codec_fec_enabled_ = enable_codec_fec;
  return 0;
}

int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
  // NOTE(review): no lock is taken here — confirm concurrent use with
  // RegisterSendCodec() is safe.
  if (HaveValidEncoder("SetPacketLossRate") &&
      codecs_[current_send_codec_idx_]->SetPacketLossRate(loss_rate) < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Set packet loss rate failed.");
    return -1;
  }
  return 0;
}

/////////////////////////////////////////
//   (VAD) Voice Activity Detection
//
int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
                                  bool enable_vad,
                                  ACMVADMode mode) {
  CriticalSectionScoped lock(acm_crit_sect_);
  return SetVADSafe(enable_dtx, enable_vad, mode);
}

// Worker for SetVAD(); called with |acm_crit_sect_| held.
int AudioCodingModuleImpl::SetVADSafe(bool enable_dtx,
                                      bool enable_vad,
                                      ACMVADMode mode) {
  // Sanity check of the mode.
  if ((mode != VADNormal) && (mode != VADLowBitrate)
      && (mode != VADAggr) && (mode != VADVeryAggr)) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Invalid VAD Mode %d, no change is made to VAD/DTX status",
                 mode);
    return -1;
  }

  // Check that the send codec is mono. We don't support VAD/DTX for stereo
  // sending.
  if ((enable_dtx || enable_vad) && stereo_send_) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "VAD/DTX not supported for stereo sending");
    dtx_enabled_ = false;
    vad_enabled_ = false;
    vad_mode_ = mode;
    return -1;
  }

  // We don't support VAD/DTX when dual-streaming is enabled, i.e.
  // secondary-encoder is registered.
  if ((enable_dtx || enable_vad) && secondary_encoder_.get() != NULL) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "VAD/DTX not supported when dual-streaming is enabled.");
    dtx_enabled_ = false;
    vad_enabled_ = false;
    vad_mode_ = mode;
    return -1;
  }

  // Store VAD/DTX settings. Values can be changed in the call to "SetVAD"
  // below.
  dtx_enabled_ = enable_dtx;
  vad_enabled_ = enable_vad;
  vad_mode_ = mode;

  // If a send codec is registered, set VAD/DTX for the codec.
  if (HaveValidEncoder("SetVAD") && codecs_[current_send_codec_idx_]->SetVAD(
      &dtx_enabled_, &vad_enabled_, &vad_mode_) < 0) {
    // SetVAD failed.
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "SetVAD failed");
    vad_enabled_ = false;
    dtx_enabled_ = false;
    return -1;
  }
  return 0;
}

// Get VAD/DTX settings.
1552 int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled, 1553 ACMVADMode* mode) const { 1554 CriticalSectionScoped lock(acm_crit_sect_); 1555 1556 *dtx_enabled = dtx_enabled_; 1557 *vad_enabled = vad_enabled_; 1558 *mode = vad_mode_; 1559 1560 return 0; 1561 } 1562 1563 ///////////////////////////////////////// 1564 // Receiver 1565 // 1566 1567 int AudioCodingModuleImpl::InitializeReceiver() { 1568 CriticalSectionScoped lock(acm_crit_sect_); 1569 return InitializeReceiverSafe(); 1570 } 1571 1572 // Initialize receiver, resets codec database etc. 1573 int AudioCodingModuleImpl::InitializeReceiverSafe() { 1574 // If the receiver is already initialized then we want to destroy any 1575 // existing decoders. After a call to this function, we should have a clean 1576 // start-up. 1577 if (receiver_initialized_) { 1578 if (receiver_.RemoveAllCodecs() < 0) 1579 return -1; 1580 } 1581 receiver_.set_id(id_); 1582 receiver_.ResetInitialDelay(); 1583 receiver_.SetMinimumDelay(0); 1584 receiver_.SetMaximumDelay(0); 1585 receiver_.FlushBuffers(); 1586 1587 // Register RED and CN. 1588 for (int i = 0; i < ACMCodecDB::kNumCodecs; i++) { 1589 if (IsCodecRED(i) || IsCodecCN(i)) { 1590 uint8_t pl_type = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype); 1591 if (receiver_.AddCodec(i, pl_type, 1, NULL) < 0) { 1592 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 1593 "Cannot register master codec."); 1594 return -1; 1595 } 1596 } 1597 } 1598 receiver_initialized_ = true; 1599 return 0; 1600 } 1601 1602 // TODO(turajs): If NetEq opens an API for reseting the state of decoders then 1603 // implement this method. Otherwise it should be removed. I might be that by 1604 // removing and registering a decoder we can achieve the effect of resetting. 1605 // Reset the decoder state. 1606 int AudioCodingModuleImpl::ResetDecoder() { 1607 return 0; 1608 } 1609 1610 // Get current receive frequency. 
int AudioCodingModuleImpl::ReceiveFrequency() const {
  WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
               "ReceiveFrequency()");

  CriticalSectionScoped lock(acm_crit_sect_);

  int codec_id = receiver_.last_audio_codec_id();

  // If nothing has been decoded yet, fall back to the receiver's current
  // output rate; otherwise report the rate of the last decoded codec.
  int sample_rate_hz;
  if (codec_id < 0)
    sample_rate_hz = receiver_.current_sample_rate_hz();
  else
    sample_rate_hz = ACMCodecDB::database_[codec_id].plfreq;

  // TODO(tlegrand): Remove this option when we have full 48 kHz support.
  return (sample_rate_hz > 32000) ? 32000 : sample_rate_hz;
}

// Get current playout frequency.
int AudioCodingModuleImpl::PlayoutFrequency() const {
  WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
               "PlayoutFrequency()");

  CriticalSectionScoped lock(acm_crit_sect_);

  return receiver_.current_sample_rate_hz();
}

// Register possible receive codecs, can be called multiple times,
// for codecs, CNG (NB, WB and SWB), DTMF, RED.
int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
  CriticalSectionScoped lock(acm_crit_sect_);

  // NOTE(review): this guard admits codec.channels == 0 — confirm that
  // downstream validation (ReceiverCodecNumber) rejects it.
  if (codec.channels > 2 || codec.channels < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Unsupported number of channels, %d.", codec.channels);
    return -1;
  }

  // TODO(turajs) do we need this for NetEq 4?
  if (!receiver_initialized_) {
    if (InitializeReceiverSafe() < 0) {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                   "Cannot initialize receiver, failed registering codec.");
      return -1;
    }
  }

  int mirror_id;
  int codec_id = ACMCodecDB::ReceiverCodecNumber(codec, &mirror_id);

  if (codec_id < 0 || codec_id >= ACMCodecDB::kNumCodecs) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Wrong codec params to be registered as receive codec");
    return -1;
  }

  // Check if the payload-type is valid.
  if (!ACMCodecDB::ValidPayloadType(codec.pltype)) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Invalid payload-type %d for %s.", codec.pltype,
                 codec.plname);
    return -1;
  }

  AudioDecoder* decoder = NULL;
  // Get |decoder| associated with |codec|. |decoder| can be NULL if |codec|
  // does not own its decoder.
  if (GetAudioDecoder(codec, codec_id, mirror_id, &decoder) < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "Wrong codec params to be registered as receive codec");
    return -1;
  }
  uint8_t payload_type = static_cast<uint8_t>(codec.pltype);
  return receiver_.AddCodec(codec_id, payload_type, codec.channels, decoder);
}

// Get current received codec.
int AudioCodingModuleImpl::ReceiveCodec(CodecInst* current_codec) const {
  return receiver_.LastAudioCodec(current_codec);
}

// Incoming packet from network parsed and ready for decode.
1694 int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload, 1695 const int payload_length, 1696 const WebRtcRTPHeader& rtp_header) { 1697 if (payload_length < 0) { 1698 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 1699 "IncomingPacket() Error, payload-length cannot be negative"); 1700 return -1; 1701 } 1702 int last_audio_pltype = receiver_.last_audio_payload_type(); 1703 if (receiver_.InsertPacket(rtp_header, incoming_payload, payload_length) < 1704 0) { 1705 return -1; 1706 } 1707 if (receiver_.last_audio_payload_type() != last_audio_pltype) { 1708 int index = receiver_.last_audio_codec_id(); 1709 assert(index >= 0); 1710 CriticalSectionScoped lock(acm_crit_sect_); 1711 1712 // |codec_[index]| might not be even created, simply because it is not 1713 // yet registered as send codec. Even if it is registered, unless the 1714 // codec shares same instance for encoder and decoder, this call is 1715 // useless. 1716 if (codecs_[index] != NULL) 1717 codecs_[index]->UpdateDecoderSampFreq(index); 1718 } 1719 return 0; 1720 } 1721 1722 // Minimum playout delay (Used for lip-sync). 1723 int AudioCodingModuleImpl::SetMinimumPlayoutDelay(int time_ms) { 1724 if ((time_ms < 0) || (time_ms > 10000)) { 1725 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 1726 "Delay must be in the range of 0-1000 milliseconds."); 1727 return -1; 1728 } 1729 return receiver_.SetMinimumDelay(time_ms); 1730 } 1731 1732 int AudioCodingModuleImpl::SetMaximumPlayoutDelay(int time_ms) { 1733 if ((time_ms < 0) || (time_ms > 10000)) { 1734 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 1735 "Delay must be in the range of 0-1000 milliseconds."); 1736 return -1; 1737 } 1738 return receiver_.SetMaximumDelay(time_ms); 1739 } 1740 1741 // Estimate the Bandwidth based on the incoming stream, needed for one way 1742 // audio where the RTCP send the BW estimate. 1743 // This is also done in the RTP module. 
1744 int AudioCodingModuleImpl::DecoderEstimatedBandwidth() const { 1745 // We can estimate far-end to near-end bandwidth if the iSAC are sent. Check 1746 // if the last received packets were iSAC packet then retrieve the bandwidth. 1747 int last_audio_codec_id = receiver_.last_audio_codec_id(); 1748 if (last_audio_codec_id >= 0 && 1749 STR_CASE_CMP("ISAC", ACMCodecDB::database_[last_audio_codec_id].plname)) { 1750 CriticalSectionScoped lock(acm_crit_sect_); 1751 return codecs_[last_audio_codec_id]->GetEstimatedBandwidth(); 1752 } 1753 return -1; 1754 } 1755 1756 // Set playout mode for: voice, fax, streaming or off. 1757 int AudioCodingModuleImpl::SetPlayoutMode(AudioPlayoutMode mode) { 1758 receiver_.SetPlayoutMode(mode); 1759 return 0; // TODO(turajs): return value is for backward compatibility. 1760 } 1761 1762 // Get playout mode voice, fax, streaming or off. 1763 AudioPlayoutMode AudioCodingModuleImpl::PlayoutMode() const { 1764 return receiver_.PlayoutMode(); 1765 } 1766 1767 // Get 10 milliseconds of raw audio data to play out. 1768 // Automatic resample to the requested frequency. 1769 int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz, 1770 AudioFrame* audio_frame) { 1771 // GetAudio always returns 10 ms, at the requested sample rate. 1772 if (receiver_.GetAudio(desired_freq_hz, audio_frame) != 0) { 1773 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 1774 "PlayoutData failed, RecOut Failed"); 1775 return -1; 1776 } 1777 1778 audio_frame->id_ = id_; 1779 return 0; 1780 } 1781 1782 ///////////////////////////////////////// 1783 // Statistics 1784 // 1785 1786 // TODO(turajs) change the return value to void. Also change the corresponding 1787 // NetEq function. 
1788 int AudioCodingModuleImpl::NetworkStatistics(ACMNetworkStatistics* statistics) { 1789 receiver_.NetworkStatistics(statistics); 1790 return 0; 1791 } 1792 1793 void AudioCodingModuleImpl::DestructEncoderInst(void* inst) { 1794 CriticalSectionScoped lock(acm_crit_sect_); 1795 WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, id_, 1796 "DestructEncoderInst()"); 1797 if (!HaveValidEncoder("DestructEncoderInst")) 1798 return; 1799 codecs_[current_send_codec_idx_]->DestructEncoderInst(inst); 1800 } 1801 1802 int AudioCodingModuleImpl::RegisterVADCallback(ACMVADCallback* vad_callback) { 1803 WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, id_, 1804 "RegisterVADCallback()"); 1805 CriticalSectionScoped lock(callback_crit_sect_); 1806 vad_callback_ = vad_callback; 1807 return 0; 1808 } 1809 1810 // TODO(tlegrand): Modify this function to work for stereo, and add tests. 1811 int AudioCodingModuleImpl::IncomingPayload(const uint8_t* incoming_payload, 1812 int payload_length, 1813 uint8_t payload_type, 1814 uint32_t timestamp) { 1815 if (payload_length < 0) { 1816 // Log error in trace file. 1817 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 1818 "IncomingPacket() Error, payload-length cannot be negative"); 1819 return -1; 1820 } 1821 1822 // We are not acquiring any lock when interacting with |aux_rtp_header_| no 1823 // other method uses this member variable. 1824 if (aux_rtp_header_ == NULL) { 1825 // This is the first time that we are using |dummy_rtp_header_| 1826 // so we have to create it. 1827 aux_rtp_header_ = new WebRtcRTPHeader; 1828 aux_rtp_header_->header.payloadType = payload_type; 1829 // Don't matter in this case. 1830 aux_rtp_header_->header.ssrc = 0; 1831 aux_rtp_header_->header.markerBit = false; 1832 // Start with random numbers. 1833 aux_rtp_header_->header.sequenceNumber = 0x1234; // Arbitrary. 
1834 aux_rtp_header_->type.Audio.channel = 1; 1835 } 1836 1837 aux_rtp_header_->header.timestamp = timestamp; 1838 IncomingPacket(incoming_payload, payload_length, *aux_rtp_header_); 1839 // Get ready for the next payload. 1840 aux_rtp_header_->header.sequenceNumber++; 1841 return 0; 1842 } 1843 1844 int AudioCodingModuleImpl::ReplaceInternalDTXWithWebRtc(bool use_webrtc_dtx) { 1845 CriticalSectionScoped lock(acm_crit_sect_); 1846 1847 if (!HaveValidEncoder("ReplaceInternalDTXWithWebRtc")) { 1848 WEBRTC_TRACE( 1849 webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 1850 "Cannot replace codec internal DTX when no send codec is registered."); 1851 return -1; 1852 } 1853 1854 int res = codecs_[current_send_codec_idx_]->ReplaceInternalDTX( 1855 use_webrtc_dtx); 1856 // Check if VAD is turned on, or if there is any error. 1857 if (res == 1) { 1858 vad_enabled_ = true; 1859 } else if (res < 0) { 1860 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 1861 "Failed to set ReplaceInternalDTXWithWebRtc(%d)", 1862 use_webrtc_dtx); 1863 return res; 1864 } 1865 1866 return 0; 1867 } 1868 1869 int AudioCodingModuleImpl::IsInternalDTXReplacedWithWebRtc( 1870 bool* uses_webrtc_dtx) { 1871 CriticalSectionScoped lock(acm_crit_sect_); 1872 1873 if (!HaveValidEncoder("IsInternalDTXReplacedWithWebRtc")) { 1874 return -1; 1875 } 1876 if (codecs_[current_send_codec_idx_]->IsInternalDTXReplaced(uses_webrtc_dtx) 1877 < 0) { 1878 return -1; 1879 } 1880 return 0; 1881 } 1882 1883 int AudioCodingModuleImpl::SetISACMaxRate(int max_bit_per_sec) { 1884 CriticalSectionScoped lock(acm_crit_sect_); 1885 1886 if (!HaveValidEncoder("SetISACMaxRate")) { 1887 return -1; 1888 } 1889 1890 return codecs_[current_send_codec_idx_]->SetISACMaxRate(max_bit_per_sec); 1891 } 1892 1893 int AudioCodingModuleImpl::SetISACMaxPayloadSize(int max_size_bytes) { 1894 CriticalSectionScoped lock(acm_crit_sect_); 1895 1896 if (!HaveValidEncoder("SetISACMaxPayloadSize")) { 1897 return -1; 1898 } 1899 1900 
return codecs_[current_send_codec_idx_]->SetISACMaxPayloadSize( 1901 max_size_bytes); 1902 } 1903 1904 int AudioCodingModuleImpl::ConfigISACBandwidthEstimator( 1905 int frame_size_ms, 1906 int rate_bit_per_sec, 1907 bool enforce_frame_size) { 1908 CriticalSectionScoped lock(acm_crit_sect_); 1909 1910 if (!HaveValidEncoder("ConfigISACBandwidthEstimator")) { 1911 return -1; 1912 } 1913 1914 return codecs_[current_send_codec_idx_]->ConfigISACBandwidthEstimator( 1915 frame_size_ms, rate_bit_per_sec, enforce_frame_size); 1916 } 1917 1918 int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) { 1919 return receiver_.GetPlayoutTimestamp(timestamp) ? 0 : -1; 1920 } 1921 1922 bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const { 1923 if ((!send_codec_registered_) || (current_send_codec_idx_ < 0) || 1924 (current_send_codec_idx_ >= ACMCodecDB::kNumCodecs)) { 1925 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 1926 "%s failed: No send codec is registered.", caller_name); 1927 return false; 1928 } 1929 if ((current_send_codec_idx_ < 0) || 1930 (current_send_codec_idx_ >= ACMCodecDB::kNumCodecs)) { 1931 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 1932 "%s failed: Send codec index out of range.", caller_name); 1933 return false; 1934 } 1935 if (codecs_[current_send_codec_idx_] == NULL) { 1936 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 1937 "%s failed: Send codec is NULL pointer.", caller_name); 1938 return false; 1939 } 1940 return true; 1941 } 1942 1943 int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) { 1944 return receiver_.RemoveCodec(payload_type); 1945 } 1946 1947 // TODO(turajs): correct the type of |length_bytes| when it is corrected in 1948 // GenericCodec. 
1949 int AudioCodingModuleImpl::REDPayloadISAC(int isac_rate, 1950 int isac_bw_estimate, 1951 uint8_t* payload, 1952 int16_t* length_bytes) { 1953 if (!HaveValidEncoder("EncodeData")) { 1954 return -1; 1955 } 1956 int status; 1957 status = codecs_[current_send_codec_idx_]->REDPayloadISAC(isac_rate, 1958 isac_bw_estimate, 1959 payload, 1960 length_bytes); 1961 return status; 1962 } 1963 1964 void AudioCodingModuleImpl::ResetFragmentation(int vector_size) { 1965 for (int n = 0; n < kMaxNumFragmentationVectors; n++) { 1966 fragmentation_.fragmentationOffset[n] = n * MAX_PAYLOAD_SIZE_BYTE; 1967 } 1968 memset(fragmentation_.fragmentationLength, 0, kMaxNumFragmentationVectors * 1969 sizeof(fragmentation_.fragmentationLength[0])); 1970 memset(fragmentation_.fragmentationTimeDiff, 0, kMaxNumFragmentationVectors * 1971 sizeof(fragmentation_.fragmentationTimeDiff[0])); 1972 memset(fragmentation_.fragmentationPlType, 1973 0, 1974 kMaxNumFragmentationVectors * 1975 sizeof(fragmentation_.fragmentationPlType[0])); 1976 fragmentation_.fragmentationVectorSize = static_cast<uint16_t>(vector_size); 1977 } 1978 1979 int AudioCodingModuleImpl::GetAudioDecoder(const CodecInst& codec, int codec_id, 1980 int mirror_id, 1981 AudioDecoder** decoder) { 1982 if (ACMCodecDB::OwnsDecoder(codec_id)) { 1983 // This codec has to own its own decoder. Therefore, it should create the 1984 // corresponding AudioDecoder class and insert it into NetEq. If the codec 1985 // does not exist create it. 1986 // 1987 // TODO(turajs): this part of the code is common with RegisterSendCodec(), 1988 // make a method for it. 
1989 if (codecs_[mirror_id] == NULL) { 1990 codecs_[mirror_id] = CreateCodec(codec); 1991 if (codecs_[mirror_id] == NULL) { 1992 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 1993 "Cannot Create the codec"); 1994 return -1; 1995 } 1996 mirror_codec_idx_[mirror_id] = mirror_id; 1997 } 1998 1999 if (mirror_id != codec_id) { 2000 codecs_[codec_id] = codecs_[mirror_id]; 2001 mirror_codec_idx_[codec_id] = mirror_id; 2002 } 2003 *decoder = codecs_[codec_id]->Decoder(codec_id); 2004 if (!*decoder) { 2005 assert(false); 2006 return -1; 2007 } 2008 } else { 2009 *decoder = NULL; 2010 } 2011 2012 return 0; 2013 } 2014 2015 int AudioCodingModuleImpl::SetInitialPlayoutDelay(int delay_ms) { 2016 { 2017 CriticalSectionScoped lock(acm_crit_sect_); 2018 // Initialize receiver, if it is not initialized. Otherwise, initial delay 2019 // is reset upon initialization of the receiver. 2020 if (!receiver_initialized_) 2021 InitializeReceiverSafe(); 2022 } 2023 return receiver_.SetInitialDelay(delay_ms); 2024 } 2025 2026 int AudioCodingModuleImpl::EnableNack(size_t max_nack_list_size) { 2027 return receiver_.EnableNack(max_nack_list_size); 2028 } 2029 2030 void AudioCodingModuleImpl::DisableNack() { 2031 receiver_.DisableNack(); 2032 } 2033 2034 std::vector<uint16_t> AudioCodingModuleImpl::GetNackList( 2035 int round_trip_time_ms) const { 2036 return receiver_.GetNackList(round_trip_time_ms); 2037 } 2038 2039 int AudioCodingModuleImpl::LeastRequiredDelayMs() const { 2040 return receiver_.LeastRequiredDelayMs(); 2041 } 2042 2043 void AudioCodingModuleImpl::GetDecodingCallStatistics( 2044 AudioDecodingCallStats* call_stats) const { 2045 receiver_.GetDecodingCallStatistics(call_stats); 2046 } 2047 2048 } // namespace acm2 2049 2050 } // namespace webrtc 2051