/*
**
** Copyright 2012, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

#define LOG_TAG "AudioHAL:AudioStreamOut"

#include <assert.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <utils/Log.h>

#include "AudioHardwareOutput.h"
#include "AudioStreamOut.h"

// Set to 1 to print timestamp data in CSV format.
#ifndef HAL_PRINT_TIMESTAMP_CSV
#define HAL_PRINT_TIMESTAMP_CSV 0
#endif

//#define VERY_VERBOSE_LOGGING
#ifdef VERY_VERBOSE_LOGGING
#define ALOGVV ALOGV
#else
#define ALOGVV(a...) do { } while (0)
#endif

namespace android {

AudioStreamOut::AudioStreamOut(AudioHardwareOutput& owner, bool mcOut)
    : mFramesPresented(0)
    , mFramesRendered(0)
    , mFramesWrittenRemainder(0)
    , mOwnerHAL(owner)
    , mFramesWritten(0)
    , mTgtDevices(0)
    , mAudioFlingerTgtDevices(0)
    , mIsMCOutput(mcOut)
    , mIsEncoded(false)
    , mInStandby(false)
    , mSPDIFEncoder(this)
{
    assert(mLocalClock.initCheck());

    mPhysOutputs.setCapacity(3);

    // Set some reasonable defaults for these.  All of this should eventually
    // be overwritten by a specific audio flinger configuration, but it does
    // not hurt to have something here by default.
    mInputSampleRate = 48000;
    mInputChanMask = AUDIO_CHANNEL_OUT_STEREO;
    mInputFormat = AUDIO_FORMAT_PCM_16_BIT;
    mInputNominalChunksInFlight = 4;
    updateInputNums();

    mThrottleValid = false;

    memset(&mUSecToLocalTime, 0, sizeof(mUSecToLocalTime));
    mUSecToLocalTime.a_to_b_numer = mLocalClock.getLocalFreq();
    mUSecToLocalTime.a_to_b_denom = 1000000;
    LinearTransform::reduce(&mUSecToLocalTime.a_to_b_numer,
                            &mUSecToLocalTime.a_to_b_denom);
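    // Illustrative numbers (assuming, say, a 27 MHz local timebase; the real
    // rate comes from mLocalClock.getLocalFreq()): the transform starts as
    // 27,000,000 / 1,000,000 and reduces to 27 / 1, which keeps the
    // intermediate multiplications in the transform well clear of 64-bit
    // overflow.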
}

AudioStreamOut::~AudioStreamOut()
{
    releaseAllOutputs();
}

status_t AudioStreamOut::set(
        audio_format_t *pFormat,
        uint32_t *pChannels,
        uint32_t *pRate)
{
    Mutex::Autolock _l(mLock);
    audio_format_t lFormat = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
    uint32_t lChannels = pChannels ? *pChannels : 0;
    uint32_t lRate = pRate ? *pRate : 0;

    // fix up defaults
    if (lFormat == AUDIO_FORMAT_DEFAULT) lFormat = format();
    if (lChannels == 0) lChannels = chanMask();
    if (lRate == 0) lRate = sampleRate();

    if (pFormat) *pFormat = lFormat;
    if (pChannels) *pChannels = lChannels;
    if (pRate) *pRate = lRate;

    mIsEncoded = !audio_is_linear_pcm(lFormat);

    if (!mIsMCOutput && !mIsEncoded) {
        // If this is the primary stream out, then demand our defaults.
        if ((lFormat != format()) ||
            (lChannels != chanMask()) ||
            (lRate != sampleRate()))
            return BAD_VALUE;
    } else {
        // Else check to see if our HDMI sink supports this format before
        // proceeding.
        if (!mOwnerHAL.getHDMIAudioCaps().supportsFormat(
                lFormat,
                lRate,
                audio_channel_count_from_out_mask(lChannels)))
            return BAD_VALUE;
    }

    mInputFormat = lFormat;
    mInputChanMask = lChannels;
    mInputSampleRate = lRate;
    ALOGI("AudioStreamOut::set: lRate = %u, mIsEncoded = %d\n", lRate, mIsEncoded);
    updateInputNums();

    return NO_ERROR;
}

void AudioStreamOut::setTgtDevices(uint32_t tgtDevices)
{
    Mutex::Autolock _l(mRoutingLock);
    if (mTgtDevices != tgtDevices) {
        mTgtDevices = tgtDevices;
    }
}

status_t AudioStreamOut::standby()
{
    mFramesRendered = 0;
    releaseAllOutputs();
    mOwnerHAL.standbyStatusUpdate(true, mIsMCOutput);
    mInStandby = true;

    return NO_ERROR;
}

void AudioStreamOut::releaseAllOutputs() {
    Mutex::Autolock _l(mRoutingLock);

    ALOGI("releaseAllOutputs: releasing %zu physical outputs", mPhysOutputs.size());
    AudioOutputList::iterator I;
    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I)
        mOwnerHAL.releaseOutput(*this, *I);

    mPhysOutputs.clear();
}

void AudioStreamOut::updateInputNums()
{
    assert(mLocalClock.initCheck());

    // mInputBufSize determines how many audio frames AudioFlinger is going to
    // mix at a time.  We also use mInputBufSize to determine the ALSA
    // period_size, the number of samples which need to play out (at most)
    // before low level ALSA driver code is required to wake up upper levels of
    // SW to fill a new buffer.  As it turns out, ALSA is going to apply some
    // rules and modify the period_size which we pass to it.  One of the things
    // ALSA seems to do is attempt to round the period_size up to a value which
    // will make the period an integral multiple of 0.5 mSec.  This round-up
    // behavior can cause the low levels of ALSA to consume more data per
    // period than the AudioFlinger mixer has been told to produce.  If there
    // are only two buffers in flight at any given point in time, this can lead
    // to a situation where the pipeline ends up slipping an extra buffer and
    // underflowing.  There are two approaches to mitigate this, both of which
    // are implemented in this HAL...
    //
    // 1) Try as hard as possible to make certain that the buffer size we
    //    choose results in a period_size which is not going to get rounded up
    //    by ALSA.  This means that we want a buffer size which at the chosen
    //    sample rate and frame size will be an integral multiple of 1/2 mSec.
    // 2) Increase the number of chunks we keep in flight.  If the system slips
    //    a single period, it is only really a problem if there is no data left
    //    in the pipeline waiting to be played out.  The mixer is going to mix
    //    as fast as possible until the buffer has been topped off.  By
    //    decreasing the buffer size and increasing the number of buffers in
    //    flight, we increase the number of interrupts and mix events per
    //    second, but buy ourselves some insurance against the negative side
    //    effects of slipping one buffer in the schedule.  We end up using 4
    //    buffers at 10mSec, making the total audio latency somewhere between
    //    40 and 50 mSec, depending on when a sample begins playback relative
    //    to AudioFlinger's mixing schedule.
    //
    mInputChanCount = audio_channel_count_from_out_mask(mInputChanMask);

    // Picking a chunk duration of 10mSec should satisfy #1 for both major
    // families of audio sample rates (the 44.1K and 48K families).  In the
    // case of 44.1 (or higher) we will end up with a multiple of 441 frames of
    // audio per chunk, while for 48K, we will have a multiple of 480 frames of
    // audio per chunk.  This will not work well for lower sample rates in the
    // 44.1 family (22.05K and 11.025K); it is unlikely that we will ever be
    // configured to deliver those rates, and if we ever do, we will need to
    // rely on having extra chunks in flight to deal with the jitter problem
    // described above.
    mInputChunkFrames = outputSampleRate() / 100;
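    // Worked numbers (illustrative): at a 48kHz output rate this yields
    // 48000 / 100 = 480 frames per chunk (exactly 10 mSec, an integral
    // multiple of 0.5 mSec); at 44.1kHz it yields 441 frames.  When an
    // encoded stream runs the output at 4X (see getRateMultiplier), a 48kHz
    // input produces 192000 / 100 = 1920 frames per chunk.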
    // FIXME: Currently, audio flinger demands an input buffer size which is a
    // multiple of 16 audio frames.  Right now, there is no good way to
    // reconcile this with the ALSA round-up behavior described above when the
    // desired sample rate is a member of the 44.1 family.  For now, we just
    // round up to the nearest multiple of 16 frames and roll the dice, but
    // someday it would be good to fix one half or the other of the problem
    // (either ALSA or AudioFlinger).
    mInputChunkFrames = (mInputChunkFrames + 0xF) & ~0xF;

    ALOGD("AudioStreamOut::updateInputNums: chunk size %u from output rate %u\n",
            mInputChunkFrames, outputSampleRate());

    // Buffer size is just the frame size multiplied by the number of
    // frames per chunk.
    mInputBufSize = mInputChunkFrames * getBytesPerOutputFrame();

    // The nominal latency is just the duration of a chunk * the number of
    // chunks we nominally keep in flight at any given point in time.
    mInputNominalLatencyUSec = static_cast<uint32_t>(((
            static_cast<uint64_t>(mInputChunkFrames)
            * 1000000 * mInputNominalChunksInFlight)
            / mInputSampleRate));
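    // Worked numbers (illustrative): 480 frames is already a multiple of 16,
    // but 441 rounds up to 448, so each 44.1kHz period carries about 0.16
    // mSec of extra data.  With the defaults (480-frame chunks, 4 chunks in
    // flight, 48kHz), the nominal latency is
    // 480 * 1,000,000 * 4 / 48,000 = 40,000 uSec.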
    memset(&mLocalTimeToFrames, 0, sizeof(mLocalTimeToFrames));
    mLocalTimeToFrames.a_to_b_numer = mInputSampleRate;
    mLocalTimeToFrames.a_to_b_denom = mLocalClock.getLocalFreq();
    LinearTransform::reduce(
            &mLocalTimeToFrames.a_to_b_numer,
            &mLocalTimeToFrames.a_to_b_denom);
}

void AudioStreamOut::finishedWriteOp(size_t framesWritten,
                                     bool needThrottle)
{
    assert(mLocalClock.initCheck());

    int64_t now = mLocalClock.getLocalTime();

    if (!mThrottleValid || !needThrottle) {
        mThrottleValid = true;
        mWriteStartLT = now;
        mFramesWritten = 0;
    }

    size_t framesWrittenAppRate;
    uint32_t multiplier = getRateMultiplier();
    if (multiplier != 1) {
        // Accumulate round-off error from previous call.
        framesWritten += mFramesWrittenRemainder;
        // Scale from device sample rate to application rate.
        framesWrittenAppRate = framesWritten / multiplier;
        ALOGV("finishedWriteOp() framesWrittenAppRate = %zu = %zu / %u\n",
              framesWrittenAppRate, framesWritten, multiplier);
        // Save remainder for next time to prevent error accumulation.
        mFramesWrittenRemainder = framesWritten - (framesWrittenAppRate * multiplier);
    } else {
        framesWrittenAppRate = framesWritten;
    }

    mFramesWritten += framesWrittenAppRate;
    mFramesPresented += framesWrittenAppRate;
    mFramesRendered += framesWrittenAppRate;

    if (needThrottle) {
        int64_t deltaLT;
        mLocalTimeToFrames.doReverseTransform(mFramesWritten, &deltaLT);
        deltaLT += mWriteStartLT;
        deltaLT -= now;

        int64_t deltaUSec;
        mUSecToLocalTime.doReverseTransform(deltaLT, &deltaUSec);

        if (deltaUSec > 0) {
            useconds_t sleep_time;

            // We should never be a full second ahead of schedule; sanity check
            // our throttle time and cap the max sleep time at 1 second.
            if (deltaUSec > 1000000) {
                ALOGW("throttle time clipped! deltaLT = %lld deltaUSec = %lld",
                      deltaLT, deltaUSec);
                sleep_time = 1000000;
            } else {
                sleep_time = static_cast<useconds_t>(deltaUSec);
            }
            usleep(sleep_time);
        }
    }
}

static const String8 keyRouting(AudioParameter::keyRouting);
static const String8 keySupSampleRates("sup_sampling_rates");
static const String8 keySupFormats("sup_formats");
static const String8 keySupChannels("sup_channels");

status_t AudioStreamOut::setParameters(__unused struct audio_stream *stream,
                                       const char *kvpairs)
{
    AudioParameter param = AudioParameter(String8(kvpairs));
    int tmpInt;

    if (param.getInt(keyRouting, tmpInt) == NO_ERROR) {
        // The audio HAL handles routing to physical devices entirely
        // internally and mostly ignores what audio flinger tells it to do.
        // Just in case there is something (now or in the future) in audio
        // flinger which cares about the routing value in a call to
        // getParameters, we hang on to the last routing value set by audio
        // flinger so we can at least be consistent when we lie to the upper
        // levels about doing what they told us to do.
        mAudioFlingerTgtDevices = static_cast<uint32_t>(tmpInt);
    }

    return NO_ERROR;
}

char* AudioStreamOut::getParameters(const char* k)
{
    AudioParameter param = AudioParameter(String8(k));
    String8 value;

    if (param.get(keyRouting, value) == NO_ERROR) {
        param.addInt(keyRouting, (int)mAudioFlingerTgtDevices);
    }

    HDMIAudioCaps& hdmiCaps = mOwnerHAL.getHDMIAudioCaps();

    if (param.get(keySupSampleRates, value) == NO_ERROR) {
        if (mIsMCOutput) {
            hdmiCaps.getRatesForAF(value);
            param.add(keySupSampleRates, value);
        } else {
            param.add(keySupSampleRates, String8("48000"));
        }
    }

    if (param.get(keySupFormats, value) == NO_ERROR) {
        if (mIsMCOutput) {
            hdmiCaps.getFmtsForAF(value);
            param.add(keySupFormats, value);
        } else {
            param.add(keySupFormats, String8("AUDIO_FORMAT_PCM_16_BIT"));
        }
    }

    if (param.get(keySupChannels, value) == NO_ERROR) {
        if (mIsMCOutput) {
            hdmiCaps.getChannelMasksForAF(value);
            param.add(keySupChannels, value);
        } else {
            param.add(keySupChannels, String8("AUDIO_CHANNEL_OUT_STEREO"));
        }
    }

    return strdup(param.toString().string());
}

uint32_t AudioStreamOut::getRateMultiplier() const
{
    return (mIsEncoded) ? mSPDIFEncoder.getRateMultiplier() : 1;
}
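// For reference (not derived from this file): IEC 61937-wrapped EAC3 is
// transmitted at four times the audio sample rate, so for a 48kHz EAC3
// stream mSPDIFEncoder.getRateMultiplier() would be expected to return 4,
// making the effective output rate 192kHz.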
uint32_t AudioStreamOut::outputSampleRate() const
{
    return mInputSampleRate * getRateMultiplier();
}

int AudioStreamOut::getBytesPerOutputFrame()
{
    return (mIsEncoded) ? mSPDIFEncoder.getBytesPerOutputFrame()
                        : (mInputChanCount * sizeof(int16_t));
}

uint32_t AudioStreamOut::latency() const {
    uint32_t uSecLatency = mInputNominalLatencyUSec;
    uint32_t vcompDelay = mOwnerHAL.getVideoDelayCompUsec();

    if (uSecLatency < vcompDelay)
        return 0;

    return ((uSecLatency - vcompDelay) / 1000);
}

// Used to implement get_presentation_position() for Audio HAL.
// According to the prototype in audio.h, the frame count should not get
// reset on standby().
status_t AudioStreamOut::getPresentationPosition(uint64_t *frames,
        struct timespec *timestamp)
{
    Mutex::Autolock _l(mRoutingLock);
    status_t result = -ENODEV;

    // The presentation timestamp should be the same for all devices.
    // Also Molly only has one output device at the moment.
    // So just use the first one in the list.
    if (!mPhysOutputs.isEmpty()) {
        const unsigned int kInsaneAvail = 10 * 48000;
        unsigned int avail = 0;
        sp<AudioOutput> audioOutput = mPhysOutputs.itemAt(0);
        if (audioOutput->getHardwareTimestamp(&avail, timestamp) == 0) {
            if (avail < kInsaneAvail) {
                // FIXME av sync fudge factor
                // Use a fudge factor to account for hidden buffering in the
                // HDMI output path.  This is a hack until we can determine the
                // actual buffer sizes.
                // Increasing kFudgeMSec will move the audio earlier in
                // relation to the video.
                const int kFudgeMSec = 50;
                int fudgeFrames = kFudgeMSec * sampleRate() / 1000;

                // Scale the frames in the driver because it might be running
                // at a higher rate for EAC3.
                int64_t framesInDriverBuffer =
                        (int64_t)audioOutput->getKernelBufferSize() - (int64_t)avail;
                framesInDriverBuffer = framesInDriverBuffer / getRateMultiplier();

                int64_t pendingFrames = framesInDriverBuffer + fudgeFrames;
                int64_t signedFrames = mFramesPresented - pendingFrames;
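                // Illustrative numbers (not from a real trace): with an
                // 8192-frame kernel buffer, avail = 4096, and a 4X encoded
                // stream, framesInDriverBuffer = (8192 - 4096) / 4 = 1024
                // app-rate frames.  At 48kHz the fudge adds
                // 50 * 48000 / 1000 = 2400 frames, so pendingFrames = 3424
                // and the reported position trails mFramesPresented by that
                // amount.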
                if (pendingFrames < 0) {
                    ALOGE("getPresentationPosition: negative pendingFrames = %lld",
                          pendingFrames);
                } else if (signedFrames < 0) {
                    ALOGI("getPresentationPosition: playing silent preroll"
                          ", mFramesPresented = %llu, pendingFrames = %lld",
                          mFramesPresented, pendingFrames);
                } else {
#if HAL_PRINT_TIMESTAMP_CSV
                    // Print comma separated values for spreadsheet analysis.
                    uint64_t nanos = (((uint64_t)timestamp->tv_sec) * 1000000000L)
                            + timestamp->tv_nsec;
                    ALOGI("getPresentationPosition, %lld, %4u, %lld, %llu",
                          mFramesPresented, avail, signedFrames, nanos);
#endif
                    *frames = (uint64_t) signedFrames;
                    result = NO_ERROR;
                }
            } else {
                ALOGE("getPresentationPosition: avail too large = %u", avail);
            }
        } else {
            ALOGE("getPresentationPosition: getHardwareTimestamp returned non-zero");
        }
    } else {
        ALOGVV("getPresentationPosition: no physical outputs! This HAL is inactive!");
    }
    return result;
}

status_t AudioStreamOut::getRenderPosition(uint32_t *dspFrames)
{
    if (dspFrames == NULL) {
        return -EINVAL;
    }
    if (mPhysOutputs.isEmpty()) {
        *dspFrames = 0;
        return -ENODEV;
    }
    *dspFrames = (uint32_t) mFramesRendered;
    return NO_ERROR;
}

void AudioStreamOut::updateTargetOutputs()
{
    Mutex::Autolock _l(mRoutingLock);

    AudioOutputList::iterator I;
    uint32_t cur_outputs = 0;

    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I)
        cur_outputs |= (*I)->devMask();

    if (cur_outputs == mTgtDevices)
        return;

    uint32_t outputsToObtain  = mTgtDevices & ~cur_outputs;
    uint32_t outputsToRelease = cur_outputs & ~mTgtDevices;
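    // Set algebra example (masks are illustrative): if cur_outputs = 0x3 and
    // mTgtDevices = 0x5, then outputsToObtain = 0x5 & ~0x3 = 0x4 and
    // outputsToRelease = 0x3 & ~0x5 = 0x2; the common bit 0x1 is left
    // untouched.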
    // Start by releasing any outputs we should no longer have back to the HAL.
    if (outputsToRelease) {

        I = mPhysOutputs.begin();
        while (I != mPhysOutputs.end()) {
            if (!(outputsToRelease & (*I)->devMask())) {
                ++I;
                continue;
            }

            outputsToRelease &= ~((*I)->devMask());
            mOwnerHAL.releaseOutput(*this, *I);
            I = mPhysOutputs.erase(I);
        }
    }

    if (outputsToRelease) {
        ALOGW("Bookkeeping error!  Still have outputs to release (%08x), but"
              " none of them appear to be in the mPhysOutputs list!",
              outputsToRelease);
    }

    // Now attempt to obtain any outputs we should be using, but are not
    // currently.
    if (outputsToObtain) {
        uint32_t mask;

        // Buffer configuration may need updating now that we have decoded
        // the start of a stream.  For example, EAC3 needs 4X the sample rate.
        updateInputNums();

        for (mask = 0x1; outputsToObtain; mask <<= 1) {
            if (!(mask & outputsToObtain))
                continue;

            sp<AudioOutput> newOutput;
            status_t res;

            res = mOwnerHAL.obtainOutput(*this, mask, &newOutput);
            outputsToObtain &= ~mask;

            if (OK != res) {
                // If we get an error back from obtain output, it means that
                // something went really wrong at a lower level (probably failed
                // to open the driver).  We should not try to obtain this output
                // again, at least until the next routing change.
                ALOGW("Failed to obtain output %08x for %s audio stream out."
                      " (res %d)", mask, getName(), res);
                mTgtDevices &= ~mask;
                continue;
            }

            if (newOutput != NULL) {
                // If we actually got an output, go ahead and add it to our list
                // of physical outputs.  The rest of the system will handle
                // starting it up.  If we didn't get an output, but also got no
                // error code, it just means that the output is currently busy
                // and should become available soon.
                ALOGI("updateTargetOutputs: adding output back to mPhysOutputs");
                mPhysOutputs.push_back(newOutput);
            }
        }
    }
}

void AudioStreamOut::adjustOutputs(int64_t maxTime)
{
    int64_t a_zero_original = mLocalTimeToFrames.a_zero;
    int64_t b_zero_original = mLocalTimeToFrames.b_zero;
    AudioOutputList::iterator I;

    // Check to see if any outputs are active and see what their buffer levels
    // are.
    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
        if ((*I)->getState() == AudioOutput::DMA_START) {
            int64_t lastWriteTS = (*I)->getLastNextWriteTS();
            int64_t padAmt;

            mLocalTimeToFrames.a_zero = lastWriteTS;
            mLocalTimeToFrames.b_zero = 0;
            if (mLocalTimeToFrames.doForwardTransform(maxTime,
                                                      &padAmt)) {
                (*I)->adjustDelay(((int32_t)padAmt));
            }
        }
    }

    // Restore original offset so that the sleep time calculation for
    // throttling is not broken in finishedWriteOp().
    mLocalTimeToFrames.a_zero = a_zero_original;
    mLocalTimeToFrames.b_zero = b_zero_original;
}
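// Illustrative numbers for the transform above (assuming a 27 MHz local
// timebase and a 48kHz stream): an output whose lastWriteTS lags maxTime by
// 27,000 ticks (1 mSec) gets padAmt = 27,000 * 48,000 / 27,000,000 = 48
// frames of additional delay, which lines its DMA start up with the
// latest-starting output.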
ssize_t AudioStreamOut::write(const void* buffer, size_t bytes)
{
    uint8_t *data = (uint8_t *)buffer;
    ALOGVV("AudioStreamOut::write(%zu) 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
           " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
           " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
           " 0x%02X, 0x%02X, 0x%02X, 0x%02X ====",
          bytes, data[0], data[1], data[2], data[3],
          data[4], data[5], data[6], data[7],
          data[8], data[9], data[10], data[11],
          data[12], data[13], data[14], data[15]);
    if (mIsEncoded) {
        return mSPDIFEncoder.write(buffer, bytes);
    } else {
        return writeInternal(buffer, bytes);
    }
}

ssize_t AudioStreamOut::writeInternal(const void* buffer, size_t bytes)
{
    uint8_t *data = (uint8_t *)buffer;
    ALOGVV("AudioStreamOut::write_l(%zu) 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
           " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
           " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
           " 0x%02X, 0x%02X, 0x%02X, 0x%02X",
          bytes, data[0], data[1], data[2], data[3],
          data[4], data[5], data[6], data[7],
          data[8], data[9], data[10], data[11],
          data[12], data[13], data[14], data[15]);

    // Note: no lock is obtained here.  Calls to write and getNextWriteTimestamp
    // happen only on the AudioFlinger mixer thread which owns this particular
    // output stream, so there is no need to worry that there will be two
    // threads in this instance method concurrently.
    //
    // In addition, only calls to write change the contents of the mPhysOutputs
    // collection (during the call to updateTargetOutputs).  updateTargetOutputs
    // will hold the routing lock during the operation, as should any reader of
    // mPhysOutputs, unless the reader is a call to write or
    // getNextWriteTimestamp (we know that it is safe for write and gnwt to read
    // the collection because the only collection mutator is the same thread
    // which calls write and gnwt).

    // If the stream is in standby, then the first write should bring it out
    // of standby.
    if (mInStandby) {
        mOwnerHAL.standbyStatusUpdate(false, mIsMCOutput);
        mInStandby = false;
    }

    updateTargetOutputs();

    // If any of our outputs is in the PRIMED state when ::write is called, it
    // means one of two things.  First, it could be that the DMA output really
    // has not started yet.  This is odd, but certainly not impossible.  The
    // other possibility is that AudioFlinger is in its silence-pushing mode
    // and is not calling getNextWriteTimestamp.  After an output is primed, it
    // is in GNWTS that the amount of padding needed to compensate for
    // different DMA start times is taken into account.  Go ahead and force a
    // call to GNWTS, just to be certain that we have checked recently and are
    // not stuck in silence-fill mode.  Failure to do this will cause the
    // AudioOutput state machine to eventually give up on DMA starting and
    // reset the output over and over again (spamming the log and producing
    // general confusion).
    //
    // While we are in the process of checking our various output states, check
    // to see if any outputs have made it to the ACTIVE state.  Pass this
    // information along to the call to processOneChunk.  If any of our outputs
    // are waiting to be primed while other outputs have made it to steady
    // state, we need to change our priming behavior slightly.  Instead of
    // filling an output's buffer completely, we want to fill it to slightly
    // less than full and let the adjustDelay mechanism take care of the rest.
    //
    // Failure to do this during steady state operation will almost certainly
    // lead to the new output being over-filled relative to the other outputs,
    // causing it to be slightly out of sync.
    AudioOutputList::iterator I;
    bool checkDMAStart = false;
    bool hasActiveOutputs = false;
    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
        if (AudioOutput::PRIMED == (*I)->getState())
            checkDMAStart = true;

        if ((*I)->getState() == AudioOutput::ACTIVE)
            hasActiveOutputs = true;
    }

    if (checkDMAStart) {
        int64_t junk;
        getNextWriteTimestamp_internal(&junk);
    }

    // We always call processOneChunk on the outputs, as it is the
    // tick for their state machines.
    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
        (*I)->processOneChunk((uint8_t *)buffer, bytes, hasActiveOutputs);
    }

    // If we don't actually have any physical outputs to write to, just sleep
    // for the proper amount of time in order to simulate the throttle that
    // writing to the hardware would impose.
    finishedWriteOp(bytes / getBytesPerOutputFrame(), (0 == mPhysOutputs.size()));

    return static_cast<ssize_t>(bytes);
}

status_t AudioStreamOut::getNextWriteTimestamp(int64_t *timestamp)
{
    return getNextWriteTimestamp_internal(timestamp);
}

status_t AudioStreamOut::getNextWriteTimestamp_internal(
        int64_t *timestamp)
{
    int64_t max_time = LLONG_MIN;
    bool max_time_valid = false;
    bool need_adjust = false;

    // Across all of our physical outputs, figure out the max time when
    // a write operation will hit the speakers.  Assume that if an
    // output cannot answer the question, it is because it has never
    // started or because it has recently underflowed and needs to be
    // restarted.  If this is the case, we will need to prime the
    // pipeline with a chunk's worth of data before proceeding.
    // If any of the outputs indicate a discontinuity (meaning that the
    // DMA start time was valid and is now invalid, or was and is valid
    // but was different from before; almost certainly caused by a low
    // level underflow), then just stop now.  We will need to reset and
    // re-prime all of the outputs in order to make certain that the
    // lead-times on all of the outputs match.

    AudioOutputList::iterator I;
    bool discon = false;

    // Find the largest next write timestamp.  The goal is to make EVERY
    // output have the same value, but we also need this to pass back
    // up the layers.
    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
        int64_t tmp;
        if (OK == (*I)->getNextWriteTimestamp(&tmp, &discon)) {
            if (!max_time_valid || (max_time < tmp)) {
                max_time = tmp;
                max_time_valid = true;
            }
        }
    }

    // Check the state of each output and determine if we need to align them.
    // Make sure to do this after we have called each output's
    // getNextWriteTimestamp, as the transition from PRIMED to DMA_START
    // happens there.
    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
        if ((*I)->getState() == AudioOutput::DMA_START) {
            need_adjust = true;
            break;
        }
    }

    // At this point, if we still have not found at least one output
    // which knows when its data is going to hit the speakers, then we
    // just can't answer the getNextWriteTimestamp question and we
    // should give up.
    if (!max_time_valid) {
        return INVALID_OPERATION;
    }

    // Stuff silence into the non-aligned outputs so that the effective
    // timestamp is the same for all the outputs.
    if (need_adjust)
        adjustOutputs(max_time);
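    // Note on the choice of max: alignment can only ever add delay (silence
    // is padded into the outputs that would otherwise play too early); there
    // is no way to advance an output that is already ahead.  Hence every
    // output is aligned to the latest next-write time rather than the
    // earliest.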
    // We are done.  The time at which the next written audio should
    // hit the speakers is just max_time plus the maximum amount of delay
    // compensation in the system.
    *timestamp = max_time;
    return OK;
}

#define DUMP(a...) \
    snprintf(buffer, SIZE, a); \
    buffer[SIZE - 1] = 0; \
    result.append(buffer);
#define B2STR(b) b ? "true" : "false"

status_t AudioStreamOut::dump(int fd)
{
    const size_t SIZE = 256;
    char buffer[SIZE];
    String8 result;

    DUMP("\n%s AudioStreamOut::dump\n", getName());
    DUMP("\tsample rate  : %d\n", sampleRate());
    DUMP("\tbuffer size  : %d\n", bufferSize());
    DUMP("\tchannel mask : 0x%04x\n", chanMask());
    DUMP("\tformat       : %d\n", format());
    DUMP("\tdevice mask  : 0x%04x\n", mTgtDevices);
    DUMP("\tIn standby   : %s\n", mInStandby ? "yes" : "no");

    mRoutingLock.lock();
    AudioOutputList outSnapshot(mPhysOutputs);
    mRoutingLock.unlock();

    AudioOutputList::iterator I;
    for (I = outSnapshot.begin(); I != outSnapshot.end(); ++I)
        (*I)->dump(result);

    ::write(fd, result.string(), result.size());

    return NO_ERROR;
}

#undef B2STR
#undef DUMP

}  // namespace android