/*
**
** Copyright 2012, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/


#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0

#include "Configuration.h"
#include <linux/futex.h>
#include <math.h>
#include <sys/syscall.h>
#include <utils/Log.h>

#include <private/media/AudioTrackShared.h>

#include <common_time/cc_helper.h>
#include <common_time/local_clock.h>

#include "AudioMixer.h"
#include "AudioFlinger.h"
#include "ServiceUtilities.h"

#include <media/nbaio/Pipe.h>
#include <media/nbaio/PipeReader.h>
#include <audio_utils/minifloat.h>

// ----------------------------------------------------------------------------

// Note: the following macro is used for extremely verbose logging messages. In
// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
// 0; but one side effect of this is to turn on all LOGVs as well. Some messages
// are so verbose that we want to suppress them even when we have ALOG_ASSERT
// turned on. Do not uncomment the #define below unless you really know what you
// are doing and want to see all of the extremely verbose messages.
//#define VERY_VERY_VERBOSE_LOGGING
#ifdef VERY_VERY_VERBOSE_LOGGING
#define ALOGVV ALOGV
#else
#define ALOGVV(a...) do { } while(0)
#endif

namespace android {

// ----------------------------------------------------------------------------
//      TrackBase
// ----------------------------------------------------------------------------

static volatile int32_t nextTrackId = 55;

// TrackBase constructor must be called with AudioFlinger::mLock held
AudioFlinger::ThreadBase::TrackBase::TrackBase(
            ThreadBase *thread,
            const sp<Client>& client,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            void *buffer,
            int sessionId,
            int clientUid,
            IAudioFlinger::track_flags_t flags,
            bool isOut,
            alloc_type alloc,
            track_type type)
    :   RefBase(),
        mThread(thread),
        mClient(client),
        mCblk(NULL),
        // mBuffer
        mState(IDLE),
        mSampleRate(sampleRate),
        mFormat(format),
        mChannelMask(channelMask),
        mChannelCount(isOut ?
                audio_channel_count_from_out_mask(channelMask) :
                audio_channel_count_from_in_mask(channelMask)),
        mFrameSize(audio_is_linear_pcm(format) ?
                mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
        mFrameCount(frameCount),
        mSessionId(sessionId),
        mFlags(flags),
        mIsOut(isOut),
        mServerProxy(NULL),
        mId(android_atomic_inc(&nextTrackId)),
        mTerminated(false),
        mType(type),
        mThreadIoHandle(thread->id())
{
    // if the caller is us, trust the specified uid
    if (IPCThreadState::self()->getCallingPid() != getpid_cached || clientUid == -1) {
        int newclientUid = IPCThreadState::self()->getCallingUid();
        if (clientUid != -1 && clientUid != newclientUid) {
            ALOGW("uid %d tried to pass itself off as %d", newclientUid, clientUid);
        }
        clientUid = newclientUid;
    }
    // clientUid contains the uid of the app that is responsible for this track, so we can blame
    // battery usage on it.
    mUid = clientUid;

    // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
    size_t size = sizeof(audio_track_cblk_t);
    size_t bufferSize = (buffer == NULL ? roundup(frameCount) : frameCount) * mFrameSize;
    if (buffer == NULL && alloc == ALLOC_CBLK) {
        size += bufferSize;
    }

    if (client != 0) {
        mCblkMemory = client->heap()->allocate(size);
        if (mCblkMemory == 0 ||
                (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) {
            ALOGE("not enough memory for AudioTrack size=%u", size);
            client->heap()->dump("AudioTrack");
            mCblkMemory.clear();
            return;
        }
    } else {
        // this syntax avoids calling the audio_track_cblk_t constructor twice
        mCblk = (audio_track_cblk_t *) new uint8_t[size];
        // assume mCblk != NULL
    }

    // construct the shared structure in-place.
    if (mCblk != NULL) {
        new(mCblk) audio_track_cblk_t();
        switch (alloc) {
        case ALLOC_READONLY: {
            const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
            if (roHeap == 0 ||
                    (mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
                    (mBuffer = mBufferMemory->pointer()) == NULL) {
                ALOGE("not enough memory for read-only buffer size=%zu", bufferSize);
                if (roHeap != 0) {
                    roHeap->dump("buffer");
                }
                mCblkMemory.clear();
                mBufferMemory.clear();
                return;
            }
            memset(mBuffer, 0, bufferSize);
            } break;
        case ALLOC_PIPE:
            mBufferMemory = thread->pipeMemory();
            // mBuffer is the virtual address as seen from current process (mediaserver),
            // and should normally be coming from mBufferMemory->pointer().
            // However in this case the TrackBase does not reference the buffer directly.
            // It should reference the buffer via the pipe.
            // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
            mBuffer = NULL;
            break;
        case ALLOC_CBLK:
            // clear all buffers
            if (buffer == NULL) {
                mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
                memset(mBuffer, 0, bufferSize);
            } else {
                mBuffer = buffer;
#if 0
                mCblk->mFlags = CBLK_FORCEREADY;    // FIXME hack, need to fix the track ready logic
#endif
            }
            break;
        case ALLOC_LOCAL:
            mBuffer = calloc(1, bufferSize);
            break;
        case ALLOC_NONE:
            mBuffer = buffer;
            break;
        }

#ifdef TEE_SINK
        if (mTeeSinkTrackEnabled) {
            NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount, mFormat);
            if (Format_isValid(pipeFormat)) {
                Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
                size_t numCounterOffers = 0;
                const NBAIO_Format offers[1] = {pipeFormat};
                ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
                ALOG_ASSERT(index == 0);
                PipeReader *pipeReader = new PipeReader(*pipe);
                numCounterOffers = 0;
                index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
                ALOG_ASSERT(index == 0);
                mTeeSink = pipe;
                mTeeSource = pipeReader;
            }
        }
#endif

    }
}

status_t AudioFlinger::ThreadBase::TrackBase::initCheck() const
{
    status_t status;
    if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) {
        status = cblk() != NULL ? NO_ERROR : NO_MEMORY;
    } else {
        status = getCblk() != 0 ? NO_ERROR : NO_MEMORY;
    }
    return status;
}

AudioFlinger::ThreadBase::TrackBase::~TrackBase()
{
#ifdef TEE_SINK
    dumpTee(-1, mTeeSource, mId);
#endif
    // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
    delete mServerProxy;
    if (mCblk != NULL) {
        if (mClient == 0) {
            delete mCblk;
        } else {
            mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
        }
    }
    mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
    if (mClient != 0) {
        // Client destructor must run with AudioFlinger client mutex locked
        Mutex::Autolock _l(mClient->audioFlinger()->mClientLock);
        // If the client's reference count drops to zero, the associated destructor
        // must run with AudioFlinger lock held. Thus the explicit clear() rather than
        // relying on the automatic clear() at end of scope.
        mClient.clear();
    }
    // flush the binder command buffer
    IPCThreadState::self()->flushCommands();
}

// AudioBufferProvider interface
// getNextBuffer() = 0;
// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
{
#ifdef TEE_SINK
    if (mTeeSink != 0) {
        (void) mTeeSink->write(buffer->raw, buffer->frameCount);
    }
#endif

    ServerProxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    buf.mRaw = buffer->raw;
    buffer->frameCount = 0;
    buffer->raw = NULL;
    mServerProxy->releaseBuffer(&buf);
}

status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
{
    mSyncEvents.add(event);
    return NO_ERROR;
}

// ----------------------------------------------------------------------------
//      Playback
// ----------------------------------------------------------------------------

AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
    : BnAudioTrack(),
      mTrack(track)
{
}

AudioFlinger::TrackHandle::~TrackHandle() {
    // just stop the track on deletion, associated resources
    // will be freed from the main thread once all pending buffers have
    // been played. Unless it's not in the active track list, in which
    // case we free everything now...
    mTrack->destroy();
}

sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
    return mTrack->getCblk();
}

status_t AudioFlinger::TrackHandle::start() {
    return mTrack->start();
}

void AudioFlinger::TrackHandle::stop() {
    mTrack->stop();
}

void AudioFlinger::TrackHandle::flush() {
    mTrack->flush();
}

void AudioFlinger::TrackHandle::pause() {
    mTrack->pause();
}

status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
{
    return mTrack->attachAuxEffect(EffectId);
}

status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
                                                        sp<IMemory>* buffer) {
    if (!mTrack->isTimedTrack())
        return INVALID_OPERATION;

    PlaybackThread::TimedTrack* tt =
            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
    return tt->allocateTimedBuffer(size, buffer);
}

status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
                                                     int64_t pts) {
    if (!mTrack->isTimedTrack())
        return INVALID_OPERATION;

    if (buffer == 0 || buffer->pointer() == NULL) {
        ALOGE("queueTimedBuffer() buffer is 0 or has NULL pointer()");
        return BAD_VALUE;
    }

    PlaybackThread::TimedTrack* tt =
            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
    return tt->queueTimedBuffer(buffer, pts);
}

status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
    const LinearTransform& xform, int target) {

    if (!mTrack->isTimedTrack())
        return INVALID_OPERATION;

    PlaybackThread::TimedTrack* tt =
            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
    return tt->setMediaTimeTransform(
        xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
}

status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) {
    return mTrack->setParameters(keyValuePairs);
}

status_t AudioFlinger::TrackHandle::getTimestamp(AudioTimestamp& timestamp)
{
    return
            mTrack->getTimestamp(timestamp);
}


void AudioFlinger::TrackHandle::signal()
{
    return mTrack->signal();
}

status_t AudioFlinger::TrackHandle::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    return BnAudioTrack::onTransact(code, data, reply, flags);
}

// ----------------------------------------------------------------------------

// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
AudioFlinger::PlaybackThread::Track::Track(
            PlaybackThread *thread,
            const sp<Client>& client,
            audio_stream_type_t streamType,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            void *buffer,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int uid,
            IAudioFlinger::track_flags_t flags,
            track_type type)
    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
                  (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
                  sessionId, uid, flags, true /*isOut*/,
                  (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
                  type),
    mFillingUpStatus(FS_INVALID),
    // mRetryCount initialized later when needed
    mSharedBuffer(sharedBuffer),
    mStreamType(streamType),
    mName(-1),  // see note below
    mMainBuffer(thread->mixBuffer()),
    mAuxBuffer(NULL),
    mAuxEffectId(0), mHasVolumeController(false),
    mPresentationCompleteFrames(0),
    mFastIndex(-1),
    mCachedVolume(1.0),
    mIsInvalid(false),
    mAudioTrackServerProxy(NULL),
    mResumeToStopping(false),
    mFlushHwPending(false)
{
    // client == 0 implies sharedBuffer == 0
    ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    if (mCblk == NULL) {
        return;
    }

    if (sharedBuffer == 0) {
        mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
                mFrameSize, !isExternalTrack(), sampleRate);
    } else {
        mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
                mFrameSize);
    }
    mServerProxy = mAudioTrackServerProxy;

    mName = thread->getTrackName_l(channelMask, format, sessionId);
    if (mName < 0) {
        ALOGE("no more track names available");
        return;
    }
    // only allocate a fast track index if we were able to allocate a normal track name
    if (flags & IAudioFlinger::TRACK_FAST) {
        mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
        ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
        int i = __builtin_ctz(thread->mFastTrackAvailMask);
        ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
        // FIXME This is too eager. We allocate a fast track index before the
        // fast track becomes active. Since fast tracks are a scarce resource,
        // this means we are potentially denying other more important fast tracks from
        // being created. It would be better to allocate the index dynamically.
        mFastIndex = i;
        thread->mFastTrackAvailMask &= ~(1 << i);
    }
}

AudioFlinger::PlaybackThread::Track::~Track()
{
    ALOGV("PlaybackThread::Track destructor");

    // The destructor would clear mSharedBuffer,
    // but it will not push the decremented reference count,
    // leaving the client's IMemory dangling indefinitely.
    // This prevents that leak.
    if (mSharedBuffer != 0) {
        mSharedBuffer.clear();
    }
}

status_t AudioFlinger::PlaybackThread::Track::initCheck() const
{
    status_t status = TrackBase::initCheck();
    if (status == NO_ERROR && mName < 0) {
        status = NO_MEMORY;
    }
    return status;
}

void AudioFlinger::PlaybackThread::Track::destroy()
{
    // NOTE: destroyTrack_l() can remove a strong reference to this Track
    // by removing it from mTracks vector, so there is a risk that this Track's
    // destructor is called. As the destructor needs to lock mLock,
    // we must acquire a strong reference on this Track before locking mLock
    // here so that the destructor is called only when exiting this function.
    // On the other hand, as long as Track::destroy() is only called by
    // TrackHandle destructor, the TrackHandle still holds a strong ref on
    // this Track with its member mTrack.
    sp<Track> keep(this);
    { // scope for mLock
        bool wasActive = false;
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0) {
            Mutex::Autolock _l(thread->mLock);
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            wasActive = playbackThread->destroyTrack_l(this);
        }
        if (isExternalTrack() && !wasActive) {
            AudioSystem::releaseOutput(mThreadIoHandle, mStreamType, (audio_session_t)mSessionId);
        }
    }
}

/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
{
    result.append(" Name Active Client Type Fmt Chn mask Session fCount S F SRate "
                  "L dB R dB Server Main buf Aux Buf Flags UndFrmCnt\n");
}

void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size, bool active)
{
    gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
    if (isFastTrack()) {
        sprintf(buffer, " F %2d", mFastIndex);
    } else if (mName >= AudioMixer::TRACK0) {
        sprintf(buffer, " %4d", mName - AudioMixer::TRACK0);
    } else {
        sprintf(buffer, " none");
    }
    track_state state = mState;
    char stateChar;
    if (isTerminated()) {
        stateChar = 'T';
    } else {
        switch (state) {
        case IDLE:
            stateChar = 'I';
            break;
        case STOPPING_1:
            stateChar = 's';
            break;
        case STOPPING_2:
            stateChar = '5';
            break;
        case STOPPED:
            stateChar = 'S';
            break;
        case RESUMING:
            stateChar = 'R';
            break;
        case ACTIVE:
            stateChar = 'A';
            break;
        case PAUSING:
            stateChar = 'p';
            break;
        case PAUSED:
            stateChar = 'P';
            break;
        case FLUSHED:
            stateChar = 'F';
            break;
        default:
            stateChar = '?';
            break;
        }
    }
    char nowInUnderrun;
    switch (mObservedUnderruns.mBitFields.mMostRecent) {
    case UNDERRUN_FULL:
        nowInUnderrun = ' ';
        break;
    case UNDERRUN_PARTIAL:
        nowInUnderrun = '<';
        break;
    case UNDERRUN_EMPTY:
        nowInUnderrun = '*';
        break;
    default:
        nowInUnderrun = '?';
        break;
    }
    snprintf(&buffer[8], size-8, " %6s %6u %4u %08X %08X %7u %6zu %1c %1d %5u %5.2g %5.2g "
                                 "%08X %p %p 0x%03X %9u%c\n",
            active ? "yes" : "no",
            (mClient == 0) ?
                    getpid_cached : mClient->pid(),
            mStreamType,
            mFormat,
            mChannelMask,
            mSessionId,
            mFrameCount,
            stateChar,
            mFillingUpStatus,
            mAudioTrackServerProxy->getSampleRate(),
            20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
            20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
            mCblk->mServer,
            mMainBuffer,
            mAuxBuffer,
            mCblk->mFlags,
            mAudioTrackServerProxy->getUnderrunFrames(),
            nowInUnderrun);
}

uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
    return mAudioTrackServerProxy->getSampleRate();
}

// AudioBufferProvider interface
status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
        AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
{
    ServerProxy::Buffer buf;
    size_t desiredFrames = buffer->frameCount;
    buf.mFrameCount = desiredFrames;
    status_t status = mServerProxy->obtainBuffer(&buf);
    buffer->frameCount = buf.mFrameCount;
    buffer->raw = buf.mRaw;
    if (buf.mFrameCount == 0) {
        mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
    }
    return status;
}

// releaseBuffer() is not overridden

// ExtendedAudioBufferProvider interface

// framesReady() may return an approximation of the number of frames if called
// from a different thread than the one calling Proxy->obtainBuffer() and
// Proxy->releaseBuffer(). Also note there is no mutual exclusion in the
// AudioTrackServerProxy so be especially careful calling with FastTracks.
size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
    if (mSharedBuffer != 0 && (isStopped() || isStopping())) {
        // Static tracks return zero frames immediately upon stopping (for FastTracks).
        // The remainder of the buffer is not drained.
        return 0;
    }
    return mAudioTrackServerProxy->framesReady();
}

size_t AudioFlinger::PlaybackThread::Track::framesReleased() const
{
    return mAudioTrackServerProxy->framesReleased();
}

// Don't call for fast tracks; the framesReady() could result in priority inversion
bool AudioFlinger::PlaybackThread::Track::isReady() const {
    if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
        return true;
    }

    if (isStopping()) {
        if (framesReady() > 0) {
            mFillingUpStatus = FS_FILLED;
        }
        return true;
    }

    if (framesReady() >= mFrameCount ||
            (mCblk->mFlags & CBLK_FORCEREADY)) {
        mFillingUpStatus = FS_FILLED;
        android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
        return true;
    }
    return false;
}

status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused,
                                                    int triggerSession __unused)
{
    status_t status = NO_ERROR;
    ALOGV("start(%d), calling pid %d session %d",
            mName, IPCThreadState::self()->getCallingPid(), mSessionId);

    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        if (isOffloaded()) {
            Mutex::Autolock _laf(thread->mAudioFlinger->mLock);
            Mutex::Autolock _lth(thread->mLock);
            sp<EffectChain> ec = thread->getEffectChain_l(mSessionId);
            if (thread->mAudioFlinger->isNonOffloadableGlobalEffectEnabled_l() ||
                    (ec != 0 && ec->isNonOffloadableEnabled())) {
                invalidate();
                return PERMISSION_DENIED;
            }
        }
        Mutex::Autolock _lth(thread->mLock);
        track_state state = mState;
        // here the track could be either new, or restarted;
        // in both cases "unstop" the track

        // The track may have been stopping when pause was requested:
        // what should happen if resume is called?

        if (state == PAUSED || state == PAUSING) {
            if (mResumeToStopping) {
                // if that happened, we need to resume to STOPPING_1
                mState = TrackBase::STOPPING_1;
                ALOGV("PAUSED => STOPPING_1 (%d) on thread %p", mName, this);
            } else {
                mState = TrackBase::RESUMING;
                ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
            }
        } else {
            mState = TrackBase::ACTIVE;
            ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
        }

        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
        if (isFastTrack()) {
            // refresh fast track underruns on start because that field is never cleared
            // by the fast mixer; furthermore, the same track can be recycled, i.e. start
            // after stop.
            mObservedUnderruns = playbackThread->getFastTrackUnderruns(mFastIndex);
        }
        status = playbackThread->addTrack_l(this);
        if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
            triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
            // restore previous state if start was rejected by policy manager
            if (status == PERMISSION_DENIED) {
                mState = state;
            }
        }
        // track was already in the active list, not a problem
        if (status == ALREADY_EXISTS) {
            status = NO_ERROR;
        } else {
            // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
            // It is usually unsafe to access the server proxy from a binder thread.
            // But in this case we know the mixer thread (whether normal mixer or fast mixer)
            // isn't looking at this track yet: we still hold the normal mixer thread lock,
            // and for fast tracks the track is not yet in the fast mixer thread's active set.
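            // The one-frame obtainBuffer() below with ackFlush=true is only used to
            // acknowledge the flush; its result is deliberately ignored.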
            ServerProxy::Buffer buffer;
            buffer.mFrameCount = 1;
            (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
        }
    } else {
        status = BAD_VALUE;
    }
    return status;
}

void AudioFlinger::PlaybackThread::Track::stop()
{
    ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        track_state state = mState;
        if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
            // If the track is not active (PAUSED and buffers full), flush buffers
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                reset();
                mState = STOPPED;
            } else if (!isFastTrack() && !isOffloaded() && !isDirect()) {
                mState = STOPPED;
            } else {
                // For fast tracks, prepareTracks_l() will set the state to STOPPING_2
                // once presentation is complete.
                // For an offloaded track this starts a drain and the state will
                // move to STOPPING_2 when the drain completes, and then STOPPED
                mState = STOPPING_1;
            }
            playbackThread->broadcast_l();
            ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
                    playbackThread);
        }
    }
}

void AudioFlinger::PlaybackThread::Track::pause()
{
    ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
        switch (mState) {
        case STOPPING_1:
        case STOPPING_2:
            if (!isOffloaded()) {
                /* nothing to do if track is not offloaded */
                break;
            }

            // Offloaded track was draining, we need to carry on draining when resumed
            mResumeToStopping = true;
            // fall through...
        case ACTIVE:
        case RESUMING:
            mState = PAUSING;
            ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
            playbackThread->broadcast_l();
            break;

        default:
            break;
        }
    }
}

void AudioFlinger::PlaybackThread::Track::flush()
{
    ALOGV("flush(%d)", mName);
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();

        if (isOffloaded()) {
            // If offloaded we allow flush during any state except terminated
            // and keep the track active to avoid problems if user is seeking
            // rapidly and underlying hardware has a significant delay handling
            // a pause
            if (isTerminated()) {
                return;
            }

            ALOGV("flush: offload flush");
            reset();

            if (mState == STOPPING_1 || mState == STOPPING_2) {
                ALOGV("flushed in STOPPING_1 or 2 state, change state to ACTIVE");
                mState = ACTIVE;
            }

            if (mState == ACTIVE) {
                ALOGV("flush called in active state, resetting buffer time out retry count");
                mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
            }

            mFlushHwPending = true;
            mResumeToStopping = false;
        } else {
            if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
                    mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
                return;
            }
            // No point remaining in PAUSED state after a flush => go to
            // FLUSHED state
            mState = FLUSHED;
            // do not reset the track if it is still in the process of being stopped or paused.
            // this will be done by prepareTracks_l() when the track is stopped.
            // prepareTracks_l() will see mState == FLUSHED, then
            // remove from active track list, reset(), and trigger presentation complete
            if (isDirect()) {
                mFlushHwPending = true;
            }
            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                reset();
            }
        }
        // Prevent flush being lost if the track is flushed and then resumed
        // before mixer thread can run. This is important when offloading
        // because the hardware buffer could hold a large amount of audio
        playbackThread->broadcast_l();
    }
}

// must be called with thread lock held
void AudioFlinger::PlaybackThread::Track::flushAck()
{
    if (!isOffloaded() && !isDirect())
        return;

    mFlushHwPending = false;
}

void AudioFlinger::PlaybackThread::Track::reset()
{
    // Do not reset twice to avoid discarding data written just after a flush and before
    // the audioflinger thread detects the track is stopped.
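    // reset() is reached from flush() and stop() when the track is not in the active list,
    // and from prepareTracks_l() once a stopped or flushed track leaves the active list.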
    if (!mResetDone) {
        // Force underrun condition to avoid false underrun callback until first data is
        // written to buffer
        android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
        mFillingUpStatus = FS_FILLING;
        mResetDone = true;
        if (mState == FLUSHED) {
            mState = IDLE;
        }
    }
}

status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread == 0) {
        ALOGE("thread is dead");
        return FAILED_TRANSACTION;
    } else if ((thread->type() == ThreadBase::DIRECT) ||
            (thread->type() == ThreadBase::OFFLOAD)) {
        return thread->setParameters(keyValuePairs);
    } else {
        return PERMISSION_DENIED;
    }
}

status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
{
    // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant
    if (isFastTrack()) {
        return INVALID_OPERATION;
    }
    sp<ThreadBase> thread = mThread.promote();
    if (thread == 0) {
        return INVALID_OPERATION;
    }

    Mutex::Autolock _l(thread->mLock);
    PlaybackThread *playbackThread = (PlaybackThread *)thread.get();

    status_t result = INVALID_OPERATION;
    if (!isOffloaded() && !isDirect()) {
        if (!playbackThread->mLatchQValid) {
            return INVALID_OPERATION;
        }
        // FIXME Not accurate under dynamic changes of sample rate and speed.
        // Do not use track's mSampleRate as it is not current for mixer tracks.
        uint32_t sampleRate = mAudioTrackServerProxy->getSampleRate();
        AudioPlaybackRate playbackRate = mAudioTrackServerProxy->getPlaybackRate();
        uint32_t unpresentedFrames = ((double) playbackThread->mLatchQ.mUnpresentedFrames *
                sampleRate * playbackRate.mSpeed) / playbackThread->mSampleRate;
        // FIXME Since we're using a raw pointer as the key, it is theoretically possible
        // for a brand new track to share the same address as a recently destroyed
        // track, and thus for us to get the frames released of the wrong track.
        // It is unlikely that we would be able to call getTimestamp() so quickly
        // right after creating a new track. Nevertheless, the index here should
        // be changed to something that is unique. Or use a completely different strategy.
        ssize_t i = playbackThread->mLatchQ.mFramesReleased.indexOfKey(this);
        uint32_t framesWritten = i >= 0 ?
                playbackThread->mLatchQ.mFramesReleased[i] :
                mAudioTrackServerProxy->framesReleased();
        if (framesWritten >= unpresentedFrames) {
            timestamp.mPosition = framesWritten - unpresentedFrames;
            timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime;
            result = NO_ERROR;
        }
    } else { // offloaded or direct
        result = playbackThread->getTimestamp_l(timestamp);
    }

    return result;
}

status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
{
    status_t status = DEAD_OBJECT;
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
        sp<AudioFlinger> af = mClient->audioFlinger();

        Mutex::Autolock _l(af->mLock);

        sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);

        if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) {
            Mutex::Autolock _dl(playbackThread->mLock);
            Mutex::Autolock _sl(srcThread->mLock);
            sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
            if (chain == 0) {
                return INVALID_OPERATION;
            }

            sp<EffectModule> effect = chain->getEffectFromId_l(EffectId);
            if (effect == 0) {
                return INVALID_OPERATION;
            }
            srcThread->removeEffect_l(effect);
            status = playbackThread->addEffect_l(effect);
            if (status != NO_ERROR) {
                srcThread->addEffect_l(effect);
                return INVALID_OPERATION;
            }
            // removeEffect_l() has stopped the effect if it was active so it must be restarted
            if (effect->state() == EffectModule::ACTIVE ||
                    effect->state() == EffectModule::STOPPING) {
                effect->start();
            }

            sp<EffectChain> dstChain = effect->chain().promote();
            if (dstChain == 0) {
                srcThread->addEffect_l(effect);
                return INVALID_OPERATION;
            }
            AudioSystem::unregisterEffect(effect->id());
            AudioSystem::registerEffect(&effect->desc(),
                                        srcThread->id(),
                                        dstChain->strategy(),
                                        AUDIO_SESSION_OUTPUT_MIX,
                                        effect->id());
            AudioSystem::setEffectEnabled(effect->id(), effect->isEnabled());
        }
        status = playbackThread->attachAuxEffect(this, EffectId);
    }
    return status;
}

void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
{
    mAuxEffectId = EffectId;
    mAuxBuffer = buffer;
}

bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
                                                               size_t audioHalFrames)
{
    // a track is considered presented when the total number of frames written to audio HAL
    // corresponds to the number of frames written when presentationComplete() is called for the
    // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
    // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
    // to detect when all frames have been played.
    // In this case framesWritten isn't
    // useful because it doesn't always reflect whether there is data in the h/w
    // buffers, particularly if a track has been paused and resumed during draining
    ALOGV("presentationComplete() mPresentationCompleteFrames %d framesWritten %d",
            mPresentationCompleteFrames, framesWritten);
    if (mPresentationCompleteFrames == 0) {
        mPresentationCompleteFrames = framesWritten + audioHalFrames;
        ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
                mPresentationCompleteFrames, audioHalFrames);
    }

    if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
        triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
        mAudioTrackServerProxy->setStreamEndDone();
        return true;
    }
    return false;
}

void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
{
    for (size_t i = 0; i < mSyncEvents.size(); i++) {
        if (mSyncEvents[i]->type() == type) {
            mSyncEvents[i]->trigger();
            mSyncEvents.removeAt(i);
            i--;
        }
    }
}

// implement VolumeBufferProvider interface

gain_minifloat_packed_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
{
    // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
    ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
    gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
    float vl = float_from_gain(gain_minifloat_unpack_left(vlr));
    float vr = float_from_gain(gain_minifloat_unpack_right(vlr));
    // track volumes come from shared memory, so can't be trusted and must be clamped
    if (vl > GAIN_FLOAT_UNITY) {
        vl = GAIN_FLOAT_UNITY;
    }
    if (vr > GAIN_FLOAT_UNITY) {
        vr = GAIN_FLOAT_UNITY;
    }
    // now apply the cached master volume and stream type volume;
    // this is trusted but lacks any synchronization or barrier so may be stale
    float v = mCachedVolume;
    vl *= v;
    vr *= v;
    // re-combine into packed minifloat
    vlr = gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr));
    // FIXME look at mute, pause, and stop flags
    return vlr;
}

status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
{
    if (isTerminated() || mState == PAUSED ||
            ((framesReady() == 0) && ((mSharedBuffer != 0) ||
                                      (mState == STOPPED)))) {
        ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
              mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
        event->cancel();
        return INVALID_OPERATION;
    }
    (void) TrackBase::setSyncEvent(event);
    return NO_ERROR;
}

void AudioFlinger::PlaybackThread::Track::invalidate()
{
    // FIXME should use proxy, and needs work
    audio_track_cblk_t* cblk = mCblk;
    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
    android_atomic_release_store(0x40000000, &cblk->mFutex);
    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
    (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
    mIsInvalid = true;
}

void AudioFlinger::PlaybackThread::Track::signal()
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        PlaybackThread *t = (PlaybackThread *)thread.get();
        Mutex::Autolock _l(t->mLock);
        t->broadcast_l();
    }
}

// To be called with thread lock held
bool AudioFlinger::PlaybackThread::Track::isResumePending() {

    if (mState == RESUMING)
        return true;
    /* Resume is pending if track was stopping before pause was called */
    if (mState == STOPPING_1 &&
            mResumeToStopping)
        return true;

    return false;
}

// To be called with thread lock held
void AudioFlinger::PlaybackThread::Track::resumeAck() {

    if (mState == RESUMING)
        mState = ACTIVE;

    // The other possible pending-resume case is the STOPPING_1 state.
    // Do not move the state out of stopping, as that would prevent
    // drain from being called.
    if (mState == STOPPING_1) {
        mResumeToStopping = false;
    }
}
// ----------------------------------------------------------------------------

sp<AudioFlinger::PlaybackThread::TimedTrack>
AudioFlinger::PlaybackThread::TimedTrack::create(
            PlaybackThread *thread,
            const sp<Client>& client,
            audio_stream_type_t streamType,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int uid)
{
    if (!client->reserveTimedTrack())
        return 0;

    return new TimedTrack(
        thread, client, streamType, sampleRate, format, channelMask, frameCount,
        sharedBuffer, sessionId, uid);
}

AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
            PlaybackThread *thread,
            const sp<Client>& client,
            audio_stream_type_t streamType,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int uid)
    : Track(thread, client, streamType, sampleRate, format, channelMask,
            frameCount, (sharedBuffer != 0) ?
            sharedBuffer->pointer() : NULL, sharedBuffer,
            sessionId, uid, IAudioFlinger::TRACK_TIMED, TYPE_TIMED),
    mQueueHeadInFlight(false),
    mTrimQueueHeadOnRelease(false),
    mFramesPendingInQueue(0),
    mTimedSilenceBuffer(NULL),
    mTimedSilenceBufferSize(0),
    mTimedAudioOutputOnTime(false),
    mMediaTimeTransformValid(false)
{
    LocalClock lc;
    mLocalTimeFreq = lc.getLocalFreq();

    mLocalTimeToSampleTransform.a_zero = 0;
    mLocalTimeToSampleTransform.b_zero = 0;
    mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
    mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
    LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
                            &mLocalTimeToSampleTransform.a_to_b_denom);

    mMediaTimeToSampleTransform.a_zero = 0;
    mMediaTimeToSampleTransform.b_zero = 0;
    mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
    mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
    LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
                            &mMediaTimeToSampleTransform.a_to_b_denom);
}

AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
    mClient->releaseTimedTrack();
    delete [] mTimedSilenceBuffer;
}

status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
    size_t size, sp<IMemory>* buffer) {

    Mutex::Autolock _l(mTimedBufferQueueLock);

    trimTimedBufferQueue_l();

    // lazily initialize the shared memory heap for timed buffers
    if (mTimedMemoryDealer == NULL) {
        const int kTimedBufferHeapSize = 512 << 10;

        mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
                                              "AudioFlingerTimed");
        if (mTimedMemoryDealer == NULL) {
            return NO_MEMORY;
        }
    }

    sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
    if (newBuffer == 0 || newBuffer->pointer() == NULL) {
        return NO_MEMORY;
    }

    *buffer = newBuffer;
    return NO_ERROR;
}

// caller must hold mTimedBufferQueueLock
void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
    int64_t mediaTimeNow;
    {
        Mutex::Autolock mttLock(mMediaTimeTransformLock);
        if (!mMediaTimeTransformValid)
            return;

        int64_t targetTimeNow;
        status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
            ? mCCHelper.getCommonTime(&targetTimeNow)
            : mCCHelper.getLocalTime(&targetTimeNow);

        if (OK != res)
            return;

        if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
                                                    &mediaTimeNow)) {
            return;
        }
    }

    size_t trimEnd;
    for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
        int64_t bufEnd;

        if ((trimEnd + 1) < mTimedBufferQueue.size()) {
            // We have a next buffer. Just use its PTS as the PTS of the frame
            // following the last frame in this buffer. If the stream is sparse
            // (i.e., there are deliberate gaps left in the stream which should be
            // filled with silence by the TimedAudioTrack), then this can result
            // in one extra buffer being left un-trimmed when it could have
            // been. In general, this is not typical, and we would rather
            // optimize away the TS calculation below for the more common case
            // where PTSes are contiguous.
            bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
        } else {
            // We have no next buffer.
            // Compute the PTS of the frame following
            // the last frame in this buffer by computing the duration of
            // this frame in media time units and adding it to the PTS of the
            // buffer.
            int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
                               / mFrameSize;

            if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
                                                                &bufEnd)) {
                ALOGE("Failed to convert frame count of %lld to media time"
                      " duration" " (scale factor %d/%u) in %s",
                      frameCount,
                      mMediaTimeToSampleTransform.a_to_b_numer,
                      mMediaTimeToSampleTransform.a_to_b_denom,
                      __PRETTY_FUNCTION__);
                break;
            }
            bufEnd += mTimedBufferQueue[trimEnd].pts();
        }

        if (bufEnd > mediaTimeNow)
            break;

        // Is the buffer we want to use in the middle of a mix operation right
        // now? If so, don't actually trim it. Just wait for the releaseBuffer
        // from the mixer which should be coming back shortly.
        if (!trimEnd && mQueueHeadInFlight) {
            mTrimQueueHeadOnRelease = true;
        }
    }

    size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
    if (trimStart < trimEnd) {
        // Update the bookkeeping for framesReady()
        for (size_t i = trimStart; i < trimEnd; ++i) {
            updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
        }

        // Now actually remove the buffers from the queue.
        mTimedBufferQueue.removeItemsAt(trimStart, trimEnd);
    }
}

void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
        const char* logTag) {
    ALOG_ASSERT(mTimedBufferQueue.size() > 0,
                "%s called (reason \"%s\"), but timed buffer queue has no"
                " elements to trim.", __FUNCTION__, logTag);

    updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
    mTimedBufferQueue.removeAt(0);
}

void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
        const TimedBuffer& buf,
        const char* logTag __unused) {
    uint32_t bufBytes = buf.buffer()->size();
    uint32_t consumedAlready = buf.position();

    ALOG_ASSERT(consumedAlready <= bufBytes,
                "Bad bookkeeping while updating frames pending. Timed buffer is"
                " only %u bytes long, but claims to have consumed %u"
                " bytes. (update reason: \"%s\")",
                bufBytes, consumedAlready, logTag);

    uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize;
    ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
                "Bad bookkeeping while updating frames pending. Should have at"
                " least %u queued frames, but we think we have only %u."
                " (update reason: \"%s\")",
                bufFrames, mFramesPendingInQueue, logTag);

    mFramesPendingInQueue -= bufFrames;
}

status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
    const sp<IMemory>& buffer, int64_t pts) {

    {
        Mutex::Autolock mttLock(mMediaTimeTransformLock);
        if (!mMediaTimeTransformValid)
            return INVALID_OPERATION;
    }

    Mutex::Autolock _l(mTimedBufferQueueLock);

    uint32_t bufFrames = buffer->size() / mFrameSize;
    mFramesPendingInQueue += bufFrames;
    mTimedBufferQueue.add(TimedBuffer(buffer, pts));

    return NO_ERROR;
}

status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
    const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {

    ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
           xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
           target);

    if (!(target == TimedAudioTrack::LOCAL_TIME ||
          target == TimedAudioTrack::COMMON_TIME)) {
        return BAD_VALUE;
    }

    Mutex::Autolock lock(mMediaTimeTransformLock);
    mMediaTimeTransform = xform;
    mMediaTimeTransformTarget = target;
    mMediaTimeTransformValid = true;

    return NO_ERROR;
}

#define min(a, b) ((a) < (b) ? (a) : (b))

// implementation of getNextBuffer for tracks whose buffers have timestamps
status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
    AudioBufferProvider::Buffer* buffer, int64_t pts)
{
    if (pts == AudioBufferProvider::kInvalidPTS) {
        buffer->raw = NULL;
        buffer->frameCount = 0;
        mTimedAudioOutputOnTime = false;
        return INVALID_OPERATION;
    }

    Mutex::Autolock _l(mTimedBufferQueueLock);

    ALOG_ASSERT(!mQueueHeadInFlight,
                "getNextBuffer called without releaseBuffer!");

    while (true) {

        // if we have no timed buffers, then fail
        if (mTimedBufferQueue.isEmpty()) {
            buffer->raw = NULL;
            buffer->frameCount = 0;
            return NOT_ENOUGH_DATA;
        }

        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);

        // calculate the PTS of the head of the timed buffer queue expressed in
        // local time
        int64_t headLocalPTS;
        {
            Mutex::Autolock mttLock(mMediaTimeTransformLock);

            ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");

            if (mMediaTimeTransform.a_to_b_denom == 0) {
                // the transform represents a pause, so yield silence
                timedYieldSilence_l(buffer->frameCount, buffer);
                return NO_ERROR;
            }

            int64_t transformedPTS;
            if (!mMediaTimeTransform.doForwardTransform(head.pts(),
                                                        &transformedPTS)) {
                // the transform failed.
                // This shouldn't happen, but if it does
                // then just drop this buffer
                ALOGW("timedGetNextBuffer transform failed");
                buffer->raw = NULL;
                buffer->frameCount = 0;
                trimTimedBufferQueueHead_l("getNextBuffer; no transform");
                return NO_ERROR;
            }

            if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
                if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
                                                          &headLocalPTS)) {
                    buffer->raw = NULL;
                    buffer->frameCount = 0;
                    return INVALID_OPERATION;
                }
            } else {
                headLocalPTS = transformedPTS;
            }
        }

        uint32_t sr = sampleRate();

        // adjust the head buffer's PTS to reflect the portion of the head buffer
        // that has already been consumed
        int64_t effectivePTS = headLocalPTS +
                ((head.position() / mFrameSize) * mLocalTimeFreq / sr);

        // Calculate the delta in samples between the head of the input buffer
        // queue and the start of the next output buffer that will be written.
        // If the transformation fails because of over or underflow, it means
        // that the sample's position in the output stream is so far out of
        // whack that it should just be dropped.
        int64_t sampleDelta;
        if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
            ALOGV("*** head buffer is too far from PTS: dropped buffer");
            trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
                                       " mix");
            continue;
        }
        if (!mLocalTimeToSampleTransform.doForwardTransform(
                (effectivePTS - pts) << 32, &sampleDelta)) {
            ALOGV("*** too late during sample rate transform: dropped buffer");
            trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
            continue;
        }

        ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
               " sampleDelta=[%d.%08x]",
               head.pts(), head.position(), pts,
               static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
                   + (sampleDelta >> 32)),
               static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));

        // if the delta between the ideal placement for the next input sample and
        // the current output position is within this threshold, then we will
        // concatenate the next input samples to the previous output
        const int64_t kSampleContinuityThreshold =
                (static_cast<int64_t>(sr) << 32) / 250;

        // if this is the first buffer of audio that we're emitting from this track
        // then it should be almost exactly on time.
        const int64_t kSampleStartupThreshold = 1LL << 32;

        if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
            (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
            // the next input is close enough to being on time, so concatenate it
            // with the last output
            timedYieldSamples_l(buffer);

            ALOGVV("*** on time: head.pos=%d frameCount=%u",
                   head.position(), buffer->frameCount);
            return NO_ERROR;
        }

        // Looks like our output is not on time. Reset our on-time status.
        // Next time we mix samples from our input queue, they should be within
        // the StartupThreshold.
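        // A positive sampleDelta means the head buffer starts in the future (fill the
        // gap with silence below); a negative delta means its samples are late (skip
        // over them or drop the whole buffer).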
        mTimedAudioOutputOnTime = false;
        if (sampleDelta > 0) {
            // the gap between the current output position and the proper start of
            // the next input sample is too big, so fill it with silence
            uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;

            timedYieldSilence_l(framesUntilNextInput, buffer);
            ALOGV("*** silence: frameCount=%u", buffer->frameCount);
            return NO_ERROR;
        } else {
            // the next input sample is late
            uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
            size_t onTimeSamplePosition =
                    head.position() + lateFrames * mFrameSize;

            if (onTimeSamplePosition > head.buffer()->size()) {
                // all the remaining samples in the head are too late, so
                // drop it and move on
                ALOGV("*** too late: dropped buffer");
                trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
                continue;
            } else {
                // skip over the late samples
                head.setPosition(onTimeSamplePosition);

                // yield the available samples
                timedYieldSamples_l(buffer);

                ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
                return NO_ERROR;
            }
        }
    }
}

// Yield samples from the timed buffer queue head up to the given output
// buffer's capacity.
//
// Caller must hold mTimedBufferQueueLock
void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
        AudioBufferProvider::Buffer* buffer) {

    const TimedBuffer& head = mTimedBufferQueue[0];

    buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
                   head.position());

    uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
                                 mFrameSize);
    size_t framesRequested = buffer->frameCount;
    buffer->frameCount = min(framesLeftInHead, framesRequested);

    mQueueHeadInFlight = true;
    mTimedAudioOutputOnTime = true;
}

// Yield samples of silence up to the given output buffer's capacity
//
// Caller must hold mTimedBufferQueueLock
void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
        uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {

    // lazily allocate a buffer filled with silence
    if (mTimedSilenceBufferSize < numFrames * mFrameSize) {
        delete [] mTimedSilenceBuffer;
        mTimedSilenceBufferSize = numFrames * mFrameSize;
        mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
        memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
    }

    buffer->raw = mTimedSilenceBuffer;
    size_t framesRequested = buffer->frameCount;
    buffer->frameCount = min(numFrames, framesRequested);

    mTimedAudioOutputOnTime = false;
}

// AudioBufferProvider interface
void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
    AudioBufferProvider::Buffer* buffer) {

    Mutex::Autolock _l(mTimedBufferQueueLock);

    // If the buffer which was just released is part of the buffer at the head
    // of the queue, be sure to update the amount of the buffer which has been
    // consumed. If the buffer being returned is not part of the head of the
    // queue, it's either because the buffer is part of the silence buffer, or
    // because the head of the timed queue was trimmed after the mixer called
    // getNextBuffer but before the mixer called releaseBuffer.
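    // Silence buffers live outside the timed queue, so there is no position or
    // frames-pending bookkeeping to update when one of them is released.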
    if (buffer->raw == mTimedSilenceBuffer) {
        ALOG_ASSERT(!mQueueHeadInFlight,
                    "Queue head in flight during release of silence buffer!");
        goto done;
    }

    ALOG_ASSERT(mQueueHeadInFlight,
                "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
                " head in flight.");

    if (mTimedBufferQueue.size()) {
        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);

        void* start = head.buffer()->pointer();
        void* end = reinterpret_cast<void*>(
                        reinterpret_cast<uint8_t*>(head.buffer()->pointer())
                        + head.buffer()->size());

        ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
                    "released buffer not within the head of the timed buffer"
                    " queue; qHead = [%p, %p], released buffer = %p",
                    start, end, buffer->raw);

        head.setPosition(head.position() +
                (buffer->frameCount * mFrameSize));
        mQueueHeadInFlight = false;

        ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
                    "Bad bookkeeping during releaseBuffer! Should have at"
                    " least %u queued frames, but we think we have only %u",
                    buffer->frameCount, mFramesPendingInQueue);

        mFramesPendingInQueue -= buffer->frameCount;

        if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
            || mTrimQueueHeadOnRelease) {
            trimTimedBufferQueueHead_l("releaseBuffer");
            mTrimQueueHeadOnRelease = false;
        }
    } else {
        LOG_ALWAYS_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
                         " buffers in the timed buffer queue");
    }

done:
    buffer->raw = 0;
    buffer->frameCount = 0;
}

size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
    Mutex::Autolock _l(mTimedBufferQueueLock);
    return mFramesPendingInQueue;
}

AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
        : mPTS(0), mPosition(0) {}

AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
    const sp<IMemory>& buffer, int64_t pts)
        : mBuffer(buffer), mPTS(pts), mPosition(0) {}


// ----------------------------------------------------------------------------

AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
            PlaybackThread *playbackThread,
            DuplicatingThread *sourceThread,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            int uid)
    :   Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
              sampleRate, format, channelMask, frameCount,
              NULL, 0, 0, uid, IAudioFlinger::TRACK_DEFAULT, TYPE_OUTPUT),
    mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
{

    if (mCblk != NULL) {
        mOutBuffer.frameCount = 0;
        playbackThread->mTracks.add(this);
        ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
                "frameCount %u, mChannelMask 0x%08x",
                mCblk, mBuffer,
                frameCount, mChannelMask);
        // since client and server are in the same process,
        // the buffer has the same virtual address on both sides
        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
                true /*clientInServer*/);
        mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
        mClientProxy->setSendLevel(0.0);
        mClientProxy->setSampleRate(sampleRate);
    } else {
        ALOGW("Error creating output track on thread %p", playbackThread);
    }
}

AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
{
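    // Release any locally queued overflow buffers before the proxy and the shared
    // memory they were copied into go away.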
    clearBufferQueue();
    delete mClientProxy;
    // superclass destructor will now delete the server proxy and shared memory both refer to
}

status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
                                                          int triggerSession)
{
    status_t status = Track::start(event, triggerSession);
    if (status != NO_ERROR) {
        return status;
    }

    mActive = true;
    mRetryCount = 127;
    return status;
}

void AudioFlinger::PlaybackThread::OutputTrack::stop()
{
    Track::stop();
    clearBufferQueue();
    mOutBuffer.frameCount = 0;
    mActive = false;
}

bool AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
{
    Buffer *pInBuffer;
    Buffer inBuffer;
    bool outputBufferFull = false;
    inBuffer.frameCount = frames;
    inBuffer.raw = data;

    uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();

    if (!mActive && frames != 0) {
        (void) start();
    }

    while (waitTimeLeftMs) {
        // First write pending buffers, then new data
        if (mBufferQueue.size()) {
            pInBuffer = mBufferQueue.itemAt(0);
        } else {
            pInBuffer = &inBuffer;
        }

        if (pInBuffer->frameCount == 0) {
            break;
        }

        if (mOutBuffer.frameCount == 0) {
            mOutBuffer.frameCount = pInBuffer->frameCount;
            nsecs_t startTime = systemTime();
            status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
            if (status != NO_ERROR) {
                ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
                        mThread.unsafe_get(), status);
                outputBufferFull = true;
                break;
            }
            uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
            if (waitTimeLeftMs >= waitTimeMs) {
                waitTimeLeftMs -= waitTimeMs;
            } else {
                waitTimeLeftMs = 0;
            }
        }

        uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
                pInBuffer->frameCount;
        memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * mFrameSize);
        Proxy::Buffer buf;
        buf.mFrameCount = outFrames;
        buf.mRaw = NULL;
        mClientProxy->releaseBuffer(&buf);
        pInBuffer->frameCount -= outFrames;
        pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
        mOutBuffer.frameCount -= outFrames;
        mOutBuffer.raw = (int8_t *)mOutBuffer.raw + outFrames * mFrameSize;

        if (pInBuffer->frameCount == 0) {
            if (mBufferQueue.size()) {
                mBufferQueue.removeAt(0);
                free(pInBuffer->mBuffer);
                delete pInBuffer;
                ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
                        mThread.unsafe_get(), mBufferQueue.size());
            } else {
                break;
            }
        }
    }

    // If we could not write all frames, allocate a buffer and queue it for next time.
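    // The overflow queue is bounded by kMaxOverFlowBuffers; once it is full,
    // the remaining frames are simply dropped with a warning.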
    if (inBuffer.frameCount) {
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0 && !thread->standby()) {
            if (mBufferQueue.size() < kMaxOverFlowBuffers) {
                pInBuffer = new Buffer;
                pInBuffer->mBuffer = malloc(inBuffer.frameCount * mFrameSize);
                pInBuffer->frameCount = inBuffer.frameCount;
                pInBuffer->raw = pInBuffer->mBuffer;
                memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
                mBufferQueue.add(pInBuffer);
                ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
                        mThread.unsafe_get(), mBufferQueue.size());
            } else {
                ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
                        this, mThread.unsafe_get());
            }
        }
    }

    // Calling write() with a 0 length buffer means that no more data will be written:
    // we rely on stop() to set the appropriate flags to allow the remaining frames to play out.
    if (frames == 0 && mBufferQueue.size() == 0 && mActive) {
        stop();
    }

    return outputBufferFull;
}

status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
        AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
{
    ClientProxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    struct timespec timeout;
    timeout.tv_sec = waitTimeMs / 1000;
    timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
    status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
    buffer->frameCount = buf.mFrameCount;
    buffer->raw = buf.mRaw;
    return status;
}

void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
{
    size_t size = mBufferQueue.size();

    for (size_t i = 0; i < size; i++) {
        Buffer *pBuffer = mBufferQueue.itemAt(i);
        free(pBuffer->mBuffer);
        delete pBuffer;
    }
    mBufferQueue.clear();
}


AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
                                                     audio_stream_type_t streamType,
                                                     uint32_t sampleRate,
                                                     audio_channel_mask_t channelMask,
                                                     audio_format_t format,
                                                     size_t frameCount,
                                                     void *buffer,
                                                     IAudioFlinger::track_flags_t flags)
    :   Track(playbackThread, NULL, streamType,
              sampleRate, format, channelMask, frameCount,
              buffer, 0, 0, getuid(), flags, TYPE_PATCH),
        mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
{
    uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) /
            playbackThread->sampleRate();
    mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
    mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);

    ALOGV("PatchTrack %p sampleRate %d mPeerTimeout %d.%03d sec",
            this, sampleRate,
            (int)mPeerTimeout.tv_sec,
            (int)(mPeerTimeout.tv_nsec / 1000000));
}

AudioFlinger::PlaybackThread::PatchTrack::~PatchTrack()
{
}

// AudioBufferProvider interface
status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
        AudioBufferProvider::Buffer* buffer, int64_t pts)
{
    ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::getNextBuffer() called without peer proxy");
    Proxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
    ALOGV_IF(status != NO_ERROR, "PatchTrack() %p getNextBuffer status %d", this, status);
    buffer->frameCount = buf.mFrameCount;
    if (buf.mFrameCount == 0) {
        return WOULD_BLOCK;
    }
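    // The peer proxy only bounds how many frames may be transferred this round;
    // the frames themselves still come from this track's own buffer via the
    // normal Track::getNextBuffer() path below.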
    status = Track::getNextBuffer(buffer, pts);
    return status;
}

void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer)
{
    ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::releaseBuffer() called without peer proxy");
    Proxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    buf.mRaw = buffer->raw;
    mPeerProxy->releaseBuffer(&buf);
    TrackBase::releaseBuffer(buffer);
}

status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
                                                                const struct timespec *timeOut)
{
    return mProxy->obtainBuffer(buffer, timeOut);
}

void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
{
    mProxy->releaseBuffer(buffer);
    if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
        ALOGW("PatchTrack::releaseBuffer() disabled due to previous underrun, restarting");
        start();
    }
    android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
}

// ----------------------------------------------------------------------------
//      Record
// ----------------------------------------------------------------------------

AudioFlinger::RecordHandle::RecordHandle(
        const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
    : BnAudioRecord(),
      mRecordTrack(recordTrack)
{
}

AudioFlinger::RecordHandle::~RecordHandle() {
    stop_nonvirtual();
    mRecordTrack->destroy();
}

status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
        int triggerSession) {
    ALOGV("RecordHandle::start()");
    return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
}

void AudioFlinger::RecordHandle::stop() {
    stop_nonvirtual();
}

void AudioFlinger::RecordHandle::stop_nonvirtual() {
    ALOGV("RecordHandle::stop()");
    mRecordTrack->stop();
}

status_t AudioFlinger::RecordHandle::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    return BnAudioRecord::onTransact(code, data, reply, flags);
}

// ----------------------------------------------------------------------------

// RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
AudioFlinger::RecordThread::RecordTrack::RecordTrack(
            RecordThread *thread,
            const sp<Client>& client,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            void *buffer,
            int sessionId,
            int uid,
            IAudioFlinger::track_flags_t flags,
            track_type type)
    :   TrackBase(thread, client, sampleRate, format,
                  channelMask, frameCount, buffer, sessionId, uid,
                  flags, false /*isOut*/,
                  (type == TYPE_DEFAULT) ?
                          ((flags & IAudioFlinger::TRACK_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
                          ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
                  type),
        mOverflow(false),
        mFramesToDrop(0),
        mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
        mRecordBufferConverter(NULL)
{
    if (mCblk == NULL) {
        return;
    }

    mRecordBufferConverter = new RecordBufferConverter(
            thread->mChannelMask, thread->mFormat, thread->mSampleRate,
            channelMask, format, sampleRate);
    // Check if the RecordBufferConverter construction was successful.
    // If not, don't continue with construction.
    //
    // NOTE: It would be extremely rare that the record track cannot be created
    // for the current device, but a pending or future device change would make
    // the record track configuration valid.
    if (mRecordBufferConverter->initCheck() != NO_ERROR) {
        ALOGE("RecordTrack unable to create record buffer converter");
        return;
    }

    mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
            mFrameSize, !isExternalTrack());
    mResamplerBufferProvider = new ResamplerBufferProvider(this);

    if (flags & IAudioFlinger::TRACK_FAST) {
        ALOG_ASSERT(thread->mFastTrackAvail);
        thread->mFastTrackAvail = false;
    }
}

AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
{
    ALOGV("%s", __func__);
    delete mRecordBufferConverter;
    delete mResamplerBufferProvider;
}

status_t AudioFlinger::RecordThread::RecordTrack::initCheck() const
{
    status_t status = TrackBase::initCheck();
    if (status == NO_ERROR && mServerProxy == 0) {
        status = BAD_VALUE;
    }
    return status;
}

// AudioBufferProvider interface
status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
        int64_t pts __unused)
{
    ServerProxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    status_t status = mServerProxy->obtainBuffer(&buf);
    buffer->frameCount = buf.mFrameCount;
    buffer->raw = buf.mRaw;
    if (buf.mFrameCount == 0) {
        // FIXME also wake futex so that overrun is noticed more quickly
        (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
    }
    return status;
}

status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
                                                        int triggerSession)
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        return recordThread->start(this, event, triggerSession);
    } else {
        return BAD_VALUE;
    }
}

void AudioFlinger::RecordThread::RecordTrack::stop()
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        if (recordThread->stop(this) && isExternalTrack()) {
            AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId);
        }
    }
}

void AudioFlinger::RecordThread::RecordTrack::destroy()
{
    // see comments at AudioFlinger::PlaybackThread::Track::destroy()
    sp<RecordTrack> keep(this);
    {
        if (isExternalTrack()) {
            if (mState == ACTIVE || mState == RESUMING) {
                AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId);
            }
            AudioSystem::releaseInput(mThreadIoHandle, (audio_session_t)mSessionId);
        }
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0) {
            Mutex::Autolock _l(thread->mLock);
            RecordThread *recordThread = (RecordThread *) thread.get();
            recordThread->destroyTrack_l(this);
        }
    }
}

void AudioFlinger::RecordThread::RecordTrack::invalidate()
{
    // FIXME should use proxy, and needs work
    audio_track_cblk_t* cblk = mCblk;
    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
    android_atomic_release_store(0x40000000, &cblk->mFutex);
    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
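    // The wake below, combined with the sentinel stored into mFutex above,
    // lets a client blocked in obtainBuffer() wake up and observe CBLK_INVALID.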
    (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
}


/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
{
    result.append(" Active Client Fmt Chn mask Session S Server fCount SRate\n");
}

void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size, bool active)
{
    snprintf(buffer, size, " %6s %6u %3u %08X %7u %1d %08X %6zu %5u\n",
            active ? "yes" : "no",
            (mClient == 0) ? getpid_cached : mClient->pid(),
            mFormat,
            mChannelMask,
            mSessionId,
            mState,
            mCblk->mServer,
            mFrameCount,
            mSampleRate);
}

void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event)
{
    if (event == mSyncStartEvent) {
        ssize_t framesToDrop = 0;
        sp<ThreadBase> threadBase = mThread.promote();
        if (threadBase != 0) {
            // TODO: use actual buffer filling status instead of 2 buffers when info is available
            // from audio HAL
            framesToDrop = threadBase->mFrameCount * 2;
        }
        mFramesToDrop = framesToDrop;
    }
}

void AudioFlinger::RecordThread::RecordTrack::clearSyncStartEvent()
{
    if (mSyncStartEvent != 0) {
        mSyncStartEvent->cancel();
        mSyncStartEvent.clear();
    }
    mFramesToDrop = 0;
}


AudioFlinger::RecordThread::PatchRecord::PatchRecord(RecordThread *recordThread,
                                                     uint32_t sampleRate,
                                                     audio_channel_mask_t channelMask,
                                                     audio_format_t format,
                                                     size_t frameCount,
                                                     void *buffer,
                                                     IAudioFlinger::track_flags_t flags)
    :   RecordTrack(recordThread, NULL, sampleRate, format, channelMask, frameCount,
                buffer, 0, getuid(), flags, TYPE_PATCH),
        mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
{
    uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) /
            recordThread->sampleRate();
    mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
    mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);

    ALOGV("PatchRecord %p sampleRate %d mPeerTimeout %d.%03d sec",
            this, sampleRate,
            (int)mPeerTimeout.tv_sec,
            (int)(mPeerTimeout.tv_nsec / 1000000));
}

AudioFlinger::RecordThread::PatchRecord::~PatchRecord()
{
}

// AudioBufferProvider interface
status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer(
        AudioBufferProvider::Buffer* buffer, int64_t pts)
{
    ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::getNextBuffer() called without peer proxy");
    Proxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
    ALOGV_IF(status != NO_ERROR,
             "PatchRecord() %p mPeerProxy->obtainBuffer status %d", this, status);
    buffer->frameCount = buf.mFrameCount;
    if (buf.mFrameCount == 0) {
        return WOULD_BLOCK;
    }
    status = RecordTrack::getNextBuffer(buffer, pts);
    return status;
}

void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer)
{
    ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::releaseBuffer() called without peer proxy");
    Proxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    buf.mRaw = buffer->raw;
    mPeerProxy->releaseBuffer(&buf);
    TrackBase::releaseBuffer(buffer);
}

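// The Proxy::Buffer overloads below forward to the local ClientProxy; they are
// the counterpart of the peer-proxy calls made from the other end of the
// software patch (the peer proxies are set up elsewhere, e.g. by the PatchPanel).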
status_t AudioFlinger::RecordThread::PatchRecord::obtainBuffer(Proxy::Buffer* buffer,
                                                               const struct timespec *timeOut)
{
    return mProxy->obtainBuffer(buffer, timeOut);
}

void AudioFlinger::RecordThread::PatchRecord::releaseBuffer(Proxy::Buffer* buffer)
{
    mProxy->releaseBuffer(buffer);
}

} // namespace android