/*
**
** Copyright 2012, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/


#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0

#include "Configuration.h"
#include <math.h>
#include <utils/Log.h>

#include <private/media/AudioTrackShared.h>

#include <common_time/cc_helper.h>
#include <common_time/local_clock.h>

#include "AudioMixer.h"
#include "AudioFlinger.h"
#include "ServiceUtilities.h"

#include <media/nbaio/Pipe.h>
#include <media/nbaio/PipeReader.h>

// ----------------------------------------------------------------------------

// Note: the following macro is used for extremely verbose logging messages. In
// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
// 0; but one side effect of this is to turn on all LOGVs as well. Some messages
// are so verbose that we want to suppress them even when we have ALOG_ASSERT
// turned on. Do not uncomment the #def below unless you really know what you
// are doing and want to see all of the extremely verbose messages.
//#define VERY_VERY_VERBOSE_LOGGING
#ifdef VERY_VERY_VERBOSE_LOGGING
#define ALOGVV ALOGV
#else
#define ALOGVV(a...) do { } while(0)
#endif

namespace android {

// ----------------------------------------------------------------------------
// TrackBase
// ----------------------------------------------------------------------------

static volatile int32_t nextTrackId = 55;

// TrackBase constructor must be called with AudioFlinger::mLock held
AudioFlinger::ThreadBase::TrackBase::TrackBase(
            ThreadBase *thread,
            const sp<Client>& client,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int clientUid,
            bool isOut)
    :   RefBase(),
        mThread(thread),
        mClient(client),
        mCblk(NULL),
        // mBuffer
        mState(IDLE),
        mSampleRate(sampleRate),
        mFormat(format),
        mChannelMask(channelMask),
        mChannelCount(popcount(channelMask)),
        mFrameSize(audio_is_linear_pcm(format) ?
                mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
        mFrameCount(frameCount),
        mSessionId(sessionId),
        mIsOut(isOut),
        mServerProxy(NULL),
        mId(android_atomic_inc(&nextTrackId)),
        mTerminated(false)
{
    // if the caller is us, trust the specified uid
    if (IPCThreadState::self()->getCallingPid() != getpid_cached || clientUid == -1) {
        int newclientUid = IPCThreadState::self()->getCallingUid();
        if (clientUid != -1 && clientUid != newclientUid) {
            ALOGW("uid %d tried to pass itself off as %d", newclientUid, clientUid);
        }
        clientUid = newclientUid;
    }
    // clientUid contains the uid of the app that is responsible for this track, so we can blame
    // battery usage on it.
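    // (Illustrative note, assuming getpid_cached is the mediaserver pid: when the request
    // comes from another process, any clientUid it supplied is replaced by the binder
    // calling uid, so an app cannot have its usage attributed to a different uid; only
    // in-process callers may specify an explicit clientUid.)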
    mUid = clientUid;

    // client == 0 implies sharedBuffer == 0
    ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
    size_t size = sizeof(audio_track_cblk_t);
    size_t bufferSize = (sharedBuffer == 0 ? roundup(frameCount) : frameCount) * mFrameSize;
    if (sharedBuffer == 0) {
        size += bufferSize;
    }

    if (client != 0) {
        mCblkMemory = client->heap()->allocate(size);
        if (mCblkMemory != 0) {
            mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
            // can't assume mCblk != NULL
        } else {
            ALOGE("not enough memory for AudioTrack size=%u", size);
            client->heap()->dump("AudioTrack");
            return;
        }
    } else {
        // this syntax avoids calling the audio_track_cblk_t constructor twice
        mCblk = (audio_track_cblk_t *) new uint8_t[size];
        // assume mCblk != NULL
    }

    // construct the shared structure in-place.
    if (mCblk != NULL) {
        new(mCblk) audio_track_cblk_t();
        // clear all buffers
        mCblk->frameCount_ = frameCount;
        if (sharedBuffer == 0) {
            mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
            memset(mBuffer, 0, bufferSize);
        } else {
            mBuffer = sharedBuffer->pointer();
#if 0
            mCblk->mFlags = CBLK_FORCEREADY;    // FIXME hack, need to fix the track ready logic
#endif
        }

#ifdef TEE_SINK
        if (mTeeSinkTrackEnabled) {
            NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount);
            if (pipeFormat != Format_Invalid) {
                Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
                size_t numCounterOffers = 0;
                const NBAIO_Format offers[1] = {pipeFormat};
                ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
                ALOG_ASSERT(index == 0);
                PipeReader *pipeReader = new PipeReader(*pipe);
                numCounterOffers = 0;
                index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
                ALOG_ASSERT(index == 0);
                mTeeSink = pipe;
                mTeeSource = pipeReader;
            }
        }
#endif

    }
}

AudioFlinger::ThreadBase::TrackBase::~TrackBase()
{
#ifdef TEE_SINK
    dumpTee(-1, mTeeSource, mId);
#endif
    // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
    delete mServerProxy;
    if (mCblk != NULL) {
        if (mClient == 0) {
            delete mCblk;
        } else {
            mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
        }
    }
    mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
    if (mClient != 0) {
        // Client destructor must run with AudioFlinger mutex locked
        Mutex::Autolock _l(mClient->audioFlinger()->mLock);
        // If the client's reference count drops to zero, the associated destructor
        // must run with AudioFlinger lock held. Thus the explicit clear() rather than
        // relying on the automatic clear() at end of scope.
        mClient.clear();
    }
}

// AudioBufferProvider interface
// getNextBuffer() = 0;
// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
{
#ifdef TEE_SINK
    if (mTeeSink != 0) {
        (void) mTeeSink->write(buffer->raw, buffer->frameCount);
    }
#endif

    ServerProxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    buf.mRaw = buffer->raw;
    buffer->frameCount = 0;
    buffer->raw = NULL;
    mServerProxy->releaseBuffer(&buf);
}

status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
{
    mSyncEvents.add(event);
    return NO_ERROR;
}

// ----------------------------------------------------------------------------
// Playback
// ----------------------------------------------------------------------------

AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
    : BnAudioTrack(),
      mTrack(track)
{
}

AudioFlinger::TrackHandle::~TrackHandle() {
    // just stop the track on deletion, associated resources
    // will be freed from the main thread once all pending buffers have
    // been played. Unless it's not in the active track list, in which
    // case we free everything now...
    mTrack->destroy();
}

sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
    return mTrack->getCblk();
}

status_t AudioFlinger::TrackHandle::start() {
    return mTrack->start();
}

void AudioFlinger::TrackHandle::stop() {
    mTrack->stop();
}

void AudioFlinger::TrackHandle::flush() {
    mTrack->flush();
}

void AudioFlinger::TrackHandle::pause() {
    mTrack->pause();
}

status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
{
    return mTrack->attachAuxEffect(EffectId);
}

status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
                                                        sp<IMemory>* buffer) {
    if (!mTrack->isTimedTrack())
        return INVALID_OPERATION;

    PlaybackThread::TimedTrack* tt =
            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
    return tt->allocateTimedBuffer(size, buffer);
}

status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
                                                     int64_t pts) {
    if (!mTrack->isTimedTrack())
        return INVALID_OPERATION;

    PlaybackThread::TimedTrack* tt =
            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
    return tt->queueTimedBuffer(buffer, pts);
}

status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
    const LinearTransform& xform, int target) {

    if (!mTrack->isTimedTrack())
        return INVALID_OPERATION;

    PlaybackThread::TimedTrack* tt =
            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
    return tt->setMediaTimeTransform(
        xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
}

status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) {
    return mTrack->setParameters(keyValuePairs);
}

status_t AudioFlinger::TrackHandle::getTimestamp(AudioTimestamp& timestamp)
{
    return mTrack->getTimestamp(timestamp);
}


void AudioFlinger::TrackHandle::signal()
{
    return mTrack->signal();
}

status_t AudioFlinger::TrackHandle::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t
        flags)
{
    return BnAudioTrack::onTransact(code, data, reply, flags);
}

// ----------------------------------------------------------------------------

// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
AudioFlinger::PlaybackThread::Track::Track(
            PlaybackThread *thread,
            const sp<Client>& client,
            audio_stream_type_t streamType,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int uid,
            IAudioFlinger::track_flags_t flags)
    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer,
                  sessionId, uid, true /*isOut*/),
    mFillingUpStatus(FS_INVALID),
    // mRetryCount initialized later when needed
    mSharedBuffer(sharedBuffer),
    mStreamType(streamType),
    mName(-1),  // see note below
    mMainBuffer(thread->mixBuffer()),
    mAuxBuffer(NULL),
    mAuxEffectId(0), mHasVolumeController(false),
    mPresentationCompleteFrames(0),
    mFlags(flags),
    mFastIndex(-1),
    mCachedVolume(1.0),
    mIsInvalid(false),
    mAudioTrackServerProxy(NULL),
    mResumeToStopping(false)
{
    if (mCblk != NULL) {
        if (sharedBuffer == 0) {
            mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
                    mFrameSize);
        } else {
            mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
                    mFrameSize);
        }
        mServerProxy = mAudioTrackServerProxy;
        // to avoid leaking a track name, do not allocate one unless there is an mCblk
        mName = thread->getTrackName_l(channelMask, sessionId);
        if (mName < 0) {
            ALOGE("no more track names available");
            return;
        }
        // only allocate a fast track index if we were able to allocate a normal track name
        if (flags & IAudioFlinger::TRACK_FAST) {
            mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
            ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
            int i = __builtin_ctz(thread->mFastTrackAvailMask);
            ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
            // FIXME This is too eager. We allocate a fast track index before the
            // fast track becomes active. Since fast tracks are a scarce resource,
            // this means we are potentially denying other more important fast tracks from
            // being created. It would be better to allocate the index dynamically.
            mFastIndex = i;
            // Read the initial underruns because this field is never cleared by the fast mixer
            mObservedUnderruns = thread->getFastTrackUnderruns(i);
            thread->mFastTrackAvailMask &= ~(1 << i);
        }
    }
    ALOGV("Track constructor name %d, calling pid %d", mName,
            IPCThreadState::self()->getCallingPid());
}

AudioFlinger::PlaybackThread::Track::~Track()
{
    ALOGV("PlaybackThread::Track destructor");

    // The destructor would clear mSharedBuffer,
    // but it will not push the decremented reference count,
    // leaving the client's IMemory dangling indefinitely.
    // This prevents that leak.
    if (mSharedBuffer != 0) {
        mSharedBuffer.clear();
        // flush the binder command buffer
        IPCThreadState::self()->flushCommands();
    }
}

void AudioFlinger::PlaybackThread::Track::destroy()
{
    // NOTE: destroyTrack_l() can remove a strong reference to this Track
    // by removing it from mTracks vector, so there is a risk that this Track's
    // destructor is called. As the destructor needs to lock mLock,
    // we must acquire a strong reference on this Track before locking mLock
    // here so that the destructor is called only when exiting this function.
    // On the other hand, as long as Track::destroy() is only called by
    // TrackHandle destructor, the TrackHandle still holds a strong ref on
    // this Track with its member mTrack.
    sp<Track> keep(this);
    { // scope for mLock
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0) {
            Mutex::Autolock _l(thread->mLock);
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            bool wasActive = playbackThread->destroyTrack_l(this);
            if (!isOutputTrack() && !wasActive) {
                AudioSystem::releaseOutput(thread->id());
            }
        }
    }
}

/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
{
    result.append(" Name Client Type Fmt Chn mask Session fCount S F SRate "
                  "L dB R dB Server Main buf Aux Buf Flags UndFrmCnt\n");
}

void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
{
    uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
    if (isFastTrack()) {
        sprintf(buffer, "   F %2d", mFastIndex);
    } else {
        sprintf(buffer, "   %4d", mName - AudioMixer::TRACK0);
    }
    track_state state = mState;
    char stateChar;
    if (isTerminated()) {
        stateChar = 'T';
    } else {
        switch (state) {
        case IDLE:
            stateChar = 'I';
            break;
        case STOPPING_1:
            stateChar = 's';
            break;
        case STOPPING_2:
            stateChar = '5';
            break;
        case STOPPED:
            stateChar = 'S';
            break;
        case RESUMING:
            stateChar = 'R';
            break;
        case ACTIVE:
            stateChar = 'A';
            break;
        case PAUSING:
            stateChar = 'p';
            break;
        case PAUSED:
            stateChar = 'P';
            break;
        case FLUSHED:
            stateChar = 'F';
            break;
        default:
            stateChar = '?';
            break;
        }
    }
    char nowInUnderrun;
    switch (mObservedUnderruns.mBitFields.mMostRecent) {
    case UNDERRUN_FULL:
        nowInUnderrun = ' ';
        break;
    case UNDERRUN_PARTIAL:
        nowInUnderrun = '<';
        break;
    case UNDERRUN_EMPTY:
        nowInUnderrun = '*';
        break;
    default:
        nowInUnderrun = '?';
        break;
    }
    snprintf(&buffer[7], size-7, " %6u %4u %08X %08X %7u %6u %1c %1d %5u %5.2g %5.2g "
                                 "%08X %08X %08X 0x%03X %9u%c\n",
            (mClient == 0) ? getpid_cached : mClient->pid(),
            mStreamType,
            mFormat,
            mChannelMask,
            mSessionId,
            mFrameCount,
            stateChar,
            mFillingUpStatus,
            mAudioTrackServerProxy->getSampleRate(),
            20.0 * log10((vlr & 0xFFFF) / 4096.0),
            20.0 * log10((vlr >> 16) / 4096.0),
            mCblk->mServer,
            (int)mMainBuffer,
            (int)mAuxBuffer,
            mCblk->mFlags,
            mAudioTrackServerProxy->getUnderrunFrames(),
            nowInUnderrun);
}

uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
    return mAudioTrackServerProxy->getSampleRate();
}

// AudioBufferProvider interface
status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
        AudioBufferProvider::Buffer* buffer, int64_t pts)
{
    ServerProxy::Buffer buf;
    size_t desiredFrames = buffer->frameCount;
    buf.mFrameCount = desiredFrames;
    status_t status = mServerProxy->obtainBuffer(&buf);
    buffer->frameCount = buf.mFrameCount;
    buffer->raw = buf.mRaw;
    if (buf.mFrameCount == 0) {
        mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
    }
    return status;
}

// releaseBuffer() is not overridden

// ExtendedAudioBufferProvider interface

// Note that framesReady() takes a mutex on the control block using tryLock().
// This could result in priority inversion if framesReady() is called by the normal mixer,
// as the normal mixer thread runs at lower priority than the client's callback thread:
// there is a short window within framesReady() during which the normal mixer could be
// preempted, and the client callback would block.
// Another problem can occur if framesReady() is called by the fast mixer:
// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
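// (For illustration: if the fast mixer's period were on the order of a few milliseconds,
// a single 1 ms stall inside framesReady() would consume a large fraction of that budget,
// so repeated stalls would show up as underruns.)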
size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
    return mAudioTrackServerProxy->framesReady();
}

size_t AudioFlinger::PlaybackThread::Track::framesReleased() const
{
    return mAudioTrackServerProxy->framesReleased();
}

// Don't call for fast tracks; the framesReady() could result in priority inversion
bool AudioFlinger::PlaybackThread::Track::isReady() const {
    if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing() || isStopping()) {
        return true;
    }

    if (framesReady() >= mFrameCount ||
            (mCblk->mFlags & CBLK_FORCEREADY)) {
        mFillingUpStatus = FS_FILLED;
        android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
        return true;
    }
    return false;
}

status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event,
                                                    int triggerSession)
{
    status_t status = NO_ERROR;
    ALOGV("start(%d), calling pid %d session %d",
            mName, IPCThreadState::self()->getCallingPid(), mSessionId);

    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        if (isOffloaded()) {
            Mutex::Autolock _laf(thread->mAudioFlinger->mLock);
            Mutex::Autolock _lth(thread->mLock);
            sp<EffectChain> ec = thread->getEffectChain_l(mSessionId);
            if (thread->mAudioFlinger->isNonOffloadableGlobalEffectEnabled_l() ||
                    (ec != 0 && ec->isNonOffloadableEnabled())) {
                invalidate();
                return PERMISSION_DENIED;
            }
        }
        Mutex::Autolock _lth(thread->mLock);
        track_state state = mState;
        // here the track could be either new, or restarted
        // in both cases "unstop" the track

        if (state == PAUSED) {
            if (mResumeToStopping) {
                // the track was paused while stopping (draining); resume into
                // STOPPING_1 so the stop can complete
                mState = TrackBase::STOPPING_1;
                ALOGV("PAUSED => STOPPING_1 (%d) on thread %p", mName, this);
            } else {
                mState = TrackBase::RESUMING;
                ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
            }
        } else {
            mState = TrackBase::ACTIVE;
            ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
        }

        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
        status = playbackThread->addTrack_l(this);
        if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
            triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
            // restore previous state if start was rejected by policy manager
            if (status == PERMISSION_DENIED) {
                mState = state;
            }
        }
        // track was already in the active list, not a problem
        if (status == ALREADY_EXISTS) {
            status = NO_ERROR;
        } else {
            // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
            // It is usually unsafe to access the server proxy from a binder thread.
            // But in this case we know the mixer thread (whether normal mixer or fast mixer)
            // isn't looking at this track yet:  we still hold the normal mixer thread lock,
            // and for fast tracks the track is not yet in the fast mixer thread's active set.
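            // (Note: the single-frame obtainBuffer() below is used here only to give the
            // proxy a chance to observe and acknowledge a pending flush; its return value
            // is deliberately ignored.)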
            ServerProxy::Buffer buffer;
            buffer.mFrameCount = 1;
            (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
        }
    } else {
        status = BAD_VALUE;
    }
    return status;
}

void AudioFlinger::PlaybackThread::Track::stop()
{
    ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        track_state state = mState;
        if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
            // If the track is not active (PAUSED and buffers full), flush buffers
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                reset();
                mState = STOPPED;
            } else if (!isFastTrack() && !isOffloaded()) {
                mState = STOPPED;
            } else {
                // For fast tracks prepareTracks_l() will set state to STOPPING_2
                // when presentation is complete.
                // For an offloaded track this starts a drain and state will
                // move to STOPPING_2 when drain completes and then STOPPED
                mState = STOPPING_1;
            }
            ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
                    playbackThread);
        }
    }
}

void AudioFlinger::PlaybackThread::Track::pause()
{
    ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
        switch (mState) {
        case STOPPING_1:
        case STOPPING_2:
            if (!isOffloaded()) {
                /* nothing to do if track is not offloaded */
                break;
            }

            // Offloaded track was draining, we need to carry on draining when resumed
            mResumeToStopping = true;
            // fall through...
        case ACTIVE:
        case RESUMING:
            mState = PAUSING;
            ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
            playbackThread->broadcast_l();
            break;

        default:
            break;
        }
    }
}

void AudioFlinger::PlaybackThread::Track::flush()
{
    ALOGV("flush(%d)", mName);
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();

        if (isOffloaded()) {
            // If offloaded we allow flush during any state except terminated
            // and keep the track active to avoid problems if user is seeking
            // rapidly and underlying hardware has a significant delay handling
            // a pause
            if (isTerminated()) {
                return;
            }

            ALOGV("flush: offload flush");
            reset();

            if (mState == STOPPING_1 || mState == STOPPING_2) {
                ALOGV("flushed in STOPPING_1 or 2 state, change state to ACTIVE");
                mState = ACTIVE;
            }

            if (mState == ACTIVE) {
                ALOGV("flush called in active state, resetting buffer time out retry count");
                mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
            }

            mResumeToStopping = false;
        } else {
            if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
                    mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
                return;
            }
            // No point remaining in PAUSED state after a flush => go to
            // FLUSHED state
            mState = FLUSHED;
            // do not reset the track if it is still in the process of being stopped or paused.
            // this will be done by prepareTracks_l() when the track is stopped.
            // prepareTracks_l() will see mState == FLUSHED, then
            // remove from active track list, reset(), and trigger presentation complete
            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                reset();
            }
        }
        // Prevent flush being lost if the track is flushed and then resumed
        // before mixer thread can run. This is important when offloading
        // because the hardware buffer could hold a large amount of audio
        playbackThread->flushOutput_l();
        playbackThread->broadcast_l();
    }
}

void AudioFlinger::PlaybackThread::Track::reset()
{
    // Do not reset twice to avoid discarding data written just after a flush and before
    // the audioflinger thread detects the track is stopped.
    if (!mResetDone) {
        // Force underrun condition to avoid false underrun callback until first data is
        // written to buffer
        android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
        mFillingUpStatus = FS_FILLING;
        mResetDone = true;
        if (mState == FLUSHED) {
            mState = IDLE;
        }
    }
}

status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread == 0) {
        ALOGE("thread is dead");
        return FAILED_TRANSACTION;
    } else if ((thread->type() == ThreadBase::DIRECT) ||
                    (thread->type() == ThreadBase::OFFLOAD)) {
        return thread->setParameters(keyValuePairs);
    } else {
        return PERMISSION_DENIED;
    }
}

status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
{
    // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant
    if (isFastTrack()) {
        return INVALID_OPERATION;
    }
    sp<ThreadBase> thread = mThread.promote();
    if (thread == 0) {
        return INVALID_OPERATION;
    }
    Mutex::Autolock _l(thread->mLock);
    PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
    if (!isOffloaded()) {
        if (!playbackThread->mLatchQValid) {
            return INVALID_OPERATION;
        }
        uint32_t unpresentedFrames =
                ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) /
                playbackThread->mSampleRate;
        uint32_t framesWritten = mAudioTrackServerProxy->framesReleased();
        if (framesWritten < unpresentedFrames) {
            return INVALID_OPERATION;
        }
        timestamp.mPosition = framesWritten - unpresentedFrames;
        timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime;
        return NO_ERROR;
    }

    return playbackThread->getTimestamp_l(timestamp);
}

status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
{
    status_t status = DEAD_OBJECT;
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
        sp<AudioFlinger> af = mClient->audioFlinger();

        Mutex::Autolock _l(af->mLock);

        sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);

        if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) {
            Mutex::Autolock _dl(playbackThread->mLock);
            Mutex::Autolock _sl(srcThread->mLock);
            sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
            if (chain == 0) {
                return INVALID_OPERATION;
            }

            sp<EffectModule> effect = chain->getEffectFromId_l(EffectId);
            if (effect == 0) {
                return INVALID_OPERATION;
            }
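            // Move the effect from srcThread to this track's thread: detach it,
            // attach it here, restart it if it was active, and re-register the new
            // location with AudioSystem; on failure it is put back on srcThread.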
            srcThread->removeEffect_l(effect);
            status = playbackThread->addEffect_l(effect);
            if (status != NO_ERROR) {
                srcThread->addEffect_l(effect);
                return INVALID_OPERATION;
            }
            // removeEffect_l() has stopped the effect if it was active so it must be restarted
            if (effect->state() == EffectModule::ACTIVE ||
                    effect->state() == EffectModule::STOPPING) {
                effect->start();
            }

            sp<EffectChain> dstChain = effect->chain().promote();
            if (dstChain == 0) {
                srcThread->addEffect_l(effect);
                return INVALID_OPERATION;
            }
            AudioSystem::unregisterEffect(effect->id());
            AudioSystem::registerEffect(&effect->desc(),
                                        srcThread->id(),
                                        dstChain->strategy(),
                                        AUDIO_SESSION_OUTPUT_MIX,
                                        effect->id());
            AudioSystem::setEffectEnabled(effect->id(), effect->isEnabled());
        }
        status = playbackThread->attachAuxEffect(this, EffectId);
    }
    return status;
}

void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
{
    mAuxEffectId = EffectId;
    mAuxBuffer = buffer;
}

bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
                                                               size_t audioHalFrames)
{
    // a track is considered presented when the total number of frames written to audio HAL
    // corresponds to the number of frames written when presentationComplete() is called for the
    // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
    // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
    // to detect when all frames have been played. In this case framesWritten isn't
    // useful because it doesn't always reflect whether there is data in the h/w
    // buffers, particularly if a track has been paused and resumed during draining
    ALOGV("presentationComplete() mPresentationCompleteFrames %d framesWritten %d",
            mPresentationCompleteFrames, framesWritten);
    if (mPresentationCompleteFrames == 0) {
        mPresentationCompleteFrames = framesWritten + audioHalFrames;
        ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
                mPresentationCompleteFrames, audioHalFrames);
    }

    if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
        ALOGV("presentationComplete() session %d complete: framesWritten %d",
                mSessionId, framesWritten);
        triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
        mAudioTrackServerProxy->setStreamEndDone();
        return true;
    }
    return false;
}

void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
{
    for (int i = 0; i < (int)mSyncEvents.size(); i++) {
        if (mSyncEvents[i]->type() == type) {
            mSyncEvents[i]->trigger();
            mSyncEvents.removeAt(i);
            i--;
        }
    }
}

// implement VolumeBufferProvider interface

uint32_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
{
    // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
    ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
    uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
    uint32_t vl = vlr & 0xFFFF;
    uint32_t vr = vlr >> 16;
    // track volumes come from shared memory, so can't be trusted and must be clamped
    if (vl > MAX_GAIN_INT) {
        vl = MAX_GAIN_INT;
    }
    if (vr > MAX_GAIN_INT) {
        vr = MAX_GAIN_INT;
    }
    // now apply the cached master volume and stream type volume;
    // this is trusted but lacks any synchronization or barrier so may be stale
    float v = mCachedVolume;
    vl *= v;
    vr *= v;
    // re-combine into U4.16
    vlr = (vr << 16) | (vl & 0xFFFF);
    // FIXME look at mute, pause, and stop flags
    return vlr;
}

status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
{
    if (isTerminated() || mState == PAUSED ||
            ((framesReady() == 0) && ((mSharedBuffer != 0) ||
                                      (mState == STOPPED)))) {
        ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
              mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
        event->cancel();
        return INVALID_OPERATION;
    }
    (void) TrackBase::setSyncEvent(event);
    return NO_ERROR;
}

void AudioFlinger::PlaybackThread::Track::invalidate()
{
    // FIXME should use proxy, and needs work
    audio_track_cblk_t* cblk = mCblk;
    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
    android_atomic_release_store(0x40000000, &cblk->mFutex);
    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
    (void) __futex_syscall3(&cblk->mFutex, FUTEX_WAKE, INT_MAX);
    mIsInvalid = true;
}

void AudioFlinger::PlaybackThread::Track::signal()
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        PlaybackThread *t = (PlaybackThread *)thread.get();
        Mutex::Autolock _l(t->mLock);
        t->broadcast_l();
    }
}

// ----------------------------------------------------------------------------

sp<AudioFlinger::PlaybackThread::TimedTrack>
AudioFlinger::PlaybackThread::TimedTrack::create(
            PlaybackThread *thread,
            const sp<Client>& client,
            audio_stream_type_t streamType,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int uid) {
    if (!client->reserveTimedTrack())
        return 0;

    return new TimedTrack(
        thread, client, streamType, sampleRate, format, channelMask, frameCount,
        sharedBuffer, sessionId, uid);
}

AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
            PlaybackThread *thread,
            const sp<Client>& client,
            audio_stream_type_t streamType,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int uid)
    : Track(thread, client, streamType, sampleRate, format, channelMask,
            frameCount, sharedBuffer, sessionId, uid, IAudioFlinger::TRACK_TIMED),
      mQueueHeadInFlight(false),
      mTrimQueueHeadOnRelease(false),
      mFramesPendingInQueue(0),
      mTimedSilenceBuffer(NULL),
      mTimedSilenceBufferSize(0),
      mTimedAudioOutputOnTime(false),
      mMediaTimeTransformValid(false)
{
    LocalClock lc;
    mLocalTimeFreq = lc.getLocalFreq();

    mLocalTimeToSampleTransform.a_zero = 0;
    mLocalTimeToSampleTransform.b_zero = 0;
    mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
    mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
    LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
                            &mLocalTimeToSampleTransform.a_to_b_denom);

    mMediaTimeToSampleTransform.a_zero = 0;
    mMediaTimeToSampleTransform.b_zero = 0;
    mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
    mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
    LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
                            &mMediaTimeToSampleTransform.a_to_b_denom);
}

AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
    mClient->releaseTimedTrack();
    delete [] mTimedSilenceBuffer;
}

status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
    size_t size, sp<IMemory>* buffer) {

    Mutex::Autolock _l(mTimedBufferQueueLock);

    trimTimedBufferQueue_l();

    // lazily initialize the shared memory heap for timed buffers
    if (mTimedMemoryDealer == NULL) {
        const int kTimedBufferHeapSize = 512 << 10;

        mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
                                              "AudioFlingerTimed");
        if (mTimedMemoryDealer == NULL)
            return NO_MEMORY;
    }

    sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
    if (newBuffer == NULL) {
        newBuffer = mTimedMemoryDealer->allocate(size);
        if (newBuffer == NULL)
            return NO_MEMORY;
    }

    *buffer = newBuffer;
    return NO_ERROR;
}

// caller must hold mTimedBufferQueueLock
void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
    int64_t mediaTimeNow;
    {
        Mutex::Autolock mttLock(mMediaTimeTransformLock);
        if (!mMediaTimeTransformValid)
            return;

        int64_t targetTimeNow;
        status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
            ? mCCHelper.getCommonTime(&targetTimeNow)
            : mCCHelper.getLocalTime(&targetTimeNow);

        if (OK != res)
            return;

        if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
                                                    &mediaTimeNow)) {
            return;
        }
    }

    size_t trimEnd;
    for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
        int64_t bufEnd;

        if ((trimEnd + 1) < mTimedBufferQueue.size()) {
            // We have a next buffer.  Just use its PTS as the PTS of the frame
            // following the last frame in this buffer.  If the stream is sparse
            // (ie, there are deliberate gaps left in the stream which should be
            // filled with silence by the TimedAudioTrack), then this can result
            // in one extra buffer being left un-trimmed when it could have
            // been.  In general, this is not typical, and we would rather
            // optimize away the TS calculation below for the more common case
            // where PTSes are contiguous.
            bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
        } else {
            // We have no next buffer.  Compute the PTS of the frame following
            // the last frame in this buffer by computing the duration of
            // this frame in media time units and adding it to the PTS of the
            // buffer.
            int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
                               / mFrameSize;

            if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
                                                                &bufEnd)) {
                ALOGE("Failed to convert frame count of %lld to media time"
                      " duration" " (scale factor %d/%u) in %s",
                      frameCount,
                      mMediaTimeToSampleTransform.a_to_b_numer,
                      mMediaTimeToSampleTransform.a_to_b_denom,
                      __PRETTY_FUNCTION__);
                break;
            }
            bufEnd += mTimedBufferQueue[trimEnd].pts();
        }

        if (bufEnd > mediaTimeNow)
            break;

        // Is the buffer we want to use in the middle of a mix operation right
        // now?  If so, don't actually trim it.  Just wait for the releaseBuffer
        // from the mixer which should be coming back shortly.
        if (!trimEnd && mQueueHeadInFlight) {
            mTrimQueueHeadOnRelease = true;
        }
    }

    size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
    if (trimStart < trimEnd) {
        // Update the bookkeeping for framesReady()
        for (size_t i = trimStart; i < trimEnd; ++i) {
            updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
        }

        // Now actually remove the buffers from the queue.
        mTimedBufferQueue.removeItemsAt(trimStart, trimEnd);
    }
}

void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
        const char* logTag) {
    ALOG_ASSERT(mTimedBufferQueue.size() > 0,
                "%s called (reason \"%s\"), but timed buffer queue has no"
                " elements to trim.", __FUNCTION__, logTag);

    updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
    mTimedBufferQueue.removeAt(0);
}

void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
        const TimedBuffer& buf,
        const char* logTag) {
    uint32_t bufBytes = buf.buffer()->size();
    uint32_t consumedAlready = buf.position();

    ALOG_ASSERT(consumedAlready <= bufBytes,
                "Bad bookkeeping while updating frames pending.  Timed buffer is"
                " only %u bytes long, but claims to have consumed %u"
                " bytes.  (update reason: \"%s\")",
                bufBytes, consumedAlready, logTag);

    uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize;
    ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
                "Bad bookkeeping while updating frames pending.  Should have at"
                " least %u queued frames, but we think we have only %u.  (update"
                " reason: \"%s\")",
                bufFrames, mFramesPendingInQueue, logTag);

    mFramesPendingInQueue -= bufFrames;
}

status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
    const sp<IMemory>& buffer, int64_t pts) {

    {
        Mutex::Autolock mttLock(mMediaTimeTransformLock);
        if (!mMediaTimeTransformValid)
            return INVALID_OPERATION;
    }

    Mutex::Autolock _l(mTimedBufferQueueLock);

    uint32_t bufFrames = buffer->size() / mFrameSize;
    mFramesPendingInQueue += bufFrames;
    mTimedBufferQueue.add(TimedBuffer(buffer, pts));

    return NO_ERROR;
}

status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
    const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {

    ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
           xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
           target);

    if (!(target == TimedAudioTrack::LOCAL_TIME ||
          target == TimedAudioTrack::COMMON_TIME)) {
        return BAD_VALUE;
    }

    Mutex::Autolock lock(mMediaTimeTransformLock);
    mMediaTimeTransform = xform;
    mMediaTimeTransformTarget = target;
    mMediaTimeTransformValid = true;

    return NO_ERROR;
}

#define min(a, b) ((a) < (b) ? (a) : (b))

// implementation of getNextBuffer for tracks whose buffers have timestamps
status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
    AudioBufferProvider::Buffer* buffer, int64_t pts)
{
    if (pts == AudioBufferProvider::kInvalidPTS) {
        buffer->raw = NULL;
        buffer->frameCount = 0;
        mTimedAudioOutputOnTime = false;
        return INVALID_OPERATION;
    }

    Mutex::Autolock _l(mTimedBufferQueueLock);

    ALOG_ASSERT(!mQueueHeadInFlight,
                "getNextBuffer called without releaseBuffer!");

    while (true) {

        // if we have no timed buffers, then fail
        if (mTimedBufferQueue.isEmpty()) {
            buffer->raw = NULL;
            buffer->frameCount = 0;
            return NOT_ENOUGH_DATA;
        }

        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);

        // calculate the PTS of the head of the timed buffer queue expressed in
        // local time
        int64_t headLocalPTS;
        {
            Mutex::Autolock mttLock(mMediaTimeTransformLock);

            ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");

            if (mMediaTimeTransform.a_to_b_denom == 0) {
                // the transform represents a pause, so yield silence
                timedYieldSilence_l(buffer->frameCount, buffer);
                return NO_ERROR;
            }

            int64_t transformedPTS;
            if (!mMediaTimeTransform.doForwardTransform(head.pts(),
                                                        &transformedPTS)) {
                // the transform failed.  this shouldn't happen, but if it does
                // then just drop this buffer
                ALOGW("timedGetNextBuffer transform failed");
                buffer->raw = NULL;
                buffer->frameCount = 0;
                trimTimedBufferQueueHead_l("getNextBuffer; no transform");
                return NO_ERROR;
            }

            if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
                if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
                                                          &headLocalPTS)) {
                    buffer->raw = NULL;
                    buffer->frameCount = 0;
                    return INVALID_OPERATION;
                }
            } else {
                headLocalPTS = transformedPTS;
            }
        }

        uint32_t sr = sampleRate();

        // adjust the head buffer's PTS to reflect the portion of the head buffer
        // that has already been consumed
        int64_t effectivePTS = headLocalPTS +
                ((head.position() / mFrameSize) * mLocalTimeFreq / sr);

        // Calculate the delta in samples between the head of the input buffer
        // queue and the start of the next output buffer that will be written.
        // If the transformation fails because of over or underflow, it means
        // that the sample's position in the output stream is so far out of
        // whack that it should just be dropped.
        int64_t sampleDelta;
        if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
            ALOGV("*** head buffer is too far from PTS: dropped buffer");
            trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
                                       " mix");
            continue;
        }
        if (!mLocalTimeToSampleTransform.doForwardTransform(
                (effectivePTS - pts) << 32, &sampleDelta)) {
            ALOGV("*** too late during sample rate transform: dropped buffer");
            trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
            continue;
        }

        ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
               " sampleDelta=[%d.%08x]",
               head.pts(), head.position(), pts,
               static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
                   + (sampleDelta >> 32)),
               static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));

        // if the delta between the ideal placement for the next input sample and
        // the current output position is within this threshold, then we will
        // concatenate the next input samples to the previous output
        const int64_t kSampleContinuityThreshold =
                (static_cast<int64_t>(sr) << 32) / 250;

        // if this is the first buffer of audio that we're emitting from this track
        // then it should be almost exactly on time.
        const int64_t kSampleStartupThreshold = 1LL << 32;

        if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
            (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
            // the next input is close enough to being on time, so concatenate it
            // with the last output
            timedYieldSamples_l(buffer);

            ALOGVV("*** on time: head.pos=%d frameCount=%u",
                   head.position(), buffer->frameCount);
            return NO_ERROR;
        }

        // Looks like our output is not on time.  Reset our on-time status.
        // Next time we mix samples from our input queue, they should be within
        // the StartupThreshold.
        mTimedAudioOutputOnTime = false;
        if (sampleDelta > 0) {
            // the gap between the current output position and the proper start of
            // the next input sample is too big, so fill it with silence
            uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;

            timedYieldSilence_l(framesUntilNextInput, buffer);
            ALOGV("*** silence: frameCount=%u", buffer->frameCount);
            return NO_ERROR;
        } else {
            // the next input sample is late
            uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
            size_t onTimeSamplePosition =
                    head.position() + lateFrames * mFrameSize;

            if (onTimeSamplePosition > head.buffer()->size()) {
                // all the remaining samples in the head are too late, so
                // drop it and move on
                ALOGV("*** too late: dropped buffer");
                trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
                continue;
            } else {
                // skip over the late samples
                head.setPosition(onTimeSamplePosition);

                // yield the available samples
                timedYieldSamples_l(buffer);

                ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
                return NO_ERROR;
            }
        }
    }
}

// Yield samples from the timed buffer queue head up to the given output
// buffer's capacity.
//
// Caller must hold mTimedBufferQueueLock
void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
    AudioBufferProvider::Buffer* buffer) {

    const TimedBuffer& head = mTimedBufferQueue[0];

    buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
                   head.position());

    uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
                                 mFrameSize);
    size_t framesRequested = buffer->frameCount;
    buffer->frameCount = min(framesLeftInHead, framesRequested);

    mQueueHeadInFlight = true;
    mTimedAudioOutputOnTime = true;
}

// Yield samples of silence up to the given output buffer's capacity
//
// Caller must hold mTimedBufferQueueLock
void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
    uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {

    // lazily allocate a buffer filled with silence
    if (mTimedSilenceBufferSize < numFrames * mFrameSize) {
        delete [] mTimedSilenceBuffer;
        mTimedSilenceBufferSize = numFrames * mFrameSize;
        mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
        memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
    }

    buffer->raw = mTimedSilenceBuffer;
    size_t framesRequested = buffer->frameCount;
    buffer->frameCount = min(numFrames, framesRequested);

    mTimedAudioOutputOnTime = false;
}

// AudioBufferProvider interface
void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
    AudioBufferProvider::Buffer* buffer) {

    Mutex::Autolock _l(mTimedBufferQueueLock);

    // If the buffer which was just released is part of the buffer at the head
    // of the queue, be sure to update the amount of the buffer which has been
    // consumed.  If the buffer being returned is not part of the head of the
    // queue, it's either because the buffer is part of the silence buffer, or
    // because the head of the timed queue was trimmed after the mixer called
    // getNextBuffer but before the mixer called releaseBuffer.
    if (buffer->raw == mTimedSilenceBuffer) {
        ALOG_ASSERT(!mQueueHeadInFlight,
                    "Queue head in flight during release of silence buffer!");
        goto done;
    }

    ALOG_ASSERT(mQueueHeadInFlight,
                "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
                " head in flight.");

    if (mTimedBufferQueue.size()) {
        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);

        void* start = head.buffer()->pointer();
        void* end   = reinterpret_cast<void*>(
                        reinterpret_cast<uint8_t*>(head.buffer()->pointer())
                        + head.buffer()->size());

        ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
                    "released buffer not within the head of the timed buffer"
                    " queue; qHead = [%p, %p], released buffer = %p",
                    start, end, buffer->raw);

        head.setPosition(head.position() +
                (buffer->frameCount * mFrameSize));
        mQueueHeadInFlight = false;

        ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
                    "Bad bookkeeping during releaseBuffer!  Should have at"
Should have at" 1455 " least %u queued frames, but we think we have only %u", 1456 buffer->frameCount, mFramesPendingInQueue); 1457 1458 mFramesPendingInQueue -= buffer->frameCount; 1459 1460 if ((static_cast<size_t>(head.position()) >= head.buffer()->size()) 1461 || mTrimQueueHeadOnRelease) { 1462 trimTimedBufferQueueHead_l("releaseBuffer"); 1463 mTrimQueueHeadOnRelease = false; 1464 } 1465 } else { 1466 LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no" 1467 " buffers in the timed buffer queue"); 1468 } 1469 1470 done: 1471 buffer->raw = 0; 1472 buffer->frameCount = 0; 1473 } 1474 1475 size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const { 1476 Mutex::Autolock _l(mTimedBufferQueueLock); 1477 return mFramesPendingInQueue; 1478 } 1479 1480 AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer() 1481 : mPTS(0), mPosition(0) {} 1482 1483 AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer( 1484 const sp<IMemory>& buffer, int64_t pts) 1485 : mBuffer(buffer), mPTS(pts), mPosition(0) {} 1486 1487 1488 // ---------------------------------------------------------------------------- 1489 1490 AudioFlinger::PlaybackThread::OutputTrack::OutputTrack( 1491 PlaybackThread *playbackThread, 1492 DuplicatingThread *sourceThread, 1493 uint32_t sampleRate, 1494 audio_format_t format, 1495 audio_channel_mask_t channelMask, 1496 size_t frameCount, 1497 int uid) 1498 : Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount, 1499 NULL, 0, uid, IAudioFlinger::TRACK_DEFAULT), 1500 mActive(false), mSourceThread(sourceThread), mClientProxy(NULL) 1501 { 1502 1503 if (mCblk != NULL) { 1504 mOutBuffer.frameCount = 0; 1505 playbackThread->mTracks.add(this); 1506 ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, " 1507 "mCblk->frameCount_ %u, mChannelMask 0x%08x", 1508 mCblk, mBuffer, 1509 mCblk->frameCount_, mChannelMask); 1510 // since client and server are in the same process, 1511 // the buffer has the same virtual address on both sides 1512 mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize); 1513 mClientProxy->setVolumeLR((uint32_t(uint16_t(0x1000)) << 16) | uint16_t(0x1000)); 1514 mClientProxy->setSendLevel(0.0); 1515 mClientProxy->setSampleRate(sampleRate); 1516 mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize, 1517 true /*clientInServer*/); 1518 } else { 1519 ALOGW("Error creating output track on thread %p", playbackThread); 1520 } 1521 } 1522 1523 AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack() 1524 { 1525 clearBufferQueue(); 1526 delete mClientProxy; 1527 // superclass destructor will now delete the server proxy and shared memory both refer to 1528 } 1529 1530 status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event, 1531 int triggerSession) 1532 { 1533 status_t status = Track::start(event, triggerSession); 1534 if (status != NO_ERROR) { 1535 return status; 1536 } 1537 1538 mActive = true; 1539 mRetryCount = 127; 1540 return status; 1541 } 1542 1543 void AudioFlinger::PlaybackThread::OutputTrack::stop() 1544 { 1545 Track::stop(); 1546 clearBufferQueue(); 1547 mOutBuffer.frameCount = 0; 1548 mActive = false; 1549 } 1550 1551 bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames) 1552 { 1553 Buffer *pInBuffer; 1554 Buffer inBuffer; 1555 uint32_t channelCount = mChannelCount; 1556 bool outputBufferFull = false; 1557 inBuffer.frameCount = frames; 1558 inBuffer.i16 = data; 1559 1560 
    uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();

    if (!mActive && frames != 0) {
        start();
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0) {
            MixerThread *mixerThread = (MixerThread *)thread.get();
            if (mFrameCount > frames) {
                if (mBufferQueue.size() < kMaxOverFlowBuffers) {
                    uint32_t startFrames = (mFrameCount - frames);
                    pInBuffer = new Buffer;
                    pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
                    pInBuffer->frameCount = startFrames;
                    pInBuffer->i16 = pInBuffer->mBuffer;
                    memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
                    mBufferQueue.add(pInBuffer);
                } else {
                    ALOGW("OutputTrack::write() %p no more buffers in queue", this);
                }
            }
        }
    }

    while (waitTimeLeftMs) {
        // First write pending buffers, then new data
        if (mBufferQueue.size()) {
            pInBuffer = mBufferQueue.itemAt(0);
        } else {
            pInBuffer = &inBuffer;
        }

        if (pInBuffer->frameCount == 0) {
            break;
        }

        if (mOutBuffer.frameCount == 0) {
            mOutBuffer.frameCount = pInBuffer->frameCount;
            nsecs_t startTime = systemTime();
            status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
            if (status != NO_ERROR) {
                ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
                        mThread.unsafe_get(), status);
                outputBufferFull = true;
                break;
            }
            uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
            if (waitTimeLeftMs >= waitTimeMs) {
                waitTimeLeftMs -= waitTimeMs;
            } else {
                waitTimeLeftMs = 0;
            }
        }

        uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
                pInBuffer->frameCount;
        memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
        Proxy::Buffer buf;
        buf.mFrameCount = outFrames;
        buf.mRaw = NULL;
        mClientProxy->releaseBuffer(&buf);
        pInBuffer->frameCount -= outFrames;
        pInBuffer->i16 += outFrames * channelCount;
        mOutBuffer.frameCount -= outFrames;
        mOutBuffer.i16 += outFrames * channelCount;

        if (pInBuffer->frameCount == 0) {
            if (mBufferQueue.size()) {
                mBufferQueue.removeAt(0);
                delete [] pInBuffer->mBuffer;
                delete pInBuffer;
                ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
                        mThread.unsafe_get(), mBufferQueue.size());
            } else {
                break;
            }
        }
    }

    // If we could not write all frames, allocate a buffer and queue it for next time.
    if (inBuffer.frameCount) {
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0 && !thread->standby()) {
            if (mBufferQueue.size() < kMaxOverFlowBuffers) {
                pInBuffer = new Buffer;
                pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
                pInBuffer->frameCount = inBuffer.frameCount;
                pInBuffer->i16 = pInBuffer->mBuffer;
                memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
                        sizeof(int16_t));
                mBufferQueue.add(pInBuffer);
                ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
                        mThread.unsafe_get(), mBufferQueue.size());
            } else {
                ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
                        this, mThread.unsafe_get());
            }
        }
    }

    // Calling write() with a 0 length buffer means that no more data will be written:
    // If no more buffers are pending, fill output track buffer to make sure it is started
    // by output mixer.
    if (frames == 0 && mBufferQueue.size() == 0) {
        // FIXME broken, replace by getting framesReady() from proxy
        size_t user = 0;    // was mCblk->user
        if (user < mFrameCount) {
            frames = mFrameCount - user;
            pInBuffer = new Buffer;
            pInBuffer->mBuffer = new int16_t[frames * channelCount];
            pInBuffer->frameCount = frames;
            pInBuffer->i16 = pInBuffer->mBuffer;
            memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
            mBufferQueue.add(pInBuffer);
        } else if (mActive) {
            stop();
        }
    }

    return outputBufferFull;
}

status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
        AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
{
    ClientProxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    struct timespec timeout;
    timeout.tv_sec = waitTimeMs / 1000;
    timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
    status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
    buffer->frameCount = buf.mFrameCount;
    buffer->raw = buf.mRaw;
    return status;
}

void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
{
    size_t size = mBufferQueue.size();

    for (size_t i = 0; i < size; i++) {
        Buffer *pBuffer = mBufferQueue.itemAt(i);
        delete [] pBuffer->mBuffer;
        delete pBuffer;
    }
    mBufferQueue.clear();
}


// ----------------------------------------------------------------------------
// Record
// ----------------------------------------------------------------------------

AudioFlinger::RecordHandle::RecordHandle(
        const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
    : BnAudioRecord(),
      mRecordTrack(recordTrack)
{
}

AudioFlinger::RecordHandle::~RecordHandle() {
    stop_nonvirtual();
    mRecordTrack->destroy();
}

sp<IMemory> AudioFlinger::RecordHandle::getCblk() const {
    return mRecordTrack->getCblk();
}

status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
        int triggerSession) {
    ALOGV("RecordHandle::start()");
    return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
}

void AudioFlinger::RecordHandle::stop() {
    stop_nonvirtual();
}

void AudioFlinger::RecordHandle::stop_nonvirtual() {
    ALOGV("RecordHandle::stop()");
    mRecordTrack->stop();
}

status_t
status_t AudioFlinger::RecordHandle::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    return BnAudioRecord::onTransact(code, data, reply, flags);
}

// ----------------------------------------------------------------------------

// RecordTrack constructor must be called with AudioFlinger::mLock held
AudioFlinger::RecordThread::RecordTrack::RecordTrack(
            RecordThread *thread,
            const sp<Client>& client,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            int sessionId,
            int uid)
    :   TrackBase(thread, client, sampleRate, format,
                  channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, uid, false /*isOut*/),
        mOverflow(false)
{
    ALOGV("RecordTrack constructor");
    if (mCblk != NULL) {
        mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
                mFrameSize);
        mServerProxy = mAudioRecordServerProxy;
    }
}

AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
{
    ALOGV("%s", __func__);
}

// AudioBufferProvider interface
status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
        int64_t pts)
{
    ServerProxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    status_t status = mServerProxy->obtainBuffer(&buf);
    buffer->frameCount = buf.mFrameCount;
    buffer->raw = buf.mRaw;
    if (buf.mFrameCount == 0) {
        // FIXME also wake futex so that overrun is noticed more quickly
        (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
    }
    return status;
}

status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
                                                        int triggerSession)
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        return recordThread->start(this, event, triggerSession);
    } else {
        return BAD_VALUE;
    }
}

void AudioFlinger::RecordThread::RecordTrack::stop()
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        if (recordThread->stop(this)) {
            AudioSystem::stopInput(recordThread->id());
        }
    }
}

void AudioFlinger::RecordThread::RecordTrack::destroy()
{
    // see comments at AudioFlinger::PlaybackThread::Track::destroy()
    sp<RecordTrack> keep(this);
    {
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0) {
            if (mState == ACTIVE || mState == RESUMING) {
                AudioSystem::stopInput(thread->id());
            }
            AudioSystem::releaseInput(thread->id());
            Mutex::Autolock _l(thread->mLock);
            RecordThread *recordThread = (RecordThread *) thread.get();
            recordThread->destroyTrack_l(this);
        }
    }
}

void AudioFlinger::RecordThread::RecordTrack::invalidate()
{
    // FIXME should use proxy, and needs work
    audio_track_cblk_t* cblk = mCblk;
    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
    android_atomic_release_store(0x40000000, &cblk->mFutex);
    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
    (void) __futex_syscall3(&cblk->mFutex, FUTEX_WAKE, INT_MAX);
}

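// Dump support: the column headings appended here line up, field for field, with the
// snprintf() format string used in dump() below.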
/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
{
    result.append("Client Fmt Chn mask Session S Server fCount\n");
}

void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
{
    snprintf(buffer, size, "%6u %3u %08X %7u %1d %08X %6u\n",
            (mClient == 0) ? getpid_cached : mClient->pid(),
            mFormat,
            mChannelMask,
            mSessionId,
            mState,
            mCblk->mServer,
            mFrameCount);
}

}; // namespace android