/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG (mInService ? "AudioStreamInternalCapture_Service" \
                          : "AudioStreamInternalCapture_Client")
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <algorithm>
#include <aaudio/AAudio.h>

#include "client/AudioStreamInternalCapture.h"
#include "utility/AudioClock.h"

#define ATRACE_TAG ATRACE_TAG_AUDIO
#include <utils/Trace.h>

using android::WrappingBuffer;

using namespace aaudio;

AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface  &serviceInterface,
                                                 bool inService)
    : AudioStreamInternal(serviceInterface, inService) {

}

AudioStreamInternalCapture::~AudioStreamInternalCapture() {}

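// The server's write position has moved ahead of our read position, typically right
// after a cold start. Jump the read counter forward to the write counter and fold the
// difference into mFramesOffsetFromService so the position reported by getFramesRead()
// does not appear to jump.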
void AudioStreamInternalCapture::advanceClientToMatchServerPosition() {
    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t offset = readCounter - writeCounter;
    mFramesOffsetFromService += offset;
    ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld,"
          " offsetFromService = %lld",
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Force readCounter to match writeCounter.
    // This is because we cannot change the write counter in the hardware.
    mAudioEndpoint.setDataReadCounter(writeCounter);
}

// Read the data, blocking if needed when timeoutNanoseconds > 0.
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                               int64_t timeoutNanoseconds)
{
    return processData(buffer, numFrames, timeoutNanoseconds);
}

// Read as much data as we can without blocking.
aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
                                                  int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaRdNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.

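    // A free-running endpoint does not update the write counter for us, so
    // estimate it locally from the isochronous clock model.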
    if (mAudioEndpoint.isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
        int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // TODO refactor, maybe use setRemoteCounter()
        mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
    }

    // This code assumes that we have already received valid timestamps.
    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the write index passed the read index then consider it an overrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint.isFreeRunning() && mAudioEndpoint.getEmptyFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaOverRuns", mXRunCount);
        }
    }

    // Read some data from the buffer.
    //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
    int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
    //    numFrames, framesProcessed);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaRead", framesProcessed);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesProcessed >= 0) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next write burst to occur?

                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesProcessed;
}

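// Copy data out of the shared buffer, converting from the device format to the
// application format if they differ. The wrapping buffer may expose the data in
// up to two contiguous parts. Returns the number of frames actually read.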
aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
                                                                int32_t numFrames) {
    // ALOGD("readNowWithConversion(%p, %d)",
    //              buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *destination = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getFullFramesAvailable(&wrappingBuffer);

    // Read data in one or two parts.
    for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
        int32_t framesToProcess = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable <= 0) break;

        if (framesToProcess > framesAvailable) {
            framesToProcess = framesAvailable;
        }

        int32_t numBytes = getBytesPerFrame() * framesToProcess;
        int32_t numSamples = framesToProcess * getSamplesPerFrame();

        // TODO factor this out into a utility function
        if (getDeviceFormat() == getFormat()) {
            memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
        } else if (getDeviceFormat() == AAUDIO_FORMAT_PCM_I16
                   && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
            AAudioConvert_pcm16ToFloat(
                    (const int16_t *) wrappingBuffer.data[partIndex],
                    (float *) destination,
                    numSamples,
                    1.0f);
        } else if (getDeviceFormat() == AAUDIO_FORMAT_PCM_FLOAT
                   && getFormat() == AAUDIO_FORMAT_PCM_I16) {
            AAudioConvert_floatToPcm16(
                    (const float *) wrappingBuffer.data[partIndex],
                    (int16_t *) destination,
                    numSamples,
                    1.0f);
        } else {
            ALOGE("Format conversion not supported!");
            return AAUDIO_ERROR_INVALID_FORMAT;
        }
        destination += numBytes;
        framesLeft -= framesToProcess;
    }

    int32_t framesProcessed = numFrames - framesLeft;
    mAudioEndpoint.advanceReadIndex(framesProcessed);

    //ALOGD("readNowWithConversion() returns %d", framesProcessed);
    return framesProcessed;
}

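// For a capture stream the "write" side is the device filling the buffer.
// While the stream is active the hardware position is estimated from the clock
// model; otherwise the last known write counter is used. The result is clamped
// so it never moves backwards.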
int64_t AudioStreamInternalCapture::getFramesWritten() {
    int64_t framesWrittenHardware;
    if (isActive()) {
        framesWrittenHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
    } else {
        framesWrittenHardware = mAudioEndpoint.getDataWriteCounter();
    }
    // Prevent retrograde motion.
    mLastFramesWritten = std::max(mLastFramesWritten,
                                  framesWrittenHardware + mFramesOffsetFromService);
    //ALOGD("getFramesWritten() returns %lld",
    //      (long long)mLastFramesWritten);
    return mLastFramesWritten;
}

int64_t AudioStreamInternalCapture::getFramesRead() {
    int64_t frames = mAudioEndpoint.getDataReadCounter() + mFramesOffsetFromService;
    //ALOGD("getFramesRead() returns %lld", (long long)frames);
    return frames;
}

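// Entry point for the stream's callback thread. Loops until the stream stops,
// a read fails, or the app's data callback returns AAUDIO_CALLBACK_RESULT_STOP.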
// Read data from the stream and pass it to the callback for processing.
void *AudioStreamInternalCapture::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    if (!isDataCallbackSet()) return NULL;

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {

        // Read audio data from stream.
        int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

        // This is a BLOCKING READ!
        result = read(mCallbackBuffer, mCallbackFrames, timeoutNanos);
        if (result != mCallbackFrames) {
            ALOGE("callbackLoop: read() returned %d", result);
            if (result >= 0) {
                // Only read some of the frames requested. Must have timed out.
                result = AAUDIO_ERROR_TIMEOUT;
            }
            maybeCallErrorCallback(result);
            break;
        }

        // Call application using the AAudio callback interface.
        callbackResult = maybeCallDataCallback(mCallbackBuffer, mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return NULL;
}
    268