1 /* 2 * Copyright (C) 2017 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #define LOG_TAG "AAudioServiceEndpointMMAP" 18 //#define LOG_NDEBUG 0 19 #include <utils/Log.h> 20 21 #include <algorithm> 22 #include <assert.h> 23 #include <map> 24 #include <mutex> 25 #include <sstream> 26 #include <utils/Singleton.h> 27 #include <vector> 28 29 30 #include "AAudioEndpointManager.h" 31 #include "AAudioServiceEndpoint.h" 32 33 #include "core/AudioStreamBuilder.h" 34 #include "AAudioServiceEndpoint.h" 35 #include "AAudioServiceStreamShared.h" 36 #include "AAudioServiceEndpointPlay.h" 37 #include "AAudioServiceEndpointMMAP.h" 38 39 40 #define AAUDIO_BUFFER_CAPACITY_MIN 4 * 512 41 #define AAUDIO_SAMPLE_RATE_DEFAULT 48000 42 43 // This is an estimate of the time difference between the HW and the MMAP time. 44 // TODO Get presentation timestamps from the HAL instead of using these estimates. 
// Estimated offsets between the MMAP shared-memory position and the actual
// converter: output reaches the DAC ~3 ms after being written; input was at
// the ADC ~1 ms before it becomes readable (hence the negative value).
#define OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS  (3 * AAUDIO_NANOS_PER_MILLISECOND)
#define INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS   (-1 * AAUDIO_NANOS_PER_MILLISECOND)

using namespace android;  // TODO just import names needed
using namespace aaudio;   // TODO just import names needed

AAudioServiceEndpointMMAP::AAudioServiceEndpointMMAP()
        : mMmapStream(nullptr) {}

AAudioServiceEndpointMMAP::~AAudioServiceEndpointMMAP() {}

// Append this endpoint's MMAP-specific state to the base-class dump.
// Used by the service's dumpsys-style diagnostics.
std::string AAudioServiceEndpointMMAP::dump() const {
    std::stringstream result;

    result << " MMAP: framesTransferred = " << mFramesTransferred.get();
    result << ", HW nanos = " << mHardwareTimeOffsetNanos;
    result << ", port handle = " << mPortHandle;
    result << ", audio data FD = " << mAudioDataFileDescriptor;
    result << "\n";

    result << " HW Offset Micros: " <<
                                      (getHardwareTimeOffsetNanos()
                                       / AAUDIO_NANOS_PER_MICROSECOND) << "\n";

    result << AAudioServiceEndpoint::dump();
    return result.str();
}

// Open an MMAP/NOIRQ stream on the HAL for this endpoint.
//
// Negotiates format / sample rate / channel mask with the HAL, creates the
// shared-memory data buffer, dups the buffer FD, and scales the burst size
// up to a minimum duration.  On any failure after the HAL stream exists,
// jumps to `error` so close() releases the stream.
//
// @param request  client-supplied configuration (uid/pid and stream params)
// @return AAUDIO_OK on success, or a negative AAUDIO_ERROR_* code
aaudio_result_t AAudioServiceEndpointMMAP::open(const aaudio::AAudioStreamRequest &request) {
    aaudio_result_t result = AAUDIO_OK;
    // Fixed attributes for the HAL stream.
    // NOTE(review): source is VOICE_RECOGNITION but usage is MEDIA; presumably
    // only the field matching the stream direction is consulted — confirm.
    const audio_attributes_t attributes = {
            .content_type = AUDIO_CONTENT_TYPE_MUSIC,
            .usage = AUDIO_USAGE_MEDIA,
            .source = AUDIO_SOURCE_VOICE_RECOGNITION,
            .flags = AUDIO_FLAG_LOW_LATENCY,
            .tags = ""
    };
    audio_config_base_t config;
    audio_port_handle_t deviceId;

    int32_t burstMinMicros = AAudioProperty_getHardwareBurstMinMicros();
    int32_t burstMicros = 0;

    // Copy the requested stream parameters into this endpoint.
    copyFrom(request.getConstantConfiguration());

    mMmapClient.clientUid = request.getUserId();
    mMmapClient.clientPid = request.getProcessId();
    mMmapClient.packageName.setTo(String16(""));

    mRequestedDeviceId = deviceId = getDeviceId();

    // Fill in config.
    // FLOAT is mapped to I16 here; presumably the MMAP path does not support
    // float at this point — confirm before relying on it.
    aaudio_format_t aaudioFormat = getFormat();
    if (aaudioFormat == AAUDIO_UNSPECIFIED || aaudioFormat == AAUDIO_FORMAT_PCM_FLOAT) {
        aaudioFormat = AAUDIO_FORMAT_PCM_I16;
    }
    config.format = AAudioConvert_aaudioToAndroidDataFormat(aaudioFormat);

    int32_t aaudioSampleRate = getSampleRate();
    if (aaudioSampleRate == AAUDIO_UNSPECIFIED) {
        aaudioSampleRate = AAUDIO_SAMPLE_RATE_DEFAULT;
    }
    config.sample_rate = aaudioSampleRate;

    int32_t aaudioSamplesPerFrame = getSamplesPerFrame();

    // Direction selects the channel-mask family and the HW time offset sign.
    aaudio_direction_t direction = getDirection();
    if (direction == AAUDIO_DIRECTION_OUTPUT) {
        config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
                              ? AUDIO_CHANNEL_OUT_STEREO
                              : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
        mHardwareTimeOffsetNanos = OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at DAC later

    } else if (direction == AAUDIO_DIRECTION_INPUT) {
        config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
                              ? AUDIO_CHANNEL_IN_STEREO
                              : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
        mHardwareTimeOffsetNanos = INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at ADC earlier

    } else {
        ALOGE("openMmapStream - invalid direction = %d", direction);
        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
    }

    MmapStreamInterface::stream_direction_t streamDirection =
            (direction == AAUDIO_DIRECTION_OUTPUT)
            ? MmapStreamInterface::DIRECTION_OUTPUT
            : MmapStreamInterface::DIRECTION_INPUT;

    // Open HAL stream. Sets mMmapStream; the HAL may also rewrite `config`
    // and `deviceId` to what it actually granted.
    status_t status = MmapStreamInterface::openMmapStream(streamDirection,
                                                          &attributes,
                                                          &config,
                                                          mMmapClient,
                                                          &deviceId,
                                                          this, // callback
                                                          mMmapStream,
                                                          &mPortHandle);
    ALOGD("AAudioServiceEndpointMMAP::open() mMapClient.uid = %d, pid = %d => portHandle = %d\n",
          mMmapClient.clientUid, mMmapClient.clientPid, mPortHandle);
    if (status != OK) {
        // No HAL stream was created, so plain return (no goto error) is safe here.
        ALOGE("openMmapStream returned status %d", status);
        return AAUDIO_ERROR_UNAVAILABLE;
    }

    if (deviceId == AAUDIO_UNSPECIFIED) {
        ALOGW("AAudioServiceEndpointMMAP::open() - openMmapStream() failed to set deviceId");
    }
    setDeviceId(deviceId);

    // Create MMAP/NOIRQ buffer.
    int32_t minSizeFrames = getBufferCapacity();
    if (minSizeFrames <= 0) { // zero will get rejected
        minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
    }
    status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
    if (status != OK) {
        ALOGE("AAudioServiceEndpointMMAP::open() - createMmapBuffer() failed with status %d %s",
              status, strerror(-status));
        result = AAUDIO_ERROR_UNAVAILABLE;
        goto error;
    } else {
        ALOGD("createMmapBuffer status = %d, buffer_size = %d, burst_size %d"
              ", Sharable FD: %s",
              status,
              abs(mMmapBufferinfo.buffer_size_frames),
              mMmapBufferinfo.burst_size_frames,
              mMmapBufferinfo.buffer_size_frames < 0 ? "Yes" : "No");
    }

    setBufferCapacity(mMmapBufferinfo.buffer_size_frames);
    // The audio HAL indicates if the shared memory fd can be shared outside of audioserver
    // by returning a negative buffer size.
    if (getBufferCapacity() < 0) {
        // Exclusive mode can be used by client or service.
        setBufferCapacity(-getBufferCapacity());
    } else {
        // Exclusive mode can only be used by the service because the FD cannot be shared.
        uid_t audioServiceUid = getuid();
        if ((mMmapClient.clientUid != audioServiceUid) &&
            getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
            // Fallback is handled by caller but indicate what is possible in case
            // this is used in the future.
            setSharingMode(AAUDIO_SHARING_MODE_SHARED);
            ALOGW("AAudioServiceEndpointMMAP::open() - exclusive FD cannot be used by client");
            result = AAUDIO_ERROR_UNAVAILABLE;
            goto error;
        }
    }

    // Get information about the stream and pass it back to the caller.
    setSamplesPerFrame((direction == AAUDIO_DIRECTION_OUTPUT)
                       ? audio_channel_count_from_out_mask(config.channel_mask)
                       : audio_channel_count_from_in_mask(config.channel_mask));

    // AAudio creates a copy of this FD and retains ownership of the copy.
    // Assume that AudioFlinger will close the original shared_memory_fd.
    mAudioDataFileDescriptor.reset(dup(mMmapBufferinfo.shared_memory_fd));
    if (mAudioDataFileDescriptor.get() == -1) {
        ALOGE("AAudioServiceEndpointMMAP::open() - could not dup shared_memory_fd");
        result = AAUDIO_ERROR_INTERNAL;
        goto error;
    }
    mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
    // Record what the HAL actually granted (may differ from the request).
    setFormat(AAudioConvert_androidToAAudioDataFormat(config.format));
    setSampleRate(config.sample_rate);

    // Scale up the burst size to meet the minimum equivalent in microseconds.
    // This is to avoid waking the CPU too often when the HW burst is very small
    // or at high sample rates.  Doubling keeps the burst an exact multiple of
    // the HAL burst.
    do {
        if (burstMicros > 0) { // skip first loop
            mFramesPerBurst *= 2;
        }
        burstMicros = mFramesPerBurst * static_cast<int64_t>(1000000) / getSampleRate();
    } while (burstMicros < burstMinMicros);

    ALOGD("AAudioServiceEndpointMMAP::open() original burst = %d, minMicros = %d, to burst = %d\n",
          mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);

    ALOGD("AAudioServiceEndpointMMAP::open() actual rate = %d, channels = %d"
          ", deviceId = %d, capacity = %d\n",
          getSampleRate(), getSamplesPerFrame(), deviceId, getBufferCapacity());

    return result;

error:
    // Release the HAL stream (and anything else acquired so far).
    close();
    return result;
}

// Release the HAL MMAP stream.  Always returns AAUDIO_OK.
aaudio_result_t AAudioServiceEndpointMMAP::close() {

    if (mMmapStream != 0) {
        ALOGD("AAudioServiceEndpointMMAP::close() clear() endpoint");
        // Needs to be explicitly cleared or CTS will fail but it is not clear why.
        mMmapStream.clear();
        // Apparently the above close is asynchronous. An attempt to open a new device
        // right after a close can fail. Also some callbacks may still be in flight!
        // FIXME Make closing synchronous.
        AudioClock::sleepForNanos(100 * AAUDIO_NANOS_PER_MILLISECOND);
    }

    return AAUDIO_OK;
}

// Start the HAL stream for a service-side client.
// `stream` and `clientHandle` are unused here: the service starts on its own
// behalf using the port handle obtained from openMmapStream().
aaudio_result_t AAudioServiceEndpointMMAP::startStream(sp<AAudioServiceStreamBase> stream,
                                                       audio_port_handle_t *clientHandle) {
    // Start the client on behalf of the AAudio service.
    // Use the port handle that was provided by openMmapStream().
    return startClient(mMmapClient, &mPortHandle);
}

// Stop the HAL stream that was started by startStream().
aaudio_result_t AAudioServiceEndpointMMAP::stopStream(sp<AAudioServiceStreamBase> stream,
                                                      audio_port_handle_t clientHandle) {
    // Forget the low 32 bits of the position; the HW counter restarts at zero.
    mFramesTransferred.reset32();

    // Round 64-bit counter up to a multiple of the buffer capacity.
    // This is required because the 64-bit counter is used as an index
    // into a circular buffer and the actual HW position is reset to zero
    // when the stream is stopped.
    mFramesTransferred.roundUp64(getBufferCapacity());

    return stopClient(mPortHandle);
}

// Ask the HAL to start streaming for the given client.
// On success the HAL may write the assigned port handle into *clientHandle.
aaudio_result_t AAudioServiceEndpointMMAP::startClient(const android::AudioClient& client,
                                                       audio_port_handle_t *clientHandle) {
    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
    ALOGD("AAudioServiceEndpointMMAP::startClient(%p(uid=%d, pid=%d))",
          &client, client.clientUid, client.clientPid);
    audio_port_handle_t originalHandle = *clientHandle;
    status_t status = mMmapStream->start(client, clientHandle);
    aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
    ALOGD("AAudioServiceEndpointMMAP::startClient() , %d => %d returns %d",
          originalHandle, *clientHandle, result);
    return result;
}

// Ask the HAL to stop streaming for the given client port handle.
aaudio_result_t AAudioServiceEndpointMMAP::stopClient(audio_port_handle_t clientHandle) {
    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
    aaudio_result_t result = AAudioConvert_androidToAAudioResult(mMmapStream->stop(clientHandle));
    ALOGD("AAudioServiceEndpointMMAP::stopClient(%d) returns %d", clientHandle, result);
    return result;
}

// Get free-running DSP or DMA hardware position from the HAL.
292 aaudio_result_t AAudioServiceEndpointMMAP::getFreeRunningPosition(int64_t *positionFrames, 293 int64_t *timeNanos) { 294 struct audio_mmap_position position; 295 if (mMmapStream == nullptr) { 296 return AAUDIO_ERROR_NULL; 297 } 298 status_t status = mMmapStream->getMmapPosition(&position); 299 ALOGV("AAudioServiceEndpointMMAP::getFreeRunningPosition() status= %d, pos = %d, nanos = %lld\n", 300 status, position.position_frames, (long long) position.time_nanoseconds); 301 aaudio_result_t result = AAudioConvert_androidToAAudioResult(status); 302 if (result == AAUDIO_ERROR_UNAVAILABLE) { 303 ALOGW("sendCurrentTimestamp(): getMmapPosition() has no position data available"); 304 } else if (result != AAUDIO_OK) { 305 ALOGE("sendCurrentTimestamp(): getMmapPosition() returned status %d", status); 306 } else { 307 // Convert 32-bit position to 64-bit position. 308 mFramesTransferred.update32(position.position_frames); 309 *positionFrames = mFramesTransferred.get(); 310 *timeNanos = position.time_nanoseconds; 311 } 312 return result; 313 } 314 315 aaudio_result_t AAudioServiceEndpointMMAP::getTimestamp(int64_t *positionFrames, 316 int64_t *timeNanos) { 317 return 0; // TODO 318 } 319 320 321 void AAudioServiceEndpointMMAP::onTearDown() { 322 ALOGD("AAudioServiceEndpointMMAP::onTearDown() called"); 323 disconnectRegisteredStreams(); 324 }; 325 326 void AAudioServiceEndpointMMAP::onVolumeChanged(audio_channel_mask_t channels, 327 android::Vector<float> values) { 328 // TODO do we really need a different volume for each channel? 
329 float volume = values[0]; 330 ALOGD("AAudioServiceEndpointMMAP::onVolumeChanged() volume[0] = %f", volume); 331 std::lock_guard<std::mutex> lock(mLockStreams); 332 for(const auto stream : mRegisteredStreams) { 333 stream->onVolumeChanged(volume); 334 } 335 }; 336 337 void AAudioServiceEndpointMMAP::onRoutingChanged(audio_port_handle_t deviceId) { 338 ALOGD("AAudioServiceEndpointMMAP::onRoutingChanged() called with %d, old = %d", 339 deviceId, getDeviceId()); 340 if (getDeviceId() != AUDIO_PORT_HANDLE_NONE && getDeviceId() != deviceId) { 341 disconnectRegisteredStreams(); 342 } 343 setDeviceId(deviceId); 344 }; 345 346 /** 347 * Get an immutable description of the data queue from the HAL. 348 */ 349 aaudio_result_t AAudioServiceEndpointMMAP::getDownDataDescription(AudioEndpointParcelable &parcelable) 350 { 351 // Gather information on the data queue based on HAL info. 352 int32_t bytesPerFrame = calculateBytesPerFrame(); 353 int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame; 354 int fdIndex = parcelable.addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes); 355 parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes); 356 parcelable.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame); 357 parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst); 358 parcelable.mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity()); 359 return AAUDIO_OK; 360 } 361