/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

package org.webrtc.voiceengine;

import android.annotation.TargetApi;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioTrack;
import android.os.Build;

import org.webrtc.Logging;

import java.lang.Math;

// WebRtcAudioManager handles tasks that use android.media.AudioManager.
// At construction, storeAudioParameters() is called and it retrieves
// fundamental audio parameters like native sample rate and number of channels.
// The result is then provided to the caller by nativeCacheAudioParameters().
// It is also possible to call init() to set up the audio environment for best
// possible "VoIP performance". All settings done in init() are reverted by
// dispose(). This class can also be used without calling init() if the user
// prefers to set up the audio environment separately. However, it is
// recommended to always use AudioManager.MODE_IN_COMMUNICATION.
// This class also adds support for output volume control of the
// STREAM_VOICE_CALL-type stream.
37 public class WebRtcAudioManager { 38 private static final boolean DEBUG = false; 39 40 private static final String TAG = "WebRtcAudioManager"; 41 42 private static boolean blacklistDeviceForOpenSLESUsage = false; 43 private static boolean blacklistDeviceForOpenSLESUsageIsOverridden = false; 44 45 // Call this method to override the deault list of blacklisted devices 46 // specified in WebRtcAudioUtils.BLACKLISTED_OPEN_SL_ES_MODELS. 47 // Allows an app to take control over which devices to exlude from using 48 // the OpenSL ES audio output path 49 public static synchronized void setBlacklistDeviceForOpenSLESUsage( 50 boolean enable) { 51 blacklistDeviceForOpenSLESUsageIsOverridden = true; 52 blacklistDeviceForOpenSLESUsage = enable; 53 } 54 55 // Default audio data format is PCM 16 bit per sample. 56 // Guaranteed to be supported by all devices. 57 private static final int BITS_PER_SAMPLE = 16; 58 59 private static final int DEFAULT_FRAME_PER_BUFFER = 256; 60 61 // TODO(henrika): add stereo support for playout. 62 private static final int CHANNELS = 1; 63 64 // List of possible audio modes. 
65 private static final String[] AUDIO_MODES = new String[] { 66 "MODE_NORMAL", 67 "MODE_RINGTONE", 68 "MODE_IN_CALL", 69 "MODE_IN_COMMUNICATION", 70 }; 71 72 private final long nativeAudioManager; 73 private final Context context; 74 private final AudioManager audioManager; 75 76 private boolean initialized = false; 77 private int nativeSampleRate; 78 private int nativeChannels; 79 80 private boolean hardwareAEC; 81 private boolean hardwareAGC; 82 private boolean hardwareNS; 83 private boolean lowLatencyOutput; 84 private int sampleRate; 85 private int channels; 86 private int outputBufferSize; 87 private int inputBufferSize; 88 89 WebRtcAudioManager(Context context, long nativeAudioManager) { 90 Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo()); 91 this.context = context; 92 this.nativeAudioManager = nativeAudioManager; 93 audioManager = (AudioManager) context.getSystemService( 94 Context.AUDIO_SERVICE); 95 if (DEBUG) { 96 WebRtcAudioUtils.logDeviceInfo(TAG); 97 } 98 storeAudioParameters(); 99 nativeCacheAudioParameters( 100 sampleRate, channels, hardwareAEC, hardwareAGC, hardwareNS, 101 lowLatencyOutput, outputBufferSize, inputBufferSize, 102 nativeAudioManager); 103 } 104 105 private boolean init() { 106 Logging.d(TAG, "init" + WebRtcAudioUtils.getThreadInfo()); 107 if (initialized) { 108 return true; 109 } 110 Logging.d(TAG, "audio mode is: " + AUDIO_MODES[audioManager.getMode()]); 111 initialized = true; 112 return true; 113 } 114 115 private void dispose() { 116 Logging.d(TAG, "dispose" + WebRtcAudioUtils.getThreadInfo()); 117 if (!initialized) { 118 return; 119 } 120 } 121 122 private boolean isCommunicationModeEnabled() { 123 return (audioManager.getMode() == AudioManager.MODE_IN_COMMUNICATION); 124 } 125 126 private boolean isDeviceBlacklistedForOpenSLESUsage() { 127 boolean blacklisted = blacklistDeviceForOpenSLESUsageIsOverridden ? 
128 blacklistDeviceForOpenSLESUsage : 129 WebRtcAudioUtils.deviceIsBlacklistedForOpenSLESUsage(); 130 if (blacklisted) { 131 Logging.e(TAG, Build.MODEL + " is blacklisted for OpenSL ES usage!"); 132 } 133 return blacklisted; 134 } 135 136 private void storeAudioParameters() { 137 // Only mono is supported currently (in both directions). 138 // TODO(henrika): add support for stereo playout. 139 channels = CHANNELS; 140 sampleRate = getNativeOutputSampleRate(); 141 hardwareAEC = isAcousticEchoCancelerSupported(); 142 hardwareAGC = isAutomaticGainControlSupported(); 143 hardwareNS = isNoiseSuppressorSupported(); 144 lowLatencyOutput = isLowLatencyOutputSupported(); 145 outputBufferSize = lowLatencyOutput ? 146 getLowLatencyOutputFramesPerBuffer() : 147 getMinOutputFrameSize(sampleRate, channels); 148 // TODO(henrika): add support for low-latency input. 149 inputBufferSize = getMinInputFrameSize(sampleRate, channels); 150 } 151 152 // Gets the current earpiece state. 153 private boolean hasEarpiece() { 154 return context.getPackageManager().hasSystemFeature( 155 PackageManager.FEATURE_TELEPHONY); 156 } 157 158 // Returns true if low-latency audio output is supported. 159 private boolean isLowLatencyOutputSupported() { 160 return isOpenSLESSupported() && 161 context.getPackageManager().hasSystemFeature( 162 PackageManager.FEATURE_AUDIO_LOW_LATENCY); 163 } 164 165 // Returns true if low-latency audio input is supported. 166 public boolean isLowLatencyInputSupported() { 167 // TODO(henrika): investigate if some sort of device list is needed here 168 // as well. The NDK doc states that: "As of API level 21, lower latency 169 // audio input is supported on select devices. To take advantage of this 170 // feature, first confirm that lower latency output is available". 171 return WebRtcAudioUtils.runningOnLollipopOrHigher() && 172 isLowLatencyOutputSupported(); 173 } 174 175 // Returns the native output sample rate for this device's output stream. 
176 private int getNativeOutputSampleRate() { 177 // Override this if we're running on an old emulator image which only 178 // supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE. 179 if (WebRtcAudioUtils.runningOnEmulator()) { 180 Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz."); 181 return 8000; 182 } 183 // Default can be overriden by WebRtcAudioUtils.setDefaultSampleRateHz(). 184 // If so, use that value and return here. 185 if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) { 186 Logging.d(TAG, "Default sample rate is overriden to " + 187 WebRtcAudioUtils.getDefaultSampleRateHz() + " Hz"); 188 return WebRtcAudioUtils.getDefaultSampleRateHz(); 189 } 190 // No overrides available. Deliver best possible estimate based on default 191 // Android AudioManager APIs. 192 final int sampleRateHz; 193 if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) { 194 sampleRateHz = getSampleRateOnJellyBeanMR10OrHigher(); 195 } else { 196 sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz(); 197 } 198 Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz"); 199 return sampleRateHz; 200 } 201 202 @TargetApi(17) 203 private int getSampleRateOnJellyBeanMR10OrHigher() { 204 String sampleRateString = audioManager.getProperty( 205 AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE); 206 return (sampleRateString == null) 207 ? WebRtcAudioUtils.getDefaultSampleRateHz() 208 : Integer.parseInt(sampleRateString); 209 } 210 211 // Returns the native output buffer size for low-latency output streams. 212 @TargetApi(17) 213 private int getLowLatencyOutputFramesPerBuffer() { 214 assertTrue(isLowLatencyOutputSupported()); 215 if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) { 216 return DEFAULT_FRAME_PER_BUFFER; 217 } 218 String framesPerBuffer = audioManager.getProperty( 219 AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER); 220 return framesPerBuffer == null ? 
221 DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer); 222 } 223 224 // Returns true if the device supports an audio effect (AEC, AGC or NS). 225 // Four conditions must be fulfilled if functions are to return true: 226 // 1) the platform must support the built-in (HW) effect, 227 // 2) explicit use (override) of a WebRTC based version must not be set, 228 // 3) the device must not be blacklisted for use of the effect, and 229 // 4) the UUID of the effect must be approved (some UUIDs can be excluded). 230 private static boolean isAcousticEchoCancelerSupported() { 231 return WebRtcAudioEffects.canUseAcousticEchoCanceler(); 232 } 233 private static boolean isAutomaticGainControlSupported() { 234 return WebRtcAudioEffects.canUseAutomaticGainControl(); 235 } 236 private static boolean isNoiseSuppressorSupported() { 237 return WebRtcAudioEffects.canUseNoiseSuppressor(); 238 } 239 240 // Returns the minimum output buffer size for Java based audio (AudioTrack). 241 // This size can also be used for OpenSL ES implementations on devices that 242 // lacks support of low-latency output. 243 private static int getMinOutputFrameSize(int sampleRateInHz, int numChannels) { 244 final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8); 245 final int channelConfig; 246 if (numChannels == 1) { 247 channelConfig = AudioFormat.CHANNEL_OUT_MONO; 248 } else if (numChannels == 2) { 249 channelConfig = AudioFormat.CHANNEL_OUT_STEREO; 250 } else { 251 return -1; 252 } 253 return AudioTrack.getMinBufferSize( 254 sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT) / 255 bytesPerFrame; 256 } 257 258 // Returns the native input buffer size for input streams. 259 private int getLowLatencyInputFramesPerBuffer() { 260 assertTrue(isLowLatencyInputSupported()); 261 return getLowLatencyOutputFramesPerBuffer(); 262 } 263 264 // Returns the minimum input buffer size for Java based audio (AudioRecord). 
265 // This size can calso be used for OpenSL ES implementations on devices that 266 // lacks support of low-latency input. 267 private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) { 268 final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8); 269 assertTrue(numChannels == CHANNELS); 270 return AudioRecord.getMinBufferSize(sampleRateInHz, 271 AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT) / 272 bytesPerFrame; 273 } 274 275 // Returns true if OpenSL ES audio is supported. 276 private static boolean isOpenSLESSupported() { 277 // Check for API level 9 or higher, to confirm use of OpenSL ES. 278 return WebRtcAudioUtils.runningOnGingerBreadOrHigher(); 279 } 280 281 // Helper method which throws an exception when an assertion has failed. 282 private static void assertTrue(boolean condition) { 283 if (!condition) { 284 throw new AssertionError("Expected condition to be true"); 285 } 286 } 287 288 private native void nativeCacheAudioParameters( 289 int sampleRate, int channels, boolean hardwareAEC, boolean hardwareAGC, 290 boolean hardwareNS, boolean lowLatencyOutput, int outputBufferSize, 291 int inputBufferSize, long nativeAudioManager); 292 } 293