/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

package org.webrtc.voiceengine;

import java.nio.ByteBuffer;
import java.util.concurrent.TimeUnit;

import android.content.Context;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder.AudioSource;
import android.os.Process;

import org.webrtc.Logging;

class WebRtcAudioRecord {
  private static final boolean DEBUG = false;

  private static final String TAG = "WebRtcAudioRecord";

  // Default audio data format is PCM 16 bit per sample.
  // Guaranteed to be supported by all devices.
  private static final int BITS_PER_SAMPLE = 16;

  // Requested size of each recorded buffer provided to the client.
  private static final int CALLBACK_BUFFER_SIZE_MS = 10;

  // Average number of callbacks per second.
  private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;

  // We ask for a native buffer size of BUFFER_SIZE_FACTOR * (minimum required
  // buffer size). The extra space is allocated to guard against glitches under
  // high load.
  private static final int BUFFER_SIZE_FACTOR = 2;
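  // Worked example (illustrative values, not from the original source): for a
  // 16000 Hz mono stream, each 10 ms callback buffer holds
  // 16000 / BUFFERS_PER_SECOND = 160 frames, i.e.
  // 160 * (BITS_PER_SAMPLE / 8) = 320 bytes.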

  private final long nativeAudioRecord;
  private final Context context;

  private WebRtcAudioEffects effects = null;

  private ByteBuffer byteBuffer;

  private AudioRecord audioRecord = null;
  private AudioRecordThread audioThread = null;

  /**
   * Audio thread which keeps calling AudioRecord.read() waiting for audio
   * to be recorded. Feeds recorded data to the native counterpart as a
   * periodic sequence of callbacks using nativeDataIsRecorded().
   * This thread uses a Process.THREAD_PRIORITY_URGENT_AUDIO priority.
   */
  private class AudioRecordThread extends Thread {
    private volatile boolean keepAlive = true;

    public AudioRecordThread(String name) {
      super(name);
    }

    @Override
    public void run() {
      Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
      Logging.d(TAG, "AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
      assertTrue(audioRecord.getRecordingState()
          == AudioRecord.RECORDSTATE_RECORDING);

      long lastTime = System.nanoTime();
      while (keepAlive) {
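        // Note (added comment): this overload of AudioRecord.read() blocks
        // until byteBuffer.capacity() bytes have been delivered or an error
        // occurs, so the loop naturally runs once per 10 ms buffer.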
        int bytesRead = audioRecord.read(byteBuffer, byteBuffer.capacity());
        if (bytesRead == byteBuffer.capacity()) {
          nativeDataIsRecorded(bytesRead, nativeAudioRecord);
        } else {
          Logging.e(TAG, "AudioRecord.read failed: " + bytesRead);
          if (bytesRead == AudioRecord.ERROR_INVALID_OPERATION) {
            keepAlive = false;
          }
        }
        if (DEBUG) {
          long nowTime = System.nanoTime();
          long durationInMs =
              TimeUnit.NANOSECONDS.toMillis((nowTime - lastTime));
          lastTime = nowTime;
          Logging.d(TAG, "bytesRead[" + durationInMs + "] " + bytesRead);
        }
      }

      try {
        audioRecord.stop();
      } catch (IllegalStateException e) {
        Logging.e(TAG, "AudioRecord.stop failed: " + e.getMessage());
      }
    }

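    // Stops the run() loop and blocks until the thread has exited.
    // InterruptedException is deliberately swallowed so that join() is
    // retried until the thread is really gone.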
    public void joinThread() {
      keepAlive = false;
      while (isAlive()) {
        try {
          join();
        } catch (InterruptedException e) {
          // Ignore.
        }
      }
    }
  }

  WebRtcAudioRecord(Context context, long nativeAudioRecord) {
    Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
    this.context = context;
    this.nativeAudioRecord = nativeAudioRecord;
    if (DEBUG) {
      WebRtcAudioUtils.logDeviceInfo(TAG);
    }
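    // WebRtcAudioEffects.create() returns null when the built-in effects are
    // not supported on this platform; the enableBuiltIn* methods below check
    // for null before use.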
    effects = WebRtcAudioEffects.create();
  }

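  // Note (added comment): the private methods below are invoked from this
  // class's native counterpart over JNI rather than from Java code.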
  private boolean enableBuiltInAEC(boolean enable) {
    Logging.d(TAG, "enableBuiltInAEC(" + enable + ')');
    if (effects == null) {
      Logging.e(TAG, "Built-in AEC is not supported on this platform");
      return false;
    }
    return effects.setAEC(enable);
  }

  private boolean enableBuiltInAGC(boolean enable) {
    Logging.d(TAG, "enableBuiltInAGC(" + enable + ')');
    if (effects == null) {
      Logging.e(TAG, "Built-in AGC is not supported on this platform");
      return false;
    }
    return effects.setAGC(enable);
  }

  private boolean enableBuiltInNS(boolean enable) {
    Logging.d(TAG, "enableBuiltInNS(" + enable + ')');
    if (effects == null) {
      Logging.e(TAG, "Built-in NS is not supported on this platform");
      return false;
    }
    return effects.setNS(enable);
  }

  private int initRecording(int sampleRate, int channels) {
    Logging.d(TAG, "initRecording(sampleRate=" + sampleRate + ", channels=" +
        channels + ")");
    if (!WebRtcAudioUtils.hasPermission(
        context, android.Manifest.permission.RECORD_AUDIO)) {
      Logging.e(TAG, "RECORD_AUDIO permission is missing");
      return -1;
    }
    if (audioRecord != null) {
      Logging.e(TAG, "InitRecording() called twice without StopRecording()");
      return -1;
    }
    final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
    final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
    byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
    Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
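    // Note (added comment): a direct buffer is required here, since the
    // native side accesses the samples in place through the address cached
    // below, which only works for direct ByteBuffers.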
    // Rather than passing the ByteBuffer with every callback (requiring
    // the potentially expensive GetDirectBufferAddress) we simply have the
    // native class cache the address to the memory once.
    nativeCacheDirectBufferAddress(byteBuffer, nativeAudioRecord);

    // Get the minimum buffer size required for the successful creation of
    // an AudioRecord object, in byte units.
    // Note that this size doesn't guarantee a smooth recording under load.
    int minBufferSize = AudioRecord.getMinBufferSize(
          sampleRate,
          AudioFormat.CHANNEL_IN_MONO,
          AudioFormat.ENCODING_PCM_16BIT);
    if (minBufferSize == AudioRecord.ERROR
        || minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
      Logging.e(TAG, "AudioRecord.getMinBufferSize failed: " + minBufferSize);
      return -1;
    }
    Logging.d(TAG, "AudioRecord.getMinBufferSize: " + minBufferSize);

    // Use a larger buffer size than the minimum required when creating the
    // AudioRecord instance to ensure smooth recording under load. It has been
    // verified that it does not increase the actual recording latency.
    int bufferSizeInBytes =
        Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
    Logging.d(TAG, "bufferSizeInBytes: " + bufferSizeInBytes);
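    // Worked example (illustrative values, not from the original source): if
    // getMinBufferSize() returned 1280 bytes and byteBuffer.capacity() is 320
    // (16000 Hz mono), this yields max(2 * 1280, 320) = 2560 bytes, enough
    // room for eight 10 ms callback buffers.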
    try {
      audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
                                    sampleRate,
                                    AudioFormat.CHANNEL_IN_MONO,
                                    AudioFormat.ENCODING_PCM_16BIT,
                                    bufferSizeInBytes);
    } catch (IllegalArgumentException e) {
      Logging.e(TAG, e.getMessage());
      return -1;
    }
    if (audioRecord == null ||
        audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
      Logging.e(TAG, "Failed to create a new AudioRecord instance");
      return -1;
    }
    Logging.d(TAG, "AudioRecord "
        + "session ID: " + audioRecord.getAudioSessionId() + ", "
        + "audio format: " + audioRecord.getAudioFormat() + ", "
        + "channels: " + audioRecord.getChannelCount() + ", "
        + "sample rate: " + audioRecord.getSampleRate());
    if (effects != null) {
      effects.enable(audioRecord.getAudioSessionId());
    }
    // TODO(phoglund): put back audioRecord.getBufferSizeInFrames when
    // all known downstream users support M.
    // if (WebRtcAudioUtils.runningOnMOrHigher()) {
      // Returns the frame count of the native AudioRecord buffer. This is
      // greater than or equal to the bufferSizeInBytes converted to frame
      // units. The native frame count may be enlarged to accommodate the
      // requirements of the source on creation or if the AudioRecord is
      // subsequently rerouted.

      // Logging.d(TAG, "bufferSizeInFrames: "
      //     + audioRecord.getBufferSizeInFrames());
    //}
    return framesPerBuffer;
  }

  private boolean startRecording() {
    Logging.d(TAG, "startRecording");
    assertTrue(audioRecord != null);
    assertTrue(audioThread == null);
    try {
      audioRecord.startRecording();
    } catch (IllegalStateException e) {
      Logging.e(TAG, "AudioRecord.startRecording failed: " + e.getMessage());
      return false;
    }
    if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
      Logging.e(TAG, "AudioRecord.startRecording failed");
      return false;
    }
    audioThread = new AudioRecordThread("AudioRecordJavaThread");
    audioThread.start();
    return true;
  }

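  // Note (added comment): the audio thread is joined before the AudioRecord
  // is released, so no read() can race with release() below.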
  private boolean stopRecording() {
    Logging.d(TAG, "stopRecording");
    assertTrue(audioThread != null);
    audioThread.joinThread();
    audioThread = null;
    if (effects != null) {
      effects.release();
    }
    audioRecord.release();
    audioRecord = null;
    return true;
  }

  // Helper method which throws an AssertionError when an assertion has
  // failed.
  private static void assertTrue(boolean condition) {
    if (!condition) {
      throw new AssertionError("Expected condition to be true");
    }
  }

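  // The native methods below are presumably implemented by this class's C++
  // counterpart in WebRTC's JNI layer (the "native counterpart" referenced
  // in the comments above).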
  private native void nativeCacheDirectBufferAddress(
      ByteBuffer byteBuffer, long nativeAudioRecord);

  private native void nativeDataIsRecorded(int bytes, long nativeAudioRecord);
}