/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.media.cts;

import android.app.ActivityManager;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.AudioRecord.OnRecordPositionUpdateListener;
import android.media.AudioTimestamp;
import android.media.AudioTrack;
import android.media.MediaRecorder;
import android.media.MediaSyncEvent;
import android.media.MicrophoneInfo;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.os.SystemClock;
import android.platform.test.annotations.Presubmit;
import android.support.test.InstrumentationRegistry;
import android.support.test.runner.AndroidJUnit4;
import android.util.Log;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
// NOTE(review): assertThrows comes from TestNG here (JUnit 4 in this codebase may not
// provide it); it is intentionally mixed in with the JUnit static imports.
import static org.testng.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import com.android.compatibility.common.util.DeviceReportLog;
import com.android.compatibility.common.util.ResultType;
import com.android.compatibility.common.util.ResultUnit;
import com.android.compatibility.common.util.SystemUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ShortBuffer;
import java.util.ArrayList;
import java.util.List;

/**
 * CTS tests for {@link android.media.AudioRecord}: construction (direct and via
 * {@link AudioRecord.Builder}), property getters, position-update callbacks,
 * read() over byte/short/ByteBuffer transfers, timestamps, UID-idle silencing,
 * MediaSyncEvent-synchronized recording, restricted audio sources, and active
 * microphone enumeration.
 *
 * <p>Every test is a no-op on devices without a microphone (see hasMicrophone(),
 * defined elsewhere in this file). Several tests delegate to a parameterized
 * doTest(...) helper that is also defined elsewhere in this file.
 */
@RunWith(AndroidJUnit4.class)
public class AudioRecordTest {
    private final static String TAG = "AudioRecordTest";
    private static final String REPORT_LOG_NAME = "CtsMediaTestCases";
    // Record instance created in setUp() on a dedicated looper thread; null-checked there.
    private AudioRecord mAudioRecord;
    // Sample rate (Hz) used for the shared mAudioRecord instance.
    private int mHz = 44100;
    // Flags set by position-update callbacks / handler; cleared via reset().
    private boolean mIsOnMarkerReachedCalled;
    private boolean mIsOnPeriodicNotificationCalled;
    private boolean mIsHandleMessageCalled;
    // Looper of the thread that owns mAudioRecord; quit in tearDown().
    private Looper mLooper;
    // For doTest
    private int mMarkerPeriodInFrames;
    private int mMarkerPosition;
    // Main-looper handler used to observe whether handleMessage() is invoked when a
    // custom handler is passed to setRecordPositionUpdateListener().
    private Handler mHandler = new Handler(Looper.getMainLooper()) {
        @Override
        public void handleMessage(Message msg) {
            mIsHandleMessageCalled = true;
            super.handleMessage(msg);
        }
    };

    /**
     * Creates the shared AudioRecord on a dedicated thread that runs its own looper,
     * so position-update callbacks have a live message queue to be delivered on.
     */
    @Before
    public void setUp() throws Exception {
        if (!hasMicrophone()) {
            return;
        }

        /*
         * InstrumentationTestRunner.onStart() calls Looper.prepare(), which creates a looper
         * for the current thread. However, since we don't actually call loop() in the test,
         * any messages queued with that looper will never be consumed. Therefore, we must
         * create the instance in another thread, either without a looper, so the main looper is
         * used, or with an active looper.
         */
        Thread t = new Thread() {
            @Override
            public void run() {
                Looper.prepare();
                mLooper = Looper.myLooper();
                // Synchronize on the Thread object itself: the main thread holds this
                // lock while starting us and releases it only inside t.wait(), so the
                // notify() below cannot be lost.
                synchronized(this) {
                    // Buffer is 10x the minimum so slow test loops don't overrun.
                    mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.DEFAULT, mHz,
                            AudioFormat.CHANNEL_CONFIGURATION_MONO,
                            AudioFormat.ENCODING_PCM_16BIT,
                            AudioRecord.getMinBufferSize(mHz,
                                    AudioFormat.CHANNEL_CONFIGURATION_MONO,
                                    AudioFormat.ENCODING_PCM_16BIT) * 10);
                    this.notify();
                }
                Looper.loop();
            }
        };
        synchronized(t) {
            t.start(); // will block until we wait
            t.wait();
        }
        assertNotNull(mAudioRecord);
    }

    /** Releases the shared AudioRecord and stops its looper thread. */
    @After
    public void tearDown() throws Exception {
        if (hasMicrophone()) {
            mAudioRecord.release();
            mLooper.quit();
        }
    }

    /** Clears the callback-observation flags between recording passes. */
    private void reset() {
        mIsOnMarkerReachedCalled = false;
        mIsOnPeriodicNotificationCalled = false;
        mIsHandleMessageCalled = false;
    }

    /** Verifies the getters of the setUp()-created record match its construction args. */
    @Test
    public void testAudioRecordProperties() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        assertEquals(AudioFormat.ENCODING_PCM_16BIT, mAudioRecord.getAudioFormat());
        assertEquals(MediaRecorder.AudioSource.DEFAULT, mAudioRecord.getAudioSource());
        assertEquals(1, mAudioRecord.getChannelCount());
        // CHANNEL_CONFIGURATION_MONO (deprecated) must report as CHANNEL_IN_MONO.
        assertEquals(AudioFormat.CHANNEL_IN_MONO,
                mAudioRecord.getChannelConfiguration());
        assertEquals(AudioRecord.STATE_INITIALIZED, mAudioRecord.getState());
        assertEquals(mHz, mAudioRecord.getSampleRate());
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());

        int bufferSize = AudioRecord.getMinBufferSize(mHz,
                AudioFormat.CHANNEL_CONFIGURATION_DEFAULT, AudioFormat.ENCODING_PCM_16BIT);
        assertTrue(bufferSize > 0);
    }

    /**
     * Records ~10s four times (byte[], short[], direct ByteBuffer, and byte[] with a
     * custom callback handler) and checks that marker and periodic notifications fire
     * for each transfer mode, and that release() leaves the record uninitialized.
     */
    @Test
    public void testAudioRecordOP() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        final int SLEEP_TIME = 10;
        final int RECORD_TIME = 10000;
        assertEquals(AudioRecord.STATE_INITIALIZED, mAudioRecord.getState());

        // Marker at 0.5s and period of 1s (both expressed in frames) guarantee both
        // callbacks fire well within the 10s record window.
        int markerInFrames = mAudioRecord.getSampleRate() / 2;
        assertEquals(AudioRecord.SUCCESS,
                mAudioRecord.setNotificationMarkerPosition(markerInFrames));
        assertEquals(markerInFrames, mAudioRecord.getNotificationMarkerPosition());
        int periodInFrames = mAudioRecord.getSampleRate();
        assertEquals(AudioRecord.SUCCESS,
                mAudioRecord.setPositionNotificationPeriod(periodInFrames));
        assertEquals(periodInFrames, mAudioRecord.getPositionNotificationPeriod());
        OnRecordPositionUpdateListener listener = new OnRecordPositionUpdateListener() {

            public void onMarkerReached(AudioRecord recorder) {
                mIsOnMarkerReachedCalled = true;
            }

            public void onPeriodicNotification(AudioRecord recorder) {
                mIsOnPeriodicNotificationCalled = true;
            }
        };
        mAudioRecord.setRecordPositionUpdateListener(listener);

        // use byte array as buffer
        final int BUFFER_SIZE = 102400;
        byte[] byteData = new byte[BUFFER_SIZE];
        long time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use short array as buffer
        short[] shortData = new short[BUFFER_SIZE];
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(shortData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use ByteBuffer as buffer
        ByteBuffer byteBuffer = ByteBuffer.allocateDirect(BUFFER_SIZE);
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteBuffer, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use handler
        final Handler handler = new Handler(Looper.getMainLooper()) {
            @Override
            public void handleMessage(Message msg) {
                mIsHandleMessageCalled = true;
                super.handleMessage(msg);
            }
        };

        mAudioRecord.setRecordPositionUpdateListener(listener, handler);
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        // The handler argument is only ever used for getting the associated Looper
        assertFalse(mIsHandleMessageCalled);

        mAudioRecord.release();
        assertEquals(AudioRecord.STATE_UNINITIALIZED, mAudioRecord.getState());
    }

    // The doTest(...) variants below exercise combinations of transfer buffer type,
    // blocking mode, callback handler, audit recording, channel index masks, sample
    // rates (including ones that force resampling), and PCM encodings. doTest itself
    // is defined elsewhere in this file.

    @Test
    public void testAudioRecordResamplerMono8Bit() throws Exception {
        doTest("resampler_mono_8bit", true /*localRecord*/, false /*customHandler*/,
                1 /*periodsPerSecond*/, 1 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 88200 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_8BIT);
    }

    @Test
    public void testAudioRecordResamplerStereo8Bit() throws Exception {
        doTest("resampler_stereo_8bit", true /*localRecord*/, false /*customHandler*/,
                0 /*periodsPerSecond*/, 3 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 45000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_8BIT);
    }

    @Presubmit
    @Test
    public void testAudioRecordLocalMono16BitShort() throws Exception {
        doTest("local_mono_16bit_short", true /*localRecord*/, false /*customHandler*/,
                30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 8000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, 500 /*TEST_TIME_MS*/);
    }

    @Test
    public void testAudioRecordLocalMono16Bit() throws Exception {
        doTest("local_mono_16bit", true /*localRecord*/, false /*customHandler*/,
                30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 8000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    }

    @Test
    public void testAudioRecordStereo16Bit() throws Exception {
        doTest("stereo_16bit", false /*localRecord*/, false /*customHandler*/,
                2 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 17000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_16BIT);
    }

    @Test
    public void testAudioRecordMonoFloat() throws Exception {
        doTest("mono_float", false /*localRecord*/, true /*customHandler*/,
                30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, true /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 32000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    @Test
    public void testAudioRecordLocalNonblockingStereoFloat() throws Exception {
        doTest("local_nonblocking_stereo_float", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                false /*auditRecording*/, false /*isChannelIndex*/, 48000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    // Audit modes work best with non-blocking mode
    @Test
    public void testAudioRecordAuditByteBufferResamplerStereoFloat() throws Exception {
        if (isLowRamDevice()) {
            return; // skip. FIXME: reenable when AF memory allocation is updated.
        }
        doTest("audit_byte_buffer_resampler_stereo_float",
                false /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                true /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, false /*isChannelIndex*/, 96000 /*TEST_SR*/,
                AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    }

    @Test
    public void testAudioRecordAuditChannelIndexMonoFloat() throws Exception {
        doTest("audit_channel_index_mono_float", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 47000 /*TEST_SR*/,
                (1 << 0) /* 1 channel */, AudioFormat.ENCODING_PCM_FLOAT);
    }

    // Audit buffers can run out of space with high sample rate,
    // so keep the channels and pcm encoding low
    @Test
    public void testAudioRecordAuditChannelIndex2() throws Exception {
        if (isLowRamDevice()) {
            return; // skip. FIXME: reenable when AF memory allocation is updated.
        }
        doTest("audit_channel_index_2", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 192000 /*TEST_SR*/,
                (1 << 0) | (1 << 2) /* 2 channels, gap in middle */,
                AudioFormat.ENCODING_PCM_8BIT);
    }

    // Audit buffers can run out of space with high numbers of channels,
    // so keep the sample rate low.
    @Test
    public void testAudioRecordAuditChannelIndex5() throws Exception {
        doTest("audit_channel_index_5", true /*localRecord*/, true /*customHandler*/,
                2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
                false /*useByteBuffer*/, false /*blocking*/,
                true /*auditRecording*/, true /*isChannelIndex*/, 16000 /*TEST_SR*/,
                (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4) /* 5 channels */,
                AudioFormat.ENCODING_PCM_16BIT);
    }

    // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord built with
    // an empty Builder matches the documentation / expected values
    @Test
    public void testAudioRecordBuilderDefault() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBuilderDefault";
        // expected values below match the AudioRecord.Builder documentation
        final int expectedCapturePreset = MediaRecorder.AudioSource.DEFAULT;
        final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
        final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
        final int expectedState = AudioRecord.STATE_INITIALIZED;
        // use builder with default values
        final AudioRecord rec = new AudioRecord.Builder().build();
        // save results
        final int observedSource = rec.getAudioSource();
        final int observedChannel = rec.getChannelConfiguration();
        final int observedEncoding = rec.getAudioFormat();
        final int observedState = rec.getState();
        // release recorder before the test exits (either successfully or with an exception)
        rec.release();
        // compare results
        assertEquals(TEST_NAME + ": default capture preset", expectedCapturePreset, observedSource);
        assertEquals(TEST_NAME + ": default channel config", expectedChannel, observedChannel);
        assertEquals(TEST_NAME + ": default encoding", expectedEncoding, observedEncoding);
        assertEquals(TEST_NAME + ": state", expectedState, observedState);
    }

    // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord built with
    // an incomplete AudioFormat matches the documentation / expected values
    @Test
    public void testAudioRecordBuilderPartialFormat() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBuilderPartialFormat";
        final int expectedRate = 16000;
        final int expectedState = AudioRecord.STATE_INITIALIZED;
        // expected values below match the AudioRecord.Builder documentation
        final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
        final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
        // use builder with a partial audio format
        final AudioRecord rec = new AudioRecord.Builder()
                .setAudioFormat(new AudioFormat.Builder().setSampleRate(expectedRate).build())
                .build();
        // save results
        final int observedRate = rec.getSampleRate();
        final int observedChannel = rec.getChannelConfiguration();
        final int observedEncoding = rec.getAudioFormat();
        final int observedState = rec.getState();
        // release recorder before the test exits (either successfully or with an exception)
        rec.release();
        // compare results
        assertEquals(TEST_NAME + ": configured rate", expectedRate, observedRate);
        assertEquals(TEST_NAME + ": default channel config", expectedChannel, observedChannel);
        assertEquals(TEST_NAME + ": default encoding", expectedEncoding, observedEncoding);
        assertEquals(TEST_NAME + ": state", expectedState, observedState);
    }

    // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord matches
    // the parameters used in the builder
    @Test
    public void testAudioRecordBuilderParams() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBuilderParams";
        final int expectedRate = 8000;
        final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
        final int expectedChannelCount = 1;
        final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
        final int expectedSource = MediaRecorder.AudioSource.VOICE_COMMUNICATION;
        final int expectedState = AudioRecord.STATE_INITIALIZED;
        // use builder with expected parameters
        final AudioRecord rec = new AudioRecord.Builder()
                .setAudioFormat(new AudioFormat.Builder()
                        .setSampleRate(expectedRate)
                        .setChannelMask(expectedChannel)
                        .setEncoding(expectedEncoding)
                        .build())
                .setAudioSource(expectedSource)
                .build();
        // save results
        final int observedRate = rec.getSampleRate();
        final int observedChannel = rec.getChannelConfiguration();
        final int observedChannelCount = rec.getChannelCount();
        final int observedEncoding = rec.getAudioFormat();
        final int observedSource = rec.getAudioSource();
        final int observedState = rec.getState();
        // release recorder before the test exits (either successfully or with an exception)
        rec.release();
        // compare results
        assertEquals(TEST_NAME + ": configured rate", expectedRate, observedRate);
        assertEquals(TEST_NAME + ": configured channel config", expectedChannel, observedChannel);
        assertEquals(TEST_NAME + ": configured encoding", expectedEncoding, observedEncoding);
        assertEquals(TEST_NAME + ": implicit channel count", expectedChannelCount,
                observedChannelCount);
        assertEquals(TEST_NAME + ": configured source", expectedSource, observedSource);
        assertEquals(TEST_NAME + ": state", expectedState, observedState);
    }

    // Test AudioRecord to ensure we can build after a failure.
    @Test
    public void testAudioRecordBufferSize() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // constants for test
        final String TEST_NAME = "testAudioRecordBufferSize";

        // use builder with parameters that should fail
        final int superBigBufferSize = 1 << 28;
        try {
            final AudioRecord record = new AudioRecord.Builder()
                    .setBufferSizeInBytes(superBigBufferSize)
                    .build();
            record.release();
            fail(TEST_NAME + ": should throw exception on failure");
        } catch (UnsupportedOperationException e) {
            ; // expected: oversized buffer must be rejected by build()
        }

        // we should be able to create again with minimum buffer size
        final int verySmallBufferSize = 2 * 3 * 4; // frame size multiples
        final AudioRecord record2 = new AudioRecord.Builder()
                .setBufferSizeInBytes(verySmallBufferSize)
                .build();

        final int observedState2 = record2.getState();
        final int observedBufferSize2 = record2.getBufferSizeInFrames();
        record2.release();

        // succeeds for minimum buffer size
        assertEquals(TEST_NAME + ": state", AudioRecord.STATE_INITIALIZED, observedState2);
        // should force the minimum size buffer which is > 0
        assertTrue(TEST_NAME + ": buffer frame count", observedBufferSize2 > 0);
    }

    /**
     * Records a known number of frames twice and checks AudioRecord.getTimestamp():
     * MONOTONIC and BOOTTIME timestamps must agree on frame position, cover at least
     * the requested duration, and be continuous between start and stop (via
     * verifyContinuousTimestamps(), defined elsewhere in this file).
     */
    @Test
    public void testTimestamp() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        final String TEST_NAME = "testTimestamp";
        AudioRecord record = null;

        try {
            final int NANOS_PER_MILLIS = 1000000;
            final long RECORD_TIME_IN_MS = 2000;
            final long RECORD_TIME_IN_NANOS = RECORD_TIME_IN_MS * NANOS_PER_MILLIS;
            final int RECORD_ENCODING = AudioFormat.ENCODING_PCM_16BIT; // fixed at this time.
            final int RECORD_CHANNEL_MASK = AudioFormat.CHANNEL_IN_STEREO;
            final int RECORD_SAMPLE_RATE = 23456; // requires resampling
            record = new AudioRecord.Builder()
                    .setAudioFormat(new AudioFormat.Builder()
                            .setSampleRate(RECORD_SAMPLE_RATE)
                            .setChannelMask(RECORD_CHANNEL_MASK)
                            .setEncoding(RECORD_ENCODING)
                            .build())
                    .build();

            // For our tests, we could set test duration by timed sleep or by # frames received.
            // Since we don't know *exactly* when AudioRecord actually begins recording,
            // we end the test by # frames read.
            final int numChannels =
                    AudioFormat.channelCountFromInChannelMask(RECORD_CHANNEL_MASK);
            final int bytesPerSample = AudioFormat.getBytesPerSample(RECORD_ENCODING);
            // NOTE(review): bytesPerFrame is computed but not used below.
            final int bytesPerFrame = numChannels * bytesPerSample;
            // careful about integer overflow in the formula below:
            final int targetFrames =
                    (int)((long)RECORD_TIME_IN_MS * RECORD_SAMPLE_RATE / 1000);
            final int targetSamples = targetFrames * numChannels;
            final int BUFFER_FRAMES = 512;
            final int BUFFER_SAMPLES = BUFFER_FRAMES * numChannels;

            final int tries = 2;
            for (int i = 0; i < tries; ++i) {
                long startTime = System.nanoTime();
                long startTimeBoot = android.os.SystemClock.elapsedRealtimeNanos();

                record.startRecording();

                AudioTimestamp startTs = new AudioTimestamp();
                int samplesRead = 0;
                boolean timestampRead = false;
                // For 16 bit data, use shorts
                short[] shortData = new short[BUFFER_SAMPLES];
                while (samplesRead < targetSamples) {
                    // First read is one frame to anchor the record position.
                    int amount = samplesRead == 0 ? numChannels :
                        Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                    int ret = record.read(shortData, 0, amount);
                    assertEquals(TEST_NAME, amount, ret);
                    // timestamps follow a different path than data, so it is conceivable
                    // that first data arrives before the first timestamp is ready.
                    if (!timestampRead) {
                        timestampRead =
                                record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC)
                                        == AudioRecord.SUCCESS;
                    }
                    samplesRead += ret;
                }
                record.stop();

                // stop is synchronous, but need not be in the future.
                final long SLEEP_AFTER_STOP_FOR_INACTIVITY_MS = 1000;
                Thread.sleep(SLEEP_AFTER_STOP_FOR_INACTIVITY_MS);

                AudioTimestamp stopTs = new AudioTimestamp();
                AudioTimestamp stopTsBoot = new AudioTimestamp();

                assertEquals(AudioRecord.SUCCESS,
                        record.getTimestamp(stopTs, AudioTimestamp.TIMEBASE_MONOTONIC));
                assertEquals(AudioRecord.SUCCESS,
                        record.getTimestamp(stopTsBoot, AudioTimestamp.TIMEBASE_BOOTTIME));

                // printTimestamp("timestamp Monotonic", ts);
                // printTimestamp("timestamp Boottime", tsBoot);
                // Log.d(TEST_NAME, "startTime Monotonic " + startTime);
                // Log.d(TEST_NAME, "startTime Boottime " + startTimeBoot);

                assertEquals(stopTs.framePosition, stopTsBoot.framePosition);
                assertTrue(stopTs.framePosition >= targetFrames);
                assertTrue(stopTs.nanoTime - startTime > RECORD_TIME_IN_NANOS);
                assertTrue(stopTsBoot.nanoTime - startTimeBoot > RECORD_TIME_IN_NANOS);
                verifyContinuousTimestamps(startTs, stopTs, RECORD_SAMPLE_RATE);
            }
        } finally {
            if (record != null) {
                record.release();
                record = null;
            }
        }
    }

    /**
     * Verifies that recording is silenced while the calling UID is forced idle and
     * produces audio again when the UID returns to the active state. Relies on
     * makeMyUidStateActive()/makeMyUidStateIdle()/resetMyUidState(), readDataTimed()
     * and isAudioSilent(), all defined elsewhere in this file.
     */
    @Test
    public void testRecordNoDataForIdleUids() throws Exception {
        if (!hasMicrophone()) {
            return;
        }

        AudioRecord recorder = null;

        // We will record audio for 20 sec from active and idle state expecting
        // the recording from active state to have data while from idle silence.
        try {
            // Ensure no race and UID active
            makeMyUidStateActive();

            // Setup a recorder
            final AudioRecord candidateRecorder = new AudioRecord.Builder()
                    .setAudioSource(MediaRecorder.AudioSource.MIC)
                    .setBufferSizeInBytes(1024)
                    .setAudioFormat(new AudioFormat.Builder()
                            .setSampleRate(8000)
                            .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
                            .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                            .build())
                    .build();

            // Unleash it :P
            candidateRecorder.startRecording();
            recorder = candidateRecorder;

            final int sampleCount = AudioHelper.frameCountFromMsec(6000,
                    candidateRecorder.getFormat()) * candidateRecorder.getFormat()
                    .getChannelCount();
            final ShortBuffer buffer = ShortBuffer.allocate(sampleCount);

            // Read five seconds of data
            readDataTimed(recorder, 5000, buffer);
            // Ensure we read non-empty bytes. Some systems only
            // emulate audio devices and do not provide any actual audio data.
            if (isAudioSilent(buffer)) {
                Log.w(TAG, "Recording does not produce audio data");
                return;
            }

            // Start clean
            buffer.clear();
            // Force idle the package
            makeMyUidStateIdle();
            // Read five seconds of data
            readDataTimed(recorder, 5000, buffer);
            // Ensure we read empty bytes
            assertTrue("Recording was not silenced while UID idle", isAudioSilent(buffer));

            // Start clean
            buffer.clear();
            // Reset to active
            makeMyUidStateActive();
            // Read five seconds of data
            readDataTimed(recorder, 5000, buffer);
            // Ensure we read non-empty bytes
            assertFalse("Recording was silenced while UID active", isAudioSilent(buffer));
        } finally {
            if (recorder != null) {
                recorder.stop();
                recorder.release();
            }
            resetMyUidState();
        }
    }

    /**
     * Verifies startRecording(MediaSyncEvent): recording data only becomes available
     * after the associated static AudioTrack finishes playback, and the observed start
     * delay / record duration fall within device-class tolerances.
     */
    @Test
    public void testSynchronizedRecord() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        final String TEST_NAME = "testSynchronizedRecord";
        AudioTrack track = null;
        AudioRecord record = null;

        try {
            // 1. create a static AudioTrack.
            final int PLAYBACK_TIME_IN_MS = 2000; /* ms duration. */
            final int PLAYBACK_SAMPLE_RATE = 8000; /* in hz */
            AudioFormat format = new AudioFormat.Builder()
                    .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
                    .setEncoding(AudioFormat.ENCODING_PCM_8BIT)
                    .setSampleRate(PLAYBACK_SAMPLE_RATE)
                    .build();
            final int frameCount = AudioHelper.frameCountFromMsec(PLAYBACK_TIME_IN_MS, format);
            final int frameSize = AudioHelper.frameSizeFromFormat(format);
            track = new AudioTrack.Builder()
                    .setAudioFormat(format)
                    .setBufferSizeInBytes(frameCount * frameSize)
                    .setTransferMode(AudioTrack.MODE_STATIC)
                    .build();
            // create float array and write it
            final int sampleCount = frameCount * format.getChannelCount();
            byte[] vab = AudioHelper.createSoundDataInByteArray(
                    sampleCount, PLAYBACK_SAMPLE_RATE, 600 /* frequency */, 0 /* sweep */);
            assertEquals(TEST_NAME, vab.length,
                    track.write(vab, 0 /* offsetInBytes */, vab.length,
                            AudioTrack.WRITE_NON_BLOCKING));
            final int trackSessionId = track.getAudioSessionId();

            // 2. create an AudioRecord to sync off of AudioTrack completion.
            final int RECORD_TIME_IN_MS = 2000;
            final int RECORD_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
            final int RECORD_CHANNEL_MASK = AudioFormat.CHANNEL_IN_STEREO;
            final int RECORD_SAMPLE_RATE = 44100;
            record = new AudioRecord.Builder()
                    .setAudioFormat(new AudioFormat.Builder()
                            .setSampleRate(RECORD_SAMPLE_RATE)
                            .setChannelMask(RECORD_CHANNEL_MASK)
                            .setEncoding(RECORD_ENCODING)
                            .build())
                    .build();
            // AudioRecord creation may have silently failed, check state now
            assertEquals(TEST_NAME, AudioRecord.STATE_INITIALIZED, record.getState());

            // 3. create a MediaSyncEvent
            // This MediaSyncEvent checks playback completion of an AudioTrack
            // (or MediaPlayer, or ToneGenerator) based on its audio session id.
            //
            // Note: when synchronizing record from a MediaSyncEvent
            // (1) You need to be "close" to the end of the associated AudioTrack.
            // If the track does not complete in 30 seconds, recording begins regardless.
            // (actual delay limit may vary).
            //
            // (2) Track completion may be triggered by pause() as well as stop()
            // or when a static AudioTrack completes playback.
            //
            final int eventType = MediaSyncEvent.SYNC_EVENT_PRESENTATION_COMPLETE;
            MediaSyncEvent event = MediaSyncEvent.createEvent(eventType)
                    .setAudioSessionId(trackSessionId);
            assertEquals(TEST_NAME, trackSessionId, event.getAudioSessionId());
            assertEquals(TEST_NAME, eventType, event.getType());

            // 4. now set the AudioTrack playing and start the recording synchronized
            track.play();
            // start recording. Recording state turns to RECORDSTATE_RECORDING immediately
            // but the data read() only occurs after the AudioTrack completes.
            record.startRecording(event);
            assertEquals(TEST_NAME,
                    AudioRecord.RECORDSTATE_RECORDING, record.getRecordingState());
            long startTime = System.currentTimeMillis();

            // 5. get record data.
            // For our tests, we could set test duration by timed sleep or by # frames received.
            // Since we don't know *exactly* when AudioRecord actually begins recording,
            // we end the test by # frames read.
            final int numChannels =
                    AudioFormat.channelCountFromInChannelMask(RECORD_CHANNEL_MASK);
            final int bytesPerSample = AudioFormat.getBytesPerSample(RECORD_ENCODING);
            // NOTE(review): bytesPerFrame is computed but not used below.
            final int bytesPerFrame = numChannels * bytesPerSample;
            // careful about integer overflow in the formula below:
            final int targetSamples =
                    (int)((long)RECORD_TIME_IN_MS * RECORD_SAMPLE_RATE * numChannels / 1000);
            final int BUFFER_FRAMES = 512;
            final int BUFFER_SAMPLES = BUFFER_FRAMES * numChannels;

            // After starting, there is no guarantee when the first frame of data is read.
            long firstSampleTime = 0;
            int samplesRead = 0;

            // For 16 bit data, use shorts
            short[] shortData = new short[BUFFER_SAMPLES];
            while (samplesRead < targetSamples) {
                // the first time through, we read a single frame.
                // this sets the recording anchor position.
                int amount = samplesRead == 0 ? numChannels :
                    Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                int ret = record.read(shortData, 0, amount);
                assertEquals(TEST_NAME, amount, ret);
                if (samplesRead == 0 && ret > 0) {
                    firstSampleTime = System.currentTimeMillis();
                }
                samplesRead += ret;
                // sanity check: elapsed time cannot be more than a second
                // than what we expect.
                assertTrue(System.currentTimeMillis() - startTime <=
                        PLAYBACK_TIME_IN_MS + RECORD_TIME_IN_MS + 1000);
            }

            // 6. We've read all the frames, now check the timing.
            final long endTime = System.currentTimeMillis();
            //Log.d(TEST_NAME, "first sample time " + (firstSampleTime - startTime)
            //        + " test time " + (endTime - firstSampleTime));
            //
            // Verify recording starts within 400 ms of AudioTrack completion (typical 180ms)
            // Verify recording completes within 50 ms of expected test time (typical 20ms)
            // NOTE(review): tolerances are relaxed (800/400 ms) on non-low-latency devices.
            assertEquals(TEST_NAME, PLAYBACK_TIME_IN_MS, firstSampleTime - startTime,
                    isLowLatencyDevice() ? 200 : 800);
            assertEquals(TEST_NAME, RECORD_TIME_IN_MS, endTime - firstSampleTime,
                    isLowLatencyDevice()? 50 : 400);

            record.stop();
            assertEquals(TEST_NAME, AudioRecord.RECORDSTATE_STOPPED, record.getRecordingState());
        } finally {
            if (record != null) {
                record.release();
                record = null;
            }
            if (track != null) {
                track.release();
                track = null;
            }
        }
    }

    /**
     * Verifies that building an AudioRecord for the privileged voice-call audio
     * sources fails for apps lacking CAPTURE_AUDIO_OUTPUT.
     */
    @Test
    public void testVoiceCallAudioSourcePermissions() throws Exception {
        if (!hasMicrophone()) {
            return;
        }

        // Make sure that VOICE_CALL, VOICE_DOWNLINK and VOICE_UPLINK audio sources cannot
        // be used by apps that don't have the CAPTURE_AUDIO_OUTPUT permissions
        final int[] voiceCallAudioSources = new int [] {MediaRecorder.AudioSource.VOICE_CALL,
                MediaRecorder.AudioSource.VOICE_DOWNLINK,
                MediaRecorder.AudioSource.VOICE_UPLINK};

        for (int source : voiceCallAudioSources) {
            // AudioRecord.Builder should fail when trying to use
            // one of the voice call audio sources.
            assertThrows(UnsupportedOperationException.class,
                    () -> {
                        new AudioRecord.Builder()
                                .setAudioSource(source)
                                .setAudioFormat(new AudioFormat.Builder()
                                        .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                                        .setSampleRate(8000)
                                        .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
                                        .build())
                                .build(); });
        }
    }

    /** Dumps one MicrophoneInfo to logcat for debugging getActiveMicrophones() results. */
    private void printMicrophoneInfo(MicrophoneInfo microphone) {
        // NOTE(review): label says "deviceId" but the value logged is getDescription() —
        // confirm whether this mismatch is intentional.
        Log.i(TAG, "deviceId:" + microphone.getDescription());
        Log.i(TAG, "portId:" + microphone.getId());
        Log.i(TAG, "type:" + microphone.getType());
        Log.i(TAG, "address:" + microphone.getAddress());
        Log.i(TAG, "deviceLocation:" + microphone.getLocation());
        Log.i(TAG, "deviceGroup:" + microphone.getGroup()
                + " index:" + microphone.getIndexInTheGroup());
        MicrophoneInfo.Coordinate3F position = microphone.getPosition();
        Log.i(TAG, "position:" + position.x + "," + position.y + "," + position.z);
        MicrophoneInfo.Coordinate3F orientation = microphone.getOrientation();
        Log.i(TAG, "orientation:" + orientation.x + "," + orientation.y + "," + orientation.z);
        Log.i(TAG, "frequencyResponse:" + microphone.getFrequencyResponse());
        Log.i(TAG, "channelMapping:" + microphone.getChannelMapping());
        Log.i(TAG, "sensitivity:" + microphone.getSensitivity());
        Log.i(TAG, "max spl:" + microphone.getMaxSpl());
        Log.i(TAG, "min spl:" + microphone.getMinSpl());
        Log.i(TAG, "directionality:" + microphone.getDirectionality());
        Log.i(TAG, "******");
    }

    /**
     * Starts recording on the shared AudioRecord and checks that at least one active
     * microphone is reported after recording has been running for about a second.
     */
    @Test
    public void testGetActiveMicrophones() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        mAudioRecord.startRecording();
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // best-effort wait; an interrupted sleep still lets the check below run
        }
        List<MicrophoneInfo> activeMicrophones = mAudioRecord.getActiveMicrophones();
        assertTrue(activeMicrophones.size() > 0);
        for (MicrophoneInfo activeMicrophone : activeMicrophones) {
            printMicrophoneInfo(activeMicrophone);
        }
    }
    /**
     * Creates and sanity-checks an AudioRecord using one of three construction paths:
     * the AudioHelper audit wrapper, the Builder with a channel-index mask, or the
     * legacy positional constructor. Asserts that the resulting AudioFormat matches
     * the request and that the native buffer is at least as large as asked for.
     *
     * @param audioSource MediaRecorder.AudioSource.* constant
     * @param sampleRateInHz requested sample rate
     * @param channelConfig position mask, or index mask when isChannelIndex is true
     * @param audioFormat AudioFormat.ENCODING_* constant
     * @param bufferSizeInBytes requested minimum buffer size
     * @param auditRecording when true, wrap in AudioHelper.AudioRecordAudit
     * @param isChannelIndex when true, channelConfig is a channel-index mask
     * @return the constructed (and verified) AudioRecord
     */
    private AudioRecord createAudioRecord(
            int audioSource, int sampleRateInHz,
            int channelConfig, int audioFormat, int bufferSizeInBytes,
            boolean auditRecording, boolean isChannelIndex) {
        final AudioRecord record;
        if (auditRecording) {
            record = new AudioHelper.AudioRecordAudit(
                    audioSource, sampleRateInHz, channelConfig,
                    audioFormat, bufferSizeInBytes, isChannelIndex);
        } else if (isChannelIndex) {
            record = new AudioRecord.Builder()
                    .setAudioFormat(new AudioFormat.Builder()
                            .setChannelIndexMask(channelConfig)
                            .setEncoding(audioFormat)
                            .setSampleRate(sampleRateInHz)
                            .build())
                    .setBufferSizeInBytes(bufferSizeInBytes)
                    .build();
        } else {
            record = new AudioRecord(audioSource, sampleRateInHz, channelConfig,
                    audioFormat, bufferSizeInBytes);
        }

        // did we get the AudioRecord we expected?
        // exactly one of index mask / position mask should be set; the other is invalid
        final AudioFormat format = record.getFormat();
        assertEquals(isChannelIndex ? channelConfig : AudioFormat.CHANNEL_INVALID,
                format.getChannelIndexMask());
        assertEquals(isChannelIndex ? AudioFormat.CHANNEL_INVALID : channelConfig,
                format.getChannelMask());
        assertEquals(audioFormat, format.getEncoding());
        assertEquals(sampleRateInHz, format.getSampleRate());
        final int frameSize =
                format.getChannelCount() * AudioFormat.getBytesPerSample(audioFormat);
        // our native frame count cannot be smaller than our minimum buffer size request.
        assertTrue(record.getBufferSizeInFrames() * frameSize >= bufferSizeInBytes);
        return record;
    }

    /**
     * Convenience overload of the main doTest: picks the test duration automatically
     * (60 s when auditing playback of the recording, 2 s otherwise) and delegates.
     */
    private void doTest(String reportName, boolean localRecord, boolean customHandler,
            int periodsPerSecond, int markerPeriodsPerSecond,
            boolean useByteBuffer, boolean blocking,
            final boolean auditRecording, final boolean isChannelIndex,
            final int TEST_SR, final int TEST_CONF, final int TEST_FORMAT) throws Exception {
        final int TEST_TIME_MS = auditRecording ? 60000 : 2000;
        doTest(reportName, localRecord, customHandler, periodsPerSecond, markerPeriodsPerSecond,
                useByteBuffer, blocking, auditRecording, isChannelIndex,
                TEST_SR, TEST_CONF, TEST_FORMAT, TEST_TIME_MS);
    }

    /**
     * Core parameterized recording test: creates an AudioRecord (locally or on a
     * looper thread), records for TEST_TIME_MS while reading via the selected API
     * variant (ByteBuffer / byte[] / short[] / float[], blocking or not), then
     * verifies timing, timestamps, and marker/period notification counts, and
     * submits the measurements to a DeviceReportLog.
     */
    private void doTest(String reportName, boolean localRecord, boolean customHandler,
            int periodsPerSecond, int markerPeriodsPerSecond,
            boolean useByteBuffer, boolean blocking,
            final boolean auditRecording, final boolean isChannelIndex,
            final int TEST_SR, final int TEST_CONF, final int TEST_FORMAT, final int TEST_TIME_MS)
            throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // audit recording plays back recorded audio, so use longer test timing
        final int TEST_SOURCE = MediaRecorder.AudioSource.DEFAULT;
        mIsHandleMessageCalled = false;

        // For channelIndex use one frame in bytes for buffer size.
        // This is adjusted to the minimum buffer size by native code.
        final int bufferSizeInBytes = isChannelIndex ?
                (AudioFormat.getBytesPerSample(TEST_FORMAT)
                        * AudioFormat.channelCountFromInChannelMask(TEST_CONF)) :
                AudioRecord.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        assertTrue(bufferSizeInBytes > 0);

        final AudioRecord record;
        final AudioHelper
                .MakeSomethingAsynchronouslyAndLoop<AudioRecord> makeSomething;

        if (localRecord) {
            // construct on this (instrumentation) thread
            makeSomething = null;
            record = createAudioRecord(TEST_SOURCE, TEST_SR, TEST_CONF,
                    TEST_FORMAT, bufferSizeInBytes, auditRecording, isChannelIndex);
        } else {
            makeSomething =
                    new AudioHelper.MakeSomethingAsynchronouslyAndLoop<AudioRecord>(
                            new AudioHelper.MakesSomething<AudioRecord>() {
                                @Override
                                public AudioRecord makeSomething() {
                                    return createAudioRecord(TEST_SOURCE, TEST_SR, TEST_CONF,
                                            TEST_FORMAT, bufferSizeInBytes, auditRecording,
                                            isChannelIndex);
                                }
                            }
                    );
            // create AudioRecord on different thread's looper.
            record = makeSomething.make();
        }

        // AudioRecord creation may have silently failed, check state now
        assertEquals(AudioRecord.STATE_INITIALIZED, record.getState());

        final MockOnRecordPositionUpdateListener listener;
        if (customHandler) {
            // events delivered through mHandler (main looper)
            listener = new MockOnRecordPositionUpdateListener(record, mHandler);
        } else {
            listener = new MockOnRecordPositionUpdateListener(record);
        }

        final int updatePeriodInFrames = (periodsPerSecond == 0)
                ? 0 : TEST_SR / periodsPerSecond;
        // After starting, there is no guarantee when the first frame of data is read.
        long firstSampleTime = 0;

        // blank final variables: all successful paths will initialize the times.
        // this must be declared here for visibility as they are set within the try block.
        final long endTime;
        final long startTime;
        final long stopRequestTime;
        final long stopTime;
        final long coldInputStartTime;

        try {
            if (markerPeriodsPerSecond != 0) {
                mMarkerPeriodInFrames = TEST_SR / markerPeriodsPerSecond;
                mMarkerPosition = mMarkerPeriodInFrames;
                assertEquals(AudioRecord.SUCCESS,
                        record.setNotificationMarkerPosition(mMarkerPosition));
            } else {
                mMarkerPeriodInFrames = 0;
            }

            assertEquals(AudioRecord.SUCCESS,
                    record.setPositionNotificationPeriod(updatePeriodInFrames));

            // at the start, there is no timestamp.
            AudioTimestamp startTs = new AudioTimestamp();
            assertEquals(AudioRecord.ERROR_INVALID_OPERATION,
                    record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC));

            listener.start(TEST_SR);
            record.startRecording();
            assertEquals(AudioRecord.RECORDSTATE_RECORDING, record.getRecordingState());
            startTime = System.currentTimeMillis();

            // For our tests, we could set test duration by timed sleep or by # frames received.
            // Since we don't know *exactly* when AudioRecord actually begins recording,
            // we end the test by # frames read.
            final int numChannels = AudioFormat.channelCountFromInChannelMask(TEST_CONF);
            final int bytesPerSample = AudioFormat.getBytesPerSample(TEST_FORMAT);
            final int bytesPerFrame = numChannels * bytesPerSample;
            // careful about integer overflow in the formula below:
            final int targetFrames = (int)((long)TEST_TIME_MS * TEST_SR / 1000);
            final int targetSamples = targetFrames * numChannels;
            final int BUFFER_FRAMES = 512;
            final int BUFFER_SAMPLES = BUFFER_FRAMES * numChannels;
            // TODO: verify behavior when buffer size is not a multiple of frame size.

            int startTimeAtFrame = 0;
            int samplesRead = 0;
            if (useByteBuffer) {
                ByteBuffer byteBuffer =
                        ByteBuffer.allocateDirect(BUFFER_SAMPLES * bytesPerSample);
                while (samplesRead < targetSamples) {
                    // the first time through, we read a single frame.
                    // this sets the recording anchor position.
                    int amount = samplesRead == 0 ? numChannels :
                            Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                    amount *= bytesPerSample; // in bytes
                    // read always places data at the start of the byte buffer;
                    // position and limit are ignored. test this by setting
                    // position and limit to arbitrary values here.
                    final int lastPosition = 7;
                    final int lastLimit = 13;
                    byteBuffer.position(lastPosition);
                    byteBuffer.limit(lastLimit);
                    int ret = blocking ? record.read(byteBuffer, amount) :
                            record.read(byteBuffer, amount, AudioRecord.READ_NON_BLOCKING);
                    // so long as amount requested in bytes is a multiple of the frame size
                    // we expect the byte buffer request to be filled. Caution: the
                    // byte buffer data will be in native endian order, not Java order.
                    if (blocking) {
                        assertEquals(amount, ret);
                    } else {
                        // non-blocking read may return anywhere from 0 to amount bytes
                        assertTrue("0 <= " + ret + " <= " + amount,
                                0 <= ret && ret <= amount);
                    }
                    // position, limit are not changed by read().
                    assertEquals(lastPosition, byteBuffer.position());
                    assertEquals(lastLimit, byteBuffer.limit());
                    if (samplesRead == 0 && ret > 0) {
                        firstSampleTime = System.currentTimeMillis();
                    }
                    samplesRead += ret / bytesPerSample;
                    // record the frame position of the first successful timestamp
                    if (startTimeAtFrame == 0 && ret > 0 &&
                            record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC) ==
                            AudioRecord.SUCCESS) {
                        startTimeAtFrame = samplesRead / numChannels;
                    }
                }
            } else {
                switch (TEST_FORMAT) {
                case AudioFormat.ENCODING_PCM_8BIT: {
                    // For 8 bit data, use bytes
                    byte[] byteData = new byte[BUFFER_SAMPLES];
                    while (samplesRead < targetSamples) {
                        // the first time through, we read a single frame.
                        // this sets the recording anchor position.
                        int amount = samplesRead == 0 ? numChannels :
                                Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                        int ret = blocking ? record.read(byteData, 0, amount) :
                                record.read(byteData, 0, amount, AudioRecord.READ_NON_BLOCKING);
                        if (blocking) {
                            assertEquals(amount, ret);
                        } else {
                            assertTrue("0 <= " + ret + " <= " + amount,
                                    0 <= ret && ret <= amount);
                        }
                        if (samplesRead == 0 && ret > 0) {
                            firstSampleTime = System.currentTimeMillis();
                        }
                        samplesRead += ret;
                        if (startTimeAtFrame == 0 && ret > 0 &&
                                record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC) ==
                                AudioRecord.SUCCESS) {
                            startTimeAtFrame = samplesRead / numChannels;
                        }
                    }
                } break;
                case AudioFormat.ENCODING_PCM_16BIT: {
                    // For 16 bit data, use shorts
                    short[] shortData = new short[BUFFER_SAMPLES];
                    while (samplesRead < targetSamples) {
                        // the first time through, we read a single frame.
                        // this sets the recording anchor position.
                        int amount = samplesRead == 0 ? numChannels :
                                Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                        int ret = blocking ? record.read(shortData, 0, amount) :
                                record.read(shortData, 0, amount, AudioRecord.READ_NON_BLOCKING);
                        if (blocking) {
                            assertEquals(amount, ret);
                        } else {
                            // non-blocking read may return anywhere from 0 to amount samples
                            assertTrue("0 <= " + ret + " <= " + amount,
                                    0 <= ret && ret <= amount);
                        }
                        if (samplesRead == 0 && ret > 0) {
                            firstSampleTime = System.currentTimeMillis();
                        }
                        samplesRead += ret;
                        // record the frame position of the first successful timestamp
                        if (startTimeAtFrame == 0 && ret > 0 &&
                                record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC) ==
                                AudioRecord.SUCCESS) {
                            startTimeAtFrame = samplesRead / numChannels;
                        }
                    }
                } break;
                case AudioFormat.ENCODING_PCM_FLOAT: {
                    // float path: a single read() overload takes the blocking mode flag
                    float[] floatData = new float[BUFFER_SAMPLES];
                    while (samplesRead < targetSamples) {
                        // the first time through, we read a single frame.
                        // this sets the recording anchor position.
                        int amount = samplesRead == 0 ? numChannels :
                                Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                        int ret = record.read(floatData, 0, amount, blocking ?
                                AudioRecord.READ_BLOCKING : AudioRecord.READ_NON_BLOCKING);
                        if (blocking) {
                            assertEquals(amount, ret);
                        } else {
                            assertTrue("0 <= " + ret + " <= " + amount,
                                    0 <= ret && ret <= amount);
                        }
                        if (samplesRead == 0 && ret > 0) {
                            firstSampleTime = System.currentTimeMillis();
                        }
                        samplesRead += ret;
                        if (startTimeAtFrame == 0 && ret > 0 &&
                                record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC) ==
                                AudioRecord.SUCCESS) {
                            startTimeAtFrame = samplesRead / numChannels;
                        }
                    }
                } break;
                }
            }

            // We've read all the frames, now check the record timing.
            endTime = System.currentTimeMillis();

            // cold-start latency: wall time from startRecording() to the first data read
            coldInputStartTime = firstSampleTime - startTime;
            //Log.d(TAG, "first sample time " + coldInputStartTime
            //        + " test time " + (endTime - firstSampleTime));

            if (coldInputStartTime > 200) {
                Log.w(TAG, "cold input start time way too long "
                        + coldInputStartTime + " > 200ms");
            } else if (coldInputStartTime > 100) {
                Log.w(TAG, "cold input start time too long "
                        + coldInputStartTime + " > 100ms");
            }
            assertTrue(coldInputStartTime < 5000); // must start within 5 seconds.

            // Verify recording completes within 50 ms of expected test time (typical 20ms)
            // (much looser bounds for audit mode and non-low-latency devices)
            assertEquals(TEST_TIME_MS, endTime - firstSampleTime, auditRecording ?
                    (isLowLatencyDevice() ? 1000 : 2000) : (isLowLatencyDevice() ? 50 : 400));

            // Even though we've read all the frames we want, the events may not be sent to
            // the listeners (events are handled through a separate internal callback thread).
            // One must sleep to make sure the last event(s) come in.
            Thread.sleep(30);

            stopRequestTime = System.currentTimeMillis();
            record.stop();
            assertEquals(AudioRecord.RECORDSTATE_STOPPED, record.getRecordingState());

            stopTime = System.currentTimeMillis();

            // stop listening - we should be done.
            // Caution M behavior and likely much earlier:
            // we assume no events can happen after stop(), but this may not
            // always be true as stop can take 100ms to complete (as it may disable
            // input recording on the hal); thus the event handler may be block with
            // valid events, issuing right after stop completes. Except for those events,
            // no other events should show up after stop.
            // This behavior may change in the future but we account for it here in testing.
            final long SLEEP_AFTER_STOP_FOR_EVENTS_MS = 30;
            Thread.sleep(SLEEP_AFTER_STOP_FOR_EVENTS_MS);
            listener.stop();

            // get stop timestamp
            AudioTimestamp stopTs = new AudioTimestamp();
            assertEquals(AudioRecord.SUCCESS,
                    record.getTimestamp(stopTs, AudioTimestamp.TIMEBASE_MONOTONIC));
            AudioTimestamp stopTsBoot = new AudioTimestamp();
            assertEquals(AudioRecord.SUCCESS,
                    record.getTimestamp(stopTsBoot, AudioTimestamp.TIMEBASE_BOOTTIME));

            // printTimestamp("startTs", startTs);
            // printTimestamp("stopTs", stopTs);
            // printTimestamp("stopTsBoot", stopTsBoot);
            // Log.d(TAG, "time Monotonic " + System.nanoTime());
            // Log.d(TAG, "time Boottime " + SystemClock.elapsedRealtimeNanos());

            // stop should not reset timestamps
            assertTrue(stopTs.framePosition >= targetFrames);
            // frame position must agree across the two timebases
            assertEquals(stopTs.framePosition, stopTsBoot.framePosition);
            assertTrue(stopTs.nanoTime > 0);

            // timestamps follow a different path than data, so it is conceivable
            // that first data arrives before the first timestamp is ready.
            assertTrue(startTimeAtFrame > 0); // we read a start timestamp

            verifyContinuousTimestamps(startTs, stopTs, TEST_SR);

            // clean up
            if (makeSomething != null) {
                makeSomething.join();
            }

        } finally {
            listener.release();
            // we must release the record immediately as it is a system-wide
            // resource needed for other tests.
            record.release();
        }
        if (auditRecording) { // don't check timing if auditing (messes up timing)
            return;
        }
        // expected notification counts over the nominal test duration...
        final int markerPeriods = markerPeriodsPerSecond * TEST_TIME_MS / 1000;
        final int updatePeriods = periodsPerSecond * TEST_TIME_MS / 1000;
        // ...and upper bounds based on the actual recording span (+1 for a straggler)
        final int markerPeriodsMax =
                markerPeriodsPerSecond * (int)(stopTime - firstSampleTime) / 1000 + 1;
        final int updatePeriodsMax =
                periodsPerSecond * (int)(stopTime - firstSampleTime) / 1000 + 1;

        // collect statistics
        final ArrayList<Integer> markerList = listener.getMarkerList();
        final ArrayList<Integer> periodicList = listener.getPeriodicList();
        // verify count of markers and periodic notifications.
        // there could be an extra notification since we don't stop() immediately
        // rather wait for potential events to come in.
        //Log.d(TAG, "markerPeriods " + markerPeriods +
        //        " markerPeriodsReceived " + markerList.size());
        //Log.d(TAG, "updatePeriods " + updatePeriods +
        //        " updatePeriodsReceived " + periodicList.size());
        if (isLowLatencyDevice()) {
            assertTrue(TAG + ": markerPeriods " + markerPeriods +
                    " <= markerPeriodsReceived " + markerList.size() +
                    " <= markerPeriodsMax " + markerPeriodsMax,
                    markerPeriods <= markerList.size()
                    && markerList.size() <= markerPeriodsMax);
            assertTrue(TAG + ": updatePeriods " + updatePeriods +
                    " <= updatePeriodsReceived " + periodicList.size() +
                    " <= updatePeriodsMax " + updatePeriodsMax,
                    updatePeriods <= periodicList.size()
                    && periodicList.size() <= updatePeriodsMax);
        }

        // Since we don't have accurate positioning of the start time of the recorder,
        // and there is no record.getPosition(), we consider only differential timing
        // from the first marker or periodic event.
        final int toleranceInFrames = TEST_SR * 80 / 1000; // 80 ms
        final int testTimeInFrames = (int)((long)TEST_TIME_MS * TEST_SR / 1000);

        // marker-event timing statistics, differential from the first marker
        AudioHelper.Statistics markerStat = new AudioHelper.Statistics();
        for (int i = 1; i < markerList.size(); ++i) {
            final int expected = mMarkerPeriodInFrames * i;
            if (markerList.get(i) > testTimeInFrames) {
                break; // don't consider any notifications when we might be stopping.
            }
            final int actual = markerList.get(i) - markerList.get(0);
            //Log.d(TAG, "Marker: " + i + " expected(" + expected + ") actual(" + actual
            //        + ") diff(" + (actual - expected) + ")"
            //        + " tolerance " + toleranceInFrames);
            if (isLowLatencyDevice()) {
                assertEquals(expected, actual, toleranceInFrames);
            }
            // accumulate diff in milliseconds
            markerStat.add((double)(actual - expected) * 1000 / TEST_SR);
        }

        // periodic-notification timing statistics, same scheme
        AudioHelper.Statistics periodicStat = new AudioHelper.Statistics();
        for (int i = 1; i < periodicList.size(); ++i) {
            final int expected = updatePeriodInFrames * i;
            if (periodicList.get(i) > testTimeInFrames) {
                break; // don't consider any notifications when we might be stopping.
            }
            final int actual = periodicList.get(i) - periodicList.get(0);
            //Log.d(TAG, "Update: " + i + " expected(" + expected + ") actual(" + actual
            //        + ") diff(" + (actual - expected) + ")"
            //        + " tolerance " + toleranceInFrames);
            if (isLowLatencyDevice()) {
                assertEquals(expected, actual, toleranceInFrames);
            }
            periodicStat.add((double)(actual - expected) * 1000 / TEST_SR);
        }

        // report this
        DeviceReportLog log = new DeviceReportLog(REPORT_LOG_NAME, reportName);
        log.addValue("start_recording_lag", coldInputStartTime, ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("stop_execution_time", stopTime - stopRequestTime, ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("total_record_time_expected", TEST_TIME_MS, ResultType.NEUTRAL, ResultUnit.MS);
        log.addValue("total_record_time_actual", endTime - firstSampleTime, ResultType.NEUTRAL,
                ResultUnit.MS);
        log.addValue("total_markers_expected", markerPeriods, ResultType.NEUTRAL, ResultUnit.COUNT);
        log.addValue("total_markers_actual", markerList.size(), ResultType.NEUTRAL,
                ResultUnit.COUNT);
        log.addValue("total_periods_expected", updatePeriods, ResultType.NEUTRAL, ResultUnit.COUNT);
        log.addValue("total_periods_actual", periodicList.size(), ResultType.NEUTRAL,
                ResultUnit.COUNT);
        log.addValue("average_marker_diff", markerStat.getAvg(), ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("maximum_marker_abs_diff", markerStat.getMaxAbs(), ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("average_marker_abs_diff", markerStat.getAvgAbs(), ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("average_periodic_diff", periodicStat.getAvg(), ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("maximum_periodic_abs_diff", periodicStat.getMaxAbs(), ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("average_periodic_abs_diff", periodicStat.getAvgAbs(), ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.setSummary("unified_abs_diff", (periodicStat.getAvgAbs() + markerStat.getAvgAbs()) / 2,
                ResultType.LOWER_BETTER, ResultUnit.MS);
        log.submit(InstrumentationRegistry.getInstrumentation());
    }

    /**
     * Listener that records the (time-estimated) frame position of every marker and
     * periodic notification, and fails the test if a notification arrives after the
     * listener has been stopped. All callbacks and state changes are synchronized.
     */
    private class MockOnRecordPositionUpdateListener
                                        implements OnRecordPositionUpdateListener {
        public MockOnRecordPositionUpdateListener(AudioRecord record) {
            mAudioRecord = record;
            record.setRecordPositionUpdateListener(this);
        }

        // variant that delivers events through the supplied Handler
        public MockOnRecordPositionUpdateListener(AudioRecord record, Handler handler) {
            mAudioRecord = record;
            record.setRecordPositionUpdateListener(this, handler);
        }

        public synchronized void onMarkerReached(AudioRecord record) {
            if (mIsTestActive) {
                int position = getPosition();
                mOnMarkerReachedCalled.add(position);
                // advance the marker so we keep getting marker callbacks
                mMarkerPosition += mMarkerPeriodInFrames;
                assertEquals(AudioRecord.SUCCESS,
                        mAudioRecord.setNotificationMarkerPosition(mMarkerPosition));
            } else {
                // see comment on stop()
                final long delta = System.currentTimeMillis() - mStopTime;
                Log.d(TAG, "onMarkerReached called " + delta + " ms after stop");
                fail("onMarkerReached called when not active");
            }
        }

        public synchronized void onPeriodicNotification(AudioRecord record) {
            if (mIsTestActive) {
                int position = getPosition();
                mOnPeriodicNotificationCalled.add(position);
            } else {
                // see comment on stop()
                final long delta = System.currentTimeMillis() - mStopTime;
                Log.d(TAG, "onPeriodicNotification called " + delta + " ms after stop");
                fail("onPeriodicNotification called when not active");
            }
        }

        public synchronized void start(int sampleRate) {
            mIsTestActive = true;
            mSampleRate = sampleRate;
            mStartTime = System.currentTimeMillis();
        }

        public synchronized void stop() {
            // the listener should be stopped some time after AudioRecord is stopped
            // as some messages may not yet be posted.
            mIsTestActive = false;
            mStopTime = System.currentTimeMillis();
        }

        public ArrayList<Integer> getMarkerList() {
            return mOnMarkerReachedCalled;
        }

        public ArrayList<Integer> getPeriodicList() {
            return mOnPeriodicNotificationCalled;
        }

        public synchronized void release() {
            stop();
            mAudioRecord.setRecordPositionUpdateListener(null);
            mAudioRecord = null;
        }

        private int getPosition() {
            // we don't have mAudioRecord.getRecordPosition();
            // so we fake this by timing.
            long delta = System.currentTimeMillis() - mStartTime;
            return (int)(delta * mSampleRate / 1000);
        }

        private long mStartTime;            // wall time of start()
        private long mStopTime;             // wall time of stop()
        private int mSampleRate;            // used to convert elapsed ms to frames
        private boolean mIsTestActive = true;
        private AudioRecord mAudioRecord;
        private ArrayList<Integer> mOnMarkerReachedCalled = new ArrayList<Integer>();
        private ArrayList<Integer> mOnPeriodicNotificationCalled = new ArrayList<Integer>();
    }

    // true when the device has any microphone feature; tests no-op without one
    private boolean hasMicrophone() {
        return getContext().getPackageManager().hasSystemFeature(
                PackageManager.FEATURE_MICROPHONE);
    }

    private boolean isLowRamDevice() {
        return ((ActivityManager) getContext().getSystemService(Context.ACTIVITY_SERVICE))
                .isLowRamDevice();
    }

    // low-latency devices get the tight timing tolerances in the assertions above
    private boolean isLowLatencyDevice() {
        return getContext().getPackageManager().hasSystemFeature(
                PackageManager.FEATURE_AUDIO_LOW_LATENCY);
    }

    /**
     * Checks that elapsed timestamp time matches elapsed frame count: the ratio of
     * nanoTime delta to the frame-derived duration must be ~1.0 (1% tolerance on
     * low-latency devices, 50% otherwise).
     */
    private void verifyContinuousTimestamps(
            AudioTimestamp startTs, AudioTimestamp stopTs, int sampleRate)
            throws Exception {
        final long timeDiff = stopTs.nanoTime - startTs.nanoTime;
        final long frameDiff = stopTs.framePosition - startTs.framePosition;
        final long NANOS_PER_SECOND = 1000000000;
        final long timeByFrames = frameDiff * NANOS_PER_SECOND / sampleRate;
        final double ratio = (double)timeDiff / timeByFrames;

        // Usually the ratio is accurate to one part per thousand or better.
        // Log.d(TAG, "ratio=" + ratio + ", timeDiff=" + timeDiff + ", frameDiff=" + frameDiff +
        //        ", timeByFrames=" + timeByFrames + ", sampleRate=" + sampleRate);
        assertEquals(1.0 /* expected */, ratio, isLowLatencyDevice() ? 0.01 : 0.5 /* delta */);
    }

    // remove if AudioTimestamp has a better toString().
    private void printTimestamp(String s, AudioTimestamp ats) {
        Log.d(TAG, s + ": pos: " + ats.framePosition + " time: " + ats.nanoTime);
    }

    /**
     * Reads 16-bit samples from the recorder into {@code out} for approximately
     * {@code durationMillis}, returning early if a read yields no data.
     * NOTE(review): declared {@code throws IOException} but nothing here throws it.
     */
    private static void readDataTimed(AudioRecord recorder, long durationMillis,
            ShortBuffer out) throws IOException {
        final short[] buffer = new short[1024];
        final long startTimeMillis = SystemClock.uptimeMillis();
        final long stopTimeMillis = startTimeMillis + durationMillis;
        while (SystemClock.uptimeMillis() < stopTimeMillis) {
            final int readCount = recorder.read(buffer, 0, buffer.length);
            if (readCount <= 0) {
                return;
            }
            out.put(buffer, 0, readCount);
        }
    }

    // Returns true when more than half of the captured samples are exactly zero.
    private static boolean isAudioSilent(ShortBuffer buffer) {
        // Always need some bytes read
        assertTrue("Buffer should have some data", buffer.position() > 0);

        // It is possible that the transition from empty to non empty bytes
        // happened in the middle of the read data due to the async nature of
        // the system. Therefore, we look for the transitions from non-empty
        // to empty and from empty to non-empty values for robustness.
1481 int totalSilenceCount = 0; 1482 final int valueCount = buffer.position(); 1483 for (int i = valueCount - 1; i >= 0; i--) { 1484 final short value = buffer.get(i); 1485 if (value == 0) { 1486 totalSilenceCount++; 1487 } 1488 } 1489 return totalSilenceCount > valueCount / 2; 1490 } 1491 1492 private static void makeMyUidStateActive() throws IOException { 1493 final String command = "cmd media.audio_policy set-uid-state " 1494 + InstrumentationRegistry.getTargetContext().getPackageName() + " active"; 1495 SystemUtil.runShellCommand(InstrumentationRegistry.getInstrumentation(), command); 1496 } 1497 1498 private static void makeMyUidStateIdle() throws IOException { 1499 final String command = "cmd media.audio_policy set-uid-state " 1500 + InstrumentationRegistry.getTargetContext().getPackageName() + " idle"; 1501 SystemUtil.runShellCommand(InstrumentationRegistry.getInstrumentation(), command); 1502 } 1503 1504 private static void resetMyUidState() throws IOException { 1505 final String command = "cmd media.audio_policy reset-uid-state " 1506 + InstrumentationRegistry.getTargetContext().getPackageName(); 1507 SystemUtil.runShellCommand(InstrumentationRegistry.getInstrumentation(), command); 1508 } 1509 1510 private static Context getContext() { 1511 return InstrumentationRegistry.getInstrumentation().getTargetContext(); 1512 } 1513 } 1514