1 /*
2  * Copyright (C) 2009 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.media.cts;
18 
19 import android.app.ActivityManager;
20 import android.content.Context;
21 import android.content.pm.PackageManager;
22 import android.cts.util.CtsAndroidTestCase;
23 import android.media.AudioFormat;
24 import android.media.AudioManager;
25 import android.media.AudioRecord;
26 import android.media.AudioRecord.OnRecordPositionUpdateListener;
27 import android.media.AudioTimestamp;
28 import android.media.AudioTrack;
29 import android.media.MediaRecorder;
30 import android.media.MediaSyncEvent;
31 import android.os.Handler;
32 import android.os.Looper;
33 import android.os.Message;
34 import android.os.SystemClock;
35 import android.util.Log;
36 
37 import com.android.compatibility.common.util.DeviceReportLog;
38 import com.android.compatibility.common.util.ResultType;
39 import com.android.compatibility.common.util.ResultUnit;
40 
41 import java.nio.ByteBuffer;
42 import java.util.ArrayList;
43 
44 public class AudioRecordTest extends CtsAndroidTestCase {
    // Log tag for this test class.
    private final static String TAG = "AudioRecordTest";
    // Name under which CTS device metrics are reported.
    private static final String REPORT_LOG_NAME = "CtsMediaTestCases";
    // Record instance created in setUp() on a dedicated looper thread.
    private AudioRecord mAudioRecord;
    // Capture sample rate in Hz used for the shared mAudioRecord.
    private int mHz = 44100;
    // Flags set from the position-update listener callbacks; cleared by reset().
    private boolean mIsOnMarkerReachedCalled;
    private boolean mIsOnPeriodicNotificationCalled;
    private boolean mIsHandleMessageCalled;
    // Looper of the thread that created mAudioRecord; quit in tearDown().
    private Looper mLooper;
    // For doTest
    private int mMarkerPeriodInFrames;
    private int mMarkerPosition;
    // Handler on the main looper. testAudioRecordOP verifies that a handler
    // passed to setRecordPositionUpdateListener is only used for its Looper:
    // handleMessage is expected NOT to be invoked (mIsHandleMessageCalled
    // stays false).
    private Handler mHandler = new Handler(Looper.getMainLooper()) {
        @Override
        public void handleMessage(Message msg) {
            mIsHandleMessageCalled = true;
            super.handleMessage(msg);
        }
    };
63 
64     @Override
setUp()65     protected void setUp() throws Exception {
66         super.setUp();
67 
68         if (!hasMicrophone()) {
69             return;
70         }
71 
72         /*
73          * InstrumentationTestRunner.onStart() calls Looper.prepare(), which creates a looper
74          * for the current thread. However, since we don't actually call loop() in the test,
75          * any messages queued with that looper will never be consumed. Therefore, we must
76          * create the instance in another thread, either without a looper, so the main looper is
77          * used, or with an active looper.
78          */
79         Thread t = new Thread() {
80             @Override
81             public void run() {
82                 Looper.prepare();
83                 mLooper = Looper.myLooper();
84                 synchronized(this) {
85                     mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.DEFAULT, mHz,
86                             AudioFormat.CHANNEL_CONFIGURATION_MONO,
87                             AudioFormat.ENCODING_PCM_16BIT,
88                             AudioRecord.getMinBufferSize(mHz,
89                                     AudioFormat.CHANNEL_CONFIGURATION_MONO,
90                                     AudioFormat.ENCODING_PCM_16BIT) * 10);
91                     this.notify();
92                 }
93                 Looper.loop();
94             }
95         };
96         synchronized(t) {
97             t.start(); // will block until we wait
98             t.wait();
99         }
100         assertNotNull(mAudioRecord);
101     }
102 
103     @Override
tearDown()104     protected void tearDown() throws Exception {
105         if (hasMicrophone()) {
106             mAudioRecord.release();
107             mLooper.quit();
108         }
109         super.tearDown();
110     }
111 
reset()112     private void reset() {
113         mIsOnMarkerReachedCalled = false;
114         mIsOnPeriodicNotificationCalled = false;
115         mIsHandleMessageCalled = false;
116     }
117 
testAudioRecordProperties()118     public void testAudioRecordProperties() throws Exception {
119         if (!hasMicrophone()) {
120             return;
121         }
122         assertEquals(AudioFormat.ENCODING_PCM_16BIT, mAudioRecord.getAudioFormat());
123         assertEquals(MediaRecorder.AudioSource.DEFAULT, mAudioRecord.getAudioSource());
124         assertEquals(1, mAudioRecord.getChannelCount());
125         assertEquals(AudioFormat.CHANNEL_IN_MONO,
126                 mAudioRecord.getChannelConfiguration());
127         assertEquals(AudioRecord.STATE_INITIALIZED, mAudioRecord.getState());
128         assertEquals(mHz, mAudioRecord.getSampleRate());
129         assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
130 
131         int bufferSize = AudioRecord.getMinBufferSize(mHz,
132                 AudioFormat.CHANNEL_CONFIGURATION_DEFAULT, AudioFormat.ENCODING_PCM_16BIT);
133         assertTrue(bufferSize > 0);
134     }
135 
    // Exercises the record/read loop with each read flavor (byte[], short[],
    // direct ByteBuffer) and verifies that the marker and periodic
    // position-update callbacks fire, both with and without an explicit
    // Handler supplied to setRecordPositionUpdateListener().
    public void testAudioRecordOP() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        final int SLEEP_TIME = 10;      // ms between reads
        final int RECORD_TIME = 10000;  // ms per recording pass
        assertEquals(AudioRecord.STATE_INITIALIZED, mAudioRecord.getState());

        // Marker at half a second of frames, period at one second of frames,
        // so both callback kinds fire well within each 10 s pass.
        int markerInFrames = mAudioRecord.getSampleRate() / 2;
        assertEquals(AudioRecord.SUCCESS,
                mAudioRecord.setNotificationMarkerPosition(markerInFrames));
        assertEquals(markerInFrames, mAudioRecord.getNotificationMarkerPosition());
        int periodInFrames = mAudioRecord.getSampleRate();
        assertEquals(AudioRecord.SUCCESS,
                mAudioRecord.setPositionNotificationPeriod(periodInFrames));
        assertEquals(periodInFrames, mAudioRecord.getPositionNotificationPeriod());
        // Listener records which callbacks fired into the shared test flags.
        OnRecordPositionUpdateListener listener = new OnRecordPositionUpdateListener() {

            public void onMarkerReached(AudioRecord recorder) {
                mIsOnMarkerReachedCalled = true;
            }

            public void onPeriodicNotification(AudioRecord recorder) {
                mIsOnPeriodicNotificationCalled = true;
            }
        };
        mAudioRecord.setRecordPositionUpdateListener(listener);

        // use byte array as buffer
        final int BUFFER_SIZE = 102400;
        byte[] byteData = new byte[BUFFER_SIZE];
        long time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use short array as buffer
        short[] shortData = new short[BUFFER_SIZE];
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(shortData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use ByteBuffer as buffer
        ByteBuffer byteBuffer = ByteBuffer.allocateDirect(BUFFER_SIZE);
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteBuffer, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        reset();

        // use handler
        final Handler handler = new Handler(Looper.getMainLooper()) {
            @Override
            public void handleMessage(Message msg) {
                mIsHandleMessageCalled = true;
                super.handleMessage(msg);
            }
        };

        mAudioRecord.setRecordPositionUpdateListener(listener, handler);
        time = System.currentTimeMillis();
        mAudioRecord.startRecording();
        assertEquals(AudioRecord.RECORDSTATE_RECORDING, mAudioRecord.getRecordingState());
        while (System.currentTimeMillis() - time < RECORD_TIME) {
            Thread.sleep(SLEEP_TIME);
            mAudioRecord.read(byteData, 0, BUFFER_SIZE);
        }
        mAudioRecord.stop();
        assertEquals(AudioRecord.RECORDSTATE_STOPPED, mAudioRecord.getRecordingState());
        assertTrue(mIsOnMarkerReachedCalled);
        assertTrue(mIsOnPeriodicNotificationCalled);
        // The handler argument is only ever used for getting the associated Looper
        assertFalse(mIsHandleMessageCalled);

        mAudioRecord.release();
        assertEquals(AudioRecord.STATE_UNINITIALIZED, mAudioRecord.getState());
    }
237 
testAudioRecordResamplerMono8Bit()238     public void testAudioRecordResamplerMono8Bit() throws Exception {
239         doTest("resampler_mono_8bit", true /*localRecord*/, false /*customHandler*/,
240                 1 /*periodsPerSecond*/, 1 /*markerPeriodsPerSecond*/,
241                 false /*useByteBuffer*/,  false /*blocking*/,
242                 false /*auditRecording*/, false /*isChannelIndex*/, 88200 /*TEST_SR*/,
243                 AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_8BIT);
244     }
245 
testAudioRecordResamplerStereo8Bit()246     public void testAudioRecordResamplerStereo8Bit() throws Exception {
247         doTest("resampler_stereo_8bit", true /*localRecord*/, false /*customHandler*/,
248                 0 /*periodsPerSecond*/, 3 /*markerPeriodsPerSecond*/,
249                 true /*useByteBuffer*/,  true /*blocking*/,
250                 false /*auditRecording*/, false /*isChannelIndex*/, 45000 /*TEST_SR*/,
251                 AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_8BIT);
252     }
253 
testAudioRecordLocalMono16Bit()254     public void testAudioRecordLocalMono16Bit() throws Exception {
255         doTest("local_mono_16bit", true /*localRecord*/, false /*customHandler*/,
256                 30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
257                 false /*useByteBuffer*/, true /*blocking*/,
258                 false /*auditRecording*/, false /*isChannelIndex*/, 8000 /*TEST_SR*/,
259                 AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
260     }
261 
testAudioRecordStereo16Bit()262     public void testAudioRecordStereo16Bit() throws Exception {
263         doTest("stereo_16bit", false /*localRecord*/, false /*customHandler*/,
264                 2 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
265                 false /*useByteBuffer*/, false /*blocking*/,
266                 false /*auditRecording*/, false /*isChannelIndex*/, 17000 /*TEST_SR*/,
267                 AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_16BIT);
268     }
269 
testAudioRecordMonoFloat()270     public void testAudioRecordMonoFloat() throws Exception {
271         doTest("mono_float", false /*localRecord*/, true /*customHandler*/,
272                 30 /*periodsPerSecond*/, 2 /*markerPeriodsPerSecond*/,
273                 false /*useByteBuffer*/, true /*blocking*/,
274                 false /*auditRecording*/, false /*isChannelIndex*/, 32000 /*TEST_SR*/,
275                 AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_FLOAT);
276     }
277 
testAudioRecordLocalNonblockingStereoFloat()278     public void testAudioRecordLocalNonblockingStereoFloat() throws Exception {
279         doTest("local_nonblocking_stereo_float", true /*localRecord*/, true /*customHandler*/,
280                 2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
281                 false /*useByteBuffer*/, false /*blocking*/,
282                 false /*auditRecording*/, false /*isChannelIndex*/, 48000 /*TEST_SR*/,
283                 AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
284     }
285 
286     // Audit modes work best with non-blocking mode
testAudioRecordAuditByteBufferResamplerStereoFloat()287     public void testAudioRecordAuditByteBufferResamplerStereoFloat() throws Exception {
288         if (isLowRamDevice()) {
289             return; // skip. FIXME: reenable when AF memory allocation is updated.
290         }
291         doTest("audit_byte_buffer_resampler_stereo_float",
292                 false /*localRecord*/, true /*customHandler*/,
293                 2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
294                 true /*useByteBuffer*/, false /*blocking*/,
295                 true /*auditRecording*/, false /*isChannelIndex*/, 96000 /*TEST_SR*/,
296                 AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
297     }
298 
testAudioRecordAuditChannelIndexMonoFloat()299     public void testAudioRecordAuditChannelIndexMonoFloat() throws Exception {
300         doTest("audit_channel_index_mono_float", true /*localRecord*/, true /*customHandler*/,
301                 2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
302                 false /*useByteBuffer*/, false /*blocking*/,
303                 true /*auditRecording*/, true /*isChannelIndex*/, 47000 /*TEST_SR*/,
304                 (1 << 0) /* 1 channel */, AudioFormat.ENCODING_PCM_FLOAT);
305     }
306 
307     // Audit buffers can run out of space with high sample rate,
308     // so keep the channels and pcm encoding low
testAudioRecordAuditChannelIndex2()309     public void testAudioRecordAuditChannelIndex2() throws Exception {
310         if (isLowRamDevice()) {
311             return; // skip. FIXME: reenable when AF memory allocation is updated.
312         }
313         doTest("audit_channel_index_2", true /*localRecord*/, true /*customHandler*/,
314                 2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
315                 false /*useByteBuffer*/, false /*blocking*/,
316                 true /*auditRecording*/, true /*isChannelIndex*/, 192000 /*TEST_SR*/,
317                 (1 << 0) | (1 << 2) /* 2 channels, gap in middle */,
318                 AudioFormat.ENCODING_PCM_8BIT);
319     }
320 
321     // Audit buffers can run out of space with high numbers of channels,
322     // so keep the sample rate low.
testAudioRecordAuditChannelIndex5()323     public void testAudioRecordAuditChannelIndex5() throws Exception {
324         doTest("audit_channel_index_5", true /*localRecord*/, true /*customHandler*/,
325                 2 /*periodsPerSecond*/, 0 /*markerPeriodsPerSecond*/,
326                 false /*useByteBuffer*/, false /*blocking*/,
327                 true /*auditRecording*/, true /*isChannelIndex*/, 16000 /*TEST_SR*/,
328                 (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4)  /* 5 channels */,
329                 AudioFormat.ENCODING_PCM_16BIT);
330     }
331 
332     // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord built with
333     // an empty Builder matches the documentation / expected values
testAudioRecordBuilderDefault()334     public void testAudioRecordBuilderDefault() throws Exception {
335         if (!hasMicrophone()) {
336             return;
337         }
338         // constants for test
339         final String TEST_NAME = "testAudioRecordBuilderDefault";
340         // expected values below match the AudioRecord.Builder documentation
341         final int expectedCapturePreset = MediaRecorder.AudioSource.DEFAULT;
342         final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
343         final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
344         final int expectedState = AudioRecord.STATE_INITIALIZED;
345         // use builder with default values
346         final AudioRecord rec = new AudioRecord.Builder().build();
347         // save results
348         final int observedSource = rec.getAudioSource();
349         final int observedChannel = rec.getChannelConfiguration();
350         final int observedEncoding = rec.getAudioFormat();
351         final int observedState = rec.getState();
352         // release recorder before the test exits (either successfully or with an exception)
353         rec.release();
354         // compare results
355         assertEquals(TEST_NAME + ": default capture preset", expectedCapturePreset, observedSource);
356         assertEquals(TEST_NAME + ": default channel config", expectedChannel, observedChannel);
357         assertEquals(TEST_NAME + ": default encoding", expectedEncoding, observedEncoding);
358         assertEquals(TEST_NAME + ": state", expectedState, observedState);
359     }
360 
361     // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord built with
362     // an incomplete AudioFormat matches the documentation / expected values
testAudioRecordBuilderPartialFormat()363     public void testAudioRecordBuilderPartialFormat() throws Exception {
364         if (!hasMicrophone()) {
365             return;
366         }
367         // constants for test
368         final String TEST_NAME = "testAudioRecordBuilderPartialFormat";
369         final int expectedRate = 16000;
370         final int expectedState = AudioRecord.STATE_INITIALIZED;
371         // expected values below match the AudioRecord.Builder documentation
372         final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
373         final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
374         // use builder with a partial audio format
375         final AudioRecord rec = new AudioRecord.Builder()
376                 .setAudioFormat(new AudioFormat.Builder().setSampleRate(expectedRate).build())
377                 .build();
378         // save results
379         final int observedRate = rec.getSampleRate();
380         final int observedChannel = rec.getChannelConfiguration();
381         final int observedEncoding = rec.getAudioFormat();
382         final int observedState = rec.getState();
383         // release recorder before the test exits (either successfully or with an exception)
384         rec.release();
385         // compare results
386         assertEquals(TEST_NAME + ": configured rate", expectedRate, observedRate);
387         assertEquals(TEST_NAME + ": default channel config", expectedChannel, observedChannel);
388         assertEquals(TEST_NAME + ": default encoding", expectedEncoding, observedEncoding);
389         assertEquals(TEST_NAME + ": state", expectedState, observedState);
390     }
391 
392     // Test AudioRecord.Builder to verify the observed configuration of an AudioRecord matches
393     // the parameters used in the builder
testAudioRecordBuilderParams()394     public void testAudioRecordBuilderParams() throws Exception {
395         if (!hasMicrophone()) {
396             return;
397         }
398         // constants for test
399         final String TEST_NAME = "testAudioRecordBuilderParams";
400         final int expectedRate = 8000;
401         final int expectedChannel = AudioFormat.CHANNEL_IN_MONO;
402         final int expectedChannelCount = 1;
403         final int expectedEncoding = AudioFormat.ENCODING_PCM_16BIT;
404         final int expectedSource = MediaRecorder.AudioSource.VOICE_COMMUNICATION;
405         final int expectedState = AudioRecord.STATE_INITIALIZED;
406         // use builder with expected parameters
407         final AudioRecord rec = new AudioRecord.Builder()
408                 .setAudioFormat(new AudioFormat.Builder()
409                         .setSampleRate(expectedRate)
410                         .setChannelMask(expectedChannel)
411                         .setEncoding(expectedEncoding)
412                         .build())
413                 .setAudioSource(expectedSource)
414                 .build();
415         // save results
416         final int observedRate = rec.getSampleRate();
417         final int observedChannel = rec.getChannelConfiguration();
418         final int observedChannelCount = rec.getChannelCount();
419         final int observedEncoding = rec.getAudioFormat();
420         final int observedSource = rec.getAudioSource();
421         final int observedState = rec.getState();
422         // release recorder before the test exits (either successfully or with an exception)
423         rec.release();
424         // compare results
425         assertEquals(TEST_NAME + ": configured rate", expectedRate, observedRate);
426         assertEquals(TEST_NAME + ": configured channel config", expectedChannel, observedChannel);
427         assertEquals(TEST_NAME + ": configured encoding", expectedEncoding, observedEncoding);
428         assertEquals(TEST_NAME + ": implicit channel count", expectedChannelCount,
429                 observedChannelCount);
430         assertEquals(TEST_NAME + ": configured source", expectedSource, observedSource);
431         assertEquals(TEST_NAME + ": state", expectedState, observedState);
432     }
433 
434     // Test AudioRecord to ensure we can build after a failure.
testAudioRecordBufferSize()435     public void testAudioRecordBufferSize() throws Exception {
436         if (!hasMicrophone()) {
437             return;
438         }
439         // constants for test
440         final String TEST_NAME = "testAudioRecordBufferSize";
441 
442         // use builder with parameters that should fail
443         final int superBigBufferSize = 1 << 28;
444         try {
445             final AudioRecord record = new AudioRecord.Builder()
446                 .setBufferSizeInBytes(superBigBufferSize)
447                 .build();
448             record.release();
449             fail(TEST_NAME + ": should throw exception on failure");
450         } catch (UnsupportedOperationException e) {
451             ;
452         }
453 
454         // we should be able to create again with minimum buffer size
455         final int verySmallBufferSize = 2 * 3 * 4; // frame size multiples
456         final AudioRecord record2 = new AudioRecord.Builder()
457                 .setBufferSizeInBytes(verySmallBufferSize)
458                 .build();
459 
460         final int observedState2 = record2.getState();
461         final int observedBufferSize2 = record2.getBufferSizeInFrames();
462         record2.release();
463 
464         // succeeds for minimum buffer size
465         assertEquals(TEST_NAME + ": state", AudioRecord.STATE_INITIALIZED, observedState2);
466         // should force the minimum size buffer which is > 0
467         assertTrue(TEST_NAME + ": buffer frame count", observedBufferSize2 > 0);
468     }
469 
    // Records ~2 seconds of audio (twice) and validates AudioRecord.getTimestamp():
    // monotonic and boottime timestamps agree on frame position, the position
    // covers at least the frames read, and elapsed time exceeds the record time.
    public void testTimestamp() throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        final String TEST_NAME = "testTimestamp";
        AudioRecord record = null;

        try {
            final int NANOS_PER_MILLIS = 1000000;
            final long RECORD_TIME_IN_MS = 2000;
            final long RECORD_TIME_IN_NANOS = RECORD_TIME_IN_MS * NANOS_PER_MILLIS;
            final int RECORD_ENCODING = AudioFormat.ENCODING_PCM_16BIT; // fixed at this time.
            final int RECORD_CHANNEL_MASK = AudioFormat.CHANNEL_IN_STEREO;
            final int RECORD_SAMPLE_RATE = 23456;  // requires resampling
            record = new AudioRecord.Builder()
                    .setAudioFormat(new AudioFormat.Builder()
                            .setSampleRate(RECORD_SAMPLE_RATE)
                            .setChannelMask(RECORD_CHANNEL_MASK)
                            .setEncoding(RECORD_ENCODING)
                            .build())
                    .build();

            // For our tests, we could set test duration by timed sleep or by # frames received.
            // Since we don't know *exactly* when AudioRecord actually begins recording,
            // we end the test by # frames read.
            final int numChannels =
                    AudioFormat.channelCountFromInChannelMask(RECORD_CHANNEL_MASK);
            final int bytesPerSample = AudioFormat.getBytesPerSample(RECORD_ENCODING);
            // bytesPerFrame is computed for context; not used below in this span.
            final int bytesPerFrame = numChannels * bytesPerSample;
            // careful about integer overflow in the formula below:
            final int targetFrames =
                    (int)((long)RECORD_TIME_IN_MS * RECORD_SAMPLE_RATE / 1000);
            final int targetSamples = targetFrames * numChannels;
            final int BUFFER_FRAMES = 512;
            final int BUFFER_SAMPLES = BUFFER_FRAMES * numChannels;

            // Run the whole sequence twice on the same AudioRecord instance.
            final int tries = 2;
            for (int i = 0; i < tries; ++i) {
                long startTime = System.nanoTime();
                long startTimeBoot = android.os.SystemClock.elapsedRealtimeNanos();

                record.startRecording();

                AudioTimestamp startTs = new AudioTimestamp();
                int samplesRead = 0;
                boolean timestampRead = false;
                // For 16 bit data, use shorts
                short[] shortData = new short[BUFFER_SAMPLES];
                while (samplesRead < targetSamples) {
                    // First read requests a single frame (numChannels samples);
                    // subsequent reads fill up to a full buffer.
                    int amount = samplesRead == 0 ? numChannels :
                        Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                    int ret = record.read(shortData, 0, amount);
                    assertEquals(TEST_NAME, amount, ret);
                    // timestamps follow a different path than data, so it is conceivable
                    // that first data arrives before the first timestamp is ready.
                    if (!timestampRead) {
                        timestampRead =
                                record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC)
                                    == AudioRecord.SUCCESS;
                    }
                    samplesRead += ret;
                }
                record.stop();

                // stop is synchronous, but need not be in the future.
                final long SLEEP_AFTER_STOP_FOR_INACTIVITY_MS = 1000;
                Thread.sleep(SLEEP_AFTER_STOP_FOR_INACTIVITY_MS);

                AudioTimestamp stopTs = new AudioTimestamp();
                AudioTimestamp stopTsBoot = new AudioTimestamp();

                // Timestamps must remain available in both timebases after stop().
                assertEquals(AudioRecord.SUCCESS,
                        record.getTimestamp(stopTs, AudioTimestamp.TIMEBASE_MONOTONIC));
                assertEquals(AudioRecord.SUCCESS,
                        record.getTimestamp(stopTsBoot, AudioTimestamp.TIMEBASE_BOOTTIME));

                // printTimestamp("timestamp Monotonic", ts);
                // printTimestamp("timestamp Boottime", tsBoot);
                // Log.d(TEST_NAME, "startTime Monotonic " + startTime);
                // Log.d(TEST_NAME, "startTime Boottime " + startTimeBoot);

                // Both timebases report the same frame position, which must
                // cover at least the frames we read, and the elapsed time in
                // each timebase must exceed the nominal record duration.
                assertEquals(stopTs.framePosition, stopTsBoot.framePosition);
                assertTrue(stopTs.framePosition >= targetFrames);
                assertTrue(stopTs.nanoTime - startTime > RECORD_TIME_IN_NANOS);
                assertTrue(stopTsBoot.nanoTime - startTimeBoot > RECORD_TIME_IN_NANOS);
                // Helper defined elsewhere in this file; presumably checks the
                // start/stop timestamps are consistent with the sample rate.
                verifyContinuousTimestamps(startTs, stopTs, RECORD_SAMPLE_RATE);
            }
        } finally {
            if (record != null) {
                record.release();
                record = null;
            }
        }
    }
564 
testSynchronizedRecord()565     public void testSynchronizedRecord() throws Exception {
566         if (!hasMicrophone()) {
567             return;
568         }
569         final String TEST_NAME = "testSynchronizedRecord";
570         AudioTrack track = null;
571         AudioRecord record = null;
572 
573         try {
574             // 1. create a static AudioTrack.
575             final int PLAYBACK_TIME_IN_MS = 2000; /* ms duration. */
576             final int PLAYBACK_SAMPLE_RATE = 8000; /* in hz */
577             AudioFormat format = new AudioFormat.Builder()
578                     .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
579                     .setEncoding(AudioFormat.ENCODING_PCM_8BIT)
580                     .setSampleRate(PLAYBACK_SAMPLE_RATE)
581                     .build();
582             final int frameCount = AudioHelper.frameCountFromMsec(PLAYBACK_TIME_IN_MS, format);
583             final int frameSize = AudioHelper.frameSizeFromFormat(format);
584             track = new AudioTrack.Builder()
585                     .setAudioFormat(format)
586                     .setBufferSizeInBytes(frameCount * frameSize)
587                     .setTransferMode(AudioTrack.MODE_STATIC)
588                     .build();
589             // create float array and write it
590             final int sampleCount = frameCount * format.getChannelCount();
591             byte[] vab = AudioHelper.createSoundDataInByteArray(
592                     sampleCount, PLAYBACK_SAMPLE_RATE, 600 /* frequency */, 0 /* sweep */);
593             assertEquals(TEST_NAME, vab.length,
594                     track.write(vab, 0 /* offsetInBytes */, vab.length,
595                             AudioTrack.WRITE_NON_BLOCKING));
596             final int trackSessionId = track.getAudioSessionId();
597 
598             // 2. create an AudioRecord to sync off of AudioTrack completion.
599             final int RECORD_TIME_IN_MS = 2000;
600             final int RECORD_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
601             final int RECORD_CHANNEL_MASK = AudioFormat.CHANNEL_IN_STEREO;
602             final int RECORD_SAMPLE_RATE = 44100;
603             record = new AudioRecord.Builder()
604                     .setAudioFormat(new AudioFormat.Builder()
605                             .setSampleRate(RECORD_SAMPLE_RATE)
606                             .setChannelMask(RECORD_CHANNEL_MASK)
607                             .setEncoding(RECORD_ENCODING)
608                             .build())
609                     .build();
610             // AudioRecord creation may have silently failed, check state now
611             assertEquals(TEST_NAME, AudioRecord.STATE_INITIALIZED, record.getState());
612 
613             // 3. create a MediaSyncEvent
614             // This MediaSyncEvent checks playback completion of an AudioTrack
615             // (or MediaPlayer, or ToneGenerator) based on its audio session id.
616             //
617             // Note: when synchronizing record from a MediaSyncEvent
618             // (1) You need to be "close" to the end of the associated AudioTrack.
619             // If the track does not complete in 30 seconds, recording begins regardless.
620             // (actual delay limit may vary).
621             //
622             // (2) Track completion may be triggered by pause() as well as stop()
623             // or when a static AudioTrack completes playback.
624             //
625             final int eventType = MediaSyncEvent.SYNC_EVENT_PRESENTATION_COMPLETE;
626             MediaSyncEvent event = MediaSyncEvent.createEvent(eventType)
627                     .setAudioSessionId(trackSessionId);
628             assertEquals(TEST_NAME, trackSessionId, event.getAudioSessionId());
629             assertEquals(TEST_NAME, eventType, event.getType());
630 
631             // 4. now set the AudioTrack playing and start the recording synchronized
632             track.play();
633             // start recording.  Recording state turns to RECORDSTATE_RECORDING immediately
634             // but the data read() only occurs after the AudioTrack completes.
635             record.startRecording(event);
636             assertEquals(TEST_NAME,
637                     AudioRecord.RECORDSTATE_RECORDING, record.getRecordingState());
638             long startTime = System.currentTimeMillis();
639 
640             // 5. get record data.
641             // For our tests, we could set test duration by timed sleep or by # frames received.
642             // Since we don't know *exactly* when AudioRecord actually begins recording,
643             // we end the test by # frames read.
644             final int numChannels =
645                     AudioFormat.channelCountFromInChannelMask(RECORD_CHANNEL_MASK);
646             final int bytesPerSample = AudioFormat.getBytesPerSample(RECORD_ENCODING);
647             final int bytesPerFrame = numChannels * bytesPerSample;
648             // careful about integer overflow in the formula below:
649             final int targetSamples =
650                     (int)((long)RECORD_TIME_IN_MS * RECORD_SAMPLE_RATE * numChannels / 1000);
651             final int BUFFER_FRAMES = 512;
652             final int BUFFER_SAMPLES = BUFFER_FRAMES * numChannels;
653 
654             // After starting, there is no guarantee when the first frame of data is read.
655             long firstSampleTime = 0;
656             int samplesRead = 0;
657 
658             // For 16 bit data, use shorts
659             short[] shortData = new short[BUFFER_SAMPLES];
660             while (samplesRead < targetSamples) {
661                 // the first time through, we read a single frame.
662                 // this sets the recording anchor position.
663                 int amount = samplesRead == 0 ? numChannels :
664                     Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
665                 int ret = record.read(shortData, 0, amount);
666                 assertEquals(TEST_NAME, amount, ret);
667                 if (samplesRead == 0 && ret > 0) {
668                     firstSampleTime = System.currentTimeMillis();
669                 }
670                 samplesRead += ret;
671                 // sanity check: elapsed time cannot be more than a second
672                 // than what we expect.
673                 assertTrue(System.currentTimeMillis() - startTime <=
674                         PLAYBACK_TIME_IN_MS + RECORD_TIME_IN_MS + 1000);
675             }
676 
677             // 6. We've read all the frames, now check the timing.
678             final long endTime = System.currentTimeMillis();
679             //Log.d(TEST_NAME, "first sample time " + (firstSampleTime - startTime)
680             //        + " test time " + (endTime - firstSampleTime));
681             //
682             // Verify recording starts within 400 ms of AudioTrack completion (typical 180ms)
683             // Verify recording completes within 50 ms of expected test time (typical 20ms)
684             assertEquals(TEST_NAME, PLAYBACK_TIME_IN_MS, firstSampleTime - startTime, 400);
685             assertEquals(TEST_NAME, RECORD_TIME_IN_MS, endTime - firstSampleTime, 50);
686 
687             record.stop();
688             assertEquals(TEST_NAME, AudioRecord.RECORDSTATE_STOPPED, record.getRecordingState());
689         } finally {
690             if (record != null) {
691                 record.release();
692                 record = null;
693             }
694             if (track != null) {
695                 track.release();
696                 track = null;
697             }
698         }
699     }
700 
createAudioRecord( int audioSource, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, boolean auditRecording, boolean isChannelIndex)701     private AudioRecord createAudioRecord(
702             int audioSource, int sampleRateInHz,
703             int channelConfig, int audioFormat, int bufferSizeInBytes,
704             boolean auditRecording, boolean isChannelIndex) {
705         final AudioRecord record;
706         if (auditRecording) {
707             record = new AudioHelper.AudioRecordAudit(
708                     audioSource, sampleRateInHz, channelConfig,
709                     audioFormat, bufferSizeInBytes, isChannelIndex);
710         } else if (isChannelIndex) {
711             record = new AudioRecord.Builder()
712                     .setAudioFormat(new AudioFormat.Builder()
713                             .setChannelIndexMask(channelConfig)
714                             .setEncoding(audioFormat)
715                             .setSampleRate(sampleRateInHz)
716                             .build())
717                     .setBufferSizeInBytes(bufferSizeInBytes)
718                     .build();
719         } else {
720             record = new AudioRecord(audioSource, sampleRateInHz, channelConfig,
721                     audioFormat, bufferSizeInBytes);
722         }
723 
724         // did we get the AudioRecord we expected?
725         final AudioFormat format = record.getFormat();
726         assertEquals(isChannelIndex ? channelConfig : AudioFormat.CHANNEL_INVALID,
727                 format.getChannelIndexMask());
728         assertEquals(isChannelIndex ? AudioFormat.CHANNEL_INVALID : channelConfig,
729                 format.getChannelMask());
730         assertEquals(audioFormat, format.getEncoding());
731         assertEquals(sampleRateInHz, format.getSampleRate());
732         final int frameSize =
733                 format.getChannelCount() * AudioFormat.getBytesPerSample(audioFormat);
734         // our native frame count cannot be smaller than our minimum buffer size request.
735         assertTrue(record.getBufferSizeInFrames() * frameSize >= bufferSizeInBytes);
736         return record;
737     }
738 
    /**
     * Core parameterized recording test: creates an AudioRecord (locally or on a
     * separate looper thread), records for a fixed duration while reading via the
     * requested API (ByteBuffer, byte[], short[] or float[]; blocking or not),
     * then verifies timing, timestamps, and marker/periodic notification counts,
     * and submits timing metrics to the CTS device report log.
     *
     * @param reportName name used for the DeviceReportLog stream
     * @param localRecord create the AudioRecord on this thread (true) or via
     *        an async helper on another thread's looper (false)
     * @param customHandler attach the position-update listener with mHandler
     *        instead of the default handler
     * @param periodsPerSecond periodic notification rate (0 disables)
     * @param markerPeriodsPerSecond marker notification rate (0 disables)
     * @param useByteBuffer read through the direct ByteBuffer API
     * @param blocking use blocking reads (non-blocking reads may return short)
     * @param auditRecording use the auditing record (longer 60 s run; timing
     *        checks are skipped because playback perturbs them)
     * @param isChannelIndex TEST_CONF is a channel index mask
     * @param TEST_SR sample rate under test
     * @param TEST_CONF channel mask under test
     * @param TEST_FORMAT PCM encoding under test
     * @throws Exception on interruption during the post-stop sleeps
     */
    private void doTest(String reportName, boolean localRecord, boolean customHandler,
            int periodsPerSecond, int markerPeriodsPerSecond,
            boolean useByteBuffer, boolean blocking,
            final boolean auditRecording, final boolean isChannelIndex,
            final int TEST_SR, final int TEST_CONF, final int TEST_FORMAT) throws Exception {
        if (!hasMicrophone()) {
            return;
        }
        // audit recording plays back recorded audio, so use longer test timing
        final int TEST_TIME_MS = auditRecording ? 60000 : 2000;
        final int TEST_SOURCE = MediaRecorder.AudioSource.DEFAULT;
        mIsHandleMessageCalled = false;

        // For channelIndex use one frame in bytes for buffer size.
        // This is adjusted to the minimum buffer size by native code.
        final int bufferSizeInBytes = isChannelIndex ?
                (AudioFormat.getBytesPerSample(TEST_FORMAT)
                        * AudioFormat.channelCountFromInChannelMask(TEST_CONF)) :
                AudioRecord.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);
        assertTrue(bufferSizeInBytes > 0);

        final AudioRecord record;
        final AudioHelper
                .MakeSomethingAsynchronouslyAndLoop<AudioRecord> makeSomething;

        if (localRecord) {
            makeSomething = null;
            record = createAudioRecord(TEST_SOURCE, TEST_SR, TEST_CONF,
                    TEST_FORMAT, bufferSizeInBytes, auditRecording, isChannelIndex);
        } else {
            makeSomething =
                    new AudioHelper.MakeSomethingAsynchronouslyAndLoop<AudioRecord>(
                            new AudioHelper.MakesSomething<AudioRecord>() {
                                @Override
                                public AudioRecord makeSomething() {
                                    return createAudioRecord(TEST_SOURCE, TEST_SR, TEST_CONF,
                                            TEST_FORMAT, bufferSizeInBytes, auditRecording,
                                            isChannelIndex);
                                }
                            }
                            );
           // create AudioRecord on different thread's looper.
           record = makeSomething.make();
        }

        // AudioRecord creation may have silently failed, check state now
        assertEquals(AudioRecord.STATE_INITIALIZED, record.getState());

        final MockOnRecordPositionUpdateListener listener;
        if (customHandler) {
            listener = new MockOnRecordPositionUpdateListener(record, mHandler);
        } else {
            listener = new MockOnRecordPositionUpdateListener(record);
        }

        final int updatePeriodInFrames = (periodsPerSecond == 0)
                ? 0 : TEST_SR / periodsPerSecond;
        // After starting, there is no guarantee when the first frame of data is read.
        long firstSampleTime = 0;

        // blank final variables: all successful paths will initialize the times.
        // this must be declared here for visibility as they are set within the try block.
        final long endTime;
        final long startTime;
        final long stopRequestTime;
        final long stopTime;
        final long coldInputStartTime;

        try {
            if (markerPeriodsPerSecond != 0) {
                mMarkerPeriodInFrames = TEST_SR / markerPeriodsPerSecond;
                mMarkerPosition = mMarkerPeriodInFrames;
                assertEquals(AudioRecord.SUCCESS,
                        record.setNotificationMarkerPosition(mMarkerPosition));
            } else {
                mMarkerPeriodInFrames = 0;
            }

            assertEquals(AudioRecord.SUCCESS,
                    record.setPositionNotificationPeriod(updatePeriodInFrames));

            // at the start, there is no timestamp.
            AudioTimestamp startTs = new AudioTimestamp();
            assertEquals(AudioRecord.ERROR_INVALID_OPERATION,
                    record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC));

            listener.start(TEST_SR);
            record.startRecording();
            assertEquals(AudioRecord.RECORDSTATE_RECORDING, record.getRecordingState());
            startTime = System.currentTimeMillis();

            // For our tests, we could set test duration by timed sleep or by # frames received.
            // Since we don't know *exactly* when AudioRecord actually begins recording,
            // we end the test by # frames read.
            final int numChannels =  AudioFormat.channelCountFromInChannelMask(TEST_CONF);
            final int bytesPerSample = AudioFormat.getBytesPerSample(TEST_FORMAT);
            final int bytesPerFrame = numChannels * bytesPerSample;
            // careful about integer overflow in the formula below:
            final int targetFrames = (int)((long)TEST_TIME_MS * TEST_SR / 1000);
            final int targetSamples = targetFrames * numChannels;
            final int BUFFER_FRAMES = 512;
            final int BUFFER_SAMPLES = BUFFER_FRAMES * numChannels;
            // TODO: verify behavior when buffer size is not a multiple of frame size.

            // startTimeAtFrame stays 0 until the first successful getTimestamp(),
            // recorded as a frame position so the timestamp can be sanity-checked later.
            int startTimeAtFrame = 0;
            int samplesRead = 0;
            if (useByteBuffer) {
                ByteBuffer byteBuffer =
                        ByteBuffer.allocateDirect(BUFFER_SAMPLES * bytesPerSample);
                while (samplesRead < targetSamples) {
                    // the first time through, we read a single frame.
                    // this sets the recording anchor position.
                    int amount = samplesRead == 0 ? numChannels :
                        Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                    amount *= bytesPerSample;    // in bytes
                    // read always places data at the start of the byte buffer with
                    // position and limit are ignored.  test this by setting
                    // position and limit to arbitrary values here.
                    final int lastPosition = 7;
                    final int lastLimit = 13;
                    byteBuffer.position(lastPosition);
                    byteBuffer.limit(lastLimit);
                    int ret = blocking ? record.read(byteBuffer, amount) :
                        record.read(byteBuffer, amount, AudioRecord.READ_NON_BLOCKING);
                    // so long as amount requested in bytes is a multiple of the frame size
                    // we expect the byte buffer request to be filled.  Caution: the
                    // byte buffer data will be in native endian order, not Java order.
                    if (blocking) {
                        assertEquals(amount, ret);
                    } else {
                        assertTrue("0 <= " + ret + " <= " + amount,
                                0 <= ret && ret <= amount);
                    }
                    // position, limit are not changed by read().
                    assertEquals(lastPosition, byteBuffer.position());
                    assertEquals(lastLimit, byteBuffer.limit());
                    if (samplesRead == 0 && ret > 0) {
                        firstSampleTime = System.currentTimeMillis();
                    }
                    samplesRead += ret / bytesPerSample;
                    if (startTimeAtFrame == 0 && ret > 0 &&
                            record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC) ==
                            AudioRecord.SUCCESS) {
                        startTimeAtFrame = samplesRead / numChannels;
                    }
                }
            } else {
                switch (TEST_FORMAT) {
                case AudioFormat.ENCODING_PCM_8BIT: {
                    // For 8 bit data, use bytes
                    byte[] byteData = new byte[BUFFER_SAMPLES];
                    while (samplesRead < targetSamples) {
                        // the first time through, we read a single frame.
                        // this sets the recording anchor position.
                        int amount = samplesRead == 0 ? numChannels :
                            Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                        int ret = blocking ? record.read(byteData, 0, amount) :
                            record.read(byteData, 0, amount, AudioRecord.READ_NON_BLOCKING);
                        if (blocking) {
                            assertEquals(amount, ret);
                        } else {
                            assertTrue("0 <= " + ret + " <= " + amount,
                                    0 <= ret && ret <= amount);
                        }
                        if (samplesRead == 0 && ret > 0) {
                            firstSampleTime = System.currentTimeMillis();
                        }
                        samplesRead += ret;
                        if (startTimeAtFrame == 0 && ret > 0 &&
                                record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC) ==
                                AudioRecord.SUCCESS) {
                            startTimeAtFrame = samplesRead / numChannels;
                        }
                    }
                } break;
                case AudioFormat.ENCODING_PCM_16BIT: {
                    // For 16 bit data, use shorts
                    short[] shortData = new short[BUFFER_SAMPLES];
                    while (samplesRead < targetSamples) {
                        // the first time through, we read a single frame.
                        // this sets the recording anchor position.
                        int amount = samplesRead == 0 ? numChannels :
                            Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                        int ret = blocking ? record.read(shortData, 0, amount) :
                            record.read(shortData, 0, amount, AudioRecord.READ_NON_BLOCKING);
                        if (blocking) {
                            assertEquals(amount, ret);
                        } else {
                            assertTrue("0 <= " + ret + " <= " + amount,
                                    0 <= ret && ret <= amount);
                        }
                        if (samplesRead == 0 && ret > 0) {
                            firstSampleTime = System.currentTimeMillis();
                        }
                        samplesRead += ret;
                        if (startTimeAtFrame == 0 && ret > 0 &&
                                record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC) ==
                                AudioRecord.SUCCESS) {
                            startTimeAtFrame = samplesRead / numChannels;
                        }
                    }
                } break;
                case AudioFormat.ENCODING_PCM_FLOAT: {
                    float[] floatData = new float[BUFFER_SAMPLES];
                    while (samplesRead < targetSamples) {
                        // the first time through, we read a single frame.
                        // this sets the recording anchor position.
                        int amount = samplesRead == 0 ? numChannels :
                            Math.min(BUFFER_SAMPLES, targetSamples - samplesRead);
                        int ret = record.read(floatData, 0, amount, blocking ?
                                AudioRecord.READ_BLOCKING : AudioRecord.READ_NON_BLOCKING);
                        if (blocking) {
                            assertEquals(amount, ret);
                        } else {
                            assertTrue("0 <= " + ret + " <= " + amount,
                                    0 <= ret && ret <= amount);
                        }
                        if (samplesRead == 0 && ret > 0) {
                            firstSampleTime = System.currentTimeMillis();
                        }
                        samplesRead += ret;
                        if (startTimeAtFrame == 0 && ret > 0 &&
                                record.getTimestamp(startTs, AudioTimestamp.TIMEBASE_MONOTONIC) ==
                                AudioRecord.SUCCESS) {
                            startTimeAtFrame = samplesRead / numChannels;
                        }
                    }
                } break;
                }
                // NOTE(review): an unrecognized TEST_FORMAT falls through this switch
                // without reading anything (samplesRead stays 0); presumably callers
                // only pass the three encodings above — confirm at call sites.
            }

            // We've read all the frames, now check the record timing.
            endTime = System.currentTimeMillis();

            coldInputStartTime = firstSampleTime - startTime;
            //Log.d(TAG, "first sample time " + coldInputStartTime
            //        + " test time " + (endTime - firstSampleTime));

            if (coldInputStartTime > 200) {
                Log.w(TAG, "cold input start time way too long "
                        + coldInputStartTime + " > 200ms");
            } else if (coldInputStartTime > 100) {
                Log.w(TAG, "cold input start time too long "
                        + coldInputStartTime + " > 100ms");
            }
            assertTrue(coldInputStartTime < 5000); // must start within 5 seconds.

            // Verify recording completes within 50 ms of expected test time (typical 20ms)
            assertEquals(TEST_TIME_MS, endTime - firstSampleTime, auditRecording ? 1000 : 50);

            // Even though we've read all the frames we want, the events may not be sent to
            // the listeners (events are handled through a separate internal callback thread).
            // One must sleep to make sure the last event(s) come in.
            Thread.sleep(30);

            stopRequestTime = System.currentTimeMillis();
            record.stop();
            assertEquals(AudioRecord.RECORDSTATE_STOPPED, record.getRecordingState());

            stopTime = System.currentTimeMillis();

            // stop listening - we should be done.
            // Caution M behavior and likely much earlier:
            // we assume no events can happen after stop(), but this may not
            // always be true as stop can take 100ms to complete (as it may disable
            // input recording on the hal); thus the event handler may be block with
            // valid events, issuing right after stop completes. Except for those events,
            // no other events should show up after stop.
            // This behavior may change in the future but we account for it here in testing.
            final long SLEEP_AFTER_STOP_FOR_EVENTS_MS = 30;
            Thread.sleep(SLEEP_AFTER_STOP_FOR_EVENTS_MS);
            listener.stop();

            // get stop timestamp
            AudioTimestamp stopTs = new AudioTimestamp();
            assertEquals(AudioRecord.SUCCESS,
                    record.getTimestamp(stopTs, AudioTimestamp.TIMEBASE_MONOTONIC));
            AudioTimestamp stopTsBoot = new AudioTimestamp();
            assertEquals(AudioRecord.SUCCESS,
                    record.getTimestamp(stopTsBoot, AudioTimestamp.TIMEBASE_BOOTTIME));

            // printTimestamp("startTs", startTs);
            // printTimestamp("stopTs", stopTs);
            // printTimestamp("stopTsBoot", stopTsBoot);
            // Log.d(TAG, "time Monotonic " + System.nanoTime());
            // Log.d(TAG, "time Boottime " + SystemClock.elapsedRealtimeNanos());

            // stop should not reset timestamps
            assertTrue(stopTs.framePosition >= targetFrames);
            assertEquals(stopTs.framePosition, stopTsBoot.framePosition);
            assertTrue(stopTs.nanoTime > 0);

            // timestamps follow a different path than data, so it is conceivable
            // that first data arrives before the first timestamp is ready.
            assertTrue(startTimeAtFrame > 0); // we read a start timestamp

            verifyContinuousTimestamps(startTs, stopTs, TEST_SR);

            // clean up
            if (makeSomething != null) {
                makeSomething.join();
            }

        } finally {
            listener.release();
            // we must release the record immediately as it is a system-wide
            // resource needed for other tests.
            record.release();
        }
        if (auditRecording) { // don't check timing if auditing (messes up timing)
            return;
        }
        // Expected notification counts over TEST_TIME_MS, with an upper bound based
        // on the actual recording span (stopTime - firstSampleTime) plus one extra.
        final int markerPeriods = markerPeriodsPerSecond * TEST_TIME_MS / 1000;
        final int updatePeriods = periodsPerSecond * TEST_TIME_MS / 1000;
        final int markerPeriodsMax =
                markerPeriodsPerSecond * (int)(stopTime - firstSampleTime) / 1000 + 1;
        final int updatePeriodsMax =
                periodsPerSecond * (int)(stopTime - firstSampleTime) / 1000 + 1;

        // collect statistics
        final ArrayList<Integer> markerList = listener.getMarkerList();
        final ArrayList<Integer> periodicList = listener.getPeriodicList();
        // verify count of markers and periodic notifications.
        // there could be an extra notification since we don't stop() immediately
        // rather wait for potential events to come in.
        //Log.d(TAG, "markerPeriods " + markerPeriods +
        //        " markerPeriodsReceived " + markerList.size());
        //Log.d(TAG, "updatePeriods " + updatePeriods +
        //        " updatePeriodsReceived " + periodicList.size());
        assertTrue(TAG + ": markerPeriods " + markerPeriods +
                " <= markerPeriodsReceived " + markerList.size() +
                " <= markerPeriodsMax " + markerPeriodsMax,
                markerPeriods <= markerList.size()
                && markerList.size() <= markerPeriodsMax);
        assertTrue(TAG + ": updatePeriods " + updatePeriods +
               " <= updatePeriodsReceived " + periodicList.size() +
               " <= updatePeriodsMax " + updatePeriodsMax,
                updatePeriods <= periodicList.size()
                && periodicList.size() <= updatePeriodsMax);

        // Since we don't have accurate positioning of the start time of the recorder,
        // and there is no record.getPosition(), we consider only differential timing
        // from the first marker or periodic event.
        final int toleranceInFrames = TEST_SR * 80 / 1000; // 80 ms
        final int testTimeInFrames = (int)((long)TEST_TIME_MS * TEST_SR / 1000);

        AudioHelper.Statistics markerStat = new AudioHelper.Statistics();
        for (int i = 1; i < markerList.size(); ++i) {
            final int expected = mMarkerPeriodInFrames * i;
            if (markerList.get(i) > testTimeInFrames) {
                break; // don't consider any notifications when we might be stopping.
            }
            final int actual = markerList.get(i) - markerList.get(0);
            //Log.d(TAG, "Marker: " + i + " expected(" + expected + ")  actual(" + actual
            //        + ")  diff(" + (actual - expected) + ")"
            //        + " tolerance " + toleranceInFrames);
            assertEquals(expected, actual, toleranceInFrames);
            markerStat.add((double)(actual - expected) * 1000 / TEST_SR);
        }

        AudioHelper.Statistics periodicStat = new AudioHelper.Statistics();
        for (int i = 1; i < periodicList.size(); ++i) {
            final int expected = updatePeriodInFrames * i;
            if (periodicList.get(i) > testTimeInFrames) {
                break; // don't consider any notifications when we might be stopping.
            }
            final int actual = periodicList.get(i) - periodicList.get(0);
            //Log.d(TAG, "Update: " + i + " expected(" + expected + ")  actual(" + actual
            //        + ")  diff(" + (actual - expected) + ")"
            //        + " tolerance " + toleranceInFrames);
            assertEquals(expected, actual, toleranceInFrames);
            periodicStat.add((double)(actual - expected) * 1000 / TEST_SR);
        }

        // report this
        DeviceReportLog log = new DeviceReportLog(REPORT_LOG_NAME, reportName);
        log.addValue("start_recording_lag", coldInputStartTime, ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("stop_execution_time", stopTime - stopRequestTime, ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("total_record_time_expected", TEST_TIME_MS, ResultType.NEUTRAL, ResultUnit.MS);
        log.addValue("total_record_time_actual", endTime - firstSampleTime, ResultType.NEUTRAL,
                ResultUnit.MS);
        log.addValue("total_markers_expected", markerPeriods, ResultType.NEUTRAL, ResultUnit.COUNT);
        log.addValue("total_markers_actual", markerList.size(), ResultType.NEUTRAL,
                ResultUnit.COUNT);
        log.addValue("total_periods_expected", updatePeriods, ResultType.NEUTRAL, ResultUnit.COUNT);
        log.addValue("total_periods_actual", periodicList.size(), ResultType.NEUTRAL,
                ResultUnit.COUNT);
        log.addValue("average_marker_diff", markerStat.getAvg(), ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("maximum_marker_abs_diff", markerStat.getMaxAbs(), ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("average_marker_abs_diff", markerStat.getAvgAbs(), ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("average_periodic_diff", periodicStat.getAvg(), ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("maximum_periodic_abs_diff", periodicStat.getMaxAbs(), ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.addValue("average_periodic_abs_diff", periodicStat.getAvgAbs(), ResultType.LOWER_BETTER,
                ResultUnit.MS);
        log.setSummary("unified_abs_diff", (periodicStat.getAvgAbs() + markerStat.getAvgAbs()) / 2,
                ResultType.LOWER_BETTER, ResultUnit.MS);
        log.submit(getInstrumentation());
    }
1144 
1145     private class MockOnRecordPositionUpdateListener
1146                                         implements OnRecordPositionUpdateListener {
        /**
         * Attaches this mock listener to {@code record} using the default
         * callback handler.
         */
        public MockOnRecordPositionUpdateListener(AudioRecord record) {
            mAudioRecord = record;
            record.setRecordPositionUpdateListener(this);
        }
1151 
        /**
         * Attaches this mock listener to {@code record}, delivering callbacks
         * on the supplied {@code handler}'s thread.
         */
        public MockOnRecordPositionUpdateListener(AudioRecord record, Handler handler) {
            mAudioRecord = record;
            record.setRecordPositionUpdateListener(this, handler);
        }
1156 
onMarkerReached(AudioRecord record)1157         public synchronized void onMarkerReached(AudioRecord record) {
1158             if (mIsTestActive) {
1159                 int position = getPosition();
1160                 mOnMarkerReachedCalled.add(position);
1161                 mMarkerPosition += mMarkerPeriodInFrames;
1162                 assertEquals(AudioRecord.SUCCESS,
1163                         mAudioRecord.setNotificationMarkerPosition(mMarkerPosition));
1164             } else {
1165                 // see comment on stop()
1166                 final long delta = System.currentTimeMillis() - mStopTime;
1167                 Log.d(TAG, "onMarkerReached called " + delta + " ms after stop");
1168                 fail("onMarkerReached called when not active");
1169             }
1170         }
1171 
onPeriodicNotification(AudioRecord record)1172         public synchronized void onPeriodicNotification(AudioRecord record) {
1173             if (mIsTestActive) {
1174                 int position = getPosition();
1175                 mOnPeriodicNotificationCalled.add(position);
1176             } else {
1177                 // see comment on stop()
1178                 final long delta = System.currentTimeMillis() - mStopTime;
1179                 Log.d(TAG, "onPeriodicNotification called " + delta + " ms after stop");
1180                 fail("onPeriodicNotification called when not active");
1181             }
1182         }
1183 
start(int sampleRate)1184         public synchronized void start(int sampleRate) {
1185             mIsTestActive = true;
1186             mSampleRate = sampleRate;
1187             mStartTime = System.currentTimeMillis();
1188         }
1189 
stop()1190         public synchronized void stop() {
1191             // the listener should be stopped some time after AudioRecord is stopped
1192             // as some messages may not yet be posted.
1193             mIsTestActive = false;
1194             mStopTime = System.currentTimeMillis();
1195         }
1196 
getMarkerList()1197         public ArrayList<Integer> getMarkerList() {
1198             return mOnMarkerReachedCalled;
1199         }
1200 
getPeriodicList()1201         public ArrayList<Integer> getPeriodicList() {
1202             return mOnPeriodicNotificationCalled;
1203         }
1204 
release()1205         public synchronized void release() {
1206             stop();
1207             mAudioRecord.setRecordPositionUpdateListener(null);
1208             mAudioRecord = null;
1209         }
1210 
getPosition()1211         private int getPosition() {
1212             // we don't have mAudioRecord.getRecordPosition();
1213             // so we fake this by timing.
1214             long delta = System.currentTimeMillis() - mStartTime;
1215             return (int)(delta * mSampleRate / 1000);
1216         }
1217 
1218         private long mStartTime;
1219         private long mStopTime;
1220         private int mSampleRate;
1221         private boolean mIsTestActive = true;
1222         private AudioRecord mAudioRecord;
1223         private ArrayList<Integer> mOnMarkerReachedCalled = new ArrayList<Integer>();
1224         private ArrayList<Integer> mOnPeriodicNotificationCalled = new ArrayList<Integer>();
1225     }
1226 
hasMicrophone()1227     private boolean hasMicrophone() {
1228         return getContext().getPackageManager().hasSystemFeature(
1229                 PackageManager.FEATURE_MICROPHONE);
1230     }
1231 
isLowRamDevice()1232     private boolean isLowRamDevice() {
1233         return ((ActivityManager) getContext().getSystemService(Context.ACTIVITY_SERVICE))
1234                 .isLowRamDevice();
1235     }
1236 
verifyContinuousTimestamps( AudioTimestamp startTs, AudioTimestamp stopTs, int sampleRate)1237     private void verifyContinuousTimestamps(
1238             AudioTimestamp startTs, AudioTimestamp stopTs, int sampleRate)
1239             throws Exception {
1240         final long timeDiff = stopTs.nanoTime - startTs.nanoTime;
1241         final long frameDiff = stopTs.framePosition - startTs.framePosition;
1242         final long NANOS_PER_SECOND = 1000000000;
1243         final long timeByFrames = frameDiff * NANOS_PER_SECOND / sampleRate;
1244         final double ratio = (double)timeDiff / timeByFrames;
1245 
1246         // Usually the ratio is accurate to one part per thousand or better.
1247         // Log.d(TAG, "ratio=" + ratio + ", timeDiff=" + timeDiff + ", frameDiff=" + frameDiff +
1248         //        ", timeByFrames=" + timeByFrames + ", sampleRate=" + sampleRate);
1249         assertEquals(1.0 /* expected */, ratio, 0.01 /* delta */);
1250     }
1251 
1252     // remove if AudioTimestamp has a better toString().
printTimestamp(String s, AudioTimestamp ats)1253     private void printTimestamp(String s, AudioTimestamp ats) {
1254         Log.d(TAG, s + ":  pos: " + ats.framePosition + "  time: " + ats.nanoTime);
1255     }
1256 }
1257