1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.media;
18 
19 import java.lang.annotation.Retention;
20 import java.lang.annotation.RetentionPolicy;
21 import java.lang.ref.WeakReference;
22 import java.lang.Math;
23 import java.nio.ByteBuffer;
24 import java.nio.ByteOrder;
25 import java.nio.NioUtils;
26 import java.util.Collection;
27 
28 import android.annotation.IntDef;
29 import android.annotation.NonNull;
30 import android.annotation.Nullable;
31 import android.app.ActivityThread;
32 import android.content.Context;
33 import android.os.Handler;
34 import android.os.IBinder;
35 import android.os.Looper;
36 import android.os.Message;
37 import android.os.Process;
38 import android.os.RemoteException;
39 import android.os.ServiceManager;
40 import android.util.ArrayMap;
41 import android.util.Log;
42 
43 import com.android.internal.annotations.GuardedBy;
44 
45 /**
46  * The AudioTrack class manages and plays a single audio resource for Java applications.
47  * It allows streaming of PCM audio buffers to the audio sink for playback. This is
48  * achieved by "pushing" the data to the AudioTrack object using one of the
49  *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
50  *  and {@link #write(float[], int, int, int)} methods.
51  *
52  * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
53  * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
54  * one of the {@code write()} methods. These are blocking and return when the data has been
55  * transferred from the Java layer to the native layer and queued for playback. The streaming
56  * mode is most useful when playing blocks of audio data that for instance are:
57  *
58  * <ul>
59  *   <li>too big to fit in memory because of the duration of the sound to play,</li>
60  *   <li>too big to fit in memory because of the characteristics of the audio data
61  *         (high sampling rate, bits per sample ...)</li>
62  *   <li>received or generated while previously queued audio is playing.</li>
63  * </ul>
64  *
65  * The static mode should be chosen when dealing with short sounds that fit in memory and
66  * that need to be played with the smallest latency possible. The static mode will
67  * therefore be preferred for UI and game sounds that are played often, and with the
68  * smallest overhead possible.
69  *
70  * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
71  * The size of this buffer, specified during the construction, determines how long an AudioTrack
72  * can play before running out of data.<br>
73  * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
74  * be played from it.<br>
75  * For the streaming mode, data will be written to the audio sink in chunks of
76  * sizes less than or equal to the total buffer size.
77  *
78  * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
79  */
80 public class AudioTrack extends PlayerBase
81                         implements AudioRouting
82                                  , VolumeAutomation
83 {
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** Maximum value for AudioTrack channel count
     * @hide public for MediaCode only, do not un-hide or change to a numeric literal
     */
    // Queried from the native layer (FCC_8) so Java stays in sync with the platform limit.
    public static final int CHANNEL_COUNT_MAX = native_get_FCC_8();

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    // Status/error codes are aliases of the AudioSystem values so Java and native agree.
    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     */
    public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(ByteBuffer, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    /** @hide */
    @IntDef({
        PERFORMANCE_MODE_NONE,
        PERFORMANCE_MODE_LOW_LATENCY,
        PERFORMANCE_MODE_POWER_SAVING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface PerformanceMode {}

    /**
     * Default performance mode for an {@link AudioTrack}.
     */
    public static final int PERFORMANCE_MODE_NONE = 0;

    /**
     * Low latency performance mode for an {@link AudioTrack}.
     * If the device supports it, this mode
     * enables a lower latency path through to the audio output sink.
     * Effects may no longer work with such an {@code AudioTrack} and
     * the sample rate must match that of the output sink.
     * <p>
     * Applications should be aware that low latency requires careful
     * buffer management, with smaller chunks of audio data written by each
     * {@code write()} call.
     * <p>
     * If this flag is used without specifying a {@code bufferSizeInBytes} then the
     * {@code AudioTrack}'s actual buffer size may be too small.
     * It is recommended that a fairly
     * large buffer should be specified when the {@code AudioTrack} is created.
     * Then the actual size can be reduced by calling
     * {@link #setBufferSizeInFrames(int)}. The buffer size can be optimized
     * by lowering it after each {@code write()} call until the audio glitches,
     * which is detected by calling
     * {@link #getUnderrunCount()}. Then the buffer size can be increased
     * until there are no glitches.
     * This tuning step should be done while playing silence.
     * This technique provides a compromise between latency and glitch rate.
     */
    public static final int PERFORMANCE_MODE_LOW_LATENCY = 1;

    /**
     * Power saving performance mode for an {@link AudioTrack}.
     * If the device supports it, this
     * mode will enable a lower power path to the audio output sink.
     * In addition, this lower power path typically will have
     * deeper internal buffers and better underrun resistance,
     * with a tradeoff of higher latency.
     * <p>
     * In this mode, applications should attempt to use a larger buffer size
     * and deliver larger chunks of audio data per {@code write()} call.
     * Use {@link #getBufferSizeInFrames()} to determine
     * the actual buffer size of the {@code AudioTrack} as it may have increased
     * to accommodate a deeper buffer.
     */
    public static final int PERFORMANCE_MODE_POWER_SAVING = 2;

    // keep in sync with system/media/audio/include/system/audio-base.h
    // Native output flags used when mapping performance modes to the HAL.
    private static final int AUDIO_OUTPUT_FLAG_FAST = 0x4;
    private static final int AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8;
278 
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     * Falls back to the main looper when the creating thread has none.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;

    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
    /**
     * Audio session ID
     */
    private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * @hide
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    protected long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    private long mJniData;

384 
385     //--------------------------------------------------------------------------
386     // Constructor, Finalize
387     //--------------------
388     /**
389      * Class constructor.
390      * @param streamType the type of the audio stream. See
391      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
392      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
393      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
394      * @param sampleRateInHz the initial source sample rate expressed in Hz.
395      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
396      *   which is usually the sample rate of the sink.
397      *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
398      * @param channelConfig describes the configuration of the audio channels.
399      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
400      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
401      * @param audioFormat the format in which the audio data is represented.
402      *   See {@link AudioFormat#ENCODING_PCM_16BIT},
403      *   {@link AudioFormat#ENCODING_PCM_8BIT},
404      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
405      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
406      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
407      *   <p> If the track's creation mode is {@link #MODE_STATIC},
408      *   this is the maximum length sample, or audio clip, that can be played by this instance.
409      *   <p> If the track's creation mode is {@link #MODE_STREAM},
410      *   this should be the desired buffer size
411      *   for the <code>AudioTrack</code> to satisfy the application's
412      *   latency requirements.
413      *   If <code>bufferSizeInBytes</code> is less than the
414      *   minimum buffer size for the output sink, it is increased to the minimum
415      *   buffer size.
416      *   The method {@link #getBufferSizeInFrames()} returns the
417      *   actual size in frames of the buffer created, which
418      *   determines the minimum frequency to write
419      *   to the streaming <code>AudioTrack</code> to avoid underrun.
420      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
421      *   for an AudioTrack instance in streaming mode.
422      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
423      * @throws java.lang.IllegalArgumentException
424      * @deprecated use {@link Builder} or
425      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
426      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
427      */
AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode)428     public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
429             int bufferSizeInBytes, int mode)
430     throws IllegalArgumentException {
431         this(streamType, sampleRateInHz, channelConfig, audioFormat,
432                 bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
433     }
434 
435     /**
436      * Class constructor with audio session. Use this constructor when the AudioTrack must be
437      * attached to a particular audio session. The primary use of the audio session ID is to
438      * associate audio effects to a particular instance of AudioTrack: if an audio session ID
439      * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
440      * and media players in the same session and not to the output mix.
441      * When an AudioTrack is created without specifying a session, it will create its own session
442      * which can be retrieved by calling the {@link #getAudioSessionId()} method.
443      * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
444      * session
445      * with all other media players or audio tracks in the same session, otherwise a new session
446      * will be created for this track if none is supplied.
447      * @param streamType the type of the audio stream. See
448      *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
449      *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
450      *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
451      * @param sampleRateInHz the initial source sample rate expressed in Hz.
452      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
453      *   which is usually the sample rate of the sink.
454      * @param channelConfig describes the configuration of the audio channels.
455      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
456      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
457      * @param audioFormat the format in which the audio data is represented.
458      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
459      *   {@link AudioFormat#ENCODING_PCM_8BIT},
460      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
461      * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
462      *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
463      *   <p> If the track's creation mode is {@link #MODE_STATIC},
464      *   this is the maximum length sample, or audio clip, that can be played by this instance.
465      *   <p> If the track's creation mode is {@link #MODE_STREAM},
466      *   this should be the desired buffer size
467      *   for the <code>AudioTrack</code> to satisfy the application's
468      *   latency requirements.
469      *   If <code>bufferSizeInBytes</code> is less than the
470      *   minimum buffer size for the output sink, it is increased to the minimum
471      *   buffer size.
472      *   The method {@link #getBufferSizeInFrames()} returns the
473      *   actual size in frames of the buffer created, which
474      *   determines the minimum frequency to write
475      *   to the streaming <code>AudioTrack</code> to avoid underrun.
476      *   You can write data into this buffer in smaller chunks than this size.
477      *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
478      *   for an AudioTrack instance in streaming mode.
479      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
480      * @param sessionId Id of audio session the AudioTrack must be attached to
481      * @throws java.lang.IllegalArgumentException
482      * @deprecated use {@link Builder} or
483      *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
484      *   {@link AudioAttributes} instead of the stream type which is only for volume control.
485      */
AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat, int bufferSizeInBytes, int mode, int sessionId)486     public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
487             int bufferSizeInBytes, int mode, int sessionId)
488     throws IllegalArgumentException {
489         // mState already == STATE_UNINITIALIZED
490         this((new AudioAttributes.Builder())
491                     .setLegacyStreamType(streamType)
492                     .build(),
493                 (new AudioFormat.Builder())
494                     .setChannelMask(channelConfig)
495                     .setEncoding(audioFormat)
496                     .setSampleRate(sampleRateInHz)
497                     .build(),
498                 bufferSizeInBytes,
499                 mode, sessionId);
500         deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
501     }
502 
    /**
     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @param format a non-null {@link AudioFormat} instance describing the format of the data
     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
     *     configuring the audio format parameters such as encoding, channel mask and sample rate.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
     * @param sessionId ID of audio session the AudioTrack must be attached to, or
     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
     *   construction.
     * @throws IllegalArgumentException
     */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
        // mState already == STATE_UNINITIALIZED

        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // Check if we should enable deep buffer mode: swap the low-latency flag for the
        // deep-buffer flag on a rebuilt copy of the attributes (mAttributes set by super()).
        if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
            mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
        }

        // remember which looper is associated with the AudioTrack instantiation
        // (falls back to the main looper when the creating thread has none)
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // 0 signals the native layer to pick a route-dependent sample rate.
        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            rate = 0;
        }

        // Extract only the properties the caller actually set on the AudioFormat.
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates parameters and initializes mSampleRate/mChannelMask/mAudioFormat/etc.,
        // throwing IllegalArgumentException on bad input.
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mStreamType = AudioSystem.STREAM_DEFAULT;

        // Validates the buffer size and initializes mNativeBufferSizeInBytes/-Frames.
        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // Single-element arrays act as in/out parameters for the native call.
        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        // Read back the values the native layer actually chose/assigned.
        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        // A static track is not fully usable until its audio data has been written.
        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }

        baseRegisterPlayer();
    }
614 
615     /**
616      * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
617      * the AudioTrackRoutingProxy subclass.
618      * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
619      * (associated with an OpenSL ES player).
620      * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
621      * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
622      * it means that the OpenSL player interface hasn't been realized, so there is no native
623      * Audiotrack to connect to. In this case wait to call deferred_connect() until the
624      * OpenSLES interface is realized.
625      */
AudioTrack(long nativeTrackInJavaObj)626     /*package*/ AudioTrack(long nativeTrackInJavaObj) {
627         super(new AudioAttributes.Builder().build(),
628                 AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
629         // "final"s
630         mNativeTrackInJavaObj = 0;
631         mJniData = 0;
632 
633         // remember which looper is associated with the AudioTrack instantiation
634         Looper looper;
635         if ((looper = Looper.myLooper()) == null) {
636             looper = Looper.getMainLooper();
637         }
638         mInitializationLooper = looper;
639 
640         // other initialization...
641         if (nativeTrackInJavaObj != 0) {
642             baseRegisterPlayer();
643             deferred_connect(nativeTrackInJavaObj);
644         } else {
645             mState = STATE_UNINITIALIZED;
646         }
647     }
648 
649     /**
650      * @hide
651      */
deferred_connect(long nativeTrackInJavaObj)652     /* package */ void deferred_connect(long nativeTrackInJavaObj) {
653         if (mState != STATE_INITIALIZED) {
654             // Note that for this native_setup, we are providing an already created/initialized
655             // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
656             int[] session = { 0 };
657             int[] rates = { 0 };
658             int initResult = native_setup(new WeakReference<AudioTrack>(this),
659                     null /*mAttributes - NA*/,
660                     rates /*sampleRate - NA*/,
661                     0 /*mChannelMask - NA*/,
662                     0 /*mChannelIndexMask - NA*/,
663                     0 /*mAudioFormat - NA*/,
664                     0 /*mNativeBufferSizeInBytes - NA*/,
665                     0 /*mDataLoadMode - NA*/,
666                     session,
667                     nativeTrackInJavaObj);
668             if (initResult != SUCCESS) {
669                 loge("Error code "+initResult+" when initializing AudioTrack.");
670                 return; // with mState == STATE_UNINITIALIZED
671             }
672 
673             mSessionId = session[0];
674 
675             mState = STATE_INITIALIZED;
676         }
677     }
678 
679     /**
680      * Builder class for {@link AudioTrack} objects.
681      * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
682      * attributes and audio format parameters, you indicate which of those vary from the default
683      * behavior on the device.
684      * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
685      * parameters, to be used by a new <code>AudioTrack</code> instance:
686      *
687      * <pre class="prettyprint">
688      * AudioTrack player = new AudioTrack.Builder()
689      *         .setAudioAttributes(new AudioAttributes.Builder()
690      *                  .setUsage(AudioAttributes.USAGE_ALARM)
691      *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
692      *                  .build())
693      *         .setAudioFormat(new AudioFormat.Builder()
694      *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
695      *                 .setSampleRate(44100)
696      *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
697      *                 .build())
698      *         .setBufferSizeInBytes(minBuffSize)
699      *         .build();
700      * </pre>
701      * <p>
702      * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
703      * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
704      * <br>If the audio format is not specified or is incomplete, its channel configuration will be
705      * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
706      * {@link AudioFormat#ENCODING_PCM_16BIT}.
707      * The sample rate will depend on the device actually selected for playback and can be queried
708      * with {@link #getSampleRate()} method.
709      * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
710      * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
711      * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
712      * <code>MODE_STREAM</code> will be used.
713      * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
714      * be generated.
715      */
716     public static class Builder {
717         private AudioAttributes mAttributes;
718         private AudioFormat mFormat;
719         private int mBufferSizeInBytes;
720         private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
721         private int mMode = MODE_STREAM;
722         private int mPerformanceMode = PERFORMANCE_MODE_NONE;
723 
724         /**
725          * Constructs a new Builder with the default values as described above.
726          */
Builder()727         public Builder() {
728         }
729 
730         /**
731          * Sets the {@link AudioAttributes}.
732          * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
733          *     data to be played.
734          * @return the same Builder instance.
735          * @throws IllegalArgumentException
736          */
setAudioAttributes(@onNull AudioAttributes attributes)737         public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
738                 throws IllegalArgumentException {
739             if (attributes == null) {
740                 throw new IllegalArgumentException("Illegal null AudioAttributes argument");
741             }
742             // keep reference, we only copy the data when building
743             mAttributes = attributes;
744             return this;
745         }
746 
747         /**
748          * Sets the format of the audio data to be played by the {@link AudioTrack}.
749          * See {@link AudioFormat.Builder} for configuring the audio format parameters such
750          * as encoding, channel mask and sample rate.
751          * @param format a non-null {@link AudioFormat} instance.
752          * @return the same Builder instance.
753          * @throws IllegalArgumentException
754          */
setAudioFormat(@onNull AudioFormat format)755         public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
756                 throws IllegalArgumentException {
757             if (format == null) {
758                 throw new IllegalArgumentException("Illegal null AudioFormat argument");
759             }
760             // keep reference, we only copy the data when building
761             mFormat = format;
762             return this;
763         }
764 
765         /**
766          * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
767          * If using the {@link AudioTrack} in streaming mode
768          * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
769          * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
770          * the estimated minimum buffer size for the creation of an AudioTrack instance
771          * in streaming mode.
772          * <br>If using the <code>AudioTrack</code> in static mode (see
773          * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
774          * played by this instance.
775          * @param bufferSizeInBytes
776          * @return the same Builder instance.
777          * @throws IllegalArgumentException
778          */
setBufferSizeInBytes(int bufferSizeInBytes)779         public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes)
780                 throws IllegalArgumentException {
781             if (bufferSizeInBytes <= 0) {
782                 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
783             }
784             mBufferSizeInBytes = bufferSizeInBytes;
785             return this;
786         }
787 
788         /**
789          * Sets the mode under which buffers of audio data are transferred from the
790          * {@link AudioTrack} to the framework.
791          * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
792          * @return the same Builder instance.
793          * @throws IllegalArgumentException
794          */
setTransferMode(@ransferMode int mode)795         public @NonNull Builder setTransferMode(@TransferMode int mode)
796                 throws IllegalArgumentException {
797             switch(mode) {
798                 case MODE_STREAM:
799                 case MODE_STATIC:
800                     mMode = mode;
801                     break;
802                 default:
803                     throw new IllegalArgumentException("Invalid transfer mode " + mode);
804             }
805             return this;
806         }
807 
808         /**
809          * Sets the session ID the {@link AudioTrack} will be attached to.
810          * @param sessionId a strictly positive ID number retrieved from another
811          *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
812          *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
813          *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
814          * @return the same Builder instance.
815          * @throws IllegalArgumentException
816          */
setSessionId(int sessionId)817         public @NonNull Builder setSessionId(int sessionId)
818                 throws IllegalArgumentException {
819             if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
820                 throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
821             }
822             mSessionId = sessionId;
823             return this;
824         }
825 
826         /**
827          * Sets the {@link AudioTrack} performance mode.  This is an advisory request which
828          * may not be supported by the particular device, and the framework is free
829          * to ignore such request if it is incompatible with other requests or hardware.
830          *
831          * @param performanceMode one of
832          * {@link AudioTrack#PERFORMANCE_MODE_NONE},
833          * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
834          * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
835          * @return the same Builder instance.
836          * @throws IllegalArgumentException if {@code performanceMode} is not valid.
837          */
setPerformanceMode(@erformanceMode int performanceMode)838         public @NonNull Builder setPerformanceMode(@PerformanceMode int performanceMode) {
839             switch (performanceMode) {
840                 case PERFORMANCE_MODE_NONE:
841                 case PERFORMANCE_MODE_LOW_LATENCY:
842                 case PERFORMANCE_MODE_POWER_SAVING:
843                     mPerformanceMode = performanceMode;
844                     break;
845                 default:
846                     throw new IllegalArgumentException(
847                             "Invalid performance mode " + performanceMode);
848             }
849             return this;
850         }
851 
852         /**
853          * Builds an {@link AudioTrack} instance initialized with all the parameters set
854          * on this <code>Builder</code>.
855          * @return a new successfully initialized {@link AudioTrack} instance.
856          * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
857          *     were incompatible, or if they are not supported by the device,
858          *     or if the device was not available.
859          */
build()860         public @NonNull AudioTrack build() throws UnsupportedOperationException {
861             if (mAttributes == null) {
862                 mAttributes = new AudioAttributes.Builder()
863                         .setUsage(AudioAttributes.USAGE_MEDIA)
864                         .build();
865             }
866             switch (mPerformanceMode) {
867             case PERFORMANCE_MODE_LOW_LATENCY:
868                 mAttributes = new AudioAttributes.Builder(mAttributes)
869                     .replaceFlags((mAttributes.getAllFlags()
870                             | AudioAttributes.FLAG_LOW_LATENCY)
871                             & ~AudioAttributes.FLAG_DEEP_BUFFER)
872                     .build();
873                 break;
874             case PERFORMANCE_MODE_NONE:
875                 if (!shouldEnablePowerSaving(mAttributes, mFormat, mBufferSizeInBytes, mMode)) {
876                     break; // do not enable deep buffer mode.
877                 }
878                 // permitted to fall through to enable deep buffer
879             case PERFORMANCE_MODE_POWER_SAVING:
880                 mAttributes = new AudioAttributes.Builder(mAttributes)
881                 .replaceFlags((mAttributes.getAllFlags()
882                         | AudioAttributes.FLAG_DEEP_BUFFER)
883                         & ~AudioAttributes.FLAG_LOW_LATENCY)
884                 .build();
885                 break;
886             }
887 
888             if (mFormat == null) {
889                 mFormat = new AudioFormat.Builder()
890                         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
891                         //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED)
892                         .setEncoding(AudioFormat.ENCODING_DEFAULT)
893                         .build();
894             }
895             try {
896                 // If the buffer size is not specified in streaming mode,
897                 // use a single frame for the buffer size and let the
898                 // native code figure out the minimum buffer size.
899                 if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
900                     mBufferSizeInBytes = mFormat.getChannelCount()
901                             * mFormat.getBytesPerSample(mFormat.getEncoding());
902                 }
903                 final AudioTrack track = new AudioTrack(
904                         mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId);
905                 if (track.getState() == STATE_UNINITIALIZED) {
906                     // release is not necessary
907                     throw new UnsupportedOperationException("Cannot create AudioTrack");
908                 }
909                 return track;
910             } catch (IllegalArgumentException e) {
911                 throw new UnsupportedOperationException(e.getMessage());
912             }
913         }
914     }
915 
    // Mask of all the positional output channels supported by this class. Note the
    // allowed combinations are further restricted by isMultichannelConfigSupported():
    // the matching left/right pairing rule and the CHANNEL_COUNT_MAX limit.
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
928 
929     // Returns a boolean whether the attributes, format, bufferSizeInBytes, mode allow
930     // power saving to be automatically enabled for an AudioTrack. Returns false if
931     // power saving is already enabled in the attributes parameter.
shouldEnablePowerSaving( @ullable AudioAttributes attributes, @Nullable AudioFormat format, int bufferSizeInBytes, int mode)932     private static boolean shouldEnablePowerSaving(
933             @Nullable AudioAttributes attributes, @Nullable AudioFormat format,
934             int bufferSizeInBytes, int mode) {
935         // If no attributes, OK
936         // otherwise check attributes for USAGE_MEDIA and CONTENT_UNKNOWN, MUSIC, or MOVIE.
937         if (attributes != null &&
938                 (attributes.getAllFlags() != 0  // cannot have any special flags
939                 || attributes.getUsage() != AudioAttributes.USAGE_MEDIA
940                 || (attributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN
941                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MUSIC
942                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MOVIE))) {
943             return false;
944         }
945 
946         // Format must be fully specified and be linear pcm
947         if (format == null
948                 || format.getSampleRate() == AudioFormat.SAMPLE_RATE_UNSPECIFIED
949                 || !AudioFormat.isEncodingLinearPcm(format.getEncoding())
950                 || !AudioFormat.isValidEncoding(format.getEncoding())
951                 || format.getChannelCount() < 1) {
952             return false;
953         }
954 
955         // Mode must be streaming
956         if (mode != MODE_STREAM) {
957             return false;
958         }
959 
960         // A buffer size of 0 is always compatible with deep buffer (when called from the Builder)
961         // but for app compatibility we only use deep buffer power saving for large buffer sizes.
962         if (bufferSizeInBytes != 0) {
963             final long BUFFER_TARGET_MODE_STREAM_MS = 100;
964             final int MILLIS_PER_SECOND = 1000;
965             final long bufferTargetSize =
966                     BUFFER_TARGET_MODE_STREAM_MS
967                     * format.getChannelCount()
968                     * format.getBytesPerSample(format.getEncoding())
969                     * format.getSampleRate()
970                     / MILLIS_PER_SECOND;
971             if (bufferSizeInBytes < bufferTargetSize) {
972                 return false;
973             }
974         }
975 
976         return true;
977     }
978 
    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannelMask is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
                                 int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        // SAMPLE_RATE_UNSPECIFIED is allowed through: the effective rate is then
        // resolved later (see the native_setup path which returns the rate).
        if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
                sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
                sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        // IEC61937 is based on stereo. We could coerce it to stereo.
        // But the application needs to know the stream is stereo so that
        // it is encoded and played correctly. So better to just reject it.
        if (audioFormat == AudioFormat.ENCODING_IEC61937
                && channelConfig != AudioFormat.CHANNEL_OUT_STEREO) {
            throw new IllegalArgumentException(
                    "ENCODING_IEC61937 must be configured as CHANNEL_OUT_STEREO");
        }

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            // CHANNEL_INVALID plus a non-zero index mask means the caller is using
            // a channel *index* configuration only; the count is derived below.
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig)) {
                // input channel configuration features unsupported channels
                throw new IllegalArgumentException("Unsupported channel configuration.");
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2
            final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1;
            if ((channelIndexMask & ~indexMask) != 0) {
                throw new IllegalArgumentException("Unsupported channel index configuration "
                        + channelIndexMask);
            }
            int channelIndexCount = Integer.bitCount(channelIndexMask);
            if (mChannelCount == 0) {
                 // index-only configuration: the index mask defines the count
                 mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                // when both a position mask and an index mask are given,
                // they must agree on the number of channels
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isPublicEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // The second clause means MODE_STATIC is only permitted with linear PCM encodings.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }
1072 
1073     /**
1074      * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
1075      * @param channelConfig the mask to validate
1076      * @return false if the AudioTrack can't be used with such a mask
1077      */
isMultichannelConfigSupported(int channelConfig)1078     private static boolean isMultichannelConfigSupported(int channelConfig) {
1079         // check for unsupported channels
1080         if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
1081             loge("Channel configuration features unsupported channels");
1082             return false;
1083         }
1084         final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1085         if (channelCount > CHANNEL_COUNT_MAX) {
1086             loge("Channel configuration contains too many channels " +
1087                     channelCount + ">" + CHANNEL_COUNT_MAX);
1088             return false;
1089         }
1090         // check for unsupported multichannel combinations:
1091         // - FL/FR must be present
1092         // - L/R channels must be paired (e.g. no single L channel)
1093         final int frontPair =
1094                 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
1095         if ((channelConfig & frontPair) != frontPair) {
1096                 loge("Front channels must be present in multichannel configurations");
1097                 return false;
1098         }
1099         final int backPair =
1100                 AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
1101         if ((channelConfig & backPair) != 0) {
1102             if ((channelConfig & backPair) != backPair) {
1103                 loge("Rear channels can't be used independently");
1104                 return false;
1105             }
1106         }
1107         final int sidePair =
1108                 AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
1109         if ((channelConfig & sidePair) != 0
1110                 && (channelConfig & sidePair) != sidePair) {
1111             loge("Side channels can't be used independently");
1112             return false;
1113         }
1114         return true;
1115     }
1116 
1117 
1118     // Convenience method for the constructor's audio buffer size check.
1119     // preconditions:
1120     //    mChannelCount is valid
1121     //    mAudioFormat is valid
1122     // postcondition:
1123     //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
audioBuffSizeCheck(int audioBufferSize)1124     private void audioBuffSizeCheck(int audioBufferSize) {
1125         // NB: this section is only valid with PCM or IEC61937 data.
1126         //     To update when supporting compressed formats
1127         int frameSizeInBytes;
1128         if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
1129             frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
1130         } else {
1131             frameSizeInBytes = 1;
1132         }
1133         if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
1134             throw new IllegalArgumentException("Invalid audio buffer size.");
1135         }
1136 
1137         mNativeBufferSizeInBytes = audioBufferSize;
1138         mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
1139     }
1140 
1141 
1142     /**
1143      * Releases the native AudioTrack resources.
1144      */
release()1145     public void release() {
1146         // even though native_release() stops the native AudioTrack, we need to stop
1147         // AudioTrack subclasses too.
1148         try {
1149             stop();
1150         } catch(IllegalStateException ise) {
1151             // don't raise an exception, we're releasing the resources.
1152         }
1153         baseRelease();
1154         native_release();
1155         mState = STATE_UNINITIALIZED;
1156     }
1157 
    @Override
    protected void finalize() {
        // Finalizer safety net: releases the player-base registration and native
        // resources. Unlike release(), stop() is not attempted here and mState
        // is not reset.
        baseRelease();
        native_finalize();
    }
1163 
1164     //--------------------------------------------------------------------------
1165     // Getters
1166     //--------------------
1167     /**
1168      * Returns the minimum gain value, which is the constant 0.0.
1169      * Gain values less than 0.0 will be clamped to 0.0.
1170      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
1171      * @return the minimum value, which is the constant 0.0.
1172      */
getMinVolume()1173     static public float getMinVolume() {
1174         return GAIN_MIN;
1175     }
1176 
1177     /**
1178      * Returns the maximum gain value, which is greater than or equal to 1.0.
1179      * Gain values greater than the maximum will be clamped to the maximum.
1180      * <p>The word "volume" in the API name is historical; this is actually a gain.
1181      * expressed as a linear multiplier on sample values, where a maximum value of 1.0
1182      * corresponds to a gain of 0 dB (sample values left unmodified).
1183      * @return the maximum value, which is greater than or equal to 1.0.
1184      */
getMaxVolume()1185     static public float getMaxVolume() {
1186         return GAIN_MAX;
1187     }
1188 
1189     /**
1190      * Returns the configured audio source sample rate in Hz.
1191      * The initial source sample rate depends on the constructor parameters,
1192      * but the source sample rate may change if {@link #setPlaybackRate(int)} is called.
1193      * If the constructor had a specific sample rate, then the initial sink sample rate is that
1194      * value.
1195      * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED},
1196      * then the initial sink sample rate is a route-dependent default value based on the source [sic].
1197      */
getSampleRate()1198     public int getSampleRate() {
1199         return mSampleRate;
1200     }
1201 
1202     /**
1203      * Returns the current playback sample rate rate in Hz.
1204      */
getPlaybackRate()1205     public int getPlaybackRate() {
1206         return native_get_playback_rate();
1207     }
1208 
1209     /**
1210      * Returns the current playback parameters.
1211      * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
1212      * @return current {@link PlaybackParams}.
1213      * @throws IllegalStateException if track is not initialized.
1214      */
getPlaybackParams()1215     public @NonNull PlaybackParams getPlaybackParams() {
1216         return native_get_playback_params();
1217     }
1218 
1219     /**
1220      * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
1221      * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
1222      */
getAudioFormat()1223     public int getAudioFormat() {
1224         return mAudioFormat;
1225     }
1226 
1227     /**
1228      * Returns the volume stream type of this AudioTrack.
1229      * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
1230      * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
1231      * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
1232      * {@link AudioManager#STREAM_NOTIFICATION}, {@link AudioManager#STREAM_DTMF} or
1233      * {@link AudioManager#STREAM_ACCESSIBILITY}.
1234      */
getStreamType()1235     public int getStreamType() {
1236         return mStreamType;
1237     }
1238 
1239     /**
1240      * Returns the configured channel position mask.
1241      * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
1242      * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
1243      * This method may return {@link AudioFormat#CHANNEL_INVALID} if
1244      * a channel index mask was used. Consider
1245      * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
1246      * which contains both the channel position mask and the channel index mask.
1247      */
getChannelConfiguration()1248     public int getChannelConfiguration() {
1249         return mChannelConfiguration;
1250     }
1251 
1252     /**
1253      * Returns the configured <code>AudioTrack</code> format.
1254      * @return an {@link AudioFormat} containing the
1255      * <code>AudioTrack</code> parameters at the time of configuration.
1256      */
getFormat()1257     public @NonNull AudioFormat getFormat() {
1258         AudioFormat.Builder builder = new AudioFormat.Builder()
1259             .setSampleRate(mSampleRate)
1260             .setEncoding(mAudioFormat);
1261         if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
1262             builder.setChannelMask(mChannelConfiguration);
1263         }
1264         if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
1265             builder.setChannelIndexMask(mChannelIndexMask);
1266         }
1267         return builder.build();
1268     }
1269 
1270     /**
1271      * Returns the configured number of channels.
1272      */
getChannelCount()1273     public int getChannelCount() {
1274         return mChannelCount;
1275     }
1276 
1277     /**
1278      * Returns the state of the AudioTrack instance. This is useful after the
1279      * AudioTrack instance has been created to check if it was initialized
1280      * properly. This ensures that the appropriate resources have been acquired.
1281      * @see #STATE_UNINITIALIZED
1282      * @see #STATE_INITIALIZED
1283      * @see #STATE_NO_STATIC_DATA
1284      */
getState()1285     public int getState() {
1286         return mState;
1287     }
1288 
    /**
     * Returns the playback state of the AudioTrack instance.
     * @see #PLAYSTATE_STOPPED
     * @see #PLAYSTATE_PAUSED
     * @see #PLAYSTATE_PLAYING
     */
    public int getPlayState() {
        // mPlayState is read under mPlayStateLock so callers see a consistent value
        // relative to concurrent play/pause/stop transitions.
        synchronized (mPlayStateLock) {
            return mPlayState;
        }
    }
1300 
1301 
    /**
     * Returns the effective size of the <code>AudioTrack</code> buffer
     * that the application writes to.
     * <p> This will be less than or equal to the result of
     * {@link #getBufferCapacityInFrames()}.
     * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
     * <p> If the track is subsequently routed to a different output sink, the buffer
     * size and capacity may enlarge to accommodate.
     * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
     * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
     * the size of the <code>AudioTrack</code> buffer in bytes.
     * <p> See also {@link AudioManager#getProperty(String)} for key
     * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     * @return current size in frames of the <code>AudioTrack</code> buffer.
     * @throws IllegalStateException if track is not initialized.
     */
    public int getBufferSizeInFrames() {
        // Queried from the native layer on every call since the effective size can
        // change after setBufferSizeInFrames() or a routing change (see javadoc above).
        return native_get_buffer_size_frames();
    }
1321 
1322     /**
1323      * Limits the effective size of the <code>AudioTrack</code> buffer
1324      * that the application writes to.
1325      * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
1326      * If a blocking write is used then the write will block until the data
1327      * can fit within this limit.
1328      * <p>Changing this limit modifies the latency associated with
1329      * the buffer for this track. A smaller size will give lower latency
1330      * but there may be more glitches due to buffer underruns.
1331      * <p>The actual size used may not be equal to this requested size.
1332      * It will be limited to a valid range with a maximum of
1333      * {@link #getBufferCapacityInFrames()}.
1334      * It may also be adjusted slightly for internal reasons.
1335      * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
1336      * will be returned.
1337      * <p>This method is only supported for PCM audio.
1338      * It is not supported for compressed audio tracks.
1339      *
1340      * @param bufferSizeInFrames requested buffer size in frames
1341      * @return the actual buffer size in frames or an error code,
1342      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
1343      * @throws IllegalStateException if track is not initialized.
1344      */
setBufferSizeInFrames(int bufferSizeInFrames)1345     public int setBufferSizeInFrames(int bufferSizeInFrames) {
1346         if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
1347             return ERROR_INVALID_OPERATION;
1348         }
1349         if (bufferSizeInFrames < 0) {
1350             return ERROR_BAD_VALUE;
1351         }
1352         return native_set_buffer_size_frames(bufferSizeInFrames);
1353     }
1354 
    /**
     *  Returns the maximum size of the <code>AudioTrack</code> buffer in frames.
     *  <p> If the track's creation mode is {@link #MODE_STATIC},
     *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
     *  A static track's frame count will not change.
     *  <p> If the track's creation mode is {@link #MODE_STREAM},
     *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
     *  For streaming tracks, this value may be rounded up to a larger value if needed by
     *  the target output sink, and
     *  if the track is subsequently routed to a different output sink, the
     *  frame count may enlarge to accommodate.
     *  <p> If the <code>AudioTrack</code> encoding indicates compressed data,
     *  e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
     *  the size of the <code>AudioTrack</code> buffer in bytes.
     *  <p> See also {@link AudioManager#getProperty(String)} for key
     *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
     *  @return maximum size in frames of the <code>AudioTrack</code> buffer.
     *  @throws IllegalStateException if track is not initialized.
     */
    public int getBufferCapacityInFrames() {
        return native_get_buffer_capacity_frames();
    }
1377 
    /**
     *  Returns the frame count of the native <code>AudioTrack</code> buffer.
     *  @return current size in frames of the <code>AudioTrack</code> buffer.
     *  @throws IllegalStateException
     *  @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
     */
    @Deprecated
    protected int getNativeFrameCount() {
        // NOTE(review): this delegates to the buffer *capacity*, whereas the @deprecated
        // tag points callers at getBufferSizeInFrames(), which reports the effective
        // *size*. The two differ once setBufferSizeInFrames() has been called — confirm
        // which value this legacy API is meant to expose before changing either.
        return native_get_buffer_capacity_frames();
    }
1388 
    /**
     * Returns marker position expressed in frames.
     * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
     * or zero if marker is disabled.
     */
    public int getNotificationMarkerPosition() {
        return native_get_marker_pos();
    }
1397 
    /**
     * Returns the notification update period expressed in frames.
     * Zero means that no position update notifications are being delivered.
     */
    public int getPositionNotificationPeriod() {
        return native_get_pos_update_period();
    }
1405 
    /**
     * Returns the playback head position expressed in frames.
     * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
     * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
     * This is a continuously advancing counter.  It will wrap (overflow) periodically,
     * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
     * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
     * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
     * the total number of frames played since reset,
     * <i>not</i> the current offset within the buffer.
     */
    public int getPlaybackHeadPosition() {
        return native_get_position();
    }
1420 
    /**
     * Returns this track's estimated latency in milliseconds. This includes the latency due
     * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
     *
     * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
     * a better solution.
     * @hide
     */
    public int getLatency() {
        return native_get_latency();
    }
1432 
    /**
     * Returns the number of underrun occurrences in the application-level write buffer
     * since the AudioTrack was created.
     * An underrun occurs if the application does not write audio
     * data quickly enough, causing the buffer to underflow
     * and a potential audio glitch or pop.
     * <p>
     * Underruns are less likely when buffer sizes are large.
     * It may be possible to eliminate underruns by recreating the AudioTrack with
     * a larger buffer.
     * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the
     * effective size of the buffer.
     */
    public int getUnderrunCount() {
        return native_get_underrun_count();
    }
1449 
1450     /**
1451      * Returns the current performance mode of the {@link AudioTrack}.
1452      *
1453      * @return one of {@link AudioTrack#PERFORMANCE_MODE_NONE},
1454      * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
1455      * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
1456      * Use {@link AudioTrack.Builder#setPerformanceMode}
1457      * in the {@link AudioTrack.Builder} to enable a performance mode.
1458      * @throws IllegalStateException if track is not initialized.
1459      */
getPerformanceMode()1460     public @PerformanceMode int getPerformanceMode() {
1461         final int flags = native_get_flags();
1462         if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
1463             return PERFORMANCE_MODE_LOW_LATENCY;
1464         } else if ((flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
1465             return PERFORMANCE_MODE_POWER_SAVING;
1466         } else {
1467             return PERFORMANCE_MODE_NONE;
1468         }
1469     }
1470 
1471     /**
1472      *  Returns the output sample rate in Hz for the specified stream type.
1473      */
getNativeOutputSampleRate(int streamType)1474     static public int getNativeOutputSampleRate(int streamType) {
1475         return native_get_output_sample_rate(streamType);
1476     }
1477 
1478     /**
1479      * Returns the estimated minimum buffer size required for an AudioTrack
1480      * object to be created in the {@link #MODE_STREAM} mode.
1481      * The size is an estimate because it does not consider either the route or the sink,
1482      * since neither is known yet.  Note that this size doesn't
1483      * guarantee a smooth playback under load, and higher values should be chosen according to
1484      * the expected frequency at which the buffer will be refilled with additional data to play.
1485      * For example, if you intend to dynamically set the source sample rate of an AudioTrack
1486      * to a higher value than the initial source sample rate, be sure to configure the buffer size
1487      * based on the highest planned sample rate.
1488      * @param sampleRateInHz the source sample rate expressed in Hz.
1489      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted.
1490      * @param channelConfig describes the configuration of the audio channels.
1491      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
1492      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
1493      * @param audioFormat the format in which the audio data is represented.
1494      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
1495      *   {@link AudioFormat#ENCODING_PCM_8BIT},
1496      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
1497      * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
1498      *   or {@link #ERROR} if unable to query for output properties,
1499      *   or the minimum buffer size expressed in bytes.
1500      */
getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)1501     static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
1502         int channelCount = 0;
1503         switch(channelConfig) {
1504         case AudioFormat.CHANNEL_OUT_MONO:
1505         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
1506             channelCount = 1;
1507             break;
1508         case AudioFormat.CHANNEL_OUT_STEREO:
1509         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
1510             channelCount = 2;
1511             break;
1512         default:
1513             if (!isMultichannelConfigSupported(channelConfig)) {
1514                 loge("getMinBufferSize(): Invalid channel configuration.");
1515                 return ERROR_BAD_VALUE;
1516             } else {
1517                 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
1518             }
1519         }
1520 
1521         if (!AudioFormat.isPublicEncoding(audioFormat)) {
1522             loge("getMinBufferSize(): Invalid audio format.");
1523             return ERROR_BAD_VALUE;
1524         }
1525 
1526         // sample rate, note these values are subject to change
1527         // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
1528         if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
1529                 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
1530             loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
1531             return ERROR_BAD_VALUE;
1532         }
1533 
1534         int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
1535         if (size <= 0) {
1536             loge("getMinBufferSize(): error querying hardware");
1537             return ERROR;
1538         }
1539         else {
1540             return size;
1541         }
1542     }
1543 
    /**
     * Returns the audio session ID.
     *
     * @return the ID of the audio session this AudioTrack belongs to.
     */
    public int getAudioSessionId() {
        return mSessionId;
    }
1552 
1553    /**
1554     * Poll for a timestamp on demand.
1555     * <p>
1556     * If you need to track timestamps during initial warmup or after a routing or mode change,
1557     * you should request a new timestamp periodically until the reported timestamps
1558     * show that the frame position is advancing, or until it becomes clear that
1559     * timestamps are unavailable for this route.
1560     * <p>
1561     * After the clock is advancing at a stable rate,
1562     * query for a new timestamp approximately once every 10 seconds to once per minute.
1563     * Calling this method more often is inefficient.
1564     * It is also counter-productive to call this method more often than recommended,
1565     * because the short-term differences between successive timestamp reports are not meaningful.
1566     * If you need a high-resolution mapping between frame position and presentation time,
1567     * consider implementing that at application level, based on low-resolution timestamps.
1568     * <p>
1569     * The audio data at the returned position may either already have been
1570     * presented, or may have not yet been presented but is committed to be presented.
1571     * It is not possible to request the time corresponding to a particular position,
1572     * or to request the (fractional) position corresponding to a particular time.
1573     * If you need such features, consider implementing them at application level.
1574     *
1575     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
1576     *        and owned by caller.
1577     * @return true if a timestamp is available, or false if no timestamp is available.
1578     *         If a timestamp if available,
1579     *         the AudioTimestamp instance is filled in with a position in frame units, together
1580     *         with the estimated time when that frame was presented or is committed to
1581     *         be presented.
1582     *         In the case that no timestamp is available, any supplied instance is left unaltered.
1583     *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
1584     *         or during and immediately after a route change.
1585     *         A timestamp is permanently unavailable for a given route if the route does not support
1586     *         timestamps.  In this case, the approximate frame position can be obtained
1587     *         using {@link #getPlaybackHeadPosition}.
1588     *         However, it may be useful to continue to query for
1589     *         timestamps occasionally, to recover after a route change.
1590     */
1591     // Add this text when the "on new timestamp" API is added:
1592     //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestamp(AudioTimestamp timestamp)1593     public boolean getTimestamp(AudioTimestamp timestamp)
1594     {
1595         if (timestamp == null) {
1596             throw new IllegalArgumentException();
1597         }
1598         // It's unfortunate, but we have to either create garbage every time or use synchronized
1599         long[] longArray = new long[2];
1600         int ret = native_get_timestamp(longArray);
1601         if (ret != SUCCESS) {
1602             return false;
1603         }
1604         timestamp.framePosition = longArray[0];
1605         timestamp.nanoTime = longArray[1];
1606         return true;
1607     }
1608 
1609     /**
1610      * Poll for a timestamp on demand.
1611      * <p>
1612      * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code.
1613      *
1614      * @param timestamp a reference to a non-null AudioTimestamp instance allocated
1615      *        and owned by caller.
1616      * @return {@link #SUCCESS} if a timestamp is available
1617      *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
1618      *         immediately after start/ACTIVE, when the number of frames consumed is less than the
1619      *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
1620      *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
1621      *         for the timestamp.
1622      *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
1623      *         needs to be recreated.
1624      *         {@link #ERROR_INVALID_OPERATION} if current route does not support
1625      *         timestamps. In this case, the approximate frame position can be obtained
1626      *         using {@link #getPlaybackHeadPosition}.
1627      *
1628      *         The AudioTimestamp instance is filled in with a position in frame units, together
1629      *         with the estimated time when that frame was presented or is committed to
1630      *         be presented.
1631      * @hide
1632      */
1633      // Add this text when the "on new timestamp" API is added:
1634      //   Use if you need to get the most recent timestamp outside of the event callback handler.
getTimestampWithStatus(AudioTimestamp timestamp)1635      public int getTimestampWithStatus(AudioTimestamp timestamp)
1636      {
1637          if (timestamp == null) {
1638              throw new IllegalArgumentException();
1639          }
1640          // It's unfortunate, but we have to either create garbage every time or use synchronized
1641          long[] longArray = new long[2];
1642          int ret = native_get_timestamp(longArray);
1643          timestamp.framePosition = longArray[0];
1644          timestamp.nanoTime = longArray[1];
1645          return ret;
1646      }
1647 
    //--------------------------------------------------------------------------
    // Initialization / configuration
    //--------------------
    /**
     * Sets the listener the AudioTrack notifies when a previously set marker is reached or
     * for each periodic playback head position update.
     * Notifications will be received in the same thread as the one in which the AudioTrack
     * instance was created.
     * @param listener the listener to register, or null to clear a previous registration.
     */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        // Delegate to the two-argument overload; a null handler means "use the
        // creating thread's Looper" (see that overload).
        setPlaybackPositionUpdateListener(listener, null);
    }
1661 
1662     /**
1663      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
1664      * for each periodic playback head position update.
1665      * Use this method to receive AudioTrack events in the Handler associated with another
1666      * thread than the one in which you created the AudioTrack instance.
1667      * @param listener
1668      * @param handler the Handler that will receive the event notification messages.
1669      */
setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, Handler handler)1670     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
1671                                                     Handler handler) {
1672         if (listener != null) {
1673             mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
1674         } else {
1675             mEventHandlerDelegate = null;
1676         }
1677     }
1678 
1679 
clampGainOrLevel(float gainOrLevel)1680     private static float clampGainOrLevel(float gainOrLevel) {
1681         if (Float.isNaN(gainOrLevel)) {
1682             throw new IllegalArgumentException();
1683         }
1684         if (gainOrLevel < GAIN_MIN) {
1685             gainOrLevel = GAIN_MIN;
1686         } else if (gainOrLevel > GAIN_MAX) {
1687             gainOrLevel = GAIN_MAX;
1688         }
1689         return gainOrLevel;
1690     }
1691 
1692 
1693      /**
1694      * Sets the specified left and right output gain values on the AudioTrack.
1695      * <p>Gain values are clamped to the closed interval [0.0, max] where
1696      * max is the value of {@link #getMaxVolume}.
1697      * A value of 0.0 results in zero gain (silence), and
1698      * a value of 1.0 means unity gain (signal unchanged).
1699      * The default value is 1.0 meaning unity gain.
1700      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
1701      * @param leftGain output gain for the left channel.
1702      * @param rightGain output gain for the right channel
1703      * @return error code or success, see {@link #SUCCESS},
1704      *    {@link #ERROR_INVALID_OPERATION}
1705      * @deprecated Applications should use {@link #setVolume} instead, as it
1706      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
1707      */
1708     @Deprecated
setStereoVolume(float leftGain, float rightGain)1709     public int setStereoVolume(float leftGain, float rightGain) {
1710         if (mState == STATE_UNINITIALIZED) {
1711             return ERROR_INVALID_OPERATION;
1712         }
1713 
1714         baseSetVolume(leftGain, rightGain);
1715         return SUCCESS;
1716     }
1717 
1718     @Override
playerSetVolume(boolean muting, float leftVolume, float rightVolume)1719     void playerSetVolume(boolean muting, float leftVolume, float rightVolume) {
1720         leftVolume = clampGainOrLevel(muting ? 0.0f : leftVolume);
1721         rightVolume = clampGainOrLevel(muting ? 0.0f : rightVolume);
1722 
1723         native_setVolume(leftVolume, rightVolume);
1724     }
1725 
1726 
    /**
     * Sets the specified output gain value on all channels of this track.
     * <p>Gain values are clamped to the closed interval [0.0, max] where
     * max is the value of {@link #getMaxVolume}.
     * A value of 0.0 results in zero gain (silence), and
     * a value of 1.0 means unity gain (signal unchanged).
     * The default value is 1.0 meaning unity gain.
     * <p>This API is preferred over {@link #setStereoVolume}, as it
     * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
     * <p>The word "volume" in the API name is historical; this is actually a linear gain.
     * @param gain output gain for all channels.
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}
     */
    public int setVolume(float gain) {
        // Applies the same gain to both stereo channels.
        return setStereoVolume(gain, gain);
    }
1744 
    @Override
    // Forwards the shaper configuration/operation pair to the native track.
    /* package */ int playerApplyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation) {
        return native_applyVolumeShaper(configuration, operation);
    }
1751 
    @Override
    // Queries the native layer for the state of the shaper with the given id.
    /* package */ @Nullable VolumeShaper.State playerGetVolumeShaperState(int id) {
        return native_getVolumeShaperState(id);
    }
1756 
    @Override
    // Creates a VolumeShaper bound to this track; javadoc inherited from the interface.
    public @NonNull VolumeShaper createVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration) {
        return new VolumeShaper(configuration, this);
    }
1762 
1763     /**
1764      * Sets the playback sample rate for this track. This sets the sampling rate at which
1765      * the audio data will be consumed and played back
1766      * (as set by the sampleRateInHz parameter in the
1767      * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
1768      * not the original sampling rate of the
1769      * content. For example, setting it to half the sample rate of the content will cause the
1770      * playback to last twice as long, but will also result in a pitch shift down by one octave.
1771      * The valid sample rate range is from 1 Hz to twice the value returned by
1772      * {@link #getNativeOutputSampleRate(int)}.
1773      * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
1774      * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
1775      * for playback of content of differing sample rate,
1776      * but with identical encoding and channel mask.
1777      * @param sampleRateInHz the sample rate expressed in Hz
1778      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1779      *    {@link #ERROR_INVALID_OPERATION}
1780      */
setPlaybackRate(int sampleRateInHz)1781     public int setPlaybackRate(int sampleRateInHz) {
1782         if (mState != STATE_INITIALIZED) {
1783             return ERROR_INVALID_OPERATION;
1784         }
1785         if (sampleRateInHz <= 0) {
1786             return ERROR_BAD_VALUE;
1787         }
1788         return native_set_playback_rate(sampleRateInHz);
1789     }
1790 
1791 
    /**
     * Sets the playback parameters.
     * This method returns failure if it cannot apply the playback parameters.
     * One possible cause is that the parameters for speed or pitch are out of range.
     * Another possible cause is that the <code>AudioTrack</code> is streaming
     * (see {@link #MODE_STREAM}) and the
     * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
     * on configuration must be larger than the speed multiplied by the minimum size
     * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
     * @param params see {@link PlaybackParams}. In particular,
     * speed, pitch, and audio mode should be set.
     * @throws IllegalArgumentException if the parameters are invalid or not accepted.
     * @throws IllegalStateException if track is not initialized.
     */
    public void setPlaybackParams(@NonNull PlaybackParams params) {
        // Explicit null check despite @NonNull: the annotation is not enforced at runtime.
        if (params == null) {
            throw new IllegalArgumentException("params is null");
        }
        native_set_playback_params(params);
    }
1812 
1813 
1814     /**
1815      * Sets the position of the notification marker.  At most one marker can be active.
1816      * @param markerInFrames marker position in wrapping frame units similar to
1817      * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
1818      * To set a marker at a position which would appear as zero due to wraparound,
1819      * a workaround is to use a non-zero position near zero, such as -1 or 1.
1820      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1821      *  {@link #ERROR_INVALID_OPERATION}
1822      */
setNotificationMarkerPosition(int markerInFrames)1823     public int setNotificationMarkerPosition(int markerInFrames) {
1824         if (mState == STATE_UNINITIALIZED) {
1825             return ERROR_INVALID_OPERATION;
1826         }
1827         return native_set_marker_pos(markerInFrames);
1828     }
1829 
1830 
1831     /**
1832      * Sets the period for the periodic notification event.
1833      * @param periodInFrames update period expressed in frames.
1834      * Zero period means no position updates.  A negative period is not allowed.
1835      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
1836      */
setPositionNotificationPeriod(int periodInFrames)1837     public int setPositionNotificationPeriod(int periodInFrames) {
1838         if (mState == STATE_UNINITIALIZED) {
1839             return ERROR_INVALID_OPERATION;
1840         }
1841         return native_set_pos_update_period(periodInFrames);
1842     }
1843 
1844 
1845     /**
1846      * Sets the playback head position within the static buffer.
1847      * The track must be stopped or paused for the position to be changed,
1848      * and must use the {@link #MODE_STATIC} mode.
1849      * @param positionInFrames playback head position within buffer, expressed in frames.
1850      * Zero corresponds to start of buffer.
1851      * The position must not be greater than the buffer size in frames, or negative.
1852      * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
1853      * the position values have different meanings.
1854      * <br>
1855      * If looping is currently enabled and the new position is greater than or equal to the
1856      * loop end marker, the behavior varies by API level:
1857      * as of {@link android.os.Build.VERSION_CODES#M},
1858      * the looping is first disabled and then the position is set.
1859      * For earlier API levels, the behavior is unspecified.
1860      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1861      *    {@link #ERROR_INVALID_OPERATION}
1862      */
setPlaybackHeadPosition(int positionInFrames)1863     public int setPlaybackHeadPosition(int positionInFrames) {
1864         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
1865                 getPlayState() == PLAYSTATE_PLAYING) {
1866             return ERROR_INVALID_OPERATION;
1867         }
1868         if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
1869             return ERROR_BAD_VALUE;
1870         }
1871         return native_set_position(positionInFrames);
1872     }
1873 
1874     /**
1875      * Sets the loop points and the loop count. The loop can be infinite.
1876      * Similarly to setPlaybackHeadPosition,
1877      * the track must be stopped or paused for the loop points to be changed,
1878      * and must use the {@link #MODE_STATIC} mode.
1879      * @param startInFrames loop start marker expressed in frames.
1880      * Zero corresponds to start of buffer.
1881      * The start marker must not be greater than or equal to the buffer size in frames, or negative.
1882      * @param endInFrames loop end marker expressed in frames.
1883      * The total buffer size in frames corresponds to end of buffer.
1884      * The end marker must not be greater than the buffer size in frames.
1885      * For looping, the end marker must not be less than or equal to the start marker,
1886      * but to disable looping
1887      * it is permitted for start marker, end marker, and loop count to all be 0.
1888      * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
1889      * If the loop period (endInFrames - startInFrames) is too small for the implementation to
1890      * support,
1891      * {@link #ERROR_BAD_VALUE} is returned.
1892      * The loop range is the interval [startInFrames, endInFrames).
1893      * <br>
1894      * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged,
1895      * unless it is greater than or equal to the loop end marker, in which case
1896      * it is forced to the loop start marker.
1897      * For earlier API levels, the effect on position is unspecified.
1898      * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
1899      *    A value of -1 means infinite looping, and 0 disables looping.
1900      *    A value of positive N means to "loop" (go back) N times.  For example,
1901      *    a value of one means to play the region two times in total.
1902      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
1903      *    {@link #ERROR_INVALID_OPERATION}
1904      */
setLoopPoints(int startInFrames, int endInFrames, int loopCount)1905     public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
1906         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
1907                 getPlayState() == PLAYSTATE_PLAYING) {
1908             return ERROR_INVALID_OPERATION;
1909         }
1910         if (loopCount == 0) {
1911             ;   // explicitly allowed as an exception to the loop region range check
1912         } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
1913                 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
1914             return ERROR_BAD_VALUE;
1915         }
1916         return native_set_loop(startInFrames, endInFrames, loopCount);
1917     }
1918 
1919     /**
1920      * Sets the initialization state of the instance. This method was originally intended to be used
1921      * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
1922      * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
1923      * @param state the state of the AudioTrack instance
1924      * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
1925      */
    @Deprecated
    protected void setState(int state) {
        // Overwrites mState directly; no validation is performed on the value.
        // Retained only for legacy subclasses (see @deprecated note above).
        mState = state;
    }
1930 
1931 
1932     //---------------------------------------------------------
1933     // Transport control methods
1934     //--------------------
1935     /**
1936      * Starts playing an AudioTrack.
1937      * <p>
1938      * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
1939      * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
1940      * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
1941      * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
1942      * play().
1943      * <p>
1944      * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
1945      * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
1946      * If you don't call write() first, or if you call write() but with an insufficient amount of
1947      * data, then the track will be in underrun state at play().  In this case,
1948      * playback will not actually start playing until the data path is filled to a
1949      * device-specific minimum level.  This requirement for the path to be filled
1950      * to a minimum level is also true when resuming audio playback after calling stop().
1951      * Similarly the buffer will need to be filled up again after
1952      * the track underruns due to failure to call write() in a timely manner with sufficient data.
1953      * For portability, an application should prime the data path to the maximum allowed
1954      * by writing data until the write() method returns a short transfer count.
1955      * This allows play() to start immediately, and reduces the chance of underrun.
1956      *
1957      * @throws IllegalStateException if the track isn't properly initialized
1958      */
play()1959     public void play()
1960     throws IllegalStateException {
1961         if (mState != STATE_INITIALIZED) {
1962             throw new IllegalStateException("play() called on uninitialized AudioTrack.");
1963         }
1964         //FIXME use lambda to pass startImpl to superclass
1965         final int delay = getStartDelayMs();
1966         if (delay == 0) {
1967             startImpl();
1968         } else {
1969             new Thread() {
1970                 public void run() {
1971                     try {
1972                         Thread.sleep(delay);
1973                     } catch (InterruptedException e) {
1974                         e.printStackTrace();
1975                     }
1976                     baseSetStartDelayMs(0);
1977                     try {
1978                         startImpl();
1979                     } catch (IllegalStateException e) {
1980                         // fail silently for a state exception when it is happening after
1981                         // a delayed start, as the player state could have changed between the
1982                         // call to start() and the execution of startImpl()
1983                     }
1984                 }
1985             }.start();
1986         }
1987     }
1988 
    // Common start path for immediate and delayed play(): under the play-state lock,
    // notify the base player class first, then start the native track, then publish
    // the PLAYING state.
    private void startImpl() {
        synchronized(mPlayStateLock) {
            baseStart();
            native_start();
            mPlayState = PLAYSTATE_PLAYING;
        }
    }
1996 
1997     /**
1998      * Stops playing the audio data.
1999      * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
2000      * after the last buffer that was written has been played. For an immediate stop, use
2001      * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
2002      * back yet.
2003      * @throws IllegalStateException
2004      */
    public void stop()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
        }

        // stop playing
        synchronized(mPlayStateLock) {
            // Stop the native track before notifying the base class, then publish
            // the STOPPED state and reset AV sync bookkeeping under the lock.
            native_stop();
            baseStop();
            mPlayState = PLAYSTATE_STOPPED;
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
        }
    }
2020 
2021     /**
2022      * Pauses the playback of the audio data. Data that has not been played
2023      * back will not be discarded. Subsequent calls to {@link #play} will play
2024      * this data back. See {@link #flush()} to discard this data.
2025      *
2026      * @throws IllegalStateException
2027      */
    public void pause()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
        }

        // pause playback
        synchronized(mPlayStateLock) {
            // Pause the native track before notifying the base class, then publish
            // the PAUSED state under the lock. Unplayed data is retained (see javadoc).
            native_pause();
            basePause();
            mPlayState = PLAYSTATE_PAUSED;
        }
    }
2041 
2042 
2043     //---------------------------------------------------------
2044     // Audio data supply
2045     //--------------------
2046 
2047     /**
2048      * Flushes the audio data currently queued for playback. Any data that has
2049      * been written but not yet presented will be discarded.  No-op if not stopped or paused,
2050      * or if the track's creation mode is not {@link #MODE_STREAM}.
2051      * <BR> Note that although data written but not yet presented is discarded, there is no
2052      * guarantee that all of the buffer space formerly used by that data
2053      * is available for a subsequent write.
2054      * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
2055      * less than or equal to the total buffer size
2056      * may return a short actual transfer count.
2057      */
flush()2058     public void flush() {
2059         if (mState == STATE_INITIALIZED) {
2060             // flush the data in native layer
2061             native_flush();
2062             mAvSyncHeader = null;
2063             mAvSyncBytesRemaining = 0;
2064         }
2065 
2066     }
2067 
2068     /**
2069      * Writes the audio data to the audio sink for playback (streaming mode),
2070      * or copies audio data for later playback (static buffer mode).
2071      * The format specified in the AudioTrack constructor should be
2072      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
2073      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
2074      * <p>
2075      * In streaming mode, the write will normally block until all the data has been enqueued for
2076      * playback, and will return a full transfer count.  However, if the track is stopped or paused
2077      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
2078      * occurs during the write, then the write may return a short transfer count.
2079      * <p>
2080      * In static buffer mode, copies the data to the buffer starting at offset 0.
2081      * Note that the actual playback of this data might occur after this function returns.
2082      *
2083      * @param audioData the array that holds the data to play.
2084      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
2085      *    starts.
2086      *    Must not be negative, or cause the data access to go out of bounds of the array.
2087      * @param sizeInBytes the number of bytes to write in audioData after the offset.
2088      *    Must not be negative, or cause the data access to go out of bounds of the array.
2089      * @return zero or the positive number of bytes that were written, or one of the following
2090      *    error codes. The number of bytes will be a multiple of the frame size in bytes
2091      *    not to exceed sizeInBytes.
2092      * <ul>
2093      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2094      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2095      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2096      *    needs to be recreated. The dead object error code is not returned if some data was
2097      *    successfully transferred. In this case, the error is returned at the next write()</li>
2098      * <li>{@link #ERROR} in case of other error</li>
2099      * </ul>
2100      * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code>
2101      * set to  {@link #WRITE_BLOCKING}.
2102      */
    // Convenience overload: delegates to the four-argument variant with
    // blocking semantics (WRITE_BLOCKING), as documented above.
    public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
        return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
    }
2106 
2107     /**
2108      * Writes the audio data to the audio sink for playback (streaming mode),
2109      * or copies audio data for later playback (static buffer mode).
2110      * The format specified in the AudioTrack constructor should be
2111      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
2112      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
2113      * <p>
2114      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2115      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2116      * for playback, and will return a full transfer count.  However, if the write mode is
2117      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2118      * interrupts the write by calling stop or pause, or an I/O error
2119      * occurs during the write, then the write may return a short transfer count.
2120      * <p>
2121      * In static buffer mode, copies the data to the buffer starting at offset 0,
2122      * and the write mode is ignored.
2123      * Note that the actual playback of this data might occur after this function returns.
2124      *
2125      * @param audioData the array that holds the data to play.
2126      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
2127      *    starts.
2128      *    Must not be negative, or cause the data access to go out of bounds of the array.
2129      * @param sizeInBytes the number of bytes to write in audioData after the offset.
2130      *    Must not be negative, or cause the data access to go out of bounds of the array.
2131      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2132      *     effect in static mode.
2133      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2134      *         to the audio sink.
2135      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2136      *     queuing as much audio data for playback as possible without blocking.
2137      * @return zero or the positive number of bytes that were written, or one of the following
2138      *    error codes. The number of bytes will be a multiple of the frame size in bytes
2139      *    not to exceed sizeInBytes.
2140      * <ul>
2141      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2142      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2143      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2144      *    needs to be recreated. The dead object error code is not returned if some data was
2145      *    successfully transferred. In this case, the error is returned at the next write()</li>
2146      * <li>{@link #ERROR} in case of other error</li>
2147      * </ul>
2148      */
write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes, @WriteMode int writeMode)2149     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
2150             @WriteMode int writeMode) {
2151 
2152         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
2153             return ERROR_INVALID_OPERATION;
2154         }
2155 
2156         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2157             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2158             return ERROR_BAD_VALUE;
2159         }
2160 
2161         if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
2162                 || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
2163                 || (offsetInBytes + sizeInBytes > audioData.length)) {
2164             return ERROR_BAD_VALUE;
2165         }
2166 
2167         int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
2168                 writeMode == WRITE_BLOCKING);
2169 
2170         if ((mDataLoadMode == MODE_STATIC)
2171                 && (mState == STATE_NO_STATIC_DATA)
2172                 && (ret > 0)) {
2173             // benign race with respect to other APIs that read mState
2174             mState = STATE_INITIALIZED;
2175         }
2176 
2177         return ret;
2178     }
2179 
2180     /**
2181      * Writes the audio data to the audio sink for playback (streaming mode),
2182      * or copies audio data for later playback (static buffer mode).
2183      * The format specified in the AudioTrack constructor should be
2184      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
2185      * <p>
2186      * In streaming mode, the write will normally block until all the data has been enqueued for
2187      * playback, and will return a full transfer count.  However, if the track is stopped or paused
2188      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
2189      * occurs during the write, then the write may return a short transfer count.
2190      * <p>
2191      * In static buffer mode, copies the data to the buffer starting at offset 0.
2192      * Note that the actual playback of this data might occur after this function returns.
2193      *
2194      * @param audioData the array that holds the data to play.
2195      * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
2196      *     starts.
2197      *    Must not be negative, or cause the data access to go out of bounds of the array.
2198      * @param sizeInShorts the number of shorts to read in audioData after the offset.
2199      *    Must not be negative, or cause the data access to go out of bounds of the array.
2200      * @return zero or the positive number of shorts that were written, or one of the following
2201      *    error codes. The number of shorts will be a multiple of the channel count not to
2202      *    exceed sizeInShorts.
2203      * <ul>
2204      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2205      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2206      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2207      *    needs to be recreated. The dead object error code is not returned if some data was
2208      *    successfully transferred. In this case, the error is returned at the next write()</li>
2209      * <li>{@link #ERROR} in case of other error</li>
2210      * </ul>
2211      * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
2212      * set to  {@link #WRITE_BLOCKING}.
2213      */
    // Convenience overload: delegates to the four-argument variant with
    // blocking semantics (WRITE_BLOCKING), as documented above.
    public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
        return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
    }
2217 
2218     /**
2219      * Writes the audio data to the audio sink for playback (streaming mode),
2220      * or copies audio data for later playback (static buffer mode).
2221      * The format specified in the AudioTrack constructor should be
2222      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
2223      * <p>
2224      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2225      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2226      * for playback, and will return a full transfer count.  However, if the write mode is
2227      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2228      * interrupts the write by calling stop or pause, or an I/O error
2229      * occurs during the write, then the write may return a short transfer count.
2230      * <p>
2231      * In static buffer mode, copies the data to the buffer starting at offset 0.
2232      * Note that the actual playback of this data might occur after this function returns.
2233      *
2234      * @param audioData the array that holds the data to write.
2235      * @param offsetInShorts the offset expressed in shorts in audioData where the data to write
2236      *     starts.
2237      *    Must not be negative, or cause the data access to go out of bounds of the array.
2238      * @param sizeInShorts the number of shorts to read in audioData after the offset.
2239      *    Must not be negative, or cause the data access to go out of bounds of the array.
2240      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2241      *     effect in static mode.
2242      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2243      *         to the audio sink.
2244      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2245      *     queuing as much audio data for playback as possible without blocking.
2246      * @return zero or the positive number of shorts that were written, or one of the following
2247      *    error codes. The number of shorts will be a multiple of the channel count not to
2248      *    exceed sizeInShorts.
2249      * <ul>
2250      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2251      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2252      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2253      *    needs to be recreated. The dead object error code is not returned if some data was
2254      *    successfully transferred. In this case, the error is returned at the next write()</li>
2255      * <li>{@link #ERROR} in case of other error</li>
2256      * </ul>
2257      */
write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts, @WriteMode int writeMode)2258     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
2259             @WriteMode int writeMode) {
2260 
2261         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
2262             return ERROR_INVALID_OPERATION;
2263         }
2264 
2265         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2266             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2267             return ERROR_BAD_VALUE;
2268         }
2269 
2270         if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
2271                 || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
2272                 || (offsetInShorts + sizeInShorts > audioData.length)) {
2273             return ERROR_BAD_VALUE;
2274         }
2275 
2276         int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
2277                 writeMode == WRITE_BLOCKING);
2278 
2279         if ((mDataLoadMode == MODE_STATIC)
2280                 && (mState == STATE_NO_STATIC_DATA)
2281                 && (ret > 0)) {
2282             // benign race with respect to other APIs that read mState
2283             mState = STATE_INITIALIZED;
2284         }
2285 
2286         return ret;
2287     }
2288 
2289     /**
2290      * Writes the audio data to the audio sink for playback (streaming mode),
2291      * or copies audio data for later playback (static buffer mode).
2292      * The format specified in the AudioTrack constructor should be
2293      * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
2294      * <p>
2295      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2296      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2297      * for playback, and will return a full transfer count.  However, if the write mode is
2298      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2299      * interrupts the write by calling stop or pause, or an I/O error
2300      * occurs during the write, then the write may return a short transfer count.
2301      * <p>
2302      * In static buffer mode, copies the data to the buffer starting at offset 0,
2303      * and the write mode is ignored.
2304      * Note that the actual playback of this data might occur after this function returns.
2305      *
2306      * @param audioData the array that holds the data to write.
2307      *     The implementation does not clip for sample values within the nominal range
2308      *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
2309      *     less than or equal to unity (1.0f), and in the absence of post-processing effects
2310      *     that could add energy, such as reverb.  For the convenience of applications
2311      *     that compute samples using filters with non-unity gain,
2312      *     sample values +3 dB beyond the nominal range are permitted.
2313      *     However such values may eventually be limited or clipped, depending on various gains
2314      *     and later processing in the audio path.  Therefore applications are encouraged
2315      *     to provide samples values within the nominal range.
2316      * @param offsetInFloats the offset, expressed as a number of floats,
2317      *     in audioData where the data to write starts.
2318      *    Must not be negative, or cause the data access to go out of bounds of the array.
2319      * @param sizeInFloats the number of floats to write in audioData after the offset.
2320      *    Must not be negative, or cause the data access to go out of bounds of the array.
2321      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2322      *     effect in static mode.
2323      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2324      *         to the audio sink.
2325      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2326      *     queuing as much audio data for playback as possible without blocking.
2327      * @return zero or the positive number of floats that were written, or one of the following
2328      *    error codes. The number of floats will be a multiple of the channel count not to
2329      *    exceed sizeInFloats.
2330      * <ul>
2331      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2332      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2333      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2334      *    needs to be recreated. The dead object error code is not returned if some data was
2335      *    successfully transferred. In this case, the error is returned at the next write()</li>
2336      * <li>{@link #ERROR} in case of other error</li>
2337      * </ul>
2338      */
write(@onNull float[] audioData, int offsetInFloats, int sizeInFloats, @WriteMode int writeMode)2339     public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
2340             @WriteMode int writeMode) {
2341 
2342         if (mState == STATE_UNINITIALIZED) {
2343             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
2344             return ERROR_INVALID_OPERATION;
2345         }
2346 
2347         if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
2348             Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
2349             return ERROR_INVALID_OPERATION;
2350         }
2351 
2352         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2353             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2354             return ERROR_BAD_VALUE;
2355         }
2356 
2357         if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
2358                 || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
2359                 || (offsetInFloats + sizeInFloats > audioData.length)) {
2360             Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
2361             return ERROR_BAD_VALUE;
2362         }
2363 
2364         int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
2365                 writeMode == WRITE_BLOCKING);
2366 
2367         if ((mDataLoadMode == MODE_STATIC)
2368                 && (mState == STATE_NO_STATIC_DATA)
2369                 && (ret > 0)) {
2370             // benign race with respect to other APIs that read mState
2371             mState = STATE_INITIALIZED;
2372         }
2373 
2374         return ret;
2375     }
2376 
2377 
2378     /**
2379      * Writes the audio data to the audio sink for playback (streaming mode),
2380      * or copies audio data for later playback (static buffer mode).
2381      * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor.
2382      * <p>
2383      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
2384      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
2385      * for playback, and will return a full transfer count.  However, if the write mode is
2386      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
2387      * interrupts the write by calling stop or pause, or an I/O error
2388      * occurs during the write, then the write may return a short transfer count.
2389      * <p>
2390      * In static buffer mode, copies the data to the buffer starting at offset 0,
2391      * and the write mode is ignored.
2392      * Note that the actual playback of this data might occur after this function returns.
2393      *
2394      * @param audioData the buffer that holds the data to write, starting at the position reported
2395      *     by <code>audioData.position()</code>.
2396      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
2397      *     have been advanced to reflect the amount of data that was successfully written to
2398      *     the AudioTrack.
2399      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
2400      *     that the number of bytes requested be a multiple of the frame size (sample size in
2401      *     bytes multiplied by the channel count).
2402      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
2403      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
2404      *     effect in static mode.
2405      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2406      *         to the audio sink.
2407      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2408      *     queuing as much audio data for playback as possible without blocking.
2409      * @return zero or the positive number of bytes that were written, or one of the following
2410      *    error codes.
2411      * <ul>
2412      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2413      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2414      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2415      *    needs to be recreated. The dead object error code is not returned if some data was
2416      *    successfully transferred. In this case, the error is returned at the next write()</li>
2417      * <li>{@link #ERROR} in case of other error</li>
2418      * </ul>
2419      */
write(@onNull ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode)2420     public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
2421             @WriteMode int writeMode) {
2422 
2423         if (mState == STATE_UNINITIALIZED) {
2424             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
2425             return ERROR_INVALID_OPERATION;
2426         }
2427 
2428         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
2429             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
2430             return ERROR_BAD_VALUE;
2431         }
2432 
2433         if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
2434             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
2435             return ERROR_BAD_VALUE;
2436         }
2437 
2438         int ret = 0;
2439         if (audioData.isDirect()) {
2440             ret = native_write_native_bytes(audioData,
2441                     audioData.position(), sizeInBytes, mAudioFormat,
2442                     writeMode == WRITE_BLOCKING);
2443         } else {
2444             ret = native_write_byte(NioUtils.unsafeArray(audioData),
2445                     NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
2446                     sizeInBytes, mAudioFormat,
2447                     writeMode == WRITE_BLOCKING);
2448         }
2449 
2450         if ((mDataLoadMode == MODE_STATIC)
2451                 && (mState == STATE_NO_STATIC_DATA)
2452                 && (ret > 0)) {
2453             // benign race with respect to other APIs that read mState
2454             mState = STATE_INITIALIZED;
2455         }
2456 
2457         if (ret > 0) {
2458             audioData.position(audioData.position() + ret);
2459         }
2460 
2461         return ret;
2462     }
2463 
2464     /**
2465      * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
2466      * The blocking behavior will depend on the write mode.
2467      * @param audioData the buffer that holds the data to write, starting at the position reported
2468      *     by <code>audioData.position()</code>.
2469      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
2470      *     have been advanced to reflect the amount of data that was successfully written to
2471      *     the AudioTrack.
2472      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
2473      *     that the number of bytes requested be a multiple of the frame size (sample size in
2474      *     bytes multiplied by the channel count).
2475      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
2476      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
2477      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
2478      *         to the audio sink.
2479      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
2480      *     queuing as much audio data for playback as possible without blocking.
2481      * @param timestamp The timestamp of the first decodable audio frame in the provided audioData.
2482      * @return zero or the positive number of bytes that were written, or one of the following
2483      *    error codes.
2484      * <ul>
2485      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
2486      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
2487      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
2488      *    needs to be recreated. The dead object error code is not returned if some data was
2489      *    successfully transferred. In this case, the error is returned at the next write()</li>
2490      * <li>{@link #ERROR} in case of other error</li>
2491      * </ul>
2492      */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        // The track must have completed construction before any write.
        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamped writes only apply to a continuous stream, not a static buffer.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Tracks without the HW_AV_SYNC flag ignore the timestamp and take the plain path.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // create timestamp header if none exists
        // Header layout (big-endian): 4-byte magic 0x55550001, 4-byte payload size,
        // 8-byte timestamp. The buffer is lazily allocated once and reused.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(16);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550001);
        }

        // mAvSyncBytesRemaining == 0 means a new access unit starts here: stamp its
        // size and timestamp into the header and rewind it so it is sent first.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader.putInt(4, sizeInBytes);
            mAvSyncHeader.putLong(8, timestamp);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                // Header write failed: drop the pending access-unit state and report.
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Header only partially written (can happen when not blocking); report
                // zero payload consumed so the caller retries with the same data.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write past the end of the current access unit declared in the header.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;

        return ret;
    }
2565 
2566 
2567     /**
2568      * Sets the playback head position within the static buffer to zero,
2569      * that is it rewinds to start of static buffer.
2570      * The track must be stopped or paused, and
2571      * the track's creation mode must be {@link #MODE_STATIC}.
2572      * <p>
2573      * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
2574      * {@link #getPlaybackHeadPosition()} to zero.
2575      * For earlier API levels, the reset behavior is unspecified.
2576      * <p>
2577      * Use {@link #setPlaybackHeadPosition(int)} with a zero position
2578      * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
2579      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
2580      *  {@link #ERROR_INVALID_OPERATION}
2581      */
reloadStaticData()2582     public int reloadStaticData() {
2583         if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
2584             return ERROR_INVALID_OPERATION;
2585         }
2586         return native_reload_static();
2587     }
2588 
2589     //--------------------------------------------------------------------------
2590     // Audio effects management
2591     //--------------------
2592 
2593     /**
2594      * Attaches an auxiliary effect to the audio track. A typical auxiliary
2595      * effect is a reverberation effect which can be applied on any sound source
2596      * that directs a certain amount of its energy to this effect. This amount
2597      * is defined by setAuxEffectSendLevel().
     * See {@link #setAuxEffectSendLevel(float)}.
2599      * <p>After creating an auxiliary effect (e.g.
2600      * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
2601      * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
2602      * this method to attach the audio track to the effect.
2603      * <p>To detach the effect from the audio track, call this method with a
2604      * null effect id.
2605      *
2606      * @param effectId system wide unique id of the effect to attach
2607      * @return error code or success, see {@link #SUCCESS},
2608      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
2609      */
attachAuxEffect(int effectId)2610     public int attachAuxEffect(int effectId) {
2611         if (mState == STATE_UNINITIALIZED) {
2612             return ERROR_INVALID_OPERATION;
2613         }
2614         return native_attachAuxEffect(effectId);
2615     }
2616 
2617     /**
2618      * Sets the send level of the audio track to the attached auxiliary effect
2619      * {@link #attachAuxEffect(int)}.  Effect levels
2620      * are clamped to the closed interval [0.0, max] where
2621      * max is the value of {@link #getMaxVolume}.
2622      * A value of 0.0 results in no effect, and a value of 1.0 is full send.
2623      * <p>By default the send level is 0.0f, so even if an effect is attached to the player
2624      * this method must be called for the effect to be applied.
2625      * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
2626      * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
2627      * so an appropriate conversion from linear UI input x to level is:
2628      * x == 0 -&gt; level = 0
2629      * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
2630      *
2631      * @param level linear send level
2632      * @return error code or success, see {@link #SUCCESS},
2633      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
2634      */
setAuxEffectSendLevel(float level)2635     public int setAuxEffectSendLevel(float level) {
2636         if (mState == STATE_UNINITIALIZED) {
2637             return ERROR_INVALID_OPERATION;
2638         }
2639         return baseSetAuxEffectSendLevel(level);
2640     }
2641 
2642     @Override
playerSetAuxEffectSendLevel(boolean muting, float level)2643     int playerSetAuxEffectSendLevel(boolean muting, float level) {
2644         level = clampGainOrLevel(muting ? 0.0f : level);
2645         int err = native_setAuxEffectSendLevel(level);
2646         return err == 0 ? SUCCESS : ERROR;
2647     }
2648 
2649     //--------------------------------------------------------------------------
2650     // Explicit Routing
2651     //--------------------
    // Output device explicitly requested via setPreferredDevice(); null means default
    // routing. Accessed under synchronized (this) by the getter/setter below.
    private AudioDeviceInfo mPreferredDevice = null;
2653 
2654     /**
2655      * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
2656      * the output from this AudioTrack.
2657      * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
2658      *  If deviceInfo is null, default routing is restored.
     * @return true if successful, false if the specified {@link AudioDeviceInfo} is non-null and
2660      * does not correspond to a valid audio output device.
2661      */
2662     @Override
setPreferredDevice(AudioDeviceInfo deviceInfo)2663     public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
2664         // Do some validation....
2665         if (deviceInfo != null && !deviceInfo.isSink()) {
2666             return false;
2667         }
2668         int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
2669         boolean status = native_setOutputDevice(preferredDeviceId);
2670         if (status == true) {
2671             synchronized (this) {
2672                 mPreferredDevice = deviceInfo;
2673             }
2674         }
2675         return status;
2676     }
2677 
2678     /**
2679      * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
2680      * is not guaranteed to correspond to the actual device being used for playback.
2681      */
    @Override
    public AudioDeviceInfo getPreferredDevice() {
        // Synchronized against concurrent setPreferredDevice() updates.
        synchronized (this) {
            return mPreferredDevice;
        }
    }
2688 
2689     /**
2690      * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
2691      * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
2692      * <code>getRoutedDevice()</code> will return null.
2693      */
2694     @Override
getRoutedDevice()2695     public AudioDeviceInfo getRoutedDevice() {
2696         int deviceId = native_getRoutedDeviceId();
2697         if (deviceId == 0) {
2698             return null;
2699         }
2700         AudioDeviceInfo[] devices =
2701                 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS);
2702         for (int i = 0; i < devices.length; i++) {
2703             if (devices[i].getId() == deviceId) {
2704                 return devices[i];
2705             }
2706         }
2707         return null;
2708     }
2709 
2710     /*
2711      * Call BEFORE adding a routing callback handler.
2712      */
testEnableNativeRoutingCallbacksLocked()2713     private void testEnableNativeRoutingCallbacksLocked() {
2714         if (mRoutingChangeListeners.size() == 0) {
2715             native_enableDeviceCallback();
2716         }
2717     }
2718 
2719     /*
2720      * Call AFTER removing a routing callback handler.
2721      */
testDisableNativeRoutingCallbacksLocked()2722     private void testDisableNativeRoutingCallbacksLocked() {
2723         if (mRoutingChangeListeners.size() == 0) {
2724             native_disableDeviceCallback();
2725         }
2726     }
2727 
2728     //--------------------------------------------------------------------------
2729     // (Re)Routing Info
2730     //--------------------
2731     /**
2732      * The list of AudioRouting.OnRoutingChangedListener interfaces added (with
2733      * {@link #addOnRoutingChangedListener(android.media.AudioRouting.OnRoutingChangedListener, Handler)}
2734      * by an app to receive (re)routing notifications.
2735      */
    // Maps each registered listener to the delegate that dispatches its callbacks.
    @GuardedBy("mRoutingChangeListeners")
    private ArrayMap<AudioRouting.OnRoutingChangedListener,
            NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>();
2739 
2740    /**
2741     * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
2742     * changes on this AudioTrack.
2743     * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive
2744     * notifications of rerouting events.
2745     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
2746     * the callback. If <code>null</code>, the {@link Handler} associated with the main
2747     * {@link Looper} will be used.
2748     */
2749     @Override
addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, Handler handler)2750     public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener,
2751             Handler handler) {
2752         synchronized (mRoutingChangeListeners) {
2753             if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
2754                 testEnableNativeRoutingCallbacksLocked();
2755                 mRoutingChangeListeners.put(
2756                         listener, new NativeRoutingEventHandlerDelegate(this, listener,
2757                                 handler != null ? handler : new Handler(mInitializationLooper)));
2758             }
2759         }
2760     }
2761 
2762     /**
2763      * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added
2764      * to receive rerouting notifications.
2765      * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface
2766      * to remove.
2767      */
2768     @Override
removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener)2769     public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) {
2770         synchronized (mRoutingChangeListeners) {
2771             if (mRoutingChangeListeners.containsKey(listener)) {
2772                 mRoutingChangeListeners.remove(listener);
2773             }
2774             testDisableNativeRoutingCallbacksLocked();
2775         }
2776     }
2777 
2778     //--------------------------------------------------------------------------
2779     // (Re)Routing Info
2780     //--------------------
    /**
     * Defines the interface by which applications can receive notifications of
     * routing changes for the associated {@link AudioTrack}.
     *
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener {
        /**
         * Called when the routing of an AudioTrack changes from either an
         * explicit or policy rerouting. Use {@link #getRoutedDevice()} to
         * retrieve the newly routed-to device.
         * @param audioTrack the track whose routing changed
         */
        public void onRoutingChanged(AudioTrack audioTrack);

        /**
         * Bridges the generic {@link AudioRouting} callback to the
         * AudioTrack-typed legacy callback above.
         */
        @Override
        default public void onRoutingChanged(AudioRouting router) {
            if (router instanceof AudioTrack) {
                onRoutingChanged((AudioTrack) router);
            }
        }
    }
2804 
2805     /**
2806      * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
2807      * on this AudioTrack.
2808      * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
2809      * of rerouting events.
2810      * @param handler  Specifies the {@link Handler} object for the thread on which to execute
2811      * the callback. If <code>null</code>, the {@link Handler} associated with the main
2812      * {@link Looper} will be used.
2813      * @deprecated users should switch to the general purpose
2814      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
2815      */
    @Deprecated
    public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
            android.os.Handler handler) {
        // Legacy entry point: delegates to the AudioRouting-typed overload.
        addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler);
    }
2821 
2822     /**
2823      * Removes an {@link OnRoutingChangedListener} which has been previously added
2824      * to receive rerouting notifications.
2825      * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
2826      * @deprecated users should switch to the general purpose
2827      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
2828      */
    @Deprecated
    public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
        // Legacy entry point: delegates to the AudioRouting-typed overload.
        removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener);
    }
2833 
2834     /**
2835      * Sends device list change notification to all listeners.
2836      */
broadcastRoutingChange()2837     private void broadcastRoutingChange() {
2838         AudioManager.resetAudioPortGeneration();
2839         synchronized (mRoutingChangeListeners) {
2840             for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) {
2841                 Handler handler = delegate.getHandler();
2842                 if (handler != null) {
2843                     handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE);
2844                 }
2845             }
2846         }
2847     }
2848 
2849     //---------------------------------------------------------
2850     // Interface definitions
2851     //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         * @param track the AudioTrack whose marker was reached
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         * @param track the AudioTrack that reached a notification-period multiple
         */
        void onPeriodicNotification(AudioTrack track);
    }
2869 
2870     //---------------------------------------------------------
2871     // Inner classes
2872     //--------------------
2873     /**
2874      * Helper class to handle the forwarding of native events to the appropriate listener
2875      * (potentially) handled in a different thread
2876      */
2877     private class NativePositionEventHandlerDelegate {
2878         private final Handler mHandler;
2879 
NativePositionEventHandlerDelegate(final AudioTrack track, final OnPlaybackPositionUpdateListener listener, Handler handler)2880         NativePositionEventHandlerDelegate(final AudioTrack track,
2881                                    final OnPlaybackPositionUpdateListener listener,
2882                                    Handler handler) {
2883             // find the looper for our new event handler
2884             Looper looper;
2885             if (handler != null) {
2886                 looper = handler.getLooper();
2887             } else {
2888                 // no given handler, use the looper the AudioTrack was created in
2889                 looper = mInitializationLooper;
2890             }
2891 
2892             // construct the event handler with this looper
2893             if (looper != null) {
2894                 // implement the event handler delegate
2895                 mHandler = new Handler(looper) {
2896                     @Override
2897                     public void handleMessage(Message msg) {
2898                         if (track == null) {
2899                             return;
2900                         }
2901                         switch(msg.what) {
2902                         case NATIVE_EVENT_MARKER:
2903                             if (listener != null) {
2904                                 listener.onMarkerReached(track);
2905                             }
2906                             break;
2907                         case NATIVE_EVENT_NEW_POS:
2908                             if (listener != null) {
2909                                 listener.onPeriodicNotification(track);
2910                             }
2911                             break;
2912                         default:
2913                             loge("Unknown native event type: " + msg.what);
2914                             break;
2915                         }
2916                     }
2917                 };
2918             } else {
2919                 mHandler = null;
2920             }
2921         }
2922 
getHandler()2923         Handler getHandler() {
2924             return mHandler;
2925         }
2926     }
2927 
2928     /**
2929      * Helper class to handle the forwarding of native events to the appropriate listener
2930      * (potentially) handled in a different thread
2931      */
2932     private class NativeRoutingEventHandlerDelegate {
2933         private final Handler mHandler;
2934 
NativeRoutingEventHandlerDelegate(final AudioTrack track, final AudioRouting.OnRoutingChangedListener listener, Handler handler)2935         NativeRoutingEventHandlerDelegate(final AudioTrack track,
2936                                    final AudioRouting.OnRoutingChangedListener listener,
2937                                    Handler handler) {
2938             // find the looper for our new event handler
2939             Looper looper;
2940             if (handler != null) {
2941                 looper = handler.getLooper();
2942             } else {
2943                 // no given handler, use the looper the AudioTrack was created in
2944                 looper = mInitializationLooper;
2945             }
2946 
2947             // construct the event handler with this looper
2948             if (looper != null) {
2949                 // implement the event handler delegate
2950                 mHandler = new Handler(looper) {
2951                     @Override
2952                     public void handleMessage(Message msg) {
2953                         if (track == null) {
2954                             return;
2955                         }
2956                         switch(msg.what) {
2957                         case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE:
2958                             if (listener != null) {
2959                                 listener.onRoutingChanged(track);
2960                             }
2961                             break;
2962                         default:
2963                             loge("Unknown native event type: " + msg.what);
2964                             break;
2965                         }
2966                     }
2967                 };
2968             } else {
2969                 mHandler = null;
2970             }
2971         }
2972 
getHandler()2973         Handler getHandler() {
2974             return mHandler;
2975         }
2976     }
2977 
2978     //---------------------------------------------------------
2979     // Methods for IPlayer interface
2980     //--------------------
    @Override
    void playerStart() {
        // IPlayer hook: forwards to the public play() entry point.
        play();
    }
2985 
    @Override
    void playerPause() {
        // IPlayer hook: forwards to the public pause() entry point.
        pause();
    }
2990 
    @Override
    void playerStop() {
        // IPlayer hook: forwards to the public stop() entry point.
        stop();
    }
2995 
2996     //---------------------------------------------------------
2997     // Java methods called from the native side
2998     //--------------------
2999     @SuppressWarnings("unused")
postEventFromNative(Object audiotrack_ref, int what, int arg1, int arg2, Object obj)3000     private static void postEventFromNative(Object audiotrack_ref,
3001             int what, int arg1, int arg2, Object obj) {
3002         //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
3003         AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
3004         if (track == null) {
3005             return;
3006         }
3007 
3008         if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
3009             track.broadcastRoutingChange();
3010             return;
3011         }
3012         NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
3013         if (delegate != null) {
3014             Handler handler = delegate.getHandler();
3015             if (handler != null) {
3016                 Message m = handler.obtainMessage(what, arg1, arg2, obj);
3017                 handler.sendMessage(m);
3018             }
3019         }
3020     }
3021 
3022 
3023     //---------------------------------------------------------
3024     // Native methods called from the Java side
3025     //--------------------
3026 
    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack);

    private native final void native_finalize();

    /**
     * @hide
     */
    public native final void native_release();

    // Transport control forwarded to the native AudioTrack.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Writes of audio data in the various sample formats; isBlocking selects
    // blocking vs. non-blocking behavior.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    // Write taken from a direct NIO buffer (no intermediate array copy).
    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    // Buffer sizing, expressed in frames.
    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    // Per-channel linear gains.
    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // Notification marker position accessors.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    // Periodic position-update notification period accessors.
    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    private native final int native_get_flags();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries that do not need a constructed track instance.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);
3107 
native_setOutputDevice(int deviceId)3108     private native final boolean native_setOutputDevice(int deviceId);
native_getRoutedDeviceId()3109     private native final int native_getRoutedDeviceId();
native_enableDeviceCallback()3110     private native final void native_enableDeviceCallback();
native_disableDeviceCallback()3111     private native final void native_disableDeviceCallback();
native_get_FCC_8()3112     static private native int native_get_FCC_8();
3113 
native_applyVolumeShaper( @onNull VolumeShaper.Configuration configuration, @NonNull VolumeShaper.Operation operation)3114     private native int native_applyVolumeShaper(
3115             @NonNull VolumeShaper.Configuration configuration,
3116             @NonNull VolumeShaper.Operation operation);
3117 
native_getVolumeShaperState(int id)3118     private native @Nullable VolumeShaper.State native_getVolumeShaperState(int id);
3119 
3120     //---------------------------------------------------------
3121     // Utility methods
3122     //------------------
3123 
logd(String msg)3124     private static void logd(String msg) {
3125         Log.d(TAG, msg);
3126     }
3127 
loge(String msg)3128     private static void loge(String msg) {
3129         Log.e(TAG, msg);
3130     }
3131 }
3132