1 /* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 package android.media; 18 19 import static android.media.AudioManager.AUDIO_SESSION_ID_GENERATE; 20 21 import android.annotation.CallbackExecutor; 22 import android.annotation.FloatRange; 23 import android.annotation.IntDef; 24 import android.annotation.IntRange; 25 import android.annotation.NonNull; 26 import android.annotation.Nullable; 27 import android.annotation.RequiresPermission; 28 import android.annotation.SystemApi; 29 import android.annotation.TestApi; 30 import android.compat.annotation.UnsupportedAppUsage; 31 import android.content.AttributionSource; 32 import android.content.AttributionSource.ScopedParcelState; 33 import android.content.Context; 34 import android.media.audiopolicy.AudioMix; 35 import android.media.audiopolicy.AudioMixingRule; 36 import android.media.audiopolicy.AudioPolicy; 37 import android.media.metrics.LogSessionId; 38 import android.os.Binder; 39 import android.os.Build; 40 import android.os.Handler; 41 import android.os.HandlerThread; 42 import android.os.Looper; 43 import android.os.Message; 44 import android.os.Parcel; 45 import android.os.PersistableBundle; 46 import android.util.ArrayMap; 47 import android.util.Log; 48 49 import com.android.internal.annotations.GuardedBy; 50 51 import java.lang.annotation.Retention; 52 import java.lang.annotation.RetentionPolicy; 53 import 
java.lang.ref.WeakReference; 54 import java.nio.ByteBuffer; 55 import java.nio.ByteOrder; 56 import java.nio.NioUtils; 57 import java.util.LinkedList; 58 import java.util.Map; 59 import java.util.Objects; 60 import java.util.concurrent.Executor; 61 62 /** 63 * The AudioTrack class manages and plays a single audio resource for Java applications. 64 * It allows streaming of PCM audio buffers to the audio sink for playback. This is 65 * achieved by "pushing" the data to the AudioTrack object using one of the 66 * {@link #write(byte[], int, int)}, {@link #write(short[], int, int)}, 67 * and {@link #write(float[], int, int, int)} methods. 68 * 69 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br> 70 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using 71 * one of the {@code write()} methods. These are blocking and return when the data has been 72 * transferred from the Java layer to the native layer and queued for playback. The streaming 73 * mode is most useful when playing blocks of audio data that for instance are: 74 * 75 * <ul> 76 * <li>too big to fit in memory because of the duration of the sound to play,</li> 77 * <li>too big to fit in memory because of the characteristics of the audio data 78 * (high sampling rate, bits per sample ...)</li> 79 * <li>received or generated while previously queued audio is playing.</li> 80 * </ul> 81 * 82 * The static mode should be chosen when dealing with short sounds that fit in memory and 83 * that need to be played with the smallest latency possible. The static mode will 84 * therefore be preferred for UI and game sounds that are played often, and with the 85 * smallest overhead possible. 86 * 87 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer. 
88 * The size of this buffer, specified during the construction, determines how long an AudioTrack 89 * can play before running out of data.<br> 90 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can 91 * be played from it.<br> 92 * For the streaming mode, data will be written to the audio sink in chunks of 93 * sizes less than or equal to the total buffer size. 94 * 95 * AudioTrack is not final and thus permits subclasses, but such use is not recommended. 96 */ 97 public class AudioTrack extends PlayerBase 98 implements AudioRouting 99 , VolumeAutomation 100 { 101 //--------------------------------------------------------- 102 // Constants 103 //-------------------- 104 /** Minimum value for a linear gain or auxiliary effect level. 105 * This value must be exactly equal to 0.0f; do not change it. 106 */ 107 private static final float GAIN_MIN = 0.0f; 108 /** Maximum value for a linear gain or auxiliary effect level. 109 * This value must be greater than or equal to 1.0f. 110 */ 111 private static final float GAIN_MAX = 1.0f; 112 113 /** indicates AudioTrack state is stopped */ 114 public static final int PLAYSTATE_STOPPED = 1; // matches SL_PLAYSTATE_STOPPED 115 /** indicates AudioTrack state is paused */ 116 public static final int PLAYSTATE_PAUSED = 2; // matches SL_PLAYSTATE_PAUSED 117 /** indicates AudioTrack state is playing */ 118 public static final int PLAYSTATE_PLAYING = 3; // matches SL_PLAYSTATE_PLAYING 119 /** 120 * @hide 121 * indicates AudioTrack state is stopping waiting for NATIVE_EVENT_STREAM_END to 122 * transition to PLAYSTATE_STOPPED. 123 * Only valid for offload mode. 124 */ 125 private static final int PLAYSTATE_STOPPING = 4; 126 /** 127 * @hide 128 * indicates AudioTrack state is paused from stopping state. Will transition to 129 * PLAYSTATE_STOPPING if play() is called. 130 * Only valid for offload mode. 
131 */ 132 private static final int PLAYSTATE_PAUSED_STOPPING = 5; 133 134 // keep these values in sync with android_media_AudioTrack.cpp 135 /** 136 * Creation mode where audio data is transferred from Java to the native layer 137 * only once before the audio starts playing. 138 */ 139 public static final int MODE_STATIC = 0; 140 /** 141 * Creation mode where audio data is streamed from Java to the native layer 142 * as the audio is playing. 143 */ 144 public static final int MODE_STREAM = 1; 145 146 /** @hide */ 147 @IntDef({ 148 MODE_STATIC, 149 MODE_STREAM 150 }) 151 @Retention(RetentionPolicy.SOURCE) 152 public @interface TransferMode {} 153 154 /** 155 * State of an AudioTrack that was not successfully initialized upon creation. 156 */ 157 public static final int STATE_UNINITIALIZED = 0; 158 /** 159 * State of an AudioTrack that is ready to be used. 160 */ 161 public static final int STATE_INITIALIZED = 1; 162 /** 163 * State of a successfully initialized AudioTrack that uses static data, 164 * but that hasn't received that data yet. 165 */ 166 public static final int STATE_NO_STATIC_DATA = 2; 167 168 /** 169 * Denotes a successful operation. 170 */ 171 public static final int SUCCESS = AudioSystem.SUCCESS; 172 /** 173 * Denotes a generic operation failure. 174 */ 175 public static final int ERROR = AudioSystem.ERROR; 176 /** 177 * Denotes a failure due to the use of an invalid value. 178 */ 179 public static final int ERROR_BAD_VALUE = AudioSystem.BAD_VALUE; 180 /** 181 * Denotes a failure due to the improper use of a method. 182 */ 183 public static final int ERROR_INVALID_OPERATION = AudioSystem.INVALID_OPERATION; 184 /** 185 * An error code indicating that the object reporting it is no longer valid and needs to 186 * be recreated. 187 */ 188 public static final int ERROR_DEAD_OBJECT = AudioSystem.DEAD_OBJECT; 189 /** 190 * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state, 191 * or immediately after start/ACTIVE. 
192 * @hide 193 */ 194 public static final int ERROR_WOULD_BLOCK = AudioSystem.WOULD_BLOCK; 195 196 // Error codes: 197 // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp 198 private static final int ERROR_NATIVESETUP_AUDIOSYSTEM = -16; 199 private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK = -17; 200 private static final int ERROR_NATIVESETUP_INVALIDFORMAT = -18; 201 private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE = -19; 202 private static final int ERROR_NATIVESETUP_NATIVEINITFAILED = -20; 203 204 // Events: 205 // to keep in sync with frameworks/av/include/media/AudioTrack.h 206 // Note: To avoid collisions with other event constants, 207 // do not define an event here that is the same value as 208 // AudioSystem.NATIVE_EVENT_ROUTING_CHANGE. 209 210 /** 211 * Event id denotes when playback head has reached a previously set marker. 212 */ 213 private static final int NATIVE_EVENT_MARKER = 3; 214 /** 215 * Event id denotes when previously set update period has elapsed during playback. 216 */ 217 private static final int NATIVE_EVENT_NEW_POS = 4; 218 /** 219 * Callback for more data 220 */ 221 private static final int NATIVE_EVENT_CAN_WRITE_MORE_DATA = 9; 222 /** 223 * IAudioTrack tear down for offloaded tracks 224 * TODO: when received, java AudioTrack must be released 225 */ 226 private static final int NATIVE_EVENT_NEW_IAUDIOTRACK = 6; 227 /** 228 * Event id denotes when all the buffers queued in AF and HW are played 229 * back (after stop is called) for an offloaded track. 230 */ 231 private static final int NATIVE_EVENT_STREAM_END = 7; 232 /** 233 * Event id denotes when the codec format changes. 234 * 235 * Note: Similar to a device routing change (AudioSystem.NATIVE_EVENT_ROUTING_CHANGE), 236 * this event comes from the AudioFlinger Thread / Output Stream management 237 * (not from buffer indications as above). 
238 */ 239 private static final int NATIVE_EVENT_CODEC_FORMAT_CHANGE = 100; 240 241 private final static String TAG = "android.media.AudioTrack"; 242 243 /** @hide */ 244 @IntDef({ 245 ENCAPSULATION_MODE_NONE, 246 ENCAPSULATION_MODE_ELEMENTARY_STREAM, 247 // ENCAPSULATION_MODE_HANDLE, @SystemApi 248 }) 249 @Retention(RetentionPolicy.SOURCE) 250 public @interface EncapsulationMode {} 251 252 // Important: The ENCAPSULATION_MODE values must be kept in sync with native header files. 253 /** 254 * This mode indicates no metadata encapsulation, 255 * which is the default mode for sending audio data 256 * through {@code AudioTrack}. 257 */ 258 public static final int ENCAPSULATION_MODE_NONE = 0; 259 /** 260 * This mode indicates metadata encapsulation with an elementary stream payload. 261 * Both compressed and PCM format is allowed. 262 */ 263 public static final int ENCAPSULATION_MODE_ELEMENTARY_STREAM = 1; 264 /** 265 * This mode indicates metadata encapsulation with a handle payload 266 * and is set through {@link Builder#setEncapsulationMode(int)}. 267 * The handle is a 64 bit long, provided by the Tuner API 268 * in {@link android.os.Build.VERSION_CODES#R}. 269 * @hide 270 */ 271 @SystemApi 272 @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING) 273 public static final int ENCAPSULATION_MODE_HANDLE = 2; 274 275 /** 276 * Enumeration of metadata types permitted for use by 277 * encapsulation mode audio streams. 278 * @hide 279 */ 280 @IntDef(prefix = {"ENCAPSULATION_METADATA_TYPE_"}, 281 value = 282 { 283 ENCAPSULATION_METADATA_TYPE_NONE, /* reserved */ 284 ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER, 285 ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR, 286 ENCAPSULATION_METADATA_TYPE_SUPPLEMENTARY_AUDIO_PLACEMENT, 287 }) 288 @Retention(RetentionPolicy.SOURCE) 289 public @interface EncapsulationMetadataType {} 290 291 /** 292 * Reserved do not use. 
293 * @hide 294 */ 295 public static final int ENCAPSULATION_METADATA_TYPE_NONE = 0; // reserved 296 297 /** 298 * Encapsulation metadata type for framework tuner information. 299 * 300 * Refer to the Android Media TV Tuner API for details. 301 */ 302 public static final int ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER = 1; 303 304 /** 305 * Encapsulation metadata type for DVB AD descriptor. 306 * 307 * This metadata is formatted per ETSI TS 101 154 Table E.1: AD_descriptor. 308 */ 309 public static final int ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR = 2; 310 311 /** 312 * Encapsulation metadata type for placement of supplementary audio. 313 * 314 * A 32 bit integer constant, one of {@link #SUPPLEMENTARY_AUDIO_PLACEMENT_NORMAL}, {@link 315 * #SUPPLEMENTARY_AUDIO_PLACEMENT_LEFT}, {@link #SUPPLEMENTARY_AUDIO_PLACEMENT_RIGHT}. 316 */ 317 public static final int ENCAPSULATION_METADATA_TYPE_SUPPLEMENTARY_AUDIO_PLACEMENT = 3; 318 319 /** 320 * Enumeration of supplementary audio placement types. 321 * @hide 322 */ 323 @IntDef(prefix = {"SUPPLEMENTARY_AUDIO_PLACEMENT_"}, 324 value = 325 { 326 SUPPLEMENTARY_AUDIO_PLACEMENT_NORMAL, 327 SUPPLEMENTARY_AUDIO_PLACEMENT_LEFT, 328 SUPPLEMENTARY_AUDIO_PLACEMENT_RIGHT, 329 }) 330 @Retention(RetentionPolicy.SOURCE) 331 public @interface SupplementaryAudioPlacement {} 332 // Important: The SUPPLEMENTARY_AUDIO_PLACEMENT values must be kept in sync with native header 333 // files. 334 335 /** 336 * Supplementary audio placement normal. 337 */ 338 public static final int SUPPLEMENTARY_AUDIO_PLACEMENT_NORMAL = 0; 339 340 /** 341 * Supplementary audio placement left. 342 */ 343 public static final int SUPPLEMENTARY_AUDIO_PLACEMENT_LEFT = 1; 344 345 /** 346 * Supplementary audio placement right. 347 */ 348 public static final int SUPPLEMENTARY_AUDIO_PLACEMENT_RIGHT = 2; 349 350 /* Dual Mono handling is used when a stereo audio stream 351 * contains separate audio content on the left and right channels. 
352 * Such information about the content of the stream may be found, for example, in 353 * ITU T-REC-J.94-201610 A.6.2.3 Component descriptor. 354 */ 355 /** @hide */ 356 @IntDef({ 357 DUAL_MONO_MODE_OFF, 358 DUAL_MONO_MODE_LR, 359 DUAL_MONO_MODE_LL, 360 DUAL_MONO_MODE_RR, 361 }) 362 @Retention(RetentionPolicy.SOURCE) 363 public @interface DualMonoMode {} 364 // Important: The DUAL_MONO_MODE values must be kept in sync with native header files. 365 /** 366 * This mode disables any Dual Mono presentation effect. 367 * 368 */ 369 public static final int DUAL_MONO_MODE_OFF = 0; 370 371 /** 372 * This mode indicates that a stereo stream should be presented 373 * with the left and right audio channels blended together 374 * and delivered to both channels. 375 * 376 * Behavior for non-stereo streams is implementation defined. 377 * A suggested guideline is that the left-right stereo symmetric 378 * channels are pairwise blended; 379 * the other channels such as center are left alone. 380 * 381 * The Dual Mono effect occurs before volume scaling. 382 */ 383 public static final int DUAL_MONO_MODE_LR = 1; 384 385 /** 386 * This mode indicates that a stereo stream should be presented 387 * with the left audio channel replicated into the right audio channel. 388 * 389 * Behavior for non-stereo streams is implementation defined. 390 * A suggested guideline is that all channels with left-right 391 * stereo symmetry will have the left channel position replicated 392 * into the right channel position. 393 * The center channels (with no left/right symmetry) or unbalanced 394 * channels are left alone. 395 * 396 * The Dual Mono effect occurs before volume scaling. 397 */ 398 public static final int DUAL_MONO_MODE_LL = 2; 399 400 /** 401 * This mode indicates that a stereo stream should be presented 402 * with the right audio channel replicated into the left audio channel. 403 * 404 * Behavior for non-stereo streams is implementation defined. 
     * A suggested guideline is that all channels with left-right
     * stereo symmetry will have the right channel position replicated
     * into the left channel position.
     * The center channels (with no left/right symmetry) or unbalanced
     * channels are left alone.
     *
     * The Dual Mono effect occurs before volume scaling.
     */
    public static final int DUAL_MONO_MODE_RR = 3;

    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    /** @hide */
    @IntDef({
        PERFORMANCE_MODE_NONE,
        PERFORMANCE_MODE_LOW_LATENCY,
        PERFORMANCE_MODE_POWER_SAVING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface PerformanceMode {}

    /**
     * Default performance mode for an {@link AudioTrack}.
     */
    public static final int PERFORMANCE_MODE_NONE = 0;

    /**
     * Low latency performance mode for an {@link AudioTrack}.
458 * If the device supports it, this mode 459 * enables a lower latency path through to the audio output sink. 460 * Effects may no longer work with such an {@code AudioTrack} and 461 * the sample rate must match that of the output sink. 462 * <p> 463 * Applications should be aware that low latency requires careful 464 * buffer management, with smaller chunks of audio data written by each 465 * {@code write()} call. 466 * <p> 467 * If this flag is used without specifying a {@code bufferSizeInBytes} then the 468 * {@code AudioTrack}'s actual buffer size may be too small. 469 * It is recommended that a fairly 470 * large buffer should be specified when the {@code AudioTrack} is created. 471 * Then the actual size can be reduced by calling 472 * {@link #setBufferSizeInFrames(int)}. The buffer size can be optimized 473 * by lowering it after each {@code write()} call until the audio glitches, 474 * which is detected by calling 475 * {@link #getUnderrunCount()}. Then the buffer size can be increased 476 * until there are no glitches. 477 * This tuning step should be done while playing silence. 478 * This technique provides a compromise between latency and glitch rate. 479 */ 480 public static final int PERFORMANCE_MODE_LOW_LATENCY = 1; 481 482 /** 483 * Power saving performance mode for an {@link AudioTrack}. 484 * If the device supports it, this 485 * mode will enable a lower power path to the audio output sink. 486 * In addition, this lower power path typically will have 487 * deeper internal buffers and better underrun resistance, 488 * with a tradeoff of higher latency. 489 * <p> 490 * In this mode, applications should attempt to use a larger buffer size 491 * and deliver larger chunks of audio data per {@code write()} call. 492 * Use {@link #getBufferSizeInFrames()} to determine 493 * the actual buffer size of the {@code AudioTrack} as it may have increased 494 * to accommodate a deeper buffer. 
495 */ 496 public static final int PERFORMANCE_MODE_POWER_SAVING = 2; 497 498 // keep in sync with system/media/audio/include/system/audio-base.h 499 private static final int AUDIO_OUTPUT_FLAG_FAST = 0x4; 500 private static final int AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8; 501 502 // Size of HW_AV_SYNC track AV header. 503 private static final float HEADER_V2_SIZE_BYTES = 20.0f; 504 505 //-------------------------------------------------------------------------- 506 // Member variables 507 //-------------------- 508 /** 509 * Indicates the state of the AudioTrack instance. 510 * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA. 511 */ 512 private int mState = STATE_UNINITIALIZED; 513 /** 514 * Indicates the play state of the AudioTrack instance. 515 * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING. 516 */ 517 private int mPlayState = PLAYSTATE_STOPPED; 518 519 /** 520 * Indicates that we are expecting an end of stream callback following a call 521 * to setOffloadEndOfStream() in a gapless track transition context. The native track 522 * will be restarted automatically. 523 */ 524 private boolean mOffloadEosPending = false; 525 526 /** 527 * Lock to ensure mPlayState updates reflect the actual state of the object. 528 */ 529 private final Object mPlayStateLock = new Object(); 530 /** 531 * Sizes of the audio buffer. 532 * These values are set during construction and can be stale. 533 * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}. 534 */ 535 private int mNativeBufferSizeInBytes = 0; 536 private int mNativeBufferSizeInFrames = 0; 537 /** 538 * Handler for events coming from the native code. 539 */ 540 private NativePositionEventHandlerDelegate mEventHandlerDelegate; 541 /** 542 * Looper associated with the thread that creates the AudioTrack instance. 543 */ 544 private final Looper mInitializationLooper; 545 /** 546 * The audio data source sampling rate in Hz. 
547 * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}. 548 */ 549 private int mSampleRate; // initialized by all constructors via audioParamCheck() 550 /** 551 * The number of audio output channels (1 is mono, 2 is stereo, etc.). 552 */ 553 private int mChannelCount = 1; 554 /** 555 * The audio channel mask used for calling native AudioTrack 556 */ 557 private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO; 558 559 /** 560 * The type of the audio stream to play. See 561 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 562 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 563 * {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and 564 * {@link AudioManager#STREAM_DTMF}. 565 */ 566 @UnsupportedAppUsage 567 private int mStreamType = AudioManager.STREAM_MUSIC; 568 569 /** 570 * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM. 571 */ 572 private int mDataLoadMode = MODE_STREAM; 573 /** 574 * The current channel position mask, as specified on AudioTrack creation. 575 * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}. 576 * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified. 577 */ 578 private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO; 579 /** 580 * The channel index mask if specified, otherwise 0. 581 */ 582 private int mChannelIndexMask = 0; 583 /** 584 * The encoding of the audio samples. 585 * @see AudioFormat#ENCODING_PCM_8BIT 586 * @see AudioFormat#ENCODING_PCM_16BIT 587 * @see AudioFormat#ENCODING_PCM_FLOAT 588 */ 589 private int mAudioFormat; // initialized by all constructors via audioParamCheck() 590 /** 591 * The AudioAttributes used in configuration. 
592 */ 593 private AudioAttributes mConfiguredAudioAttributes; 594 /** 595 * Audio session ID 596 */ 597 private int mSessionId = AUDIO_SESSION_ID_GENERATE; 598 /** 599 * HW_AV_SYNC track AV Sync Header 600 */ 601 private ByteBuffer mAvSyncHeader = null; 602 /** 603 * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header 604 */ 605 private int mAvSyncBytesRemaining = 0; 606 /** 607 * Offset of the first sample of the audio in byte from start of HW_AV_SYNC track AV header. 608 */ 609 private int mOffset = 0; 610 /** 611 * Indicates whether the track is intended to play in offload mode. 612 */ 613 private boolean mOffloaded = false; 614 /** 615 * When offloaded track: delay for decoder in frames 616 */ 617 private int mOffloadDelayFrames = 0; 618 /** 619 * When offloaded track: padding for decoder in frames 620 */ 621 private int mOffloadPaddingFrames = 0; 622 623 /** 624 * The log session id used for metrics. 625 * {@link LogSessionId#LOG_SESSION_ID_NONE} here means it is not set. 626 */ 627 @NonNull private LogSessionId mLogSessionId = LogSessionId.LOG_SESSION_ID_NONE; 628 629 private AudioPolicy mAudioPolicy; 630 631 //-------------------------------- 632 // Used exclusively by native code 633 //-------------------- 634 /** 635 * @hide 636 * Accessed by native methods: provides access to C++ AudioTrack object. 637 */ 638 @SuppressWarnings("unused") 639 @UnsupportedAppUsage 640 protected long mNativeTrackInJavaObj; 641 /** 642 * Accessed by native methods: provides access to the JNI data (i.e. resources used by 643 * the native AudioTrack object, but not stored in it). 644 */ 645 @SuppressWarnings("unused") 646 @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553) 647 private long mJniData; 648 649 650 //-------------------------------------------------------------------------- 651 // Constructor, Finalize 652 //-------------------- 653 /** 654 * Class constructor. 
655 * @param streamType the type of the audio stream. See 656 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 657 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 658 * {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}. 659 * @param sampleRateInHz the initial source sample rate expressed in Hz. 660 * {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value 661 * which is usually the sample rate of the sink. 662 * {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen. 663 * @param channelConfig describes the configuration of the audio channels. 664 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 665 * {@link AudioFormat#CHANNEL_OUT_STEREO} 666 * @param audioFormat the format in which the audio data is represented. 667 * See {@link AudioFormat#ENCODING_PCM_16BIT}, 668 * {@link AudioFormat#ENCODING_PCM_8BIT}, 669 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 670 * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is 671 * read from for playback. This should be a nonzero multiple of the frame size in bytes. 672 * <p> If the track's creation mode is {@link #MODE_STATIC}, 673 * this is the maximum length sample, or audio clip, that can be played by this instance. 674 * <p> If the track's creation mode is {@link #MODE_STREAM}, 675 * this should be the desired buffer size 676 * for the <code>AudioTrack</code> to satisfy the application's 677 * latency requirements. 678 * If <code>bufferSizeInBytes</code> is less than the 679 * minimum buffer size for the output sink, it is increased to the minimum 680 * buffer size. 681 * The method {@link #getBufferSizeInFrames()} returns the 682 * actual size in frames of the buffer created, which 683 * determines the minimum frequency to write 684 * to the streaming <code>AudioTrack</code> to avoid underrun. 
     * See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     * for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException
     * @deprecated use {@link Builder} or
     *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
     *   {@link AudioAttributes} instead of the stream type which is only for volume control.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
            throws IllegalArgumentException {
        // Delegate to the session-id variant, asking the system to generate a new session.
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AUDIO_SESSION_ID_GENERATE);
    }

    /**
     * Class constructor with audio session. Use this constructor when the AudioTrack must be
     * attached to a particular audio session. The primary use of the audio session ID is to
     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
     * and media players in the same session and not to the output mix.
     * When an AudioTrack is created without specifying a session, it will create its own session
     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
     * session
     * with all other media players or audio tracks in the same session, otherwise a new session
     * will be created for this track if none is supplied.
     * @param streamType the type of the audio stream.
See 713 * {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM}, 714 * {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC}, 715 * {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}. 716 * @param sampleRateInHz the initial source sample rate expressed in Hz. 717 * {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value 718 * which is usually the sample rate of the sink. 719 * @param channelConfig describes the configuration of the audio channels. 720 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 721 * {@link AudioFormat#CHANNEL_OUT_STEREO} 722 * @param audioFormat the format in which the audio data is represented. 723 * See {@link AudioFormat#ENCODING_PCM_16BIT} and 724 * {@link AudioFormat#ENCODING_PCM_8BIT}, 725 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 726 * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is 727 * read from for playback. This should be a nonzero multiple of the frame size in bytes. 728 * <p> If the track's creation mode is {@link #MODE_STATIC}, 729 * this is the maximum length sample, or audio clip, that can be played by this instance. 730 * <p> If the track's creation mode is {@link #MODE_STREAM}, 731 * this should be the desired buffer size 732 * for the <code>AudioTrack</code> to satisfy the application's 733 * latency requirements. 734 * If <code>bufferSizeInBytes</code> is less than the 735 * minimum buffer size for the output sink, it is increased to the minimum 736 * buffer size. 737 * The method {@link #getBufferSizeInFrames()} returns the 738 * actual size in frames of the buffer created, which 739 * determines the minimum frequency to write 740 * to the streaming <code>AudioTrack</code> to avoid underrun. 741 * You can write data into this buffer in smaller chunks than this size. 
     * See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     * for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException
     * @deprecated use {@link Builder} or
     *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
     *   {@link AudioAttributes} instead of the stream type which is only for volume control.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
            throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        // Translate the legacy stream type into AudioAttributes and delegate to the
        // attributes-based constructor; the stream type is kept only for volume control.
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
        // Log the use of the deprecated stream-type entry point for app-compat metrics.
        deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
    }

    /**
     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @param format a non-null {@link AudioFormat} instance describing the format of the data
     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
     *     configuring the audio format parameters such as encoding, channel mask and sample rate.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *     read from for playback. This should be a nonzero multiple of the frame size in bytes.
776 * <p> If the track's creation mode is {@link #MODE_STATIC}, 777 * this is the maximum length sample, or audio clip, that can be played by this instance. 778 * <p> If the track's creation mode is {@link #MODE_STREAM}, 779 * this should be the desired buffer size 780 * for the <code>AudioTrack</code> to satisfy the application's 781 * latency requirements. 782 * If <code>bufferSizeInBytes</code> is less than the 783 * minimum buffer size for the output sink, it is increased to the minimum 784 * buffer size. 785 * The method {@link #getBufferSizeInFrames()} returns the 786 * actual size in frames of the buffer created, which 787 * determines the minimum frequency to write 788 * to the streaming <code>AudioTrack</code> to avoid underrun. 789 * See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size 790 * for an AudioTrack instance in streaming mode. 791 * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}. 792 * @param sessionId ID of audio session the AudioTrack must be attached to, or 793 * {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction 794 * time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before 795 * construction. 
796 * @throws IllegalArgumentException 797 */ AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, int mode, int sessionId)798 public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, 799 int mode, int sessionId) 800 throws IllegalArgumentException { 801 this(null /* context */, attributes, format, bufferSizeInBytes, mode, sessionId, 802 false /*offload*/, ENCAPSULATION_MODE_NONE, null /* tunerConfiguration */); 803 } 804 AudioTrack(@ullable Context context, AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes, int mode, int sessionId, boolean offload, int encapsulationMode, @Nullable TunerConfiguration tunerConfiguration)805 private AudioTrack(@Nullable Context context, AudioAttributes attributes, AudioFormat format, 806 int bufferSizeInBytes, int mode, int sessionId, boolean offload, int encapsulationMode, 807 @Nullable TunerConfiguration tunerConfiguration) 808 throws IllegalArgumentException { 809 super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK); 810 // mState already == STATE_UNINITIALIZED 811 812 mConfiguredAudioAttributes = attributes; // object copy not needed, immutable. 
813 814 if (format == null) { 815 throw new IllegalArgumentException("Illegal null AudioFormat"); 816 } 817 818 // Check if we should enable deep buffer mode 819 if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) { 820 mAttributes = new AudioAttributes.Builder(mAttributes) 821 .replaceFlags((mAttributes.getAllFlags() 822 | AudioAttributes.FLAG_DEEP_BUFFER) 823 & ~AudioAttributes.FLAG_LOW_LATENCY) 824 .build(); 825 } 826 827 // remember which looper is associated with the AudioTrack instantiation 828 Looper looper; 829 if ((looper = Looper.myLooper()) == null) { 830 looper = Looper.getMainLooper(); 831 } 832 833 int rate = format.getSampleRate(); 834 if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) { 835 rate = 0; 836 } 837 838 int channelIndexMask = 0; 839 if ((format.getPropertySetMask() 840 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) { 841 channelIndexMask = format.getChannelIndexMask(); 842 } 843 int channelMask = 0; 844 if ((format.getPropertySetMask() 845 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) { 846 channelMask = format.getChannelMask(); 847 } else if (channelIndexMask == 0) { // if no masks at all, use stereo 848 channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT 849 | AudioFormat.CHANNEL_OUT_FRONT_RIGHT; 850 } 851 int encoding = AudioFormat.ENCODING_DEFAULT; 852 if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) { 853 encoding = format.getEncoding(); 854 } 855 audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode); 856 mOffloaded = offload; 857 mStreamType = AudioSystem.STREAM_DEFAULT; 858 859 audioBuffSizeCheck(bufferSizeInBytes); 860 861 mInitializationLooper = looper; 862 863 if (sessionId < 0) { 864 throw new IllegalArgumentException("Invalid audio session ID: "+sessionId); 865 } 866 867 int[] sampleRate = new int[] {mSampleRate}; 868 int[] session = new int[1]; 869 session[0] = resolvePlaybackSessionId(context, sessionId); 870 871 
AttributionSource attributionSource = context == null 872 ? AttributionSource.myAttributionSource() : context.getAttributionSource(); 873 874 // native initialization 875 try (ScopedParcelState attributionSourceState = attributionSource.asScopedParcelState()) { 876 int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes, 877 sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat, 878 mNativeBufferSizeInBytes, mDataLoadMode, session, 879 attributionSourceState.getParcel(), 0 /*nativeTrackInJavaObj*/, offload, 880 encapsulationMode, tunerConfiguration, getCurrentOpPackageName()); 881 if (initResult != SUCCESS) { 882 loge("Error code " + initResult + " when initializing AudioTrack."); 883 return; // with mState == STATE_UNINITIALIZED 884 } 885 } 886 887 mSampleRate = sampleRate[0]; 888 mSessionId = session[0]; 889 890 // TODO: consider caching encapsulationMode and tunerConfiguration in the Java object. 891 892 if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) { 893 int frameSizeInBytes; 894 if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) { 895 frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat); 896 } else { 897 frameSizeInBytes = 1; 898 } 899 mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes; 900 } 901 902 if (mDataLoadMode == MODE_STATIC) { 903 mState = STATE_NO_STATIC_DATA; 904 } else { 905 mState = STATE_INITIALIZED; 906 } 907 908 baseRegisterPlayer(mSessionId); 909 native_setPlayerIId(mPlayerIId); // mPlayerIId now ready to send to native AudioTrack. 910 } 911 912 /** 913 * A constructor which explicitly connects a Native (C++) AudioTrack. For use by 914 * the AudioTrackRoutingProxy subclass. 915 * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack 916 * (associated with an OpenSL ES player). 917 * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy, 918 * i.e. IAndroidConfiguration::AcquireJavaProxy(). 
If we call with a 0 in nativeTrackInJavaObj 919 * it means that the OpenSL player interface hasn't been realized, so there is no native 920 * Audiotrack to connect to. In this case wait to call deferred_connect() until the 921 * OpenSLES interface is realized. 922 */ AudioTrack(long nativeTrackInJavaObj)923 /*package*/ AudioTrack(long nativeTrackInJavaObj) { 924 super(new AudioAttributes.Builder().build(), 925 AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK); 926 // "final"s 927 mNativeTrackInJavaObj = 0; 928 mJniData = 0; 929 930 // remember which looper is associated with the AudioTrack instantiation 931 Looper looper; 932 if ((looper = Looper.myLooper()) == null) { 933 looper = Looper.getMainLooper(); 934 } 935 mInitializationLooper = looper; 936 937 // other initialization... 938 if (nativeTrackInJavaObj != 0) { 939 baseRegisterPlayer(AudioSystem.AUDIO_SESSION_ALLOCATE); 940 deferred_connect(nativeTrackInJavaObj); 941 } else { 942 mState = STATE_UNINITIALIZED; 943 } 944 } 945 946 /** 947 * @hide 948 */ 949 @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553) deferred_connect(long nativeTrackInJavaObj)950 /* package */ void deferred_connect(long nativeTrackInJavaObj) { 951 if (mState != STATE_INITIALIZED) { 952 // Note that for this native_setup, we are providing an already created/initialized 953 // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored. 
954 int[] session = { 0 }; 955 int[] rates = { 0 }; 956 try (ScopedParcelState attributionSourceState = 957 AttributionSource.myAttributionSource().asScopedParcelState()) { 958 int initResult = native_setup(new WeakReference<AudioTrack>(this), 959 null /*mAttributes - NA*/, 960 rates /*sampleRate - NA*/, 961 0 /*mChannelMask - NA*/, 962 0 /*mChannelIndexMask - NA*/, 963 0 /*mAudioFormat - NA*/, 964 0 /*mNativeBufferSizeInBytes - NA*/, 965 0 /*mDataLoadMode - NA*/, 966 session, 967 attributionSourceState.getParcel(), 968 nativeTrackInJavaObj, 969 false /*offload*/, 970 ENCAPSULATION_MODE_NONE, 971 null /* tunerConfiguration */, 972 "" /* opPackagename */); 973 if (initResult != SUCCESS) { 974 loge("Error code " + initResult + " when initializing AudioTrack."); 975 return; // with mState == STATE_UNINITIALIZED 976 } 977 } 978 979 mSessionId = session[0]; 980 981 mState = STATE_INITIALIZED; 982 } 983 } 984 985 /** 986 * TunerConfiguration is used to convey tuner information 987 * from the android.media.tv.Tuner API to AudioTrack construction. 988 * 989 * Use the Builder to construct the TunerConfiguration object, 990 * which is then used by the {@link AudioTrack.Builder} to create an AudioTrack. 991 * @hide 992 */ 993 @SystemApi 994 public static class TunerConfiguration { 995 private final int mContentId; 996 private final int mSyncId; 997 998 /** 999 * A special content id for {@link #TunerConfiguration(int, int)} 1000 * indicating audio is delivered 1001 * from an {@code AudioTrack} write, not tunneled from the tuner stack. 1002 */ 1003 public static final int CONTENT_ID_NONE = 0; 1004 1005 /** 1006 * Constructs a TunerConfiguration instance for use in {@link AudioTrack.Builder} 1007 * 1008 * @param contentId selects the audio stream to use. 1009 * The contentId may be obtained from 1010 * {@link android.media.tv.tuner.filter.Filter#getId()}, 1011 * such obtained id is always a positive number. 
1012 * If audio is to be delivered through an {@code AudioTrack} write 1013 * then {@code CONTENT_ID_NONE} may be used. 1014 * @param syncId selects the clock to use for synchronization 1015 * of audio with other streams such as video. 1016 * The syncId may be obtained from 1017 * {@link android.media.tv.tuner.Tuner#getAvSyncHwId()}. 1018 * This is always a positive number. 1019 */ 1020 @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING) TunerConfiguration( @ntRangefrom = 0) int contentId, @IntRange(from = 1)int syncId)1021 public TunerConfiguration( 1022 @IntRange(from = 0) int contentId, @IntRange(from = 1)int syncId) { 1023 if (contentId < 0) { 1024 throw new IllegalArgumentException( 1025 "contentId " + contentId + " must be positive or CONTENT_ID_NONE"); 1026 } 1027 if (syncId < 1) { 1028 throw new IllegalArgumentException("syncId " + syncId + " must be positive"); 1029 } 1030 mContentId = contentId; 1031 mSyncId = syncId; 1032 } 1033 1034 /** 1035 * Returns the contentId. 1036 */ 1037 @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING) getContentId()1038 public @IntRange(from = 1) int getContentId() { 1039 return mContentId; // The Builder ensures this is > 0. 1040 } 1041 1042 /** 1043 * Returns the syncId. 1044 */ 1045 @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING) getSyncId()1046 public @IntRange(from = 1) int getSyncId() { 1047 return mSyncId; // The Builder ensures this is > 0. 1048 } 1049 } 1050 1051 /** 1052 * Builder class for {@link AudioTrack} objects. 1053 * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio 1054 * attributes and audio format parameters, you indicate which of those vary from the default 1055 * behavior on the device. 
1056 * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat} 1057 * parameters, to be used by a new <code>AudioTrack</code> instance: 1058 * 1059 * <pre class="prettyprint"> 1060 * AudioTrack player = new AudioTrack.Builder() 1061 * .setAudioAttributes(new AudioAttributes.Builder() 1062 * .setUsage(AudioAttributes.USAGE_ALARM) 1063 * .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC) 1064 * .build()) 1065 * .setAudioFormat(new AudioFormat.Builder() 1066 * .setEncoding(AudioFormat.ENCODING_PCM_16BIT) 1067 * .setSampleRate(44100) 1068 * .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 1069 * .build()) 1070 * .setBufferSizeInBytes(minBuffSize) 1071 * .build(); 1072 * </pre> 1073 * <p> 1074 * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)}, 1075 * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used. 1076 * <br>If the audio format is not specified or is incomplete, its channel configuration will be 1077 * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be 1078 * {@link AudioFormat#ENCODING_PCM_16BIT}. 1079 * The sample rate will depend on the device actually selected for playback and can be queried 1080 * with {@link #getSampleRate()} method. 1081 * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)}, 1082 * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used. 1083 * <br>If the transfer mode is not specified with {@link #setTransferMode(int)}, 1084 * <code>MODE_STREAM</code> will be used. 1085 * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will 1086 * be generated. 1087 * <br>Offload is false by default. 
1088 */ 1089 public static class Builder { 1090 private Context mContext; 1091 private AudioAttributes mAttributes; 1092 private AudioFormat mFormat; 1093 private int mBufferSizeInBytes; 1094 private int mEncapsulationMode = ENCAPSULATION_MODE_NONE; 1095 private int mSessionId = AUDIO_SESSION_ID_GENERATE; 1096 private int mMode = MODE_STREAM; 1097 private int mPerformanceMode = PERFORMANCE_MODE_NONE; 1098 private boolean mOffload = false; 1099 private TunerConfiguration mTunerConfiguration; 1100 private int mCallRedirectionMode = AudioManager.CALL_REDIRECT_NONE; 1101 1102 /** 1103 * Constructs a new Builder with the default values as described above. 1104 */ Builder()1105 public Builder() { 1106 } 1107 1108 /** 1109 * Sets the context the track belongs to. This context will be used to pull information, 1110 * such as {@link android.content.AttributionSource} and device specific audio session ids, 1111 * which will be associated with the {@link AudioTrack}. However, the context itself will 1112 * not be retained by the {@link AudioTrack}. 1113 * @param context a non-null {@link Context} instance 1114 * @return the same Builder instance. 1115 */ setContext(@onNull Context context)1116 public @NonNull Builder setContext(@NonNull Context context) { 1117 mContext = Objects.requireNonNull(context); 1118 return this; 1119 } 1120 1121 /** 1122 * Sets the {@link AudioAttributes}. 1123 * @param attributes a non-null {@link AudioAttributes} instance that describes the audio 1124 * data to be played. 1125 * @return the same Builder instance. 
1126 * @throws IllegalArgumentException 1127 */ setAudioAttributes(@onNull AudioAttributes attributes)1128 public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes) 1129 throws IllegalArgumentException { 1130 if (attributes == null) { 1131 throw new IllegalArgumentException("Illegal null AudioAttributes argument"); 1132 } 1133 // keep reference, we only copy the data when building 1134 mAttributes = attributes; 1135 return this; 1136 } 1137 1138 /** 1139 * Sets the format of the audio data to be played by the {@link AudioTrack}. 1140 * See {@link AudioFormat.Builder} for configuring the audio format parameters such 1141 * as encoding, channel mask and sample rate. 1142 * @param format a non-null {@link AudioFormat} instance. 1143 * @return the same Builder instance. 1144 * @throws IllegalArgumentException 1145 */ setAudioFormat(@onNull AudioFormat format)1146 public @NonNull Builder setAudioFormat(@NonNull AudioFormat format) 1147 throws IllegalArgumentException { 1148 if (format == null) { 1149 throw new IllegalArgumentException("Illegal null AudioFormat argument"); 1150 } 1151 // keep reference, we only copy the data when building 1152 mFormat = format; 1153 return this; 1154 } 1155 1156 /** 1157 * Sets the total size (in bytes) of the buffer where audio data is read from for playback. 1158 * If using the {@link AudioTrack} in streaming mode 1159 * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller 1160 * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine 1161 * the estimated minimum buffer size for the creation of an AudioTrack instance 1162 * in streaming mode. 1163 * <br>If using the <code>AudioTrack</code> in static mode (see 1164 * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be 1165 * played by this instance. 1166 * @param bufferSizeInBytes 1167 * @return the same Builder instance. 
1168 * @throws IllegalArgumentException 1169 */ setBufferSizeInBytes(@ntRangefrom = 0) int bufferSizeInBytes)1170 public @NonNull Builder setBufferSizeInBytes(@IntRange(from = 0) int bufferSizeInBytes) 1171 throws IllegalArgumentException { 1172 if (bufferSizeInBytes <= 0) { 1173 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes); 1174 } 1175 mBufferSizeInBytes = bufferSizeInBytes; 1176 return this; 1177 } 1178 1179 /** 1180 * Sets the encapsulation mode. 1181 * 1182 * Encapsulation mode allows metadata to be sent together with 1183 * the audio data payload in a {@code ByteBuffer}. 1184 * This requires a compatible hardware audio codec. 1185 * 1186 * @param encapsulationMode one of {@link AudioTrack#ENCAPSULATION_MODE_NONE}, 1187 * or {@link AudioTrack#ENCAPSULATION_MODE_ELEMENTARY_STREAM}. 1188 * @return the same Builder instance. 1189 */ 1190 // Note: with the correct permission {@code AudioTrack#ENCAPSULATION_MODE_HANDLE} 1191 // may be used as well. setEncapsulationMode(@ncapsulationMode int encapsulationMode)1192 public @NonNull Builder setEncapsulationMode(@EncapsulationMode int encapsulationMode) { 1193 switch (encapsulationMode) { 1194 case ENCAPSULATION_MODE_NONE: 1195 case ENCAPSULATION_MODE_ELEMENTARY_STREAM: 1196 case ENCAPSULATION_MODE_HANDLE: 1197 mEncapsulationMode = encapsulationMode; 1198 break; 1199 default: 1200 throw new IllegalArgumentException( 1201 "Invalid encapsulation mode " + encapsulationMode); 1202 } 1203 return this; 1204 } 1205 1206 /** 1207 * Sets the mode under which buffers of audio data are transferred from the 1208 * {@link AudioTrack} to the framework. 1209 * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}. 1210 * @return the same Builder instance. 
1211 * @throws IllegalArgumentException 1212 */ setTransferMode(@ransferMode int mode)1213 public @NonNull Builder setTransferMode(@TransferMode int mode) 1214 throws IllegalArgumentException { 1215 switch(mode) { 1216 case MODE_STREAM: 1217 case MODE_STATIC: 1218 mMode = mode; 1219 break; 1220 default: 1221 throw new IllegalArgumentException("Invalid transfer mode " + mode); 1222 } 1223 return this; 1224 } 1225 1226 /** 1227 * Sets the session ID the {@link AudioTrack} will be attached to. 1228 * 1229 * Note, that if there's a device specific session id asociated with the context, explicitly 1230 * setting a session id using this method will override it 1231 * (see {@link Builder#setContext(Context)}). 1232 * @param sessionId a strictly positive ID number retrieved from another 1233 * <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by 1234 * {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or 1235 * {@link AudioManager#AUDIO_SESSION_ID_GENERATE}. 1236 * @return the same Builder instance. 1237 * @throws IllegalArgumentException 1238 */ setSessionId(@ntRangefrom = 1) int sessionId)1239 public @NonNull Builder setSessionId(@IntRange(from = 1) int sessionId) 1240 throws IllegalArgumentException { 1241 if ((sessionId != AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) { 1242 throw new IllegalArgumentException("Invalid audio session ID " + sessionId); 1243 } 1244 mSessionId = sessionId; 1245 return this; 1246 } 1247 1248 /** 1249 * Sets the {@link AudioTrack} performance mode. This is an advisory request which 1250 * may not be supported by the particular device, and the framework is free 1251 * to ignore such request if it is incompatible with other requests or hardware. 1252 * 1253 * @param performanceMode one of 1254 * {@link AudioTrack#PERFORMANCE_MODE_NONE}, 1255 * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY}, 1256 * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}. 1257 * @return the same Builder instance. 
1258 * @throws IllegalArgumentException if {@code performanceMode} is not valid. 1259 */ setPerformanceMode(@erformanceMode int performanceMode)1260 public @NonNull Builder setPerformanceMode(@PerformanceMode int performanceMode) { 1261 switch (performanceMode) { 1262 case PERFORMANCE_MODE_NONE: 1263 case PERFORMANCE_MODE_LOW_LATENCY: 1264 case PERFORMANCE_MODE_POWER_SAVING: 1265 mPerformanceMode = performanceMode; 1266 break; 1267 default: 1268 throw new IllegalArgumentException( 1269 "Invalid performance mode " + performanceMode); 1270 } 1271 return this; 1272 } 1273 1274 /** 1275 * Sets whether this track will play through the offloaded audio path. 1276 * When set to true, at build time, the audio format will be checked against 1277 * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)} 1278 * to verify the audio format used by this track is supported on the device's offload 1279 * path (if any). 1280 * <br>Offload is only supported for media audio streams, and therefore requires that 1281 * the usage be {@link AudioAttributes#USAGE_MEDIA}. 1282 * @param offload true to require the offload path for playback. 1283 * @return the same Builder instance. 1284 */ setOffloadedPlayback(boolean offload)1285 public @NonNull Builder setOffloadedPlayback(boolean offload) { 1286 mOffload = offload; 1287 return this; 1288 } 1289 1290 /** 1291 * Sets the tuner configuration for the {@code AudioTrack}. 1292 * 1293 * The {@link AudioTrack.TunerConfiguration} consists of parameters obtained from 1294 * the Android TV tuner API which indicate the audio content stream id and the 1295 * synchronization id for the {@code AudioTrack}. 1296 * 1297 * @param tunerConfiguration obtained by {@link AudioTrack.TunerConfiguration.Builder}. 1298 * @return the same Builder instance. 
1299 * @hide 1300 */ 1301 @SystemApi 1302 @RequiresPermission(android.Manifest.permission.MODIFY_AUDIO_ROUTING) setTunerConfiguration( @onNull TunerConfiguration tunerConfiguration)1303 public @NonNull Builder setTunerConfiguration( 1304 @NonNull TunerConfiguration tunerConfiguration) { 1305 if (tunerConfiguration == null) { 1306 throw new IllegalArgumentException("tunerConfiguration is null"); 1307 } 1308 mTunerConfiguration = tunerConfiguration; 1309 return this; 1310 } 1311 1312 /** 1313 * @hide 1314 * Sets the {@link AudioTrack} call redirection mode. 1315 * Used when creating an AudioTrack to inject audio to call uplink path. The mode 1316 * indicates if the call is a PSTN call or a VoIP call in which case a dynamic audio 1317 * policy is created to use this track as the source for all capture with voice 1318 * communication preset. 1319 * 1320 * @param callRedirectionMode one of 1321 * {@link AudioManager#CALL_REDIRECT_NONE}, 1322 * {@link AudioManager#CALL_REDIRECT_PSTN}, 1323 * or {@link AAudioManager#CALL_REDIRECT_VOIP}. 1324 * @return the same Builder instance. 1325 * @throws IllegalArgumentException if {@code callRedirectionMode} is not valid. 
1326 */ setCallRedirectionMode( @udioManager.CallRedirectionMode int callRedirectionMode)1327 public @NonNull Builder setCallRedirectionMode( 1328 @AudioManager.CallRedirectionMode int callRedirectionMode) { 1329 switch (callRedirectionMode) { 1330 case AudioManager.CALL_REDIRECT_NONE: 1331 case AudioManager.CALL_REDIRECT_PSTN: 1332 case AudioManager.CALL_REDIRECT_VOIP: 1333 mCallRedirectionMode = callRedirectionMode; 1334 break; 1335 default: 1336 throw new IllegalArgumentException( 1337 "Invalid call redirection mode " + callRedirectionMode); 1338 } 1339 return this; 1340 } 1341 buildCallInjectionTrack()1342 private @NonNull AudioTrack buildCallInjectionTrack() { 1343 AudioMixingRule audioMixingRule = new AudioMixingRule.Builder() 1344 .addMixRule(AudioMixingRule.RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET, 1345 new AudioAttributes.Builder() 1346 .setCapturePreset(MediaRecorder.AudioSource.VOICE_COMMUNICATION) 1347 .setForCallRedirection() 1348 .build()) 1349 .setTargetMixRole(AudioMixingRule.MIX_ROLE_INJECTOR) 1350 .build(); 1351 AudioMix audioMix = new AudioMix.Builder(audioMixingRule) 1352 .setFormat(mFormat) 1353 .setRouteFlags(AudioMix.ROUTE_FLAG_LOOP_BACK) 1354 .build(); 1355 AudioPolicy audioPolicy = 1356 new AudioPolicy.Builder(/*context=*/ mContext).addMix(audioMix).build(); 1357 1358 if (AudioManager.registerAudioPolicyStatic(audioPolicy) != 0) { 1359 throw new UnsupportedOperationException("Error: could not register audio policy"); 1360 } 1361 AudioTrack track = audioPolicy.createAudioTrackSource(audioMix); 1362 if (track == null) { 1363 throw new UnsupportedOperationException("Cannot create injection AudioTrack"); 1364 } 1365 track.unregisterAudioPolicyOnRelease(audioPolicy); 1366 return track; 1367 } 1368 1369 /** 1370 * Builds an {@link AudioTrack} instance initialized with all the parameters set 1371 * on this <code>Builder</code>. 1372 * @return a new successfully initialized {@link AudioTrack} instance. 
1373 * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code> 1374 * were incompatible, or if they are not supported by the device, 1375 * or if the device was not available. 1376 */ build()1377 public @NonNull AudioTrack build() throws UnsupportedOperationException { 1378 if (mAttributes == null) { 1379 mAttributes = new AudioAttributes.Builder() 1380 .setUsage(AudioAttributes.USAGE_MEDIA) 1381 .build(); 1382 } 1383 switch (mPerformanceMode) { 1384 case PERFORMANCE_MODE_LOW_LATENCY: 1385 mAttributes = new AudioAttributes.Builder(mAttributes) 1386 .replaceFlags((mAttributes.getAllFlags() 1387 | AudioAttributes.FLAG_LOW_LATENCY) 1388 & ~AudioAttributes.FLAG_DEEP_BUFFER) 1389 .build(); 1390 break; 1391 case PERFORMANCE_MODE_NONE: 1392 if (!shouldEnablePowerSaving(mAttributes, mFormat, mBufferSizeInBytes, mMode)) { 1393 break; // do not enable deep buffer mode. 1394 } 1395 // permitted to fall through to enable deep buffer 1396 case PERFORMANCE_MODE_POWER_SAVING: 1397 mAttributes = new AudioAttributes.Builder(mAttributes) 1398 .replaceFlags((mAttributes.getAllFlags() 1399 | AudioAttributes.FLAG_DEEP_BUFFER) 1400 & ~AudioAttributes.FLAG_LOW_LATENCY) 1401 .build(); 1402 break; 1403 } 1404 1405 if (mFormat == null) { 1406 mFormat = new AudioFormat.Builder() 1407 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO) 1408 //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED) 1409 .setEncoding(AudioFormat.ENCODING_DEFAULT) 1410 .build(); 1411 } 1412 1413 if (mCallRedirectionMode == AudioManager.CALL_REDIRECT_VOIP) { 1414 return buildCallInjectionTrack(); 1415 } else if (mCallRedirectionMode == AudioManager.CALL_REDIRECT_PSTN) { 1416 mAttributes = new AudioAttributes.Builder(mAttributes) 1417 .setForCallRedirection() 1418 .build(); 1419 } 1420 1421 if (mOffload) { 1422 if (mPerformanceMode == PERFORMANCE_MODE_LOW_LATENCY) { 1423 throw new UnsupportedOperationException( 1424 "Offload and low latency modes are incompatible"); 1425 } 1426 if 
(AudioSystem.getDirectPlaybackSupport(mFormat, mAttributes) 1427 == AudioSystem.DIRECT_NOT_SUPPORTED) { 1428 throw new UnsupportedOperationException( 1429 "Cannot create AudioTrack, offload format / attributes not supported"); 1430 } 1431 } 1432 1433 // TODO: Check mEncapsulationMode compatibility with MODE_STATIC, etc? 1434 1435 // If the buffer size is not specified in streaming mode, 1436 // use a single frame for the buffer size and let the 1437 // native code figure out the minimum buffer size. 1438 if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) { 1439 int bytesPerSample = 1; 1440 if (AudioFormat.isEncodingLinearFrames(mFormat.getEncoding())) { 1441 try { 1442 bytesPerSample = mFormat.getBytesPerSample(mFormat.getEncoding()); 1443 } catch (IllegalArgumentException e) { 1444 // do nothing 1445 } 1446 } 1447 mBufferSizeInBytes = mFormat.getChannelCount() * bytesPerSample; 1448 } 1449 1450 try { 1451 final AudioTrack track = new AudioTrack( 1452 mContext, mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId, 1453 mOffload, mEncapsulationMode, mTunerConfiguration); 1454 if (track.getState() == STATE_UNINITIALIZED) { 1455 // release is not necessary 1456 throw new UnsupportedOperationException("Cannot create AudioTrack"); 1457 } 1458 return track; 1459 } catch (IllegalArgumentException e) { 1460 throw new UnsupportedOperationException(e.getMessage()); 1461 } 1462 } 1463 } 1464 1465 /** 1466 * Sets an {@link AudioPolicy} to automatically unregister when the track is released. 1467 * 1468 * <p>This is to prevent users of the call audio injection API from having to manually 1469 * unregister the policy that was used to create the track. 1470 */ unregisterAudioPolicyOnRelease(AudioPolicy audioPolicy)1471 private void unregisterAudioPolicyOnRelease(AudioPolicy audioPolicy) { 1472 mAudioPolicy = audioPolicy; 1473 } 1474 1475 /** 1476 * Configures the delay and padding values for the current compressed stream playing 1477 * in offload mode. 
1478 * This can only be used on a track successfully initialized with 1479 * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. The unit is frames, where a 1480 * frame indicates the number of samples per channel, e.g. 100 frames for a stereo compressed 1481 * stream corresponds to 200 decoded interleaved PCM samples. 1482 * @param delayInFrames number of frames to be ignored at the beginning of the stream. A value 1483 * of 0 indicates no delay is to be applied. 1484 * @param paddingInFrames number of frames to be ignored at the end of the stream. A value of 0 1485 * of 0 indicates no padding is to be applied. 1486 */ setOffloadDelayPadding(@ntRangefrom = 0) int delayInFrames, @IntRange(from = 0) int paddingInFrames)1487 public void setOffloadDelayPadding(@IntRange(from = 0) int delayInFrames, 1488 @IntRange(from = 0) int paddingInFrames) { 1489 if (paddingInFrames < 0) { 1490 throw new IllegalArgumentException("Illegal negative padding"); 1491 } 1492 if (delayInFrames < 0) { 1493 throw new IllegalArgumentException("Illegal negative delay"); 1494 } 1495 if (!mOffloaded) { 1496 throw new IllegalStateException("Illegal use of delay/padding on non-offloaded track"); 1497 } 1498 if (mState == STATE_UNINITIALIZED) { 1499 throw new IllegalStateException("Uninitialized track"); 1500 } 1501 mOffloadDelayFrames = delayInFrames; 1502 mOffloadPaddingFrames = paddingInFrames; 1503 native_set_delay_padding(delayInFrames, paddingInFrames); 1504 } 1505 1506 /** 1507 * Return the decoder delay of an offloaded track, expressed in frames, previously set with 1508 * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified. 1509 * <p>This delay indicates the number of frames to be ignored at the beginning of the stream. 1510 * This value can only be queried on a track successfully initialized with 1511 * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. 1512 * @return decoder delay expressed in frames. 
1513 */ getOffloadDelay()1514 public @IntRange(from = 0) int getOffloadDelay() { 1515 if (!mOffloaded) { 1516 throw new IllegalStateException("Illegal query of delay on non-offloaded track"); 1517 } 1518 if (mState == STATE_UNINITIALIZED) { 1519 throw new IllegalStateException("Illegal query of delay on uninitialized track"); 1520 } 1521 return mOffloadDelayFrames; 1522 } 1523 1524 /** 1525 * Return the decoder padding of an offloaded track, expressed in frames, previously set with 1526 * {@link #setOffloadDelayPadding(int, int)}, or 0 if it was never modified. 1527 * <p>This padding indicates the number of frames to be ignored at the end of the stream. 1528 * This value can only be queried on a track successfully initialized with 1529 * {@link AudioTrack.Builder#setOffloadedPlayback(boolean)}. 1530 * @return decoder padding expressed in frames. 1531 */ getOffloadPadding()1532 public @IntRange(from = 0) int getOffloadPadding() { 1533 if (!mOffloaded) { 1534 throw new IllegalStateException("Illegal query of padding on non-offloaded track"); 1535 } 1536 if (mState == STATE_UNINITIALIZED) { 1537 throw new IllegalStateException("Illegal query of padding on uninitialized track"); 1538 } 1539 return mOffloadPaddingFrames; 1540 } 1541 1542 /** 1543 * Declares that the last write() operation on this track provided the last buffer of this 1544 * stream. 1545 * After the end of stream, previously set padding and delay values are ignored. 1546 * Can only be called only if the AudioTrack is opened in offload mode 1547 * {@see Builder#setOffloadedPlayback(boolean)}. 1548 * Can only be called only if the AudioTrack is in state {@link #PLAYSTATE_PLAYING} 1549 * {@see #getPlayState()}. 1550 * Use this method in the same thread as any write() operation. 
1551 */ setOffloadEndOfStream()1552 public void setOffloadEndOfStream() { 1553 if (!mOffloaded) { 1554 throw new IllegalStateException("EOS not supported on non-offloaded track"); 1555 } 1556 if (mState == STATE_UNINITIALIZED) { 1557 throw new IllegalStateException("Uninitialized track"); 1558 } 1559 if (mPlayState != PLAYSTATE_PLAYING) { 1560 throw new IllegalStateException("EOS not supported if not playing"); 1561 } 1562 synchronized (mStreamEventCbLock) { 1563 if (mStreamEventCbInfoList.size() == 0) { 1564 throw new IllegalStateException("EOS not supported without StreamEventCallback"); 1565 } 1566 } 1567 1568 synchronized (mPlayStateLock) { 1569 native_stop(); 1570 mOffloadEosPending = true; 1571 mPlayState = PLAYSTATE_STOPPING; 1572 } 1573 } 1574 1575 /** 1576 * Returns whether the track was built with {@link Builder#setOffloadedPlayback(boolean)} set 1577 * to {@code true}. 1578 * @return true if the track is using offloaded playback. 1579 */ isOffloadedPlayback()1580 public boolean isOffloadedPlayback() { 1581 return mOffloaded; 1582 } 1583 1584 /** 1585 * Returns whether direct playback of an audio format with the provided attributes is 1586 * currently supported on the system. 1587 * <p>Direct playback means that the audio stream is not resampled or downmixed 1588 * by the framework. Checking for direct support can help the app select the representation 1589 * of audio content that most closely matches the capabilities of the device and peripherials 1590 * (e.g. A/V receiver) connected to it. Note that the provided stream can still be re-encoded 1591 * or mixed with other streams, if needed. 1592 * <p>Also note that this query only provides information about the support of an audio format. 1593 * It does not indicate whether the resources necessary for the playback are available 1594 * at that instant. 1595 * @param format a non-null {@link AudioFormat} instance describing the format of 1596 * the audio data. 
1597 * @param attributes a non-null {@link AudioAttributes} instance. 1598 * @return true if the given audio format can be played directly. 1599 * @deprecated Use {@link AudioManager#getDirectPlaybackSupport(AudioFormat, AudioAttributes)} 1600 * instead. 1601 */ 1602 @Deprecated isDirectPlaybackSupported(@onNull AudioFormat format, @NonNull AudioAttributes attributes)1603 public static boolean isDirectPlaybackSupported(@NonNull AudioFormat format, 1604 @NonNull AudioAttributes attributes) { 1605 if (format == null) { 1606 throw new IllegalArgumentException("Illegal null AudioFormat argument"); 1607 } 1608 if (attributes == null) { 1609 throw new IllegalArgumentException("Illegal null AudioAttributes argument"); 1610 } 1611 return native_is_direct_output_supported(format.getEncoding(), format.getSampleRate(), 1612 format.getChannelMask(), format.getChannelIndexMask(), 1613 attributes.getContentType(), attributes.getUsage(), attributes.getFlags()); 1614 } 1615 1616 /* 1617 * The MAX_LEVEL should be exactly representable by an IEEE 754-2008 base32 float. 1618 * This means fractions must be divisible by a power of 2. For example, 1619 * 10.25f is OK as 0.25 is 1/4, but 10.1f is NOT OK as 1/10 is not expressable by 1620 * a finite binary fraction. 1621 * 1622 * 48.f is the nominal max for API level {@link android os.Build.VERSION_CODES#R}. 1623 * We use this to suggest a baseline range for implementation. 1624 * 1625 * The API contract specification allows increasing this value in a future 1626 * API release, but not decreasing this value. 1627 */ 1628 private static final float MAX_AUDIO_DESCRIPTION_MIX_LEVEL = 48.f; 1629 isValidAudioDescriptionMixLevel(float level)1630 private static boolean isValidAudioDescriptionMixLevel(float level) { 1631 return !(Float.isNaN(level) || level > MAX_AUDIO_DESCRIPTION_MIX_LEVEL); 1632 } 1633 1634 /** 1635 * Sets the Audio Description mix level in dB. 
1636 * 1637 * For AudioTracks incorporating a secondary Audio Description stream 1638 * (where such contents may be sent through an Encapsulation Mode 1639 * other than {@link #ENCAPSULATION_MODE_NONE}). 1640 * or internally by a HW channel), 1641 * the level of mixing of the Audio Description to the Main Audio stream 1642 * is controlled by this method. 1643 * 1644 * Such mixing occurs <strong>prior</strong> to overall volume scaling. 1645 * 1646 * @param level a floating point value between 1647 * {@code Float.NEGATIVE_INFINITY} to {@code +48.f}, 1648 * where {@code Float.NEGATIVE_INFINITY} means the Audio Description is not mixed 1649 * and a level of {@code 0.f} means the Audio Description is mixed without scaling. 1650 * @return true on success, false on failure. 1651 */ setAudioDescriptionMixLeveldB( @loatRangeto = 48.f, toInclusive = true) float level)1652 public boolean setAudioDescriptionMixLeveldB( 1653 @FloatRange(to = 48.f, toInclusive = true) float level) { 1654 if (!isValidAudioDescriptionMixLevel(level)) { 1655 throw new IllegalArgumentException("level is out of range" + level); 1656 } 1657 return native_set_audio_description_mix_level_db(level) == SUCCESS; 1658 } 1659 1660 /** 1661 * Returns the Audio Description mix level in dB. 1662 * 1663 * If Audio Description mixing is unavailable from the hardware device, 1664 * a value of {@code Float.NEGATIVE_INFINITY} is returned. 1665 * 1666 * @return the current Audio Description Mix Level in dB. 1667 * A value of {@code Float.NEGATIVE_INFINITY} means 1668 * that the audio description is not mixed or 1669 * the hardware is not available. 1670 * This should reflect the <strong>true</strong> internal device mix level; 1671 * hence the application might receive any floating value 1672 * except {@code Float.NaN}. 
1673 */ getAudioDescriptionMixLeveldB()1674 public float getAudioDescriptionMixLeveldB() { 1675 float[] level = { Float.NEGATIVE_INFINITY }; 1676 try { 1677 final int status = native_get_audio_description_mix_level_db(level); 1678 if (status != SUCCESS || Float.isNaN(level[0])) { 1679 return Float.NEGATIVE_INFINITY; 1680 } 1681 } catch (Exception e) { 1682 return Float.NEGATIVE_INFINITY; 1683 } 1684 return level[0]; 1685 } 1686 isValidDualMonoMode(@ualMonoMode int dualMonoMode)1687 private static boolean isValidDualMonoMode(@DualMonoMode int dualMonoMode) { 1688 switch (dualMonoMode) { 1689 case DUAL_MONO_MODE_OFF: 1690 case DUAL_MONO_MODE_LR: 1691 case DUAL_MONO_MODE_LL: 1692 case DUAL_MONO_MODE_RR: 1693 return true; 1694 default: 1695 return false; 1696 } 1697 } 1698 1699 /** 1700 * Sets the Dual Mono mode presentation on the output device. 1701 * 1702 * The Dual Mono mode is generally applied to stereo audio streams 1703 * where the left and right channels come from separate sources. 1704 * 1705 * For compressed audio, where the decoding is done in hardware, 1706 * Dual Mono presentation needs to be performed 1707 * by the hardware output device 1708 * as the PCM audio is not available to the framework. 1709 * 1710 * @param dualMonoMode one of {@link #DUAL_MONO_MODE_OFF}, 1711 * {@link #DUAL_MONO_MODE_LR}, 1712 * {@link #DUAL_MONO_MODE_LL}, 1713 * {@link #DUAL_MONO_MODE_RR}. 1714 * 1715 * @return true on success, false on failure if the output device 1716 * does not support Dual Mono mode. 1717 */ setDualMonoMode(@ualMonoMode int dualMonoMode)1718 public boolean setDualMonoMode(@DualMonoMode int dualMonoMode) { 1719 if (!isValidDualMonoMode(dualMonoMode)) { 1720 throw new IllegalArgumentException( 1721 "Invalid Dual Mono mode " + dualMonoMode); 1722 } 1723 return native_set_dual_mono_mode(dualMonoMode) == SUCCESS; 1724 } 1725 1726 /** 1727 * Returns the Dual Mono mode presentation setting. 
1728 * 1729 * If no Dual Mono presentation is available for the output device, 1730 * then {@link #DUAL_MONO_MODE_OFF} is returned. 1731 * 1732 * @return one of {@link #DUAL_MONO_MODE_OFF}, 1733 * {@link #DUAL_MONO_MODE_LR}, 1734 * {@link #DUAL_MONO_MODE_LL}, 1735 * {@link #DUAL_MONO_MODE_RR}. 1736 */ getDualMonoMode()1737 public @DualMonoMode int getDualMonoMode() { 1738 int[] dualMonoMode = { DUAL_MONO_MODE_OFF }; 1739 try { 1740 final int status = native_get_dual_mono_mode(dualMonoMode); 1741 if (status != SUCCESS || !isValidDualMonoMode(dualMonoMode[0])) { 1742 return DUAL_MONO_MODE_OFF; 1743 } 1744 } catch (Exception e) { 1745 return DUAL_MONO_MODE_OFF; 1746 } 1747 return dualMonoMode[0]; 1748 } 1749 1750 // mask of all the positional channels supported, however the allowed combinations 1751 // are further restricted by the matching left/right rule and 1752 // AudioSystem.OUT_CHANNEL_COUNT_MAX 1753 private static final int SUPPORTED_OUT_CHANNELS = 1754 AudioFormat.CHANNEL_OUT_FRONT_LEFT | 1755 AudioFormat.CHANNEL_OUT_FRONT_RIGHT | 1756 AudioFormat.CHANNEL_OUT_FRONT_CENTER | 1757 AudioFormat.CHANNEL_OUT_LOW_FREQUENCY | 1758 AudioFormat.CHANNEL_OUT_BACK_LEFT | 1759 AudioFormat.CHANNEL_OUT_BACK_RIGHT | 1760 AudioFormat.CHANNEL_OUT_FRONT_LEFT_OF_CENTER | 1761 AudioFormat.CHANNEL_OUT_FRONT_RIGHT_OF_CENTER | 1762 AudioFormat.CHANNEL_OUT_BACK_CENTER | 1763 AudioFormat.CHANNEL_OUT_SIDE_LEFT | 1764 AudioFormat.CHANNEL_OUT_SIDE_RIGHT | 1765 AudioFormat.CHANNEL_OUT_TOP_CENTER | 1766 AudioFormat.CHANNEL_OUT_TOP_FRONT_LEFT | 1767 AudioFormat.CHANNEL_OUT_TOP_FRONT_CENTER | 1768 AudioFormat.CHANNEL_OUT_TOP_FRONT_RIGHT | 1769 AudioFormat.CHANNEL_OUT_TOP_BACK_LEFT | 1770 AudioFormat.CHANNEL_OUT_TOP_BACK_CENTER | 1771 AudioFormat.CHANNEL_OUT_TOP_BACK_RIGHT | 1772 AudioFormat.CHANNEL_OUT_TOP_SIDE_LEFT | 1773 AudioFormat.CHANNEL_OUT_TOP_SIDE_RIGHT | 1774 AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_LEFT | 1775 AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_CENTER | 1776 
AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_RIGHT | 1777 AudioFormat.CHANNEL_OUT_LOW_FREQUENCY_2 | 1778 AudioFormat.CHANNEL_OUT_FRONT_WIDE_LEFT | 1779 AudioFormat.CHANNEL_OUT_FRONT_WIDE_RIGHT; 1780 1781 // Returns a boolean whether the attributes, format, bufferSizeInBytes, mode allow 1782 // power saving to be automatically enabled for an AudioTrack. Returns false if 1783 // power saving is already enabled in the attributes parameter. shouldEnablePowerSaving( @ullable AudioAttributes attributes, @Nullable AudioFormat format, int bufferSizeInBytes, int mode)1784 private static boolean shouldEnablePowerSaving( 1785 @Nullable AudioAttributes attributes, @Nullable AudioFormat format, 1786 int bufferSizeInBytes, int mode) { 1787 // If no attributes, OK 1788 // otherwise check attributes for USAGE_MEDIA and CONTENT_UNKNOWN, MUSIC, or MOVIE. 1789 // Only consider flags that are not compatible with FLAG_DEEP_BUFFER. We include 1790 // FLAG_DEEP_BUFFER because if set the request is explicit and 1791 // shouldEnablePowerSaving() should return false. 
1792 final int flags = attributes.getAllFlags() 1793 & (AudioAttributes.FLAG_DEEP_BUFFER | AudioAttributes.FLAG_LOW_LATENCY 1794 | AudioAttributes.FLAG_HW_AV_SYNC | AudioAttributes.FLAG_BEACON); 1795 1796 if (attributes != null && 1797 (flags != 0 // cannot have any special flags 1798 || attributes.getUsage() != AudioAttributes.USAGE_MEDIA 1799 || (attributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN 1800 && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MUSIC 1801 && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MOVIE))) { 1802 return false; 1803 } 1804 1805 // Format must be fully specified and be linear pcm 1806 if (format == null 1807 || format.getSampleRate() == AudioFormat.SAMPLE_RATE_UNSPECIFIED 1808 || !AudioFormat.isEncodingLinearPcm(format.getEncoding()) 1809 || !AudioFormat.isValidEncoding(format.getEncoding()) 1810 || format.getChannelCount() < 1) { 1811 return false; 1812 } 1813 1814 // Mode must be streaming 1815 if (mode != MODE_STREAM) { 1816 return false; 1817 } 1818 1819 // A buffer size of 0 is always compatible with deep buffer (when called from the Builder) 1820 // but for app compatibility we only use deep buffer power saving for large buffer sizes. 1821 if (bufferSizeInBytes != 0) { 1822 final long BUFFER_TARGET_MODE_STREAM_MS = 100; 1823 final int MILLIS_PER_SECOND = 1000; 1824 final long bufferTargetSize = 1825 BUFFER_TARGET_MODE_STREAM_MS 1826 * format.getChannelCount() 1827 * format.getBytesPerSample(format.getEncoding()) 1828 * format.getSampleRate() 1829 / MILLIS_PER_SECOND; 1830 if (bufferSizeInBytes < bufferTargetSize) { 1831 return false; 1832 } 1833 } 1834 1835 return true; 1836 } 1837 1838 // Convenience method for the constructor's parameter checks. 
1839 // This is where constructor IllegalArgumentException-s are thrown 1840 // postconditions: 1841 // mChannelCount is valid 1842 // mChannelMask is valid 1843 // mAudioFormat is valid 1844 // mSampleRate is valid 1845 // mDataLoadMode is valid audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask, int audioFormat, int mode)1846 private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask, 1847 int audioFormat, int mode) { 1848 //-------------- 1849 // sample rate, note these values are subject to change 1850 if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN || 1851 sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) && 1852 sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) { 1853 throw new IllegalArgumentException(sampleRateInHz 1854 + "Hz is not a supported sample rate."); 1855 } 1856 mSampleRate = sampleRateInHz; 1857 1858 if (audioFormat == AudioFormat.ENCODING_IEC61937 1859 && channelConfig != AudioFormat.CHANNEL_OUT_STEREO 1860 && AudioFormat.channelCountFromOutChannelMask(channelConfig) != 8) { 1861 Log.w(TAG, "ENCODING_IEC61937 is configured with channel mask as " + channelConfig 1862 + ", which is not 2 or 8 channels"); 1863 } 1864 1865 //-------------- 1866 // channel config 1867 mChannelConfiguration = channelConfig; 1868 1869 switch (channelConfig) { 1870 case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT 1871 case AudioFormat.CHANNEL_OUT_MONO: 1872 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 1873 mChannelCount = 1; 1874 mChannelMask = AudioFormat.CHANNEL_OUT_MONO; 1875 break; 1876 case AudioFormat.CHANNEL_OUT_STEREO: 1877 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 1878 mChannelCount = 2; 1879 mChannelMask = AudioFormat.CHANNEL_OUT_STEREO; 1880 break; 1881 default: 1882 if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) { 1883 mChannelCount = 0; 1884 break; // channel index configuration only 1885 } 1886 if 
(!isMultichannelConfigSupported(channelConfig, audioFormat)) { 1887 throw new IllegalArgumentException( 1888 "Unsupported channel mask configuration " + channelConfig 1889 + " for encoding " + audioFormat); 1890 } 1891 mChannelMask = channelConfig; 1892 mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 1893 } 1894 // check the channel index configuration (if present) 1895 mChannelIndexMask = channelIndexMask; 1896 if (mChannelIndexMask != 0) { 1897 // As of S, we accept up to 24 channel index mask. 1898 final int fullIndexMask = (1 << AudioSystem.FCC_24) - 1; 1899 final int channelIndexCount = Integer.bitCount(channelIndexMask); 1900 final boolean accepted = (channelIndexMask & ~fullIndexMask) == 0 1901 && (!AudioFormat.isEncodingLinearFrames(audioFormat) // compressed OK 1902 || channelIndexCount <= AudioSystem.OUT_CHANNEL_COUNT_MAX); // PCM 1903 if (!accepted) { 1904 throw new IllegalArgumentException( 1905 "Unsupported channel index mask configuration " + channelIndexMask 1906 + " for encoding " + audioFormat); 1907 } 1908 if (mChannelCount == 0) { 1909 mChannelCount = channelIndexCount; 1910 } else if (mChannelCount != channelIndexCount) { 1911 throw new IllegalArgumentException("Channel count must match"); 1912 } 1913 } 1914 1915 //-------------- 1916 // audio format 1917 if (audioFormat == AudioFormat.ENCODING_DEFAULT) { 1918 audioFormat = AudioFormat.ENCODING_PCM_16BIT; 1919 } 1920 1921 if (!AudioFormat.isPublicEncoding(audioFormat)) { 1922 throw new IllegalArgumentException("Unsupported audio encoding."); 1923 } 1924 mAudioFormat = audioFormat; 1925 1926 //-------------- 1927 // audio load mode 1928 if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) || 1929 ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) { 1930 throw new IllegalArgumentException("Invalid mode."); 1931 } 1932 mDataLoadMode = mode; 1933 } 1934 1935 // General pair map 1936 private static final Map<String, Integer> CHANNEL_PAIR_MAP = 
Map.of( 1937 "front", AudioFormat.CHANNEL_OUT_FRONT_LEFT 1938 | AudioFormat.CHANNEL_OUT_FRONT_RIGHT, 1939 "back", AudioFormat.CHANNEL_OUT_BACK_LEFT 1940 | AudioFormat.CHANNEL_OUT_BACK_RIGHT, 1941 "front of center", AudioFormat.CHANNEL_OUT_FRONT_LEFT_OF_CENTER 1942 | AudioFormat.CHANNEL_OUT_FRONT_RIGHT_OF_CENTER, 1943 "side", AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT, 1944 "top front", AudioFormat.CHANNEL_OUT_TOP_FRONT_LEFT 1945 | AudioFormat.CHANNEL_OUT_TOP_FRONT_RIGHT, 1946 "top back", AudioFormat.CHANNEL_OUT_TOP_BACK_LEFT 1947 | AudioFormat.CHANNEL_OUT_TOP_BACK_RIGHT, 1948 "top side", AudioFormat.CHANNEL_OUT_TOP_SIDE_LEFT 1949 | AudioFormat.CHANNEL_OUT_TOP_SIDE_RIGHT, 1950 "bottom front", AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_LEFT 1951 | AudioFormat.CHANNEL_OUT_BOTTOM_FRONT_RIGHT, 1952 "front wide", AudioFormat.CHANNEL_OUT_FRONT_WIDE_LEFT 1953 | AudioFormat.CHANNEL_OUT_FRONT_WIDE_RIGHT); 1954 1955 /** 1956 * Convenience method to check that the channel configuration (a.k.a channel mask) is supported 1957 * @param channelConfig the mask to validate 1958 * @return false if the AudioTrack can't be used with such a mask 1959 */ isMultichannelConfigSupported(int channelConfig, int encoding)1960 private static boolean isMultichannelConfigSupported(int channelConfig, int encoding) { 1961 // check for unsupported channels 1962 if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) { 1963 loge("Channel configuration features unsupported channels"); 1964 return false; 1965 } 1966 final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 1967 final int channelCountLimit; 1968 try { 1969 channelCountLimit = AudioFormat.isEncodingLinearFrames(encoding) 1970 ? 
AudioSystem.OUT_CHANNEL_COUNT_MAX // PCM limited to OUT_CHANNEL_COUNT_MAX 1971 : AudioSystem.FCC_24; // Compressed limited to 24 channels 1972 } catch (IllegalArgumentException iae) { 1973 loge("Unsupported encoding " + iae); 1974 return false; 1975 } 1976 if (channelCount > channelCountLimit) { 1977 loge("Channel configuration contains too many channels for encoding " 1978 + encoding + "(" + channelCount + " > " + channelCountLimit + ")"); 1979 return false; 1980 } 1981 // check for unsupported multichannel combinations: 1982 // - FL/FR must be present 1983 // - L/R channels must be paired (e.g. no single L channel) 1984 final int frontPair = 1985 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT; 1986 if ((channelConfig & frontPair) != frontPair) { 1987 loge("Front channels must be present in multichannel configurations"); 1988 return false; 1989 } 1990 // Check all pairs to see that they are matched (front duplicated here). 1991 for (Map.Entry<String, Integer> e : CHANNEL_PAIR_MAP.entrySet()) { 1992 final int positionPair = e.getValue(); 1993 if ((channelConfig & positionPair) != 0 1994 && (channelConfig & positionPair) != positionPair) { 1995 loge("Channel pair (" + e.getKey() + ") cannot be used independently"); 1996 return false; 1997 } 1998 } 1999 return true; 2000 } 2001 2002 2003 // Convenience method for the constructor's audio buffer size check. 2004 // preconditions: 2005 // mChannelCount is valid 2006 // mAudioFormat is valid 2007 // postcondition: 2008 // mNativeBufferSizeInBytes is valid (multiple of frame size, positive) audioBuffSizeCheck(int audioBufferSize)2009 private void audioBuffSizeCheck(int audioBufferSize) { 2010 // NB: this section is only valid with PCM or IEC61937 data. 
2011 // To update when supporting compressed formats 2012 int frameSizeInBytes; 2013 if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) { 2014 frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat); 2015 } else { 2016 frameSizeInBytes = 1; 2017 } 2018 if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) { 2019 throw new IllegalArgumentException("Invalid audio buffer size."); 2020 } 2021 2022 mNativeBufferSizeInBytes = audioBufferSize; 2023 mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes; 2024 } 2025 2026 2027 /** 2028 * Releases the native AudioTrack resources. 2029 */ release()2030 public void release() { 2031 synchronized (mStreamEventCbLock){ 2032 endStreamEventHandling(); 2033 } 2034 // even though native_release() stops the native AudioTrack, we need to stop 2035 // AudioTrack subclasses too. 2036 try { 2037 stop(); 2038 } catch(IllegalStateException ise) { 2039 // don't raise an exception, we're releasing the resources. 2040 } 2041 if (mAudioPolicy != null) { 2042 AudioManager.unregisterAudioPolicyAsyncStatic(mAudioPolicy); 2043 mAudioPolicy = null; 2044 } 2045 2046 baseRelease(); 2047 native_release(); 2048 synchronized (mPlayStateLock) { 2049 mState = STATE_UNINITIALIZED; 2050 mPlayState = PLAYSTATE_STOPPED; 2051 mPlayStateLock.notify(); 2052 } 2053 } 2054 2055 @Override finalize()2056 protected void finalize() { 2057 tryToDisableNativeRoutingCallback(); 2058 baseRelease(); 2059 native_finalize(); 2060 } 2061 2062 //-------------------------------------------------------------------------- 2063 // Getters 2064 //-------------------- 2065 /** 2066 * Returns the minimum gain value, which is the constant 0.0. 2067 * Gain values less than 0.0 will be clamped to 0.0. 2068 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 2069 * @return the minimum value, which is the constant 0.0. 
2070 */ getMinVolume()2071 static public float getMinVolume() { 2072 return GAIN_MIN; 2073 } 2074 2075 /** 2076 * Returns the maximum gain value, which is greater than or equal to 1.0. 2077 * Gain values greater than the maximum will be clamped to the maximum. 2078 * <p>The word "volume" in the API name is historical; this is actually a gain. 2079 * expressed as a linear multiplier on sample values, where a maximum value of 1.0 2080 * corresponds to a gain of 0 dB (sample values left unmodified). 2081 * @return the maximum value, which is greater than or equal to 1.0. 2082 */ getMaxVolume()2083 static public float getMaxVolume() { 2084 return GAIN_MAX; 2085 } 2086 2087 /** 2088 * Returns the configured audio source sample rate in Hz. 2089 * The initial source sample rate depends on the constructor parameters, 2090 * but the source sample rate may change if {@link #setPlaybackRate(int)} is called. 2091 * If the constructor had a specific sample rate, then the initial sink sample rate is that 2092 * value. 2093 * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}, 2094 * then the initial sink sample rate is a route-dependent default value based on the source [sic]. 2095 */ getSampleRate()2096 public int getSampleRate() { 2097 return mSampleRate; 2098 } 2099 2100 /** 2101 * Returns the current playback sample rate rate in Hz. 2102 */ getPlaybackRate()2103 public int getPlaybackRate() { 2104 return native_get_playback_rate(); 2105 } 2106 2107 /** 2108 * Returns the current playback parameters. 2109 * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters 2110 * @return current {@link PlaybackParams}. 2111 * @throws IllegalStateException if track is not initialized. 2112 */ getPlaybackParams()2113 public @NonNull PlaybackParams getPlaybackParams() { 2114 return native_get_playback_params(); 2115 } 2116 2117 /** 2118 * Returns the {@link AudioAttributes} used in configuration. 
2119 * If a {@code streamType} is used instead of an {@code AudioAttributes} 2120 * to configure the AudioTrack 2121 * (the use of {@code streamType} for configuration is deprecated), 2122 * then the {@code AudioAttributes} 2123 * equivalent to the {@code streamType} is returned. 2124 * @return The {@code AudioAttributes} used to configure the AudioTrack. 2125 * @throws IllegalStateException If the track is not initialized. 2126 */ getAudioAttributes()2127 public @NonNull AudioAttributes getAudioAttributes() { 2128 if (mState == STATE_UNINITIALIZED || mConfiguredAudioAttributes == null) { 2129 throw new IllegalStateException("track not initialized"); 2130 } 2131 return mConfiguredAudioAttributes; 2132 } 2133 2134 /** 2135 * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT}, 2136 * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}. 2137 */ getAudioFormat()2138 public int getAudioFormat() { 2139 return mAudioFormat; 2140 } 2141 2142 /** 2143 * Returns the volume stream type of this AudioTrack. 2144 * Compare the result against {@link AudioManager#STREAM_VOICE_CALL}, 2145 * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING}, 2146 * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM}, 2147 * {@link AudioManager#STREAM_NOTIFICATION}, {@link AudioManager#STREAM_DTMF} or 2148 * {@link AudioManager#STREAM_ACCESSIBILITY}. 2149 */ getStreamType()2150 public int getStreamType() { 2151 return mStreamType; 2152 } 2153 2154 /** 2155 * Returns the configured channel position mask. 2156 * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO}, 2157 * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}. 2158 * This method may return {@link AudioFormat#CHANNEL_INVALID} if 2159 * a channel index mask was used. 
Consider 2160 * {@link #getFormat()} instead, to obtain an {@link AudioFormat}, 2161 * which contains both the channel position mask and the channel index mask. 2162 */ getChannelConfiguration()2163 public int getChannelConfiguration() { 2164 return mChannelConfiguration; 2165 } 2166 2167 /** 2168 * Returns the configured <code>AudioTrack</code> format. 2169 * @return an {@link AudioFormat} containing the 2170 * <code>AudioTrack</code> parameters at the time of configuration. 2171 */ getFormat()2172 public @NonNull AudioFormat getFormat() { 2173 AudioFormat.Builder builder = new AudioFormat.Builder() 2174 .setSampleRate(mSampleRate) 2175 .setEncoding(mAudioFormat); 2176 if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) { 2177 builder.setChannelMask(mChannelConfiguration); 2178 } 2179 if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) { 2180 builder.setChannelIndexMask(mChannelIndexMask); 2181 } 2182 return builder.build(); 2183 } 2184 2185 /** 2186 * Returns the configured number of channels. 2187 */ getChannelCount()2188 public int getChannelCount() { 2189 return mChannelCount; 2190 } 2191 2192 /** 2193 * Returns the state of the AudioTrack instance. This is useful after the 2194 * AudioTrack instance has been created to check if it was initialized 2195 * properly. This ensures that the appropriate resources have been acquired. 2196 * @see #STATE_UNINITIALIZED 2197 * @see #STATE_INITIALIZED 2198 * @see #STATE_NO_STATIC_DATA 2199 */ getState()2200 public int getState() { 2201 return mState; 2202 } 2203 2204 /** 2205 * Returns the playback state of the AudioTrack instance. 
2206 * @see #PLAYSTATE_STOPPED 2207 * @see #PLAYSTATE_PAUSED 2208 * @see #PLAYSTATE_PLAYING 2209 */ getPlayState()2210 public int getPlayState() { 2211 synchronized (mPlayStateLock) { 2212 switch (mPlayState) { 2213 case PLAYSTATE_STOPPING: 2214 return PLAYSTATE_PLAYING; 2215 case PLAYSTATE_PAUSED_STOPPING: 2216 return PLAYSTATE_PAUSED; 2217 default: 2218 return mPlayState; 2219 } 2220 } 2221 } 2222 2223 2224 /** 2225 * Returns the effective size of the <code>AudioTrack</code> buffer 2226 * that the application writes to. 2227 * <p> This will be less than or equal to the result of 2228 * {@link #getBufferCapacityInFrames()}. 2229 * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called. 2230 * <p> If the track is subsequently routed to a different output sink, the buffer 2231 * size and capacity may enlarge to accommodate. 2232 * <p> If the <code>AudioTrack</code> encoding indicates compressed data, 2233 * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is 2234 * the size of the <code>AudioTrack</code> buffer in bytes. 2235 * <p> See also {@link AudioManager#getProperty(String)} for key 2236 * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}. 2237 * @return current size in frames of the <code>AudioTrack</code> buffer. 2238 * @throws IllegalStateException if track is not initialized. 2239 */ getBufferSizeInFrames()2240 public @IntRange (from = 0) int getBufferSizeInFrames() { 2241 return native_get_buffer_size_frames(); 2242 } 2243 2244 /** 2245 * Limits the effective size of the <code>AudioTrack</code> buffer 2246 * that the application writes to. 2247 * <p> A write to this AudioTrack will not fill the buffer beyond this limit. 2248 * If a blocking write is used then the write will block until the data 2249 * can fit within this limit. 2250 * <p>Changing this limit modifies the latency associated with 2251 * the buffer for this track. 
A smaller size will give lower latency 2252 * but there may be more glitches due to buffer underruns. 2253 * <p>The actual size used may not be equal to this requested size. 2254 * It will be limited to a valid range with a maximum of 2255 * {@link #getBufferCapacityInFrames()}. 2256 * It may also be adjusted slightly for internal reasons. 2257 * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE} 2258 * will be returned. 2259 * <p>This method is supported for PCM audio at all API levels. 2260 * Compressed audio is supported in API levels 33 and above. 2261 * For compressed streams the size of a frame is considered to be exactly one byte. 2262 * 2263 * @param bufferSizeInFrames requested buffer size in frames 2264 * @return the actual buffer size in frames or an error code, 2265 * {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION} 2266 * @throws IllegalStateException if track is not initialized. 2267 */ setBufferSizeInFrames(@ntRange from = 0) int bufferSizeInFrames)2268 public int setBufferSizeInFrames(@IntRange (from = 0) int bufferSizeInFrames) { 2269 if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) { 2270 return ERROR_INVALID_OPERATION; 2271 } 2272 if (bufferSizeInFrames < 0) { 2273 return ERROR_BAD_VALUE; 2274 } 2275 return native_set_buffer_size_frames(bufferSizeInFrames); 2276 } 2277 2278 /** 2279 * Returns the maximum size of the <code>AudioTrack</code> buffer in frames. 2280 * <p> If the track's creation mode is {@link #MODE_STATIC}, 2281 * it is equal to the specified bufferSizeInBytes on construction, converted to frame units. 2282 * A static track's frame count will not change. 2283 * <p> If the track's creation mode is {@link #MODE_STREAM}, 2284 * it is greater than or equal to the specified bufferSizeInBytes converted to frame units. 
2285 * For streaming tracks, this value may be rounded up to a larger value if needed by 2286 * the target output sink, and 2287 * if the track is subsequently routed to a different output sink, the 2288 * frame count may enlarge to accommodate. 2289 * <p> If the <code>AudioTrack</code> encoding indicates compressed data, 2290 * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is 2291 * the size of the <code>AudioTrack</code> buffer in bytes. 2292 * <p> See also {@link AudioManager#getProperty(String)} for key 2293 * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}. 2294 * @return maximum size in frames of the <code>AudioTrack</code> buffer. 2295 * @throws IllegalStateException if track is not initialized. 2296 */ getBufferCapacityInFrames()2297 public @IntRange (from = 0) int getBufferCapacityInFrames() { 2298 return native_get_buffer_capacity_frames(); 2299 } 2300 2301 /** 2302 * Sets the streaming start threshold for an <code>AudioTrack</code>. 2303 * <p> The streaming start threshold is the buffer level that the written audio 2304 * data must reach for audio streaming to start after {@link #play()} is called. 2305 * <p> For compressed streams, the size of a frame is considered to be exactly one byte. 2306 * 2307 * @param startThresholdInFrames the desired start threshold. 2308 * @return the actual start threshold in frames value. This is 2309 * an integer between 1 to the buffer capacity 2310 * (see {@link #getBufferCapacityInFrames()}), 2311 * and might change if the output sink changes after track creation. 2312 * @throws IllegalStateException if the track is not initialized or the 2313 * track transfer mode is not {@link #MODE_STREAM}. 2314 * @throws IllegalArgumentException if startThresholdInFrames is not positive. 
2315 * @see #getStartThresholdInFrames() 2316 */ setStartThresholdInFrames( @ntRange from = 1) int startThresholdInFrames)2317 public @IntRange(from = 1) int setStartThresholdInFrames( 2318 @IntRange (from = 1) int startThresholdInFrames) { 2319 if (mState != STATE_INITIALIZED) { 2320 throw new IllegalStateException("AudioTrack is not initialized"); 2321 } 2322 if (mDataLoadMode != MODE_STREAM) { 2323 throw new IllegalStateException("AudioTrack must be a streaming track"); 2324 } 2325 if (startThresholdInFrames < 1) { 2326 throw new IllegalArgumentException("startThresholdInFrames " 2327 + startThresholdInFrames + " must be positive"); 2328 } 2329 return native_setStartThresholdInFrames(startThresholdInFrames); 2330 } 2331 2332 /** 2333 * Returns the streaming start threshold of the <code>AudioTrack</code>. 2334 * <p> The streaming start threshold is the buffer level that the written audio 2335 * data must reach for audio streaming to start after {@link #play()} is called. 2336 * When an <code>AudioTrack</code> is created, the streaming start threshold 2337 * is the buffer capacity in frames. If the buffer size in frames is reduced 2338 * by {@link #setBufferSizeInFrames(int)} to a value smaller than the start threshold 2339 * then that value will be used instead for the streaming start threshold. 2340 * <p> For compressed streams, the size of a frame is considered to be exactly one byte. 2341 * 2342 * @return the current start threshold in frames value. This is 2343 * an integer between 1 to the buffer capacity 2344 * (see {@link #getBufferCapacityInFrames()}), 2345 * and might change if the output sink changes after track creation. 2346 * @throws IllegalStateException if the track is not initialized or the 2347 * track is not {@link #MODE_STREAM}. 
2348 * @see #setStartThresholdInFrames(int) 2349 */ getStartThresholdInFrames()2350 public @IntRange (from = 1) int getStartThresholdInFrames() { 2351 if (mState != STATE_INITIALIZED) { 2352 throw new IllegalStateException("AudioTrack is not initialized"); 2353 } 2354 if (mDataLoadMode != MODE_STREAM) { 2355 throw new IllegalStateException("AudioTrack must be a streaming track"); 2356 } 2357 return native_getStartThresholdInFrames(); 2358 } 2359 2360 /** 2361 * Returns the frame count of the native <code>AudioTrack</code> buffer. 2362 * @return current size in frames of the <code>AudioTrack</code> buffer. 2363 * @throws IllegalStateException 2364 * @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead. 2365 */ 2366 @Deprecated getNativeFrameCount()2367 protected int getNativeFrameCount() { 2368 return native_get_buffer_capacity_frames(); 2369 } 2370 2371 /** 2372 * Returns marker position expressed in frames. 2373 * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition}, 2374 * or zero if marker is disabled. 2375 */ getNotificationMarkerPosition()2376 public int getNotificationMarkerPosition() { 2377 return native_get_marker_pos(); 2378 } 2379 2380 /** 2381 * Returns the notification update period expressed in frames. 2382 * Zero means that no position update notifications are being delivered. 2383 */ getPositionNotificationPeriod()2384 public int getPositionNotificationPeriod() { 2385 return native_get_pos_update_period(); 2386 } 2387 2388 /** 2389 * Returns the playback head position expressed in frames. 2390 * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is 2391 * unsigned 32-bits. That is, the next position after 0x7FFFFFFF is (int) 0x80000000. 2392 * This is a continuously advancing counter. It will wrap (overflow) periodically, 2393 * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz. 
2394 * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}. 2395 * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates 2396 * the total number of frames played since reset, 2397 * <i>not</i> the current offset within the buffer. 2398 */ getPlaybackHeadPosition()2399 public int getPlaybackHeadPosition() { 2400 return native_get_position(); 2401 } 2402 2403 /** 2404 * Returns this track's estimated latency in milliseconds. This includes the latency due 2405 * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver. 2406 * 2407 * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need 2408 * a better solution. 2409 * @hide 2410 */ 2411 @UnsupportedAppUsage(trackingBug = 130237544) getLatency()2412 public int getLatency() { 2413 return native_get_latency(); 2414 } 2415 2416 /** 2417 * Returns the number of underrun occurrences in the application-level write buffer 2418 * since the AudioTrack was created. 2419 * An underrun occurs if the application does not write audio 2420 * data quickly enough, causing the buffer to underflow 2421 * and a potential audio glitch or pop. 2422 * <p> 2423 * Underruns are less likely when buffer sizes are large. 2424 * It may be possible to eliminate underruns by recreating the AudioTrack with 2425 * a larger buffer. 2426 * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the 2427 * effective size of the buffer. 2428 */ getUnderrunCount()2429 public int getUnderrunCount() { 2430 return native_get_underrun_count(); 2431 } 2432 2433 /** 2434 * Returns the current performance mode of the {@link AudioTrack}. 2435 * 2436 * @return one of {@link AudioTrack#PERFORMANCE_MODE_NONE}, 2437 * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY}, 2438 * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}. 
2439 * Use {@link AudioTrack.Builder#setPerformanceMode} 2440 * in the {@link AudioTrack.Builder} to enable a performance mode. 2441 * @throws IllegalStateException if track is not initialized. 2442 */ getPerformanceMode()2443 public @PerformanceMode int getPerformanceMode() { 2444 final int flags = native_get_flags(); 2445 if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) { 2446 return PERFORMANCE_MODE_LOW_LATENCY; 2447 } else if ((flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) { 2448 return PERFORMANCE_MODE_POWER_SAVING; 2449 } else { 2450 return PERFORMANCE_MODE_NONE; 2451 } 2452 } 2453 2454 /** 2455 * Returns the output sample rate in Hz for the specified stream type. 2456 */ getNativeOutputSampleRate(int streamType)2457 static public int getNativeOutputSampleRate(int streamType) { 2458 return native_get_output_sample_rate(streamType); 2459 } 2460 2461 /** 2462 * Returns the estimated minimum buffer size required for an AudioTrack 2463 * object to be created in the {@link #MODE_STREAM} mode. 2464 * The size is an estimate because it does not consider either the route or the sink, 2465 * since neither is known yet. Note that this size doesn't 2466 * guarantee a smooth playback under load, and higher values should be chosen according to 2467 * the expected frequency at which the buffer will be refilled with additional data to play. 2468 * For example, if you intend to dynamically set the source sample rate of an AudioTrack 2469 * to a higher value than the initial source sample rate, be sure to configure the buffer size 2470 * based on the highest planned sample rate. 2471 * @param sampleRateInHz the source sample rate expressed in Hz. 2472 * {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted. 2473 * @param channelConfig describes the configuration of the audio channels. 2474 * See {@link AudioFormat#CHANNEL_OUT_MONO} and 2475 * {@link AudioFormat#CHANNEL_OUT_STEREO} 2476 * @param audioFormat the format in which the audio data is represented. 
2477 * See {@link AudioFormat#ENCODING_PCM_16BIT} and 2478 * {@link AudioFormat#ENCODING_PCM_8BIT}, 2479 * and {@link AudioFormat#ENCODING_PCM_FLOAT}. 2480 * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed, 2481 * or {@link #ERROR} if unable to query for output properties, 2482 * or the minimum buffer size expressed in bytes. 2483 */ getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat)2484 static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) { 2485 int channelCount = 0; 2486 switch(channelConfig) { 2487 case AudioFormat.CHANNEL_OUT_MONO: 2488 case AudioFormat.CHANNEL_CONFIGURATION_MONO: 2489 channelCount = 1; 2490 break; 2491 case AudioFormat.CHANNEL_OUT_STEREO: 2492 case AudioFormat.CHANNEL_CONFIGURATION_STEREO: 2493 channelCount = 2; 2494 break; 2495 default: 2496 if (!isMultichannelConfigSupported(channelConfig, audioFormat)) { 2497 loge("getMinBufferSize(): Invalid channel configuration."); 2498 return ERROR_BAD_VALUE; 2499 } else { 2500 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig); 2501 } 2502 } 2503 2504 if (!AudioFormat.isPublicEncoding(audioFormat)) { 2505 loge("getMinBufferSize(): Invalid audio format."); 2506 return ERROR_BAD_VALUE; 2507 } 2508 2509 // sample rate, note these values are subject to change 2510 // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed 2511 if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) || 2512 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) { 2513 loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate."); 2514 return ERROR_BAD_VALUE; 2515 } 2516 2517 int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat); 2518 if (size <= 0) { 2519 loge("getMinBufferSize(): error querying hardware"); 2520 return ERROR; 2521 } 2522 else { 2523 return size; 2524 } 2525 } 2526 2527 /** 2528 * Returns the audio session ID. 
2529 * 2530 * @return the ID of the audio session this AudioTrack belongs to. 2531 */ getAudioSessionId()2532 public int getAudioSessionId() { 2533 return mSessionId; 2534 } 2535 2536 /** 2537 * Poll for a timestamp on demand. 2538 * <p> 2539 * If you need to track timestamps during initial warmup or after a routing or mode change, 2540 * you should request a new timestamp periodically until the reported timestamps 2541 * show that the frame position is advancing, or until it becomes clear that 2542 * timestamps are unavailable for this route. 2543 * <p> 2544 * After the clock is advancing at a stable rate, 2545 * query for a new timestamp approximately once every 10 seconds to once per minute. 2546 * Calling this method more often is inefficient. 2547 * It is also counter-productive to call this method more often than recommended, 2548 * because the short-term differences between successive timestamp reports are not meaningful. 2549 * If you need a high-resolution mapping between frame position and presentation time, 2550 * consider implementing that at application level, based on low-resolution timestamps. 2551 * <p> 2552 * The audio data at the returned position may either already have been 2553 * presented, or may have not yet been presented but is committed to be presented. 2554 * It is not possible to request the time corresponding to a particular position, 2555 * or to request the (fractional) position corresponding to a particular time. 2556 * If you need such features, consider implementing them at application level. 2557 * 2558 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 2559 * and owned by caller. 2560 * @return true if a timestamp is available, or false if no timestamp is available. 2561 * If a timestamp is available, 2562 * the AudioTimestamp instance is filled in with a position in frame units, together 2563 * with the estimated time when that frame was presented or is committed to 2564 * be presented. 
2565 * In the case that no timestamp is available, any supplied instance is left unaltered. 2566 * A timestamp may be temporarily unavailable while the audio clock is stabilizing, 2567 * or during and immediately after a route change. 2568 * A timestamp is permanently unavailable for a given route if the route does not support 2569 * timestamps. In this case, the approximate frame position can be obtained 2570 * using {@link #getPlaybackHeadPosition}. 2571 * However, it may be useful to continue to query for 2572 * timestamps occasionally, to recover after a route change. 2573 */ 2574 // Add this text when the "on new timestamp" API is added: 2575 // Use if you need to get the most recent timestamp outside of the event callback handler. getTimestamp(AudioTimestamp timestamp)2576 public boolean getTimestamp(AudioTimestamp timestamp) 2577 { 2578 if (timestamp == null) { 2579 throw new IllegalArgumentException(); 2580 } 2581 // It's unfortunate, but we have to either create garbage every time or use synchronized 2582 long[] longArray = new long[2]; 2583 int ret = native_get_timestamp(longArray); 2584 if (ret != SUCCESS) { 2585 return false; 2586 } 2587 timestamp.framePosition = longArray[0]; 2588 timestamp.nanoTime = longArray[1]; 2589 return true; 2590 } 2591 2592 /** 2593 * Poll for a timestamp on demand. 2594 * <p> 2595 * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code. 2596 * 2597 * @param timestamp a reference to a non-null AudioTimestamp instance allocated 2598 * and owned by caller. 2599 * @return {@link #SUCCESS} if a timestamp is available 2600 * {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called 2601 * immediately after start/ACTIVE, when the number of frames consumed is less than the 2602 * overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll 2603 * again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time 2604 * for the timestamp. 
2605 * {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 2606 * needs to be recreated. 2607 * {@link #ERROR_INVALID_OPERATION} if current route does not support 2608 * timestamps. In this case, the approximate frame position can be obtained 2609 * using {@link #getPlaybackHeadPosition}. 2610 * 2611 * The AudioTimestamp instance is filled in with a position in frame units, together 2612 * with the estimated time when that frame was presented or is committed to 2613 * be presented. 2614 * @hide 2615 */ 2616 // Add this text when the "on new timestamp" API is added: 2617 // Use if you need to get the most recent timestamp outside of the event callback handler. getTimestampWithStatus(AudioTimestamp timestamp)2618 public int getTimestampWithStatus(AudioTimestamp timestamp) 2619 { 2620 if (timestamp == null) { 2621 throw new IllegalArgumentException(); 2622 } 2623 // It's unfortunate, but we have to either create garbage every time or use synchronized 2624 long[] longArray = new long[2]; 2625 int ret = native_get_timestamp(longArray); 2626 timestamp.framePosition = longArray[0]; 2627 timestamp.nanoTime = longArray[1]; 2628 return ret; 2629 } 2630 2631 /** 2632 * Return Metrics data about the current AudioTrack instance. 2633 * 2634 * @return a {@link PersistableBundle} containing the set of attributes and values 2635 * available for the media being handled by this instance of AudioTrack 2636 * The attributes are descibed in {@link MetricsConstants}. 2637 * 2638 * Additional vendor-specific fields may also be present in 2639 * the return value. 
2640 */ getMetrics()2641 public PersistableBundle getMetrics() { 2642 PersistableBundle bundle = native_getMetrics(); 2643 return bundle; 2644 } 2645 native_getMetrics()2646 private native PersistableBundle native_getMetrics(); 2647 2648 //-------------------------------------------------------------------------- 2649 // Initialization / configuration 2650 //-------------------- 2651 /** 2652 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 2653 * for each periodic playback head position update. 2654 * Notifications will be received in the same thread as the one in which the AudioTrack 2655 * instance was created. 2656 * @param listener 2657 */ setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener)2658 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) { 2659 setPlaybackPositionUpdateListener(listener, null); 2660 } 2661 2662 /** 2663 * Sets the listener the AudioTrack notifies when a previously set marker is reached or 2664 * for each periodic playback head position update. 2665 * Use this method to receive AudioTrack events in the Handler associated with another 2666 * thread than the one in which you created the AudioTrack instance. 2667 * @param listener 2668 * @param handler the Handler that will receive the event notification messages. 
2669 */ setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, Handler handler)2670 public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener, 2671 Handler handler) { 2672 if (listener != null) { 2673 mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler); 2674 } else { 2675 mEventHandlerDelegate = null; 2676 } 2677 } 2678 2679 clampGainOrLevel(float gainOrLevel)2680 private static float clampGainOrLevel(float gainOrLevel) { 2681 if (Float.isNaN(gainOrLevel)) { 2682 throw new IllegalArgumentException(); 2683 } 2684 if (gainOrLevel < GAIN_MIN) { 2685 gainOrLevel = GAIN_MIN; 2686 } else if (gainOrLevel > GAIN_MAX) { 2687 gainOrLevel = GAIN_MAX; 2688 } 2689 return gainOrLevel; 2690 } 2691 2692 2693 /** 2694 * Sets the specified left and right output gain values on the AudioTrack. 2695 * <p>Gain values are clamped to the closed interval [0.0, max] where 2696 * max is the value of {@link #getMaxVolume}. 2697 * A value of 0.0 results in zero gain (silence), and 2698 * a value of 1.0 means unity gain (signal unchanged). 2699 * The default value is 1.0 meaning unity gain. 2700 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 2701 * @param leftGain output gain for the left channel. 2702 * @param rightGain output gain for the right channel 2703 * @return error code or success, see {@link #SUCCESS}, 2704 * {@link #ERROR_INVALID_OPERATION} 2705 * @deprecated Applications should use {@link #setVolume} instead, as it 2706 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 
2707 */ 2708 @Deprecated setStereoVolume(float leftGain, float rightGain)2709 public int setStereoVolume(float leftGain, float rightGain) { 2710 if (mState == STATE_UNINITIALIZED) { 2711 return ERROR_INVALID_OPERATION; 2712 } 2713 2714 baseSetVolume(leftGain, rightGain); 2715 return SUCCESS; 2716 } 2717 2718 @Override playerSetVolume(boolean muting, float leftVolume, float rightVolume)2719 void playerSetVolume(boolean muting, float leftVolume, float rightVolume) { 2720 leftVolume = clampGainOrLevel(muting ? 0.0f : leftVolume); 2721 rightVolume = clampGainOrLevel(muting ? 0.0f : rightVolume); 2722 2723 native_setVolume(leftVolume, rightVolume); 2724 } 2725 2726 2727 /** 2728 * Sets the specified output gain value on all channels of this track. 2729 * <p>Gain values are clamped to the closed interval [0.0, max] where 2730 * max is the value of {@link #getMaxVolume}. 2731 * A value of 0.0 results in zero gain (silence), and 2732 * a value of 1.0 means unity gain (signal unchanged). 2733 * The default value is 1.0 meaning unity gain. 2734 * <p>This API is preferred over {@link #setStereoVolume}, as it 2735 * more gracefully scales down to mono, and up to multi-channel content beyond stereo. 2736 * <p>The word "volume" in the API name is historical; this is actually a linear gain. 2737 * @param gain output gain for all channels. 
2738 * @return error code or success, see {@link #SUCCESS}, 2739 * {@link #ERROR_INVALID_OPERATION} 2740 */ setVolume(float gain)2741 public int setVolume(float gain) { 2742 return setStereoVolume(gain, gain); 2743 } 2744 2745 @Override playerApplyVolumeShaper( @onNull VolumeShaper.Configuration configuration, @NonNull VolumeShaper.Operation operation)2746 /* package */ int playerApplyVolumeShaper( 2747 @NonNull VolumeShaper.Configuration configuration, 2748 @NonNull VolumeShaper.Operation operation) { 2749 return native_applyVolumeShaper(configuration, operation); 2750 } 2751 2752 @Override playerGetVolumeShaperState(int id)2753 /* package */ @Nullable VolumeShaper.State playerGetVolumeShaperState(int id) { 2754 return native_getVolumeShaperState(id); 2755 } 2756 2757 @Override createVolumeShaper( @onNull VolumeShaper.Configuration configuration)2758 public @NonNull VolumeShaper createVolumeShaper( 2759 @NonNull VolumeShaper.Configuration configuration) { 2760 return new VolumeShaper(configuration, this); 2761 } 2762 2763 /** 2764 * Sets the playback sample rate for this track. This sets the sampling rate at which 2765 * the audio data will be consumed and played back 2766 * (as set by the sampleRateInHz parameter in the 2767 * {@link #AudioTrack(int, int, int, int, int, int)} constructor), 2768 * not the original sampling rate of the 2769 * content. For example, setting it to half the sample rate of the content will cause the 2770 * playback to last twice as long, but will also result in a pitch shift down by one octave. 2771 * The valid sample rate range is from 1 Hz to twice the value returned by 2772 * {@link #getNativeOutputSampleRate(int)}. 2773 * Use {@link #setPlaybackParams(PlaybackParams)} for speed control. 2774 * <p> This method may also be used to repurpose an existing <code>AudioTrack</code> 2775 * for playback of content of differing sample rate, 2776 * but with identical encoding and channel mask. 
2777 * @param sampleRateInHz the sample rate expressed in Hz 2778 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 2779 * {@link #ERROR_INVALID_OPERATION} 2780 */ setPlaybackRate(int sampleRateInHz)2781 public int setPlaybackRate(int sampleRateInHz) { 2782 if (mState != STATE_INITIALIZED) { 2783 return ERROR_INVALID_OPERATION; 2784 } 2785 if (sampleRateInHz <= 0) { 2786 return ERROR_BAD_VALUE; 2787 } 2788 return native_set_playback_rate(sampleRateInHz); 2789 } 2790 2791 2792 /** 2793 * Sets the playback parameters. 2794 * This method returns failure if it cannot apply the playback parameters. 2795 * One possible cause is that the parameters for speed or pitch are out of range. 2796 * Another possible cause is that the <code>AudioTrack</code> is streaming 2797 * (see {@link #MODE_STREAM}) and the 2798 * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer 2799 * on configuration must be larger than the speed multiplied by the minimum size 2800 * {@link #getMinBufferSize(int, int, int)}) to allow proper playback. 2801 * @param params see {@link PlaybackParams}. In particular, 2802 * speed, pitch, and audio mode should be set. 2803 * @throws IllegalArgumentException if the parameters are invalid or not accepted. 2804 * @throws IllegalStateException if track is not initialized. 2805 */ setPlaybackParams(@onNull PlaybackParams params)2806 public void setPlaybackParams(@NonNull PlaybackParams params) { 2807 if (params == null) { 2808 throw new IllegalArgumentException("params is null"); 2809 } 2810 native_set_playback_params(params); 2811 } 2812 2813 2814 /** 2815 * Sets the position of the notification marker. At most one marker can be active. 2816 * @param markerInFrames marker position in wrapping frame units similar to 2817 * {@link #getPlaybackHeadPosition}, or zero to disable the marker. 
2818 * To set a marker at a position which would appear as zero due to wraparound, 2819 * a workaround is to use a non-zero position near zero, such as -1 or 1. 2820 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 2821 * {@link #ERROR_INVALID_OPERATION} 2822 */ setNotificationMarkerPosition(int markerInFrames)2823 public int setNotificationMarkerPosition(int markerInFrames) { 2824 if (mState == STATE_UNINITIALIZED) { 2825 return ERROR_INVALID_OPERATION; 2826 } 2827 return native_set_marker_pos(markerInFrames); 2828 } 2829 2830 2831 /** 2832 * Sets the period for the periodic notification event. 2833 * @param periodInFrames update period expressed in frames. 2834 * Zero period means no position updates. A negative period is not allowed. 2835 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION} 2836 */ setPositionNotificationPeriod(int periodInFrames)2837 public int setPositionNotificationPeriod(int periodInFrames) { 2838 if (mState == STATE_UNINITIALIZED) { 2839 return ERROR_INVALID_OPERATION; 2840 } 2841 return native_set_pos_update_period(periodInFrames); 2842 } 2843 2844 2845 /** 2846 * Sets the playback head position within the static buffer. 2847 * The track must be stopped or paused for the position to be changed, 2848 * and must use the {@link #MODE_STATIC} mode. 2849 * @param positionInFrames playback head position within buffer, expressed in frames. 2850 * Zero corresponds to start of buffer. 2851 * The position must not be greater than the buffer size in frames, or negative. 2852 * Though this method and {@link #getPlaybackHeadPosition()} have similar names, 2853 * the position values have different meanings. 2854 * <br> 2855 * If looping is currently enabled and the new position is greater than or equal to the 2856 * loop end marker, the behavior varies by API level: 2857 * as of {@link android.os.Build.VERSION_CODES#M}, 2858 * the looping is first disabled and then the position is set. 
2859 * For earlier API levels, the behavior is unspecified. 2860 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 2861 * {@link #ERROR_INVALID_OPERATION} 2862 */ setPlaybackHeadPosition(@ntRange from = 0) int positionInFrames)2863 public int setPlaybackHeadPosition(@IntRange (from = 0) int positionInFrames) { 2864 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 2865 getPlayState() == PLAYSTATE_PLAYING) { 2866 return ERROR_INVALID_OPERATION; 2867 } 2868 if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) { 2869 return ERROR_BAD_VALUE; 2870 } 2871 return native_set_position(positionInFrames); 2872 } 2873 2874 /** 2875 * Sets the loop points and the loop count. The loop can be infinite. 2876 * Similarly to setPlaybackHeadPosition, 2877 * the track must be stopped or paused for the loop points to be changed, 2878 * and must use the {@link #MODE_STATIC} mode. 2879 * @param startInFrames loop start marker expressed in frames. 2880 * Zero corresponds to start of buffer. 2881 * The start marker must not be greater than or equal to the buffer size in frames, or negative. 2882 * @param endInFrames loop end marker expressed in frames. 2883 * The total buffer size in frames corresponds to end of buffer. 2884 * The end marker must not be greater than the buffer size in frames. 2885 * For looping, the end marker must not be less than or equal to the start marker, 2886 * but to disable looping 2887 * it is permitted for start marker, end marker, and loop count to all be 0. 2888 * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}. 2889 * If the loop period (endInFrames - startInFrames) is too small for the implementation to 2890 * support, 2891 * {@link #ERROR_BAD_VALUE} is returned. 2892 * The loop range is the interval [startInFrames, endInFrames). 
2893 * <br> 2894 * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged, 2895 * unless it is greater than or equal to the loop end marker, in which case 2896 * it is forced to the loop start marker. 2897 * For earlier API levels, the effect on position is unspecified. 2898 * @param loopCount the number of times the loop is looped; must be greater than or equal to -1. 2899 * A value of -1 means infinite looping, and 0 disables looping. 2900 * A value of positive N means to "loop" (go back) N times. For example, 2901 * a value of one means to play the region two times in total. 2902 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 2903 * {@link #ERROR_INVALID_OPERATION} 2904 */ setLoopPoints(@ntRange from = 0) int startInFrames, @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount)2905 public int setLoopPoints(@IntRange (from = 0) int startInFrames, 2906 @IntRange (from = 0) int endInFrames, @IntRange (from = -1) int loopCount) { 2907 if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED || 2908 getPlayState() == PLAYSTATE_PLAYING) { 2909 return ERROR_INVALID_OPERATION; 2910 } 2911 if (loopCount == 0) { 2912 ; // explicitly allowed as an exception to the loop region range check 2913 } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames && 2914 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) { 2915 return ERROR_BAD_VALUE; 2916 } 2917 return native_set_loop(startInFrames, endInFrames, loopCount); 2918 } 2919 2920 /** 2921 * Sets the audio presentation. 2922 * If the audio presentation is invalid then {@link #ERROR_BAD_VALUE} will be returned. 2923 * If a multi-stream decoder (MSD) is not present, or the format does not support 2924 * multiple presentations, then {@link #ERROR_INVALID_OPERATION} will be returned. 2925 * {@link #ERROR} is returned in case of any other error. 2926 * @param presentation see {@link AudioPresentation}. 
     *     In particular, id should be set.
     * @return error code or success, see {@link #SUCCESS}, {@link #ERROR},
     *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
     * @throws IllegalArgumentException if the audio presentation is null.
     * @throws IllegalStateException if track is not initialized.
     */
    public int setPresentation(@NonNull AudioPresentation presentation) {
        if (presentation == null) {
            throw new IllegalArgumentException("audio presentation is null");
        }
        return native_setPresentation(presentation.getPresentationId(),
                presentation.getProgramId());
    }

    /**
     * Sets the initialization state of the instance. This method was originally intended to be used
     * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
     * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
     * @param state the state of the AudioTrack instance
     * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
     */
    @Deprecated
    protected void setState(int state) {
        mState = state;
    }


    //---------------------------------------------------------
    // Transport control methods
    //--------------------
    /**
     * Starts playing an AudioTrack.
     * <p>
     * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
     * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
     * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
     * play().
     * <p>
     * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
     * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
     * If you don't call write() first, or if you call write() but with an insufficient amount of
     * data, then the track will be in underrun state at play().  In this case,
     * playback will not actually start playing until the data path is filled to a
     * device-specific minimum level.  This requirement for the path to be filled
     * to a minimum level is also true when resuming audio playback after calling stop().
     * Similarly the buffer will need to be filled up again after
     * the track underruns due to failure to call write() in a timely manner with sufficient data.
     * For portability, an application should prime the data path to the maximum allowed
     * by writing data until the write() method returns a short transfer count.
     * This allows play() to start immediately, and reduces the chance of underrun.
     *<p>
     * As of {@link android.os.Build.VERSION_CODES#S} the minimum level to start playing
     * can be obtained using {@link #getStartThresholdInFrames()} and set with
     * {@link #setStartThresholdInFrames(int)}.
     *
     * @throws IllegalStateException if the track isn't properly initialized
     */
    public void play()
    throws IllegalStateException {
        if (mState != STATE_INITIALIZED) {
            throw new IllegalStateException("play() called on uninitialized AudioTrack.");
        }
        //FIXME use lambda to pass startImpl to superclass
        final int delay = getStartDelayMs();
        if (delay == 0) {
            startImpl();
        } else {
            // A start delay has been configured (via the base player class):
            // wait it out on a throwaway thread, then start. The delay is
            // cleared before starting so a subsequent play() is immediate.
            new Thread() {
                public void run() {
                    try {
                        Thread.sleep(delay);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                    baseSetStartDelayMs(0);
                    try {
                        startImpl();
                    } catch (IllegalStateException e) {
                        // fail silently for a state exception when it is happening after
                        // a delayed start, as the player state could have changed between the
                        // call to start() and the execution of startImpl()
                    }
                }
            }.start();
        }
    }

    // Performs the actual start: enables self routing monitoring if needed,
    // starts the native track, and advances the play-state machine under
    // mPlayStateLock (PAUSED_STOPPING resumes draining as STOPPING; any other
    // state becomes PLAYING with no EOS pending).
    private void startImpl() {
        synchronized (mRoutingChangeListeners) {
            if (!mEnableSelfRoutingMonitor) {
                mEnableSelfRoutingMonitor = testEnableNativeRoutingCallbacksLocked();
            }
        }
        synchronized(mPlayStateLock) {
            baseStart(0); // unknown device at this point
            native_start();
            // FIXME see b/179218630
            //baseStart(native_getRoutedDeviceId());
            if (mPlayState == PLAYSTATE_PAUSED_STOPPING) {
                mPlayState = PLAYSTATE_STOPPING;
            } else {
                mPlayState = PLAYSTATE_PLAYING;
                mOffloadEosPending = false;
            }
        }
    }

    /**
     * Stops playing the audio data.
     * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
     * after the last buffer that was written has been played. For an immediate stop, use
     * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
     * back yet.
3040 * @throws IllegalStateException 3041 */ stop()3042 public void stop() 3043 throws IllegalStateException { 3044 if (mState != STATE_INITIALIZED) { 3045 throw new IllegalStateException("stop() called on uninitialized AudioTrack."); 3046 } 3047 3048 // stop playing 3049 synchronized(mPlayStateLock) { 3050 native_stop(); 3051 baseStop(); 3052 if (mOffloaded && mPlayState != PLAYSTATE_PAUSED_STOPPING) { 3053 mPlayState = PLAYSTATE_STOPPING; 3054 } else { 3055 mPlayState = PLAYSTATE_STOPPED; 3056 mOffloadEosPending = false; 3057 mAvSyncHeader = null; 3058 mAvSyncBytesRemaining = 0; 3059 mPlayStateLock.notify(); 3060 } 3061 } 3062 tryToDisableNativeRoutingCallback(); 3063 } 3064 3065 /** 3066 * Pauses the playback of the audio data. Data that has not been played 3067 * back will not be discarded. Subsequent calls to {@link #play} will play 3068 * this data back. See {@link #flush()} to discard this data. 3069 * 3070 * @throws IllegalStateException 3071 */ pause()3072 public void pause() 3073 throws IllegalStateException { 3074 if (mState != STATE_INITIALIZED) { 3075 throw new IllegalStateException("pause() called on uninitialized AudioTrack."); 3076 } 3077 3078 // pause playback 3079 synchronized(mPlayStateLock) { 3080 native_pause(); 3081 basePause(); 3082 if (mPlayState == PLAYSTATE_STOPPING) { 3083 mPlayState = PLAYSTATE_PAUSED_STOPPING; 3084 } else { 3085 mPlayState = PLAYSTATE_PAUSED; 3086 } 3087 } 3088 } 3089 3090 3091 //--------------------------------------------------------- 3092 // Audio data supply 3093 //-------------------- 3094 3095 /** 3096 * Flushes the audio data currently queued for playback. Any data that has 3097 * been written but not yet presented will be discarded. No-op if not stopped or paused, 3098 * or if the track's creation mode is not {@link #MODE_STREAM}. 
3099 * <BR> Note that although data written but not yet presented is discarded, there is no 3100 * guarantee that all of the buffer space formerly used by that data 3101 * is available for a subsequent write. 3102 * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code> 3103 * less than or equal to the total buffer size 3104 * may return a short actual transfer count. 3105 */ flush()3106 public void flush() { 3107 if (mState == STATE_INITIALIZED) { 3108 // flush the data in native layer 3109 native_flush(); 3110 mAvSyncHeader = null; 3111 mAvSyncBytesRemaining = 0; 3112 } 3113 3114 } 3115 3116 /** 3117 * Writes the audio data to the audio sink for playback (streaming mode), 3118 * or copies audio data for later playback (static buffer mode). 3119 * The format specified in the AudioTrack constructor should be 3120 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 3121 * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated. 3122 * <p> 3123 * In streaming mode, the write will normally block until all the data has been enqueued for 3124 * playback, and will return a full transfer count. However, if the track is stopped or paused 3125 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 3126 * occurs during the write, then the write may return a short transfer count. 3127 * <p> 3128 * In static buffer mode, copies the data to the buffer starting at offset 0. 3129 * Note that the actual playback of this data might occur after this function returns. 3130 * 3131 * @param audioData the array that holds the data to play. 3132 * @param offsetInBytes the offset expressed in bytes in audioData where the data to write 3133 * starts. 3134 * Must not be negative, or cause the data access to go out of bounds of the array. 3135 * @param sizeInBytes the number of bytes to write in audioData after the offset. 
3136 * Must not be negative, or cause the data access to go out of bounds of the array. 3137 * @return zero or the positive number of bytes that were written, or one of the following 3138 * error codes. The number of bytes will be a multiple of the frame size in bytes 3139 * not to exceed sizeInBytes. 3140 * <ul> 3141 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 3142 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 3143 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 3144 * needs to be recreated. The dead object error code is not returned if some data was 3145 * successfully transferred. In this case, the error is returned at the next write()</li> 3146 * <li>{@link #ERROR} in case of other error</li> 3147 * </ul> 3148 * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code> 3149 * set to {@link #WRITE_BLOCKING}. 3150 */ write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes)3151 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) { 3152 return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING); 3153 } 3154 3155 /** 3156 * Writes the audio data to the audio sink for playback (streaming mode), 3157 * or copies audio data for later playback (static buffer mode). 3158 * The format specified in the AudioTrack constructor should be 3159 * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array. 3160 * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated. 3161 * <p> 3162 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 3163 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 3164 * for playback, and will return a full transfer count. 
However, if the write mode is 3165 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 3166 * interrupts the write by calling stop or pause, or an I/O error 3167 * occurs during the write, then the write may return a short transfer count. 3168 * <p> 3169 * In static buffer mode, copies the data to the buffer starting at offset 0, 3170 * and the write mode is ignored. 3171 * Note that the actual playback of this data might occur after this function returns. 3172 * 3173 * @param audioData the array that holds the data to play. 3174 * @param offsetInBytes the offset expressed in bytes in audioData where the data to write 3175 * starts. 3176 * Must not be negative, or cause the data access to go out of bounds of the array. 3177 * @param sizeInBytes the number of bytes to write in audioData after the offset. 3178 * Must not be negative, or cause the data access to go out of bounds of the array. 3179 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 3180 * effect in static mode. 3181 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 3182 * to the audio sink. 3183 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 3184 * queuing as much audio data for playback as possible without blocking. 3185 * @return zero or the positive number of bytes that were written, or one of the following 3186 * error codes. The number of bytes will be a multiple of the frame size in bytes 3187 * not to exceed sizeInBytes. 3188 * <ul> 3189 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 3190 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 3191 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 3192 * needs to be recreated. The dead object error code is not returned if some data was 3193 * successfully transferred. 
In this case, the error is returned at the next write()</li> 3194 * <li>{@link #ERROR} in case of other error</li> 3195 * </ul> 3196 */ write(@onNull byte[] audioData, int offsetInBytes, int sizeInBytes, @WriteMode int writeMode)3197 public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes, 3198 @WriteMode int writeMode) { 3199 // Note: we allow writes of extended integers and compressed formats from a byte array. 3200 if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) { 3201 return ERROR_INVALID_OPERATION; 3202 } 3203 3204 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 3205 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 3206 return ERROR_BAD_VALUE; 3207 } 3208 3209 if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0) 3210 || (offsetInBytes + sizeInBytes < 0) // detect integer overflow 3211 || (offsetInBytes + sizeInBytes > audioData.length)) { 3212 return ERROR_BAD_VALUE; 3213 } 3214 3215 if (!blockUntilOffloadDrain(writeMode)) { 3216 return 0; 3217 } 3218 3219 final int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat, 3220 writeMode == WRITE_BLOCKING); 3221 3222 if ((mDataLoadMode == MODE_STATIC) 3223 && (mState == STATE_NO_STATIC_DATA) 3224 && (ret > 0)) { 3225 // benign race with respect to other APIs that read mState 3226 mState = STATE_INITIALIZED; 3227 } 3228 3229 return ret; 3230 } 3231 3232 /** 3233 * Writes the audio data to the audio sink for playback (streaming mode), 3234 * or copies audio data for later playback (static buffer mode). 3235 * The format specified in the AudioTrack constructor should be 3236 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 3237 * <p> 3238 * In streaming mode, the write will normally block until all the data has been enqueued for 3239 * playback, and will return a full transfer count. 
However, if the track is stopped or paused 3240 * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error 3241 * occurs during the write, then the write may return a short transfer count. 3242 * <p> 3243 * In static buffer mode, copies the data to the buffer starting at offset 0. 3244 * Note that the actual playback of this data might occur after this function returns. 3245 * 3246 * @param audioData the array that holds the data to play. 3247 * @param offsetInShorts the offset expressed in shorts in audioData where the data to play 3248 * starts. 3249 * Must not be negative, or cause the data access to go out of bounds of the array. 3250 * @param sizeInShorts the number of shorts to read in audioData after the offset. 3251 * Must not be negative, or cause the data access to go out of bounds of the array. 3252 * @return zero or the positive number of shorts that were written, or one of the following 3253 * error codes. The number of shorts will be a multiple of the channel count not to 3254 * exceed sizeInShorts. 3255 * <ul> 3256 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 3257 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 3258 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 3259 * needs to be recreated. The dead object error code is not returned if some data was 3260 * successfully transferred. In this case, the error is returned at the next write()</li> 3261 * <li>{@link #ERROR} in case of other error</li> 3262 * </ul> 3263 * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code> 3264 * set to {@link #WRITE_BLOCKING}. 
3265 */ write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts)3266 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) { 3267 return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING); 3268 } 3269 3270 /** 3271 * Writes the audio data to the audio sink for playback (streaming mode), 3272 * or copies audio data for later playback (static buffer mode). 3273 * The format specified in the AudioTrack constructor should be 3274 * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array. 3275 * <p> 3276 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 3277 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 3278 * for playback, and will return a full transfer count. However, if the write mode is 3279 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 3280 * interrupts the write by calling stop or pause, or an I/O error 3281 * occurs during the write, then the write may return a short transfer count. 3282 * <p> 3283 * In static buffer mode, copies the data to the buffer starting at offset 0. 3284 * Note that the actual playback of this data might occur after this function returns. 3285 * 3286 * @param audioData the array that holds the data to write. 3287 * @param offsetInShorts the offset expressed in shorts in audioData where the data to write 3288 * starts. 3289 * Must not be negative, or cause the data access to go out of bounds of the array. 3290 * @param sizeInShorts the number of shorts to read in audioData after the offset. 3291 * Must not be negative, or cause the data access to go out of bounds of the array. 3292 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 3293 * effect in static mode. 3294 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 3295 * to the audio sink. 
3296 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 3297 * queuing as much audio data for playback as possible without blocking. 3298 * @return zero or the positive number of shorts that were written, or one of the following 3299 * error codes. The number of shorts will be a multiple of the channel count not to 3300 * exceed sizeInShorts. 3301 * <ul> 3302 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 3303 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 3304 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 3305 * needs to be recreated. The dead object error code is not returned if some data was 3306 * successfully transferred. In this case, the error is returned at the next write()</li> 3307 * <li>{@link #ERROR} in case of other error</li> 3308 * </ul> 3309 */ write(@onNull short[] audioData, int offsetInShorts, int sizeInShorts, @WriteMode int writeMode)3310 public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts, 3311 @WriteMode int writeMode) { 3312 3313 if (mState == STATE_UNINITIALIZED 3314 || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT 3315 // use ByteBuffer or byte[] instead for later encodings 3316 || mAudioFormat > AudioFormat.ENCODING_LEGACY_SHORT_ARRAY_THRESHOLD) { 3317 return ERROR_INVALID_OPERATION; 3318 } 3319 3320 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 3321 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 3322 return ERROR_BAD_VALUE; 3323 } 3324 3325 if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0) 3326 || (offsetInShorts + sizeInShorts < 0) // detect integer overflow 3327 || (offsetInShorts + sizeInShorts > audioData.length)) { 3328 return ERROR_BAD_VALUE; 3329 } 3330 3331 if (!blockUntilOffloadDrain(writeMode)) { 3332 return 0; 3333 } 3334 3335 final int ret = native_write_short(audioData, 
offsetInShorts, sizeInShorts, mAudioFormat, 3336 writeMode == WRITE_BLOCKING); 3337 3338 if ((mDataLoadMode == MODE_STATIC) 3339 && (mState == STATE_NO_STATIC_DATA) 3340 && (ret > 0)) { 3341 // benign race with respect to other APIs that read mState 3342 mState = STATE_INITIALIZED; 3343 } 3344 3345 return ret; 3346 } 3347 3348 /** 3349 * Writes the audio data to the audio sink for playback (streaming mode), 3350 * or copies audio data for later playback (static buffer mode). 3351 * The format specified in the AudioTrack constructor should be 3352 * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array. 3353 * <p> 3354 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 3355 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 3356 * for playback, and will return a full transfer count. However, if the write mode is 3357 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 3358 * interrupts the write by calling stop or pause, or an I/O error 3359 * occurs during the write, then the write may return a short transfer count. 3360 * <p> 3361 * In static buffer mode, copies the data to the buffer starting at offset 0, 3362 * and the write mode is ignored. 3363 * Note that the actual playback of this data might occur after this function returns. 3364 * 3365 * @param audioData the array that holds the data to write. 3366 * The implementation does not clip for sample values within the nominal range 3367 * [-1.0f, 1.0f], provided that all gains in the audio pipeline are 3368 * less than or equal to unity (1.0f), and in the absence of post-processing effects 3369 * that could add energy, such as reverb. For the convenience of applications 3370 * that compute samples using filters with non-unity gain, 3371 * sample values +3 dB beyond the nominal range are permitted. 
3372 * However such values may eventually be limited or clipped, depending on various gains 3373 * and later processing in the audio path. Therefore applications are encouraged 3374 * to provide samples values within the nominal range. 3375 * @param offsetInFloats the offset, expressed as a number of floats, 3376 * in audioData where the data to write starts. 3377 * Must not be negative, or cause the data access to go out of bounds of the array. 3378 * @param sizeInFloats the number of floats to write in audioData after the offset. 3379 * Must not be negative, or cause the data access to go out of bounds of the array. 3380 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 3381 * effect in static mode. 3382 * <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 3383 * to the audio sink. 3384 * <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 3385 * queuing as much audio data for playback as possible without blocking. 3386 * @return zero or the positive number of floats that were written, or one of the following 3387 * error codes. The number of floats will be a multiple of the channel count not to 3388 * exceed sizeInFloats. 3389 * <ul> 3390 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 3391 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 3392 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 3393 * needs to be recreated. The dead object error code is not returned if some data was 3394 * successfully transferred. 
In this case, the error is returned at the next write()</li> 3395 * <li>{@link #ERROR} in case of other error</li> 3396 * </ul> 3397 */ write(@onNull float[] audioData, int offsetInFloats, int sizeInFloats, @WriteMode int writeMode)3398 public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats, 3399 @WriteMode int writeMode) { 3400 3401 if (mState == STATE_UNINITIALIZED) { 3402 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 3403 return ERROR_INVALID_OPERATION; 3404 } 3405 3406 if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) { 3407 Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT"); 3408 return ERROR_INVALID_OPERATION; 3409 } 3410 3411 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 3412 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 3413 return ERROR_BAD_VALUE; 3414 } 3415 3416 if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0) 3417 || (offsetInFloats + sizeInFloats < 0) // detect integer overflow 3418 || (offsetInFloats + sizeInFloats > audioData.length)) { 3419 Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size"); 3420 return ERROR_BAD_VALUE; 3421 } 3422 3423 if (!blockUntilOffloadDrain(writeMode)) { 3424 return 0; 3425 } 3426 3427 final int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat, 3428 writeMode == WRITE_BLOCKING); 3429 3430 if ((mDataLoadMode == MODE_STATIC) 3431 && (mState == STATE_NO_STATIC_DATA) 3432 && (ret > 0)) { 3433 // benign race with respect to other APIs that read mState 3434 mState = STATE_INITIALIZED; 3435 } 3436 3437 return ret; 3438 } 3439 3440 3441 /** 3442 * Writes the audio data to the audio sink for playback (streaming mode), 3443 * or copies audio data for later playback (static buffer mode). 3444 * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor. 
3445 * <p> 3446 * In streaming mode, the blocking behavior depends on the write mode. If the write mode is 3447 * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued 3448 * for playback, and will return a full transfer count. However, if the write mode is 3449 * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread 3450 * interrupts the write by calling stop or pause, or an I/O error 3451 * occurs during the write, then the write may return a short transfer count. 3452 * <p> 3453 * In static buffer mode, copies the data to the buffer starting at offset 0, 3454 * and the write mode is ignored. 3455 * Note that the actual playback of this data might occur after this function returns. 3456 * 3457 * @param audioData the buffer that holds the data to write, starting at the position reported 3458 * by <code>audioData.position()</code>. 3459 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 3460 * have been advanced to reflect the amount of data that was successfully written to 3461 * the AudioTrack. 3462 * @param sizeInBytes number of bytes to write. It is recommended but not enforced 3463 * that the number of bytes requested be a multiple of the frame size (sample size in 3464 * bytes multiplied by the channel count). 3465 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 3466 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no 3467 * effect in static mode. 3468 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 3469 * to the audio sink. 3470 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 3471 * queuing as much audio data for playback as possible without blocking. 3472 * @return zero or the positive number of bytes that were written, or one of the following 3473 * error codes. 
3474 * <ul> 3475 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 3476 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 3477 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 3478 * needs to be recreated. The dead object error code is not returned if some data was 3479 * successfully transferred. In this case, the error is returned at the next write()</li> 3480 * <li>{@link #ERROR} in case of other error</li> 3481 * </ul> 3482 */ write(@onNull ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode)3483 public int write(@NonNull ByteBuffer audioData, int sizeInBytes, 3484 @WriteMode int writeMode) { 3485 3486 if (mState == STATE_UNINITIALIZED) { 3487 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 3488 return ERROR_INVALID_OPERATION; 3489 } 3490 3491 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 3492 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 3493 return ERROR_BAD_VALUE; 3494 } 3495 3496 if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) { 3497 Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value"); 3498 return ERROR_BAD_VALUE; 3499 } 3500 3501 if (!blockUntilOffloadDrain(writeMode)) { 3502 return 0; 3503 } 3504 3505 int ret = 0; 3506 if (audioData.isDirect()) { 3507 ret = native_write_native_bytes(audioData, 3508 audioData.position(), sizeInBytes, mAudioFormat, 3509 writeMode == WRITE_BLOCKING); 3510 } else { 3511 ret = native_write_byte(NioUtils.unsafeArray(audioData), 3512 NioUtils.unsafeArrayOffset(audioData) + audioData.position(), 3513 sizeInBytes, mAudioFormat, 3514 writeMode == WRITE_BLOCKING); 3515 } 3516 3517 if ((mDataLoadMode == MODE_STATIC) 3518 && (mState == STATE_NO_STATIC_DATA) 3519 && (ret > 0)) { 3520 // benign race with respect to other APIs that read mState 3521 mState = 
STATE_INITIALIZED; 3522 } 3523 3524 if (ret > 0) { 3525 audioData.position(audioData.position() + ret); 3526 } 3527 3528 return ret; 3529 } 3530 3531 /** 3532 * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track. 3533 * The blocking behavior will depend on the write mode. 3534 * @param audioData the buffer that holds the data to write, starting at the position reported 3535 * by <code>audioData.position()</code>. 3536 * <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will 3537 * have been advanced to reflect the amount of data that was successfully written to 3538 * the AudioTrack. 3539 * @param sizeInBytes number of bytes to write. It is recommended but not enforced 3540 * that the number of bytes requested be a multiple of the frame size (sample size in 3541 * bytes multiplied by the channel count). 3542 * <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it. 3543 * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. 3544 * <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written 3545 * to the audio sink. 3546 * <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after 3547 * queuing as much audio data for playback as possible without blocking. 3548 * @param timestamp The timestamp, in nanoseconds, of the first decodable audio frame in the 3549 * provided audioData. 3550 * @return zero or the positive number of bytes that were written, or one of the following 3551 * error codes. 3552 * <ul> 3553 * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li> 3554 * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li> 3555 * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and 3556 * needs to be recreated. The dead object error code is not returned if some data was 3557 * successfully transferred. 
In this case, the error is returned at the next write()</li> 3558 * <li>{@link #ERROR} in case of other error</li> 3559 * </ul> 3560 */ write(@onNull ByteBuffer audioData, int sizeInBytes, @WriteMode int writeMode, long timestamp)3561 public int write(@NonNull ByteBuffer audioData, int sizeInBytes, 3562 @WriteMode int writeMode, long timestamp) { 3563 3564 if (mState == STATE_UNINITIALIZED) { 3565 Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED"); 3566 return ERROR_INVALID_OPERATION; 3567 } 3568 3569 if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) { 3570 Log.e(TAG, "AudioTrack.write() called with invalid blocking mode"); 3571 return ERROR_BAD_VALUE; 3572 } 3573 3574 if (mDataLoadMode != MODE_STREAM) { 3575 Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track"); 3576 return ERROR_INVALID_OPERATION; 3577 } 3578 3579 if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) { 3580 Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. 
Ignoring pts..."); 3581 return write(audioData, sizeInBytes, writeMode); 3582 } 3583 3584 if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) { 3585 Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value"); 3586 return ERROR_BAD_VALUE; 3587 } 3588 3589 if (!blockUntilOffloadDrain(writeMode)) { 3590 return 0; 3591 } 3592 3593 // create timestamp header if none exists 3594 if (mAvSyncHeader == null) { 3595 mAvSyncHeader = ByteBuffer.allocate(mOffset); 3596 mAvSyncHeader.order(ByteOrder.BIG_ENDIAN); 3597 mAvSyncHeader.putInt(0x55550002); 3598 } 3599 3600 if (mAvSyncBytesRemaining == 0) { 3601 mAvSyncHeader.putInt(4, sizeInBytes); 3602 mAvSyncHeader.putLong(8, timestamp); 3603 mAvSyncHeader.putInt(16, mOffset); 3604 mAvSyncHeader.position(0); 3605 mAvSyncBytesRemaining = sizeInBytes; 3606 } 3607 3608 // write timestamp header if not completely written already 3609 int ret = 0; 3610 if (mAvSyncHeader.remaining() != 0) { 3611 ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode); 3612 if (ret < 0) { 3613 Log.e(TAG, "AudioTrack.write() could not write timestamp header!"); 3614 mAvSyncHeader = null; 3615 mAvSyncBytesRemaining = 0; 3616 return ret; 3617 } 3618 if (mAvSyncHeader.remaining() > 0) { 3619 Log.v(TAG, "AudioTrack.write() partial timestamp header written."); 3620 return 0; 3621 } 3622 } 3623 3624 // write audio data 3625 int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes); 3626 ret = write(audioData, sizeToWrite, writeMode); 3627 if (ret < 0) { 3628 Log.e(TAG, "AudioTrack.write() could not write audio data!"); 3629 mAvSyncHeader = null; 3630 mAvSyncBytesRemaining = 0; 3631 return ret; 3632 } 3633 3634 mAvSyncBytesRemaining -= ret; 3635 3636 return ret; 3637 } 3638 3639 3640 /** 3641 * Sets the playback head position within the static buffer to zero, 3642 * that is it rewinds to start of static buffer. 
3643 * The track must be stopped or paused, and 3644 * the track's creation mode must be {@link #MODE_STATIC}. 3645 * <p> 3646 * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by 3647 * {@link #getPlaybackHeadPosition()} to zero. 3648 * For earlier API levels, the reset behavior is unspecified. 3649 * <p> 3650 * Use {@link #setPlaybackHeadPosition(int)} with a zero position 3651 * if the reset of <code>getPlaybackHeadPosition()</code> is not needed. 3652 * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE}, 3653 * {@link #ERROR_INVALID_OPERATION} 3654 */ reloadStaticData()3655 public int reloadStaticData() { 3656 if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) { 3657 return ERROR_INVALID_OPERATION; 3658 } 3659 return native_reload_static(); 3660 } 3661 3662 /** 3663 * When an AudioTrack in offload mode is in STOPPING play state, wait until event STREAM_END is 3664 * received if blocking write or return with 0 frames written if non blocking mode. 3665 */ blockUntilOffloadDrain(int writeMode)3666 private boolean blockUntilOffloadDrain(int writeMode) { 3667 synchronized (mPlayStateLock) { 3668 while (mPlayState == PLAYSTATE_STOPPING || mPlayState == PLAYSTATE_PAUSED_STOPPING) { 3669 if (writeMode == WRITE_NON_BLOCKING) { 3670 return false; 3671 } 3672 try { 3673 mPlayStateLock.wait(); 3674 } catch (InterruptedException e) { 3675 } 3676 } 3677 return true; 3678 } 3679 } 3680 3681 //-------------------------------------------------------------------------- 3682 // Audio effects management 3683 //-------------------- 3684 3685 /** 3686 * Attaches an auxiliary effect to the audio track. A typical auxiliary 3687 * effect is a reverberation effect which can be applied on any sound source 3688 * that directs a certain amount of its energy to this effect. This amount 3689 * is defined by setAuxEffectSendLevel(). 3690 * {@see #setAuxEffectSendLevel(float)}. 
     * <p>After creating an auxiliary effect (e.g.
     * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
     * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
     * this method to attach the audio track to the effect.
     * <p>To detach the effect from the audio track, call this method with a
     * null effect id.
     *
     * @param effectId system wide unique id of the effect to attach
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
     */
    public int attachAuxEffect(int effectId) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return native_attachAuxEffect(effectId);
    }

    /**
     * Sets the send level of the audio track to the attached auxiliary effect
     * {@link #attachAuxEffect(int)}. Effect levels
     * are clamped to the closed interval [0.0, max] where
     * max is the value of {@link #getMaxVolume}.
     * A value of 0.0 results in no effect, and a value of 1.0 is full send.
     * <p>By default the send level is 0.0f, so even if an effect is attached to the player
     * this method must be called for the effect to be applied.
     * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
     * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
     * so an appropriate conversion from linear UI input x to level is:
     * x == 0 -> level = 0
     * 0 < x <= R -> level = 10^(72*(x-R)/20/R)
     *
     * @param level linear send level
     * @return error code or success, see {@link #SUCCESS},
     *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
     */
    public int setAuxEffectSendLevel(@FloatRange(from = 0.0) float level) {
        if (mState == STATE_UNINITIALIZED) {
            return ERROR_INVALID_OPERATION;
        }
        return baseSetAuxEffectSendLevel(level);
    }

    @Override
    int playerSetAuxEffectSendLevel(boolean muting, float level) {
        // When muted, force the send level to silence regardless of the requested value.
        level = clampGainOrLevel(muting ? 0.0f : level);
        int err = native_setAuxEffectSendLevel(level);
        return err == 0 ? SUCCESS : ERROR;
    }

    //--------------------------------------------------------------------------
    // Explicit Routing
    //--------------------
    // Last device successfully set through setPreferredDevice(), or null for default routing.
    private AudioDeviceInfo mPreferredDevice = null;

    /**
     * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
     * the output from this AudioTrack.
     * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
     *  If deviceInfo is null, default routing is restored.
     * @return true if successful, false if the specified {@link AudioDeviceInfo} is non-null and
     * does not correspond to a valid audio output device.
     */
    @Override
    public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
        // Do some validation....
        if (deviceInfo != null && !deviceInfo.isSink()) {
            return false;
        }
        // A device id of 0 tells the native layer to restore default routing.
        int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
        boolean status = native_setOutputDevice(preferredDeviceId);
        if (status == true) {
            synchronized (this) {
                mPreferredDevice = deviceInfo;
            }
        }
        return status;
    }

    /**
     * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
     * is not guaranteed to correspond to the actual device being used for playback.
     */
    @Override
    public AudioDeviceInfo getPreferredDevice() {
        synchronized (this) {
            return mPreferredDevice;
        }
    }

    /**
     * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
     * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
     * <code>getRoutedDevice()</code> will return null.
     */
    @Override
    public AudioDeviceInfo getRoutedDevice() {
        int deviceId = native_getRoutedDeviceId();
        if (deviceId == 0) {
            // Not currently routed (e.g. the track is not playing).
            return null;
        }
        return AudioManager.getDeviceForPortId(deviceId, AudioManager.GET_DEVICES_OUTPUTS);
    }

    // Turns off the self routing monitor and, if it was the last client,
    // the native device callback as well.
    private void tryToDisableNativeRoutingCallback() {
        synchronized (mRoutingChangeListeners) {
            if (mEnableSelfRoutingMonitor) {
                mEnableSelfRoutingMonitor = false;
                testDisableNativeRoutingCallbacksLocked();
            }
        }
    }

    /**
     * Call BEFORE adding a routing callback handler and when enabling self routing listener
     * @return returns true for success, false otherwise.
     */
    @GuardedBy("mRoutingChangeListeners")
    private boolean testEnableNativeRoutingCallbacksLocked() {
        // Only enable the native callback for the first client (listener or self monitor);
        // later clients share the already-enabled callback.
        if (mRoutingChangeListeners.size() == 0 && !mEnableSelfRoutingMonitor) {
            try {
                native_enableDeviceCallback();
                return true;
            } catch (IllegalStateException e) {
                if (Log.isLoggable(TAG, Log.DEBUG)) {
                    Log.d(TAG, "testEnableNativeRoutingCallbacks failed", e);
                }
            }
        }
        return false;
    }

    /*
     * Call AFTER removing a routing callback handler and when disabling self routing listener.
     */
    @GuardedBy("mRoutingChangeListeners")
    private void testDisableNativeRoutingCallbacksLocked() {
        // Only disable the native callback once the last client is gone.
        if (mRoutingChangeListeners.size() == 0 && !mEnableSelfRoutingMonitor) {
            try {
                native_disableDeviceCallback();
            } catch (IllegalStateException e) {
                // Fail silently as track state could have changed in between stop
                // and disabling routing callback
            }
        }
    }

    //--------------------------------------------------------------------------
    // (Re)Routing Info
    //--------------------
    /**
     * The list of AudioRouting.OnRoutingChangedListener interfaces added (with
     * {@link #addOnRoutingChangedListener(android.media.AudioRouting.OnRoutingChangedListener, Handler)}
     * by an app to receive (re)routing notifications.
     */
    @GuardedBy("mRoutingChangeListeners")
    private ArrayMap<AudioRouting.OnRoutingChangedListener,
            NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>();

    @GuardedBy("mRoutingChangeListeners")
    private boolean mEnableSelfRoutingMonitor;

    /**
     * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
     * changes on this AudioTrack.
3856 * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive 3857 * notifications of rerouting events. 3858 * @param handler Specifies the {@link Handler} object for the thread on which to execute 3859 * the callback. If <code>null</code>, the {@link Handler} associated with the main 3860 * {@link Looper} will be used. 3861 */ 3862 @Override addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, Handler handler)3863 public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener, 3864 Handler handler) { 3865 synchronized (mRoutingChangeListeners) { 3866 if (listener != null && !mRoutingChangeListeners.containsKey(listener)) { 3867 mEnableSelfRoutingMonitor = testEnableNativeRoutingCallbacksLocked(); 3868 mRoutingChangeListeners.put( 3869 listener, new NativeRoutingEventHandlerDelegate(this, listener, 3870 handler != null ? handler : new Handler(mInitializationLooper))); 3871 } 3872 } 3873 } 3874 3875 /** 3876 * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added 3877 * to receive rerouting notifications. 3878 * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface 3879 * to remove. 3880 */ 3881 @Override removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener)3882 public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) { 3883 synchronized (mRoutingChangeListeners) { 3884 if (mRoutingChangeListeners.containsKey(listener)) { 3885 mRoutingChangeListeners.remove(listener); 3886 } 3887 testDisableNativeRoutingCallbacksLocked(); 3888 } 3889 } 3890 3891 //-------------------------------------------------------------------------- 3892 // (Re)Routing Info 3893 //-------------------- 3894 /** 3895 * Defines the interface by which applications can receive notifications of 3896 * routing changes for the associated {@link AudioTrack}. 
     *
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener {
        /**
         * Called when the routing of an AudioTrack changes from either an
         * explicit or policy rerouting. Use {@link #getRoutedDevice()} to
         * retrieve the newly routed-to device.
         */
        public void onRoutingChanged(AudioTrack audioTrack);

        @Override
        default public void onRoutingChanged(AudioRouting router) {
            // Bridge the generic AudioRouting callback to the AudioTrack-typed one.
            if (router instanceof AudioTrack) {
                onRoutingChanged((AudioTrack) router);
            }
        }
    }

    /**
     * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
     * on this AudioTrack.
     * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
     * of rerouting events.
     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
     * the callback. If <code>null</code>, the {@link Handler} associated with the main
     * {@link Looper} will be used.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
            android.os.Handler handler) {
        // Delegate to the AudioRouting-based implementation.
        addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler);
    }

    /**
     * Removes an {@link OnRoutingChangedListener} which has been previously added
     * to receive rerouting notifications.
     * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
     * @deprecated users should switch to the general purpose
     *             {@link AudioRouting.OnRoutingChangedListener} class instead.
     */
    @Deprecated
    public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
        // Delegate to the AudioRouting-based implementation.
        removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener);
    }

    /**
     * Sends device list change notification to all listeners.
     */
    private void broadcastRoutingChange() {
        AudioManager.resetAudioPortGeneration();
        baseUpdateDeviceId(getRoutedDevice());
        synchronized (mRoutingChangeListeners) {
            for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) {
                delegate.notifyClient();
            }
        }
    }

    //--------------------------------------------------------------------------
    // Codec notifications
    //--------------------

    // OnCodecFormatChangedListener notifications use an instance
    // of ListenerList to manage its listeners.

    private final Utils.ListenerList<AudioMetadataReadMap> mCodecFormatChangedListeners =
            new Utils.ListenerList();

    /**
     * Interface definition for a listener for codec format changes.
     */
    public interface OnCodecFormatChangedListener {
        /**
         * Called when the compressed codec format changes.
         *
         * @param audioTrack is the {@code AudioTrack} instance associated with the codec.
         * @param info is a {@link AudioMetadataReadMap} of values which contains decoded format
         *     changes reported by the codec.  Not all hardware
         *     codecs indicate codec format changes. Acceptable keys are taken from
         *     {@code AudioMetadata.Format.KEY_*} range, with the associated value type.
         */
        void onCodecFormatChanged(
                @NonNull AudioTrack audioTrack, @Nullable AudioMetadataReadMap info);
    }

    /**
     * Adds an {@link OnCodecFormatChangedListener} to receive notifications of
     * codec format change events on this {@code AudioTrack}.
     *
     * @param executor Specifies the {@link Executor} object to control execution.
     *
     * @param listener The {@link OnCodecFormatChangedListener} interface to receive
     *     notifications of codec events.
     */
    public void addOnCodecFormatChangedListener(
            @NonNull @CallbackExecutor Executor executor,
            @NonNull OnCodecFormatChangedListener listener) { // NPE checks done by ListenerList.
        mCodecFormatChangedListeners.add(
                listener, /* key for removal */
                executor,
                (int eventCode, AudioMetadataReadMap readMap) -> {
                    // eventCode is unused by this implementation.
                    listener.onCodecFormatChanged(this, readMap);
                }
        );
    }

    /**
     * Removes an {@link OnCodecFormatChangedListener} which has been previously added
     * to receive codec format change events.
     *
     * @param listener The previously added {@link OnCodecFormatChangedListener} interface
     *     to remove.
     */
    public void removeOnCodecFormatChangedListener(
            @NonNull OnCodecFormatChangedListener listener) {
        mCodecFormatChangedListeners.remove(listener);  // NPE checks done by ListenerList.
    }

    //---------------------------------------------------------
    // Interface definitions
    //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         */
        void onPeriodicNotification(AudioTrack track);
    }

    /**
     * Abstract class to receive event notifications about the stream playback in offloaded mode.
     * See {@link AudioTrack#registerStreamEventCallback(Executor, StreamEventCallback)} to register
     * the callback on the given {@link AudioTrack} instance.
     */
    public abstract static class StreamEventCallback {
        /**
         * Called when an offloaded track is no longer valid and has been discarded by the system.
         * An example of this happening is when an offloaded track has been paused too long, and
         * gets invalidated by the system to prevent any other offload.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onTearDown(@NonNull AudioTrack track) { }
        /**
         * Called when all the buffers of an offloaded track that were queued in the audio system
         * (e.g. the combination of the Android audio framework and the device's audio hardware)
         * have been played after {@link AudioTrack#stop()} has been called.
         * @param track the {@link AudioTrack} on which the event happened.
         */
        public void onPresentationEnded(@NonNull AudioTrack track) { }
        /**
         * Called when more audio data can be written without blocking on an offloaded track.
         * @param track the {@link AudioTrack} on which the event happened.
         * @param sizeInFrames the number of frames available to write without blocking.
         *     Note that the frame size of a compressed stream is 1 byte.
         */
        public void onDataRequest(@NonNull AudioTrack track, @IntRange(from = 0) int sizeInFrames) {
        }
    }

    /**
     * Registers a callback for the notification of stream events.
     * This callback can only be registered for instances operating in offloaded mode
     * (see {@link AudioTrack.Builder#setOffloadedPlayback(boolean)} and
     * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat,AudioAttributes)} for
     * more details).
     * @param executor {@link Executor} to handle the callbacks.
     * @param eventCallback the callback to receive the stream event notifications.
4080 */ registerStreamEventCallback(@onNull @allbackExecutor Executor executor, @NonNull StreamEventCallback eventCallback)4081 public void registerStreamEventCallback(@NonNull @CallbackExecutor Executor executor, 4082 @NonNull StreamEventCallback eventCallback) { 4083 if (eventCallback == null) { 4084 throw new IllegalArgumentException("Illegal null StreamEventCallback"); 4085 } 4086 if (!mOffloaded) { 4087 throw new IllegalStateException( 4088 "Cannot register StreamEventCallback on non-offloaded AudioTrack"); 4089 } 4090 if (executor == null) { 4091 throw new IllegalArgumentException("Illegal null Executor for the StreamEventCallback"); 4092 } 4093 synchronized (mStreamEventCbLock) { 4094 // check if eventCallback already in list 4095 for (StreamEventCbInfo seci : mStreamEventCbInfoList) { 4096 if (seci.mStreamEventCb == eventCallback) { 4097 throw new IllegalArgumentException( 4098 "StreamEventCallback already registered"); 4099 } 4100 } 4101 beginStreamEventHandling(); 4102 mStreamEventCbInfoList.add(new StreamEventCbInfo(executor, eventCallback)); 4103 } 4104 } 4105 4106 /** 4107 * Unregisters the callback for notification of stream events, previously registered 4108 * with {@link #registerStreamEventCallback(Executor, StreamEventCallback)}. 4109 * @param eventCallback the callback to unregister. 
4110 */ unregisterStreamEventCallback(@onNull StreamEventCallback eventCallback)4111 public void unregisterStreamEventCallback(@NonNull StreamEventCallback eventCallback) { 4112 if (eventCallback == null) { 4113 throw new IllegalArgumentException("Illegal null StreamEventCallback"); 4114 } 4115 if (!mOffloaded) { 4116 throw new IllegalStateException("No StreamEventCallback on non-offloaded AudioTrack"); 4117 } 4118 synchronized (mStreamEventCbLock) { 4119 StreamEventCbInfo seciToRemove = null; 4120 for (StreamEventCbInfo seci : mStreamEventCbInfoList) { 4121 if (seci.mStreamEventCb == eventCallback) { 4122 // ok to remove while iterating over list as we exit iteration 4123 mStreamEventCbInfoList.remove(seci); 4124 if (mStreamEventCbInfoList.size() == 0) { 4125 endStreamEventHandling(); 4126 } 4127 return; 4128 } 4129 } 4130 throw new IllegalArgumentException("StreamEventCallback was not registered"); 4131 } 4132 } 4133 4134 //--------------------------------------------------------- 4135 // Offload 4136 //-------------------- 4137 private static class StreamEventCbInfo { 4138 final Executor mStreamEventExec; 4139 final StreamEventCallback mStreamEventCb; 4140 StreamEventCbInfo(Executor e, StreamEventCallback cb)4141 StreamEventCbInfo(Executor e, StreamEventCallback cb) { 4142 mStreamEventExec = e; 4143 mStreamEventCb = cb; 4144 } 4145 } 4146 4147 private final Object mStreamEventCbLock = new Object(); 4148 @GuardedBy("mStreamEventCbLock") 4149 @NonNull private LinkedList<StreamEventCbInfo> mStreamEventCbInfoList = 4150 new LinkedList<StreamEventCbInfo>(); 4151 /** 4152 * Dedicated thread for handling the StreamEvent callbacks 4153 */ 4154 private @Nullable HandlerThread mStreamEventHandlerThread; 4155 private @Nullable volatile StreamEventHandler mStreamEventHandler; 4156 4157 /** 4158 * Called from native AudioTrack callback thread, filter messages if necessary 4159 * and repost event on AudioTrack message loop to prevent blocking native thread. 
     * @param what event code received from native
     * @param arg optional argument for event
     */
    void handleStreamEventFromNative(int what, int arg) {
        if (mStreamEventHandler == null) {
            // No stream event callback registered: nothing to dispatch.
            return;
        }
        switch (what) {
            case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
                // replace previous CAN_WRITE_MORE_DATA messages with the latest value
                mStreamEventHandler.removeMessages(NATIVE_EVENT_CAN_WRITE_MORE_DATA);
                mStreamEventHandler.sendMessage(
                        mStreamEventHandler.obtainMessage(
                                NATIVE_EVENT_CAN_WRITE_MORE_DATA, arg, 0/*ignored*/));
                break;
            case NATIVE_EVENT_NEW_IAUDIOTRACK:
                mStreamEventHandler.sendMessage(
                        mStreamEventHandler.obtainMessage(NATIVE_EVENT_NEW_IAUDIOTRACK));
                break;
            case NATIVE_EVENT_STREAM_END:
                mStreamEventHandler.sendMessage(
                        mStreamEventHandler.obtainMessage(NATIVE_EVENT_STREAM_END));
                break;
        }
    }

    // Dispatches stream events to the registered callbacks on the dedicated handler thread.
    private class StreamEventHandler extends Handler {

        StreamEventHandler(Looper looper) {
            super(looper);
        }

        @Override
        public void handleMessage(Message msg) {
            final LinkedList<StreamEventCbInfo> cbInfoList;
            synchronized (mStreamEventCbLock) {
                if (msg.what == NATIVE_EVENT_STREAM_END) {
                    synchronized (mPlayStateLock) {
                        if (mPlayState == PLAYSTATE_STOPPING) {
                            if (mOffloadEosPending) {
                                // Another drain was requested while stopping: resume playback.
                                native_start();
                                mPlayState = PLAYSTATE_PLAYING;
                            } else {
                                mAvSyncHeader = null;
                                mAvSyncBytesRemaining = 0;
                                mPlayState = PLAYSTATE_STOPPED;
                            }
                            mOffloadEosPending = false;
                            // Wake any writer blocked in blockUntilOffloadDrain().
                            mPlayStateLock.notify();
                        }
                    }
                }
                if (mStreamEventCbInfoList.size() == 0) {
                    return;
                }
                // Snapshot the list so callbacks execute outside the lock.
                cbInfoList = new LinkedList<StreamEventCbInfo>(mStreamEventCbInfoList);
            }

            final long identity = Binder.clearCallingIdentity();
            try {
                for (StreamEventCbInfo cbi : cbInfoList) {
                    switch (msg.what) {
                        case NATIVE_EVENT_CAN_WRITE_MORE_DATA:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onDataRequest(AudioTrack.this, msg.arg1));
                            break;
                        case NATIVE_EVENT_NEW_IAUDIOTRACK:
                            // TODO also release track as it's not longer usable
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onTearDown(AudioTrack.this));
                            break;
                        case NATIVE_EVENT_STREAM_END:
                            cbi.mStreamEventExec.execute(() ->
                                    cbi.mStreamEventCb.onPresentationEnded(AudioTrack.this));
                            break;
                    }
                }
            } finally {
                Binder.restoreCallingIdentity(identity);
            }
        }
    }

    @GuardedBy("mStreamEventCbLock")
    private void beginStreamEventHandling() {
        // Lazily create the handler thread on first callback registration.
        if (mStreamEventHandlerThread == null) {
            mStreamEventHandlerThread = new HandlerThread(TAG + ".StreamEvent");
            mStreamEventHandlerThread.start();
            final Looper looper = mStreamEventHandlerThread.getLooper();
            if (looper != null) {
                mStreamEventHandler = new StreamEventHandler(looper);
            }
        }
    }

    @GuardedBy("mStreamEventCbLock")
    private void endStreamEventHandling() {
        // Quit the handler thread once the last callback is unregistered.
        if (mStreamEventHandlerThread != null) {
            mStreamEventHandlerThread.quit();
            mStreamEventHandlerThread = null;
        }
    }

    /**
     * Sets a {@link LogSessionId} instance to this AudioTrack for metrics collection.
     *
     * @param logSessionId a {@link LogSessionId} instance which is used to
     *                     identify this object to the metrics service. Proper generated
     *                     Ids must be obtained from the Java metrics service and should
     *                     be considered opaque. Use
     *                     {@link LogSessionId#LOG_SESSION_ID_NONE} to remove the
     *                     logSessionId association.
     * @throws IllegalStateException if AudioTrack not initialized.
     *
     */
    public void setLogSessionId(@NonNull LogSessionId logSessionId) {
        Objects.requireNonNull(logSessionId);
        if (mState == STATE_UNINITIALIZED) {
            throw new IllegalStateException("track not initialized");
        }
        String stringId = logSessionId.getStringId();
        native_setLogSessionId(stringId);
        mLogSessionId = logSessionId;
    }

    /**
     * Returns the {@link LogSessionId}.
     */
    @NonNull
    public LogSessionId getLogSessionId() {
        return mLogSessionId;
    }

    //---------------------------------------------------------
    // Inner classes
    //--------------------
    /**
     * Helper class to handle the forwarding of native events to the appropriate listener
     * (potentially) handled in a different thread
     */
    private class NativePositionEventHandlerDelegate {
        private final Handler mHandler;

        NativePositionEventHandlerDelegate(final AudioTrack track,
                final OnPlaybackPositionUpdateListener listener,
                Handler handler) {
            // find the looper for our new event handler
            Looper looper;
            if (handler != null) {
                looper = handler.getLooper();
            } else {
                // no given handler, use the looper the AudioTrack was created in
                looper = mInitializationLooper;
            }

            // construct the event handler with this looper
            if (looper != null) {
                // implement the event handler delegate
                mHandler = new Handler(looper) {
                    @Override
                    public void handleMessage(Message msg) {
                        if (track == null) {
                            return;
                        }
                        switch(msg.what) {
                            case NATIVE_EVENT_MARKER:
                                if (listener != null) {
                                    listener.onMarkerReached(track);
                                }
                                break;
                            case NATIVE_EVENT_NEW_POS:
                                if (listener != null) {
                                    listener.onPeriodicNotification(track);
                                }
                                break;
                            default:
                                loge("Unknown native event type: " + msg.what);
                                break;
                        }
                    }
                };
            } else {
                // No looper available: position events cannot be delivered.
                mHandler = null;
            }
        }

        Handler getHandler() {
            return mHandler;
        }
    }

    //---------------------------------------------------------
    // Methods for IPlayer interface
    //--------------------
    @Override
    void playerStart() {
        play();
    }

    @Override
    void playerPause() {
        pause();
    }

    @Override
    void playerStop() {
        stop();
    }

    //---------------------------------------------------------
    // Java methods called from the native side
    //--------------------
    @SuppressWarnings("unused")
    @UnsupportedAppUsage(maxTargetSdk = Build.VERSION_CODES.R, trackingBug = 170729553)
    private static void postEventFromNative(Object audiotrack_ref,
            int what, int arg1, int arg2, Object obj) {
        //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
        // The native layer holds only a weak reference; the track may already be collected.
        final AudioTrack track = (AudioTrack) ((WeakReference) audiotrack_ref).get();
        if (track == null) {
            return;
        }

        if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
            track.broadcastRoutingChange();
            return;
        }

        if (what == NATIVE_EVENT_CODEC_FORMAT_CHANGE) {
            // The native side passes the serialized metadata in a ByteBuffer.
            ByteBuffer buffer = (ByteBuffer) obj;
            buffer.order(ByteOrder.nativeOrder());
            buffer.rewind();
            AudioMetadataReadMap audioMetaData = AudioMetadata.fromByteBuffer(buffer);
            if (audioMetaData == null) {
                Log.e(TAG, "Unable to get audio metadata from byte buffer");
                return;
            }
            track.mCodecFormatChangedListeners.notify(0 /* eventCode, unused */, audioMetaData);
            return;
        }

        if (what == NATIVE_EVENT_CAN_WRITE_MORE_DATA
                || what == NATIVE_EVENT_NEW_IAUDIOTRACK
                || what == NATIVE_EVENT_STREAM_END) {
            track.handleStreamEventFromNative(what, arg1);
            return;
        }

        // All remaining events are position updates forwarded to the app's listener handler.
        NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
        if (delegate != null) {
            Handler handler = delegate.getHandler();
            if (handler != null) {
                Message m = handler.obtainMessage(what, arg1, arg2, obj);
                handler.sendMessage(m);
            }
        }
    }

    //---------------------------------------------------------
    // Native methods called from the Java side
    //--------------------

    private static native boolean native_is_direct_output_supported(int encoding, int sampleRate,
            int channelMask, int channelIndexMask, int contentType, int usage, int flags);

    // post-condition: mStreamType is overwritten with a value
    // that reflects the audio attributes (e.g.
an AudioAttributes object with a usage of
    // AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC

    //---------------------------------------------------------
    // JNI bindings implemented in android_media_AudioTrack.cpp.
    //---------------------------------------------------------

    // Creates the native AudioTrack peer.
    // sampleRate and sessionId are single-element in/out arrays: the native layer
    // reads the requested value and writes back the value actually in use.
    // Returns a status code; NOTE(review): 0 presumably means success
    // (AudioSystem.SUCCESS) -- confirm against the native implementation.
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId, @NonNull Parcel attributionSource,
            long nativeAudioTrack, boolean offload, int encapsulationMode,
            Object tunerConfiguration, @NonNull String opPackageName);

    // Releases native resources on finalization of the Java object.
    private native final void native_finalize();

    /**
     * Releases the native AudioTrack resources.
     * @hide
     */
    @UnsupportedAppUsage
    public native final void native_release();

    // Transport control: start/stop/pause playback, and flush queued data.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // write() back-ends, one per sample container type.
    // NOTE(review): each presumably returns the number of elements written or a
    // negative error code (mirroring the public write() contract) -- confirm
    // against the native implementation.
    private native final int native_write_byte(byte[] audioData,
            int offsetInBytes, int sizeInBytes, int format,
            boolean isBlocking);

    private native final int native_write_short(short[] audioData,
            int offsetInShorts, int sizeInShorts, int format,
            boolean isBlocking);

    private native final int native_write_float(float[] audioData,
            int offsetInFloats, int sizeInFloats, int format,
            boolean isBlocking);

    private native final int native_write_native_bytes(ByteBuffer audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    // Rewinds the playback head (MODE_STATIC tracks).
    private native final int native_reload_static();

    // Buffer sizing, expressed in frames.
    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    // Per-channel linear scalar gains.
    private native final void native_setVolume(float leftVolume, float rightVolume);

    // Playback rate and full PlaybackParams (speed/pitch) control.
    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // Position marker and periodic-notification configuration.
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    // Playback head position, in frames.
    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    private native final int native_get_flags();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries: no native track instance required.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary effect attachment and send level.
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // Explicit routing and routing-change callbacks.
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();

    // VolumeShaper support.
    private native int native_applyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation);

    private native @Nullable VolumeShaper.State native_getVolumeShaperState(int id);

    private native final int native_setPresentation(int presentationId, int programId);

    private native int native_getPortId();

    // Offload / direct playback tuning.
    private native void native_set_delay_padding(int delayInFrames, int paddingInFrames);

    private native int native_set_audio_description_mix_level_db(float level);
    private native int native_get_audio_description_mix_level_db(float[] level);
    private native int native_set_dual_mono_mode(int dualMonoMode);
    private native int native_get_dual_mono_mode(int[] dualMonoMode);

    private native void native_setLogSessionId(@Nullable String logSessionId);

    private native int native_setStartThresholdInFrames(int startThresholdInFrames);
    private native int native_getStartThresholdInFrames();

    /**
     * Sets the audio service Player Interface Id.
     *
     * The playerIId does not change over the lifetime of the client
     * Java AudioTrack and is set automatically on creation.
     *
     * This call informs the native AudioTrack for metrics logging purposes.
     *
     * @param id the value reported by AudioManager when registering the track.
     *     A value of -1 indicates invalid - the playerIId was never set.
     * @throws IllegalStateException if AudioTrack not initialized.
     */
    private native void native_setPlayerIId(int playerIId);

    //---------------------------------------------------------
    // Utility methods
    //------------------

    // Debug-level log helper, tagged with the class-wide TAG.
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }

    // Error-level log helper, tagged with the class-wide TAG.
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }

    /**
     * Key names for the values returned by {@link AudioTrack#getMetrics}.
     * This class holds constants only and is never instantiated.
     */
    public final static class MetricsConstants
    {
        // Not instantiable.
        private MetricsConstants() {}

        // MM_PREFIX is slightly different than TAG, used to avoid cut-n-paste errors.
        private static final String MM_PREFIX = "android.media.audiotrack.";

        /**
         * Key to extract the stream type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * This value may not exist in API level {@link android.os.Build.VERSION_CODES#P}.
         * The value is a {@code String}.
         */
        public static final String STREAMTYPE = MM_PREFIX + "streamtype";

        /**
         * Key to extract the attribute content type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String CONTENTTYPE = MM_PREFIX + "type";

        /**
         * Key to extract the attribute usage for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a {@code String}.
         */
        public static final String USAGE = MM_PREFIX + "usage";

        /**
         * Key to extract the sample rate for this track in Hz
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is an {@code int}.
         * @deprecated This does not work. Use {@link AudioTrack#getSampleRate()} instead.
         */
        // NOTE: the "audiorecord" prefix below is wrong for this class but is kept
        // intentionally -- the constant's value is public API and cannot change
        // (the key is deprecated precisely because it does not work).
        @Deprecated
        public static final String SAMPLERATE = "android.media.audiorecord.samplerate";

        /**
         * Key to extract the native channel mask information for this track
         * from the {@link AudioTrack#getMetrics} return value.
         *
         * The value is a {@code long}.
         * @deprecated This does not work. Use {@link AudioTrack#getFormat()} and read from
         * the returned format instead.
         */
        // NOTE: same intentional "audiorecord" prefix as SAMPLERATE above.
        @Deprecated
        public static final String CHANNELMASK = "android.media.audiorecord.channelmask";

        /**
         * Use for testing only. Do not expose.
         * The current sample rate.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String SAMPLE_RATE = MM_PREFIX + "sampleRate";

        /**
         * Use for testing only. Do not expose.
         * The native channel mask.
         * The value is a {@code long}.
         * @hide
         */
        @TestApi
        public static final String CHANNEL_MASK = MM_PREFIX + "channelMask";

        /**
         * Use for testing only. Do not expose.
         * The output audio data encoding.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ENCODING = MM_PREFIX + "encoding";

        /**
         * Use for testing only. Do not expose.
         * The port id of this track port in audioserver.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String PORT_ID = MM_PREFIX + "portId";

        /**
         * Use for testing only. Do not expose.
         * The buffer frameCount.
         * The value is an {@code int}.
         * @hide
         */
        @TestApi
        public static final String FRAME_COUNT = MM_PREFIX + "frameCount";

        /**
         * Use for testing only. Do not expose.
         * The actual track attributes used.
         * The value is a {@code String}.
         * @hide
         */
        @TestApi
        public static final String ATTRIBUTES = MM_PREFIX + "attributes";
    }
}