/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.media;

import android.annotation.IntDef;
import android.annotation.NonNull;
import android.annotation.TestApi;
import android.os.Parcel;
import android.os.Parcelable;

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.Arrays;
import java.util.Objects;

/**
 * The {@link AudioFormat} class is used to access a number of audio format and
 * channel configuration constants. They are for instance used
 * in {@link AudioTrack} and {@link AudioRecord}, as valid values in individual parameters of
 * constructors like {@link AudioTrack#AudioTrack(int, int, int, int, int, int)}, where the fourth
 * parameter is one of the <code>AudioFormat.ENCODING_*</code> constants.
 * The <code>AudioFormat</code> constants are also used in {@link MediaFormat} to specify
 * audio related values commonly used in media, such as for {@link MediaFormat#KEY_CHANNEL_MASK}.
 * <p>The {@link AudioFormat.Builder} class can be used to create instances of
 * the <code>AudioFormat</code> format class.
 * Refer to
 * {@link AudioFormat.Builder} for documentation on the mechanics of the configuration and building
 * of such instances. Here we describe the main concepts that the <code>AudioFormat</code> class
 * allows you to convey in each instance; they are:
 * <ol>
 * <li><a href="#sampleRate">sample rate</a>
 * <li><a href="#encoding">encoding</a>
 * <li><a href="#channelMask">channel masks</a>
 * </ol>
 * <p>Closely associated with the <code>AudioFormat</code> is the notion of an
 * <a href="#audioFrame">audio frame</a>, which is used throughout the documentation
 * to represent the minimum size complete unit of audio data.
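 * <p>As a minimal sketch of how these concepts come together, an <code>AudioFormat</code>
 * describing 48 kHz stereo float PCM could be built as follows (the particular values are
 * only an illustration):
 * <pre>{@code
 * AudioFormat format = new AudioFormat.Builder()
 *         .setEncoding(AudioFormat.ENCODING_PCM_FLOAT)
 *         .setSampleRate(48000)
 *         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
 *         .build();
 * }</pre>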
 *
 * <h4 id="sampleRate">Sample rate</h4>
 * <p>Expressed in Hz, the sample rate in an <code>AudioFormat</code> instance expresses the number
 * of audio samples for each channel per second in the content you are playing or recording. It is
 * not the sample rate
 * at which content is rendered or produced. For instance a sound at a media sample rate of 8000Hz
 * can be played on a device operating at a sample rate of 48000Hz; the sample rate conversion is
 * automatically handled by the platform; it will not play at 6x speed.
 *
 * <p>As of API {@link android.os.Build.VERSION_CODES#M},
 * sample rates up to 192kHz are supported
 * for <code>AudioRecord</code> and <code>AudioTrack</code>, with sample rate conversion
 * performed as needed.
 * To improve efficiency and avoid lossy conversions, it is recommended to match the sample rate
 * for <code>AudioRecord</code> and <code>AudioTrack</code> to the endpoint device
 * sample rate, and limit the sample rate to no more than 48kHz unless there are special
 * device capabilities that warrant a higher rate.
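 * <p>For example, a playback format can be matched to the device output rate along these
 * lines (a simple sketch; how the endpoint rate is queried may vary by use case):
 * <pre>{@code
 * int deviceRate = AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_MUSIC);
 * AudioFormat format = new AudioFormat.Builder()
 *         .setSampleRate(deviceRate)
 *         .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
 *         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
 *         .build();
 * }</pre>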
 *
 * <h4 id="encoding">Encoding</h4>
 * <p>Audio encoding is used to describe the bit representation of audio data, which can be
 * either linear PCM or compressed audio, such as AC3 or DTS.
 * <p>For linear PCM, the audio encoding describes the sample size, 8 bits, 16 bits, or 32 bits,
 * and the sample representation, integer or float.
 * <ul>
 * <li> {@link #ENCODING_PCM_8BIT}: The audio sample is an 8 bit unsigned integer in the
 * range [0, 255], with a 128 offset for zero. This is typically stored as a Java byte in a
 * byte array or ByteBuffer. Since the Java byte is <em>signed</em>,
 * be careful with math operations and conversions as the most significant bit is inverted.
 * </li>
 * <li> {@link #ENCODING_PCM_16BIT}: The audio sample is a 16 bit signed integer
 * typically stored as a Java short in a short array, but when the short
 * is stored in a ByteBuffer, it is native endian (as compared to the default Java big endian).
 * The short has full range from [-32768, 32767],
 * and is sometimes interpreted as fixed point Q.15 data.
 * </li>
 * <li> {@link #ENCODING_PCM_FLOAT}: Introduced in
 * API {@link android.os.Build.VERSION_CODES#LOLLIPOP}, this encoding specifies that
 * the audio sample is a 32 bit IEEE single precision float. The sample can be
 * manipulated as a Java float in a float array, though within a ByteBuffer
 * it is stored in native endian byte order.
 * The nominal range of <code>ENCODING_PCM_FLOAT</code> audio data is [-1.0, 1.0].
 * It is implementation dependent whether the positive maximum of 1.0 is included
 * in the interval. Values outside of the nominal range are clamped before
 * sending to the endpoint device. Beware that
 * the handling of NaN is undefined; subnormals may be treated as zero; and
 * infinities are generally clamped just like other values for <code>AudioTrack</code>
 * &ndash; try to avoid infinities because they can easily generate a NaN.
 * <br>
 * To achieve higher audio bit depth than a signed 16 bit integer short,
 * it is recommended to use <code>ENCODING_PCM_FLOAT</code> for audio capture, processing,
 * and playback.
 * Floats are efficiently manipulated by modern CPUs,
 * have greater precision than 24 bit signed integers,
 * and have greater dynamic range than 32 bit signed integers.
 * <code>AudioRecord</code> as of API {@link android.os.Build.VERSION_CODES#M} and
 * <code>AudioTrack</code> as of API {@link android.os.Build.VERSION_CODES#LOLLIPOP}
 * support <code>ENCODING_PCM_FLOAT</code>.
 * </li>
 * </ul>
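 * <p>As a rough illustration of how these sample representations relate, one common
 * convention for converting the integer encodings to the nominal float range is
 * sketched below (other scalings are possible; this is not a normative mapping):
 * <pre>{@code
 * // ENCODING_PCM_16BIT sample (Q.15) to the ENCODING_PCM_FLOAT nominal range
 * float fromPcm16(short s) {
 *     return s * (1.0f / 32768.0f);
 * }
 * // ENCODING_PCM_8BIT sample (unsigned, 128 offset) to the ENCODING_PCM_FLOAT nominal range
 * float fromPcm8(byte b) {
 *     return ((b & 0xFF) - 128) * (1.0f / 128.0f);
 * }
 * }</pre>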
 * <p>For compressed audio, the encoding specifies the method of compression,
 * for example {@link #ENCODING_AC3} and {@link #ENCODING_DTS}. The compressed
 * audio data is typically stored as bytes in
 * a byte array or ByteBuffer. When a compressed audio encoding is specified
 * for an <code>AudioTrack</code>, it creates a direct (non-mixed) track
 * for output to an endpoint (such as HDMI) capable of decoding the compressed audio.
 * For (most) other endpoints, which are not capable of decoding such compressed audio,
 * you will need to decode the data first, typically by creating a {@link MediaCodec}.
 * Alternatively, one may use {@link MediaPlayer} for playback of compressed
 * audio files or streams.
 * <p>When compressed audio is sent out through a direct <code>AudioTrack</code>,
 * it need not be written in exact multiples of the audio access unit;
 * this differs from <code>MediaCodec</code> input buffers.
 *
 * <h4 id="channelMask">Channel mask</h4>
 * <p>Channel masks are used in <code>AudioTrack</code> and <code>AudioRecord</code> to describe
 * the samples and their arrangement in the audio frame. They are also used in the endpoint (e.g.
 * a USB audio interface, a DAC connected to headphones) to specify allowable configurations of a
 * particular device.
 * <br>As of API {@link android.os.Build.VERSION_CODES#M}, there are two types of channel masks:
 * channel position masks and channel index masks.
 *
 * <h5 id="channelPositionMask">Channel position masks</h5>
 * Channel position masks are the original Android channel masks, and are used since API
 * {@link android.os.Build.VERSION_CODES#BASE}.
 * For input and output, they imply a positional nature - the location of a speaker or a microphone
 * for recording or playback.
 * <br>For a channel position mask, each allowed channel position corresponds to a bit in the
 * channel mask. If that channel position is present in the audio frame, that bit is set,
 * otherwise it is zero. The order of the bits (from lsb to msb) corresponds to the order of that
 * position's sample in the audio frame.
 * <br>The canonical channel position masks by channel count are as follows:
 * <br><table>
 * <tr><td>channel count</td><td>channel position mask</td></tr>
 * <tr><td>1</td><td>{@link #CHANNEL_OUT_MONO}</td></tr>
 * <tr><td>2</td><td>{@link #CHANNEL_OUT_STEREO}</td></tr>
 * <tr><td>3</td><td>{@link #CHANNEL_OUT_STEREO} | {@link #CHANNEL_OUT_FRONT_CENTER}</td></tr>
 * <tr><td>4</td><td>{@link #CHANNEL_OUT_QUAD}</td></tr>
 * <tr><td>5</td><td>{@link #CHANNEL_OUT_QUAD} | {@link #CHANNEL_OUT_FRONT_CENTER}</td></tr>
 * <tr><td>6</td><td>{@link #CHANNEL_OUT_5POINT1}</td></tr>
 * <tr><td>7</td><td>{@link #CHANNEL_OUT_5POINT1} | {@link #CHANNEL_OUT_BACK_CENTER}</td></tr>
 * <tr><td>8</td><td>{@link #CHANNEL_OUT_7POINT1_SURROUND}</td></tr>
 * </table>
 * <br>These masks are an ORed composite of individual channel masks. For example
 * {@link #CHANNEL_OUT_STEREO} is composed of {@link #CHANNEL_OUT_FRONT_LEFT} and
 * {@link #CHANNEL_OUT_FRONT_RIGHT}.
 *
 * <h5 id="channelIndexMask">Channel index masks</h5>
 * Channel index masks are introduced in API {@link android.os.Build.VERSION_CODES#M}. They allow
 * the selection of a particular channel from the source or sink endpoint by number, i.e. the first
 * channel, the second channel, and so forth. This avoids problems with artificially assigning
 * positions to channels of an endpoint, or figuring out what the i<sup>th</sup> position bit is within
 * an endpoint's channel position mask etc.
 * <br>Here's an example where channel index masks address this confusion: dealing with a 4 channel
 * USB device. Using a position mask, and based on the channel count, this would be a
 * {@link #CHANNEL_OUT_QUAD} device, but really one is only interested in channel 0
 * through channel 3. The USB device would then have the following individual bit channel masks:
 * {@link #CHANNEL_OUT_FRONT_LEFT},
 * {@link #CHANNEL_OUT_FRONT_RIGHT}, {@link #CHANNEL_OUT_BACK_LEFT}
 * and {@link #CHANNEL_OUT_BACK_RIGHT}. But which is channel 0 and which is
 * channel 3?
 * <br>For a channel index mask, each channel number is represented as a bit in the mask, from the
 * lsb (channel 0) upwards to the msb, numerically this bit value is
 * <code>1 << channelNumber</code>.
 * A set bit indicates that channel is present in the audio frame, otherwise it is cleared.
 * The order of the bits also corresponds to that channel number's sample order in the audio frame.
 * <br>For the previous 4 channel USB device example, the device would have a channel index mask
 * <code>0xF</code>. Suppose we wanted to select only the first and the third channels; this would
 * correspond to a channel index mask <code>0x5</code> (the first and third bits set). If an
 * <code>AudioTrack</code> uses this channel index mask, the audio frame would consist of two
 * samples, the first sample of each frame routed to channel 0, and the second sample of each frame
 * routed to channel 2.
 * The canonical channel index masks by channel count are given by the formula
 * <code>(1 << channelCount) - 1</code>.
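 * <p>The two-channel selection just described could be expressed with the
 * {@link AudioFormat.Builder} as in the following sketch (sample rate and encoding are
 * placeholders chosen only for illustration):
 * <pre>{@code
 * // Select only the first (0) and third (2) endpoint channels: bits 0 and 2 -> 0x5.
 * AudioFormat format = new AudioFormat.Builder()
 *         .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
 *         .setSampleRate(48000)
 *         .setChannelIndexMask(0x5)
 *         .build();
 * }</pre>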
 *
 * <h5>Use cases</h5>
 * <ul>
 * <li><i>Channel position mask for an endpoint:</i> <code>CHANNEL_OUT_FRONT_LEFT</code>,
 *  <code>CHANNEL_OUT_FRONT_CENTER</code>, etc. for HDMI home theater purposes.
 * <li><i>Channel position mask for an audio stream:</i> Creating an <code>AudioTrack</code>
 *  to output movie content, where 5.1 multichannel output is to be written.
 * <li><i>Channel index mask for an endpoint:</i> USB devices for which input and output do not
 *  correspond to left or right speaker or microphone.
 * <li><i>Channel index mask for an audio stream:</i> An <code>AudioRecord</code> may only want the
 *  third and fourth audio channels of the endpoint (i.e. the second channel pair), and not care
 *  about the position it corresponds to, in which case the channel index mask is <code>0xC</code>.
 *  Multichannel <code>AudioRecord</code> sessions should use channel index masks.
 * </ul>
 * <h4 id="audioFrame">Audio Frame</h4>
 * <p>For linear PCM, an audio frame consists of a set of samples captured at the same time,
 * whose count and
 * channel association are given by the <a href="#channelMask">channel mask</a>,
 * and whose sample contents are specified by the <a href="#encoding">encoding</a>.
 * For example, a stereo 16 bit PCM frame consists of
 * two 16 bit linear PCM samples, with a frame size of 4 bytes.
 * For compressed audio, an audio frame may alternately
 * refer to an access unit of compressed data bytes that is logically grouped together for
 * decoding and bitstream access (e.g. {@link MediaCodec}),
 * or a single byte of compressed data (e.g. {@link AudioTrack#getBufferSizeInFrames()
 * AudioTrack.getBufferSizeInFrames()}),
 * or the linear PCM frame result from decoding the compressed data
 * (e.g. {@link AudioTrack#getPlaybackHeadPosition()
 * AudioTrack.getPlaybackHeadPosition()}),
 * depending on the context where audio frame is used.
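 * <p>For linear PCM the frame size follows directly from the encoding and the channel count,
 * as in this small sketch (a hypothetical helper, shown only to make the arithmetic explicit):
 * <pre>{@code
 * // e.g. ENCODING_PCM_16BIT (2 bytes per sample) with 2 channels -> 4 bytes per frame
 * int pcmFrameSizeInBytes(int bytesPerSample, int channelCount) {
 *     return bytesPerSample * channelCount;
 * }
 * }</pre>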
 */
public final class AudioFormat implements Parcelable {

    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Invalid audio data format */
    public static final int ENCODING_INVALID = 0;
    /** Default audio data format */
    public static final int ENCODING_DEFAULT = 1;

    // These values must be kept in sync with core/jni/android_media_AudioFormat.h
    // Also sync av/services/audiopolicy/managerdefault/ConfigParsingUtils.h
    /** Audio data format: PCM 16 bit per sample. Guaranteed to be supported by devices. */
    public static final int ENCODING_PCM_16BIT = 2;
    /** Audio data format: PCM 8 bit per sample. Not guaranteed to be supported by devices. */
    public static final int ENCODING_PCM_8BIT = 3;
    /** Audio data format: single-precision floating-point per sample */
    public static final int ENCODING_PCM_FLOAT = 4;
    /** Audio data format: AC-3 compressed */
    public static final int ENCODING_AC3 = 5;
    /** Audio data format: E-AC-3 compressed */
    public static final int ENCODING_E_AC3 = 6;
    /** Audio data format: DTS compressed */
    public static final int ENCODING_DTS = 7;
    /** Audio data format: DTS HD compressed */
    public static final int ENCODING_DTS_HD = 8;
    /** Audio data format: MP3 compressed */
    public static final int ENCODING_MP3 = 9;
    /** Audio data format: AAC LC compressed */
    public static final int ENCODING_AAC_LC = 10;
    /** Audio data format: AAC HE V1 compressed */
    public static final int ENCODING_AAC_HE_V1 = 11;
    /** Audio data format: AAC HE V2 compressed */
    public static final int ENCODING_AAC_HE_V2 = 12;

    /** Audio data format: compressed audio wrapped in PCM for HDMI
     * or S/PDIF passthrough.
     * IEC61937 uses a stereo stream of 16-bit samples as the wrapper.
     * So the channel mask for the track must be {@link #CHANNEL_OUT_STEREO}.
     * Data should be written to the stream in a short[] array.
     * If the data is written in a byte[] array then there may be endian problems
     * on some platforms when converting to short internally.
     */
    public static final int ENCODING_IEC61937 = 13;
    /** Audio data format: DOLBY TRUEHD compressed
     **/
    public static final int ENCODING_DOLBY_TRUEHD = 14;
    /** Audio data format: AAC ELD compressed */
    public static final int ENCODING_AAC_ELD = 15;
    /** Audio data format: AAC xHE compressed */
    public static final int ENCODING_AAC_XHE = 16;
    /** Audio data format: AC-4 sync frame transport format */
    public static final int ENCODING_AC4 = 17;
    /** Audio data format: E-AC-3-JOC compressed
     * E-AC-3-JOC streams can be decoded by downstream devices supporting {@link #ENCODING_E_AC3}.
     * Use {@link #ENCODING_E_AC3} as the AudioTrack encoding when the downstream device
     * supports {@link #ENCODING_E_AC3} but not {@link #ENCODING_E_AC3_JOC}.
     **/
    public static final int ENCODING_E_AC3_JOC = 18;

    /** @hide */
    public static String toLogFriendlyEncoding(int enc) {
        switch(enc) {
            case ENCODING_INVALID:
                return "ENCODING_INVALID";
            case ENCODING_PCM_16BIT:
                return "ENCODING_PCM_16BIT";
            case ENCODING_PCM_8BIT:
                return "ENCODING_PCM_8BIT";
            case ENCODING_PCM_FLOAT:
                return "ENCODING_PCM_FLOAT";
            case ENCODING_AC3:
                return "ENCODING_AC3";
            case ENCODING_E_AC3:
                return "ENCODING_E_AC3";
            case ENCODING_DTS:
                return "ENCODING_DTS";
            case ENCODING_DTS_HD:
                return "ENCODING_DTS_HD";
            case ENCODING_MP3:
                return "ENCODING_MP3";
            case ENCODING_AAC_LC:
                return "ENCODING_AAC_LC";
            case ENCODING_AAC_HE_V1:
                return "ENCODING_AAC_HE_V1";
            case ENCODING_AAC_HE_V2:
                return "ENCODING_AAC_HE_V2";
            case ENCODING_IEC61937:
                return "ENCODING_IEC61937";
            case ENCODING_DOLBY_TRUEHD:
                return "ENCODING_DOLBY_TRUEHD";
            case ENCODING_AAC_ELD:
                return "ENCODING_AAC_ELD";
            case ENCODING_AAC_XHE:
                return "ENCODING_AAC_XHE";
            case ENCODING_AC4:
                return "ENCODING_AC4";
            default :
                return "invalid encoding " + enc;
        }
    }

    /** Invalid audio channel configuration */
    /** @deprecated Use {@link #CHANNEL_INVALID} instead.  */
    @Deprecated    public static final int CHANNEL_CONFIGURATION_INVALID   = 0;
    /** Default audio channel configuration */
    /** @deprecated Use {@link #CHANNEL_OUT_DEFAULT} or {@link #CHANNEL_IN_DEFAULT} instead.  */
    @Deprecated    public static final int CHANNEL_CONFIGURATION_DEFAULT   = 1;
    /** Mono audio configuration */
    /** @deprecated Use {@link #CHANNEL_OUT_MONO} or {@link #CHANNEL_IN_MONO} instead.  */
    @Deprecated    public static final int CHANNEL_CONFIGURATION_MONO      = 2;
    /** Stereo (2 channel) audio configuration */
    /** @deprecated Use {@link #CHANNEL_OUT_STEREO} or {@link #CHANNEL_IN_STEREO} instead.  */
    @Deprecated    public static final int CHANNEL_CONFIGURATION_STEREO    = 3;

    /** Invalid audio channel mask */
    public static final int CHANNEL_INVALID = 0;
    /** Default audio channel mask */
    public static final int CHANNEL_OUT_DEFAULT = 1;

    // Output channel mask definitions below are translated to the native values defined in
    //  in /system/media/audio/include/system/audio.h in the JNI code of AudioTrack
    public static final int CHANNEL_OUT_FRONT_LEFT = 0x4;
    public static final int CHANNEL_OUT_FRONT_RIGHT = 0x8;
    public static final int CHANNEL_OUT_FRONT_CENTER = 0x10;
    public static final int CHANNEL_OUT_LOW_FREQUENCY = 0x20;
    public static final int CHANNEL_OUT_BACK_LEFT = 0x40;
    public static final int CHANNEL_OUT_BACK_RIGHT = 0x80;
    public static final int CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x100;
    public static final int CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x200;
    public static final int CHANNEL_OUT_BACK_CENTER = 0x400;
    public static final int CHANNEL_OUT_SIDE_LEFT =         0x800;
    public static final int CHANNEL_OUT_SIDE_RIGHT =       0x1000;
    /** @hide */
    public static final int CHANNEL_OUT_TOP_CENTER =       0x2000;
    /** @hide */
    public static final int CHANNEL_OUT_TOP_FRONT_LEFT =   0x4000;
    /** @hide */
    public static final int CHANNEL_OUT_TOP_FRONT_CENTER = 0x8000;
    /** @hide */
    public static final int CHANNEL_OUT_TOP_FRONT_RIGHT = 0x10000;
    /** @hide */
    public static final int CHANNEL_OUT_TOP_BACK_LEFT =   0x20000;
    /** @hide */
    public static final int CHANNEL_OUT_TOP_BACK_CENTER = 0x40000;
    /** @hide */
    public static final int CHANNEL_OUT_TOP_BACK_RIGHT =  0x80000;

    public static final int CHANNEL_OUT_MONO = CHANNEL_OUT_FRONT_LEFT;
    public static final int CHANNEL_OUT_STEREO = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT);
    // aka QUAD_BACK
    public static final int CHANNEL_OUT_QUAD = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
            CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT);
    /** @hide */
    public static final int CHANNEL_OUT_QUAD_SIDE = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
            CHANNEL_OUT_SIDE_LEFT | CHANNEL_OUT_SIDE_RIGHT);
    public static final int CHANNEL_OUT_SURROUND = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
            CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_BACK_CENTER);
    // aka 5POINT1_BACK
    public static final int CHANNEL_OUT_5POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
            CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT);
    /** @hide */
    public static final int CHANNEL_OUT_5POINT1_SIDE = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
            CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY |
            CHANNEL_OUT_SIDE_LEFT | CHANNEL_OUT_SIDE_RIGHT);
    // different from AUDIO_CHANNEL_OUT_7POINT1 used internally, and not accepted by AudioRecord.
    /** @deprecated Not the typical 7.1 surround configuration. Use {@link #CHANNEL_OUT_7POINT1_SURROUND} instead. */
    @Deprecated    public static final int CHANNEL_OUT_7POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
            CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
            CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER);
    // matches AUDIO_CHANNEL_OUT_7POINT1
    public static final int CHANNEL_OUT_7POINT1_SURROUND = (
            CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_FRONT_RIGHT |
            CHANNEL_OUT_SIDE_LEFT | CHANNEL_OUT_SIDE_RIGHT |
            CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
            CHANNEL_OUT_LOW_FREQUENCY);
    // CHANNEL_OUT_ALL is not yet defined; if added then it should match AUDIO_CHANNEL_OUT_ALL

    /** Minimum value for sample rate,
     *  assuming AudioTrack and AudioRecord share the same limitations.
     * @hide
     */
    // never unhide
    public static final int SAMPLE_RATE_HZ_MIN = 4000;
    /** Maximum value for sample rate,
     *  assuming AudioTrack and AudioRecord share the same limitations.
     * @hide
     */
    // never unhide
    public static final int SAMPLE_RATE_HZ_MAX = 192000;
    /** Sample rate will be a route-dependent value.
     * For AudioTrack, it is usually the sink sample rate,
     * and for AudioRecord it is usually the source sample rate.
     */
    public static final int SAMPLE_RATE_UNSPECIFIED = 0;

    /**
     * @hide
     * Return the input channel mask corresponding to an output channel mask.
     * This can be used for submix rerouting for the mask of the recorder to map to that of the mix.
     * @param outMask a combination of the CHANNEL_OUT_* definitions, but not CHANNEL_OUT_DEFAULT
     * @return a combination of CHANNEL_IN_* definitions matching an output channel mask
     * @throws IllegalArgumentException
     */
    public static int inChannelMaskFromOutChannelMask(int outMask) throws IllegalArgumentException {
        if (outMask == CHANNEL_OUT_DEFAULT) {
            throw new IllegalArgumentException(
                    "Illegal CHANNEL_OUT_DEFAULT channel mask for input.");
        }
        switch (channelCountFromOutChannelMask(outMask)) {
            case 1:
                return CHANNEL_IN_MONO;
            case 2:
                return CHANNEL_IN_STEREO;
            default:
                throw new IllegalArgumentException("Unsupported channel configuration for input.");
        }
    }

    /**
     * @hide
     * Return the number of channels from an input channel mask
     * @param mask a combination of the CHANNEL_IN_* definitions, even CHANNEL_IN_DEFAULT
     * @return number of channels for the mask
     */
    @TestApi
    public static int channelCountFromInChannelMask(int mask) {
        return Integer.bitCount(mask);
    }
    /**
     * @hide
     * Return the number of channels from an output channel mask
     * @param mask a combination of the CHANNEL_OUT_* definitions, but not CHANNEL_OUT_DEFAULT
     * @return number of channels for the mask
     */
    @TestApi
    public static int channelCountFromOutChannelMask(int mask) {
        return Integer.bitCount(mask);
    }
    /**
     * @hide
     * Return a channel mask ready to be used by native code
     * @param mask a combination of the CHANNEL_OUT_* definitions, but not CHANNEL_OUT_DEFAULT
     * @return a native channel mask
     */
    public static int convertChannelOutMaskToNativeMask(int javaMask) {
        return (javaMask >> 2);
    }

    /**
     * @hide
     * Return a java output channel mask
     * @param mask a native channel mask
     * @return a combination of the CHANNEL_OUT_* definitions
     */
    public static int convertNativeChannelMaskToOutMask(int nativeMask) {
        return (nativeMask << 2);
    }

    public static final int CHANNEL_IN_DEFAULT = 1;
    // These directly match native
    public static final int CHANNEL_IN_LEFT = 0x4;
    public static final int CHANNEL_IN_RIGHT = 0x8;
    public static final int CHANNEL_IN_FRONT = 0x10;
    public static final int CHANNEL_IN_BACK = 0x20;
    public static final int CHANNEL_IN_LEFT_PROCESSED = 0x40;
    public static final int CHANNEL_IN_RIGHT_PROCESSED = 0x80;
    public static final int CHANNEL_IN_FRONT_PROCESSED = 0x100;
    public static final int CHANNEL_IN_BACK_PROCESSED = 0x200;
    public static final int CHANNEL_IN_PRESSURE = 0x400;
    public static final int CHANNEL_IN_X_AXIS = 0x800;
    public static final int CHANNEL_IN_Y_AXIS = 0x1000;
    public static final int CHANNEL_IN_Z_AXIS = 0x2000;
    public static final int CHANNEL_IN_VOICE_UPLINK = 0x4000;
    public static final int CHANNEL_IN_VOICE_DNLINK = 0x8000;
    public static final int CHANNEL_IN_MONO = CHANNEL_IN_FRONT;
    public static final int CHANNEL_IN_STEREO = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT);
    /** @hide */
    public static final int CHANNEL_IN_FRONT_BACK = CHANNEL_IN_FRONT | CHANNEL_IN_BACK;
    // CHANNEL_IN_ALL is not yet defined; if added then it should match AUDIO_CHANNEL_IN_ALL

    /** @hide */
    @TestApi
    public static int getBytesPerSample(int audioFormat)
    {
        switch (audioFormat) {
        case ENCODING_PCM_8BIT:
            return 1;
        case ENCODING_PCM_16BIT:
        case ENCODING_IEC61937:
        case ENCODING_DEFAULT:
            return 2;
        case ENCODING_PCM_FLOAT:
            return 4;
        case ENCODING_INVALID:
        default:
            throw new IllegalArgumentException("Bad audio format " + audioFormat);
        }
    }

    /** @hide */
    public static boolean isValidEncoding(int audioFormat)
    {
        switch (audioFormat) {
        case ENCODING_PCM_8BIT:
        case ENCODING_PCM_16BIT:
        case ENCODING_PCM_FLOAT:
        case ENCODING_AC3:
        case ENCODING_E_AC3:
        case ENCODING_E_AC3_JOC:
        case ENCODING_DTS:
        case ENCODING_DTS_HD:
        case ENCODING_MP3:
        case ENCODING_AAC_LC:
        case ENCODING_AAC_HE_V1:
        case ENCODING_AAC_HE_V2:
        case ENCODING_IEC61937:
        case ENCODING_AAC_ELD:
        case ENCODING_AAC_XHE:
        case ENCODING_AC4:
            return true;
        default:
            return false;
        }
    }

    /** @hide */
    public static boolean isPublicEncoding(int audioFormat)
    {
        switch (audioFormat) {
        case ENCODING_PCM_8BIT:
        case ENCODING_PCM_16BIT:
        case ENCODING_PCM_FLOAT:
        case ENCODING_AC3:
        case ENCODING_E_AC3:
        case ENCODING_E_AC3_JOC:
        case ENCODING_DTS:
        case ENCODING_DTS_HD:
        case ENCODING_IEC61937:
        case ENCODING_MP3:
        case ENCODING_AAC_LC:
        case ENCODING_AAC_HE_V1:
        case ENCODING_AAC_HE_V2:
        case ENCODING_AAC_ELD:
        case ENCODING_AAC_XHE:
        case ENCODING_AC4:
            return true;
        default:
            return false;
        }
    }

    /** @hide */
    @TestApi
    public static boolean isEncodingLinearPcm(int audioFormat)
    {
        switch (audioFormat) {
        case ENCODING_PCM_8BIT:
        case ENCODING_PCM_16BIT:
        case ENCODING_PCM_FLOAT:
        case ENCODING_DEFAULT:
            return true;
        case ENCODING_AC3:
        case ENCODING_E_AC3:
        case ENCODING_E_AC3_JOC:
        case ENCODING_DTS:
        case ENCODING_DTS_HD:
        case ENCODING_MP3:
        case ENCODING_AAC_LC:
        case ENCODING_AAC_HE_V1:
        case ENCODING_AAC_HE_V2:
        case ENCODING_IEC61937: // wrapped in PCM but compressed
        case ENCODING_AAC_ELD:
        case ENCODING_AAC_XHE:
        case ENCODING_AC4:
            return false;
        case ENCODING_INVALID:
        default:
            throw new IllegalArgumentException("Bad audio format " + audioFormat);
        }
    }

    /** @hide */
    public static boolean isEncodingLinearFrames(int audioFormat)
    {
        switch (audioFormat) {
        case ENCODING_PCM_8BIT:
        case ENCODING_PCM_16BIT:
        case ENCODING_PCM_FLOAT:
        case ENCODING_IEC61937: // same size as stereo PCM
        case ENCODING_DEFAULT:
            return true;
        case ENCODING_AC3:
        case ENCODING_E_AC3:
        case ENCODING_E_AC3_JOC:
        case ENCODING_DTS:
        case ENCODING_DTS_HD:
        case ENCODING_MP3:
        case ENCODING_AAC_LC:
        case ENCODING_AAC_HE_V1:
        case ENCODING_AAC_HE_V2:
        case ENCODING_AAC_ELD:
        case ENCODING_AAC_XHE:
        case ENCODING_AC4:
            return false;
        case ENCODING_INVALID:
        default:
            throw new IllegalArgumentException("Bad audio format " + audioFormat);
        }
    }
    /**
     * Returns an array of public encoding values extracted from an array of
     * encoding values.
     * @hide
     */
    public static int[] filterPublicFormats(int[] formats) {
        if (formats == null) {
            return null;
        }
        int[] myCopy = Arrays.copyOf(formats, formats.length);
        int size = 0;
        for (int i = 0; i < myCopy.length; i++) {
            if (isPublicEncoding(myCopy[i])) {
                if (size != i) {
                    myCopy[size] = myCopy[i];
                }
                size++;
            }
        }
        return Arrays.copyOf(myCopy, size);
    }

    /** @removed */
    public AudioFormat()
    {
        throw new UnsupportedOperationException("There is no valid usage of this constructor");
    }

    /**
     * Private constructor with an ignored argument to differentiate from the removed default ctor
     * @param ignoredArgument
     */
    private AudioFormat(int ignoredArgument) {
    }

    /**
     * Constructor used by the JNI.  Parameters are not checked for validity.
     */
    // Update sound trigger JNI in core/jni/android_hardware_SoundTrigger.cpp when modifying this
    // constructor
    private AudioFormat(int encoding, int sampleRate, int channelMask, int channelIndexMask) {
        mEncoding = encoding;
        mSampleRate = sampleRate;
        mChannelMask = channelMask;
        mChannelIndexMask = channelIndexMask;
        mPropertySetMask = AUDIO_FORMAT_HAS_PROPERTY_ENCODING |
                AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE |
                AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK |
                AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK;
    }

    /** @hide */
    public final static int AUDIO_FORMAT_HAS_PROPERTY_NONE = 0x0;
    /** @hide */
    public final static int AUDIO_FORMAT_HAS_PROPERTY_ENCODING = 0x1 << 0;
    /** @hide */
    public final static int AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE = 0x1 << 1;
    /** @hide */
    public final static int AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK = 0x1 << 2;
    /** @hide */
    public final static int AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK = 0x1 << 3;

    private int mEncoding;
    private int mSampleRate;
    private int mChannelMask;
    private int mChannelIndexMask;
    private int mPropertySetMask;

    /**
     * Return the encoding.
     * See the section on <a href="#encoding">encodings</a> for more information about the different
     * types of supported audio encoding.
     * @return one of the values that can be set in {@link Builder#setEncoding(int)} or
     * {@link AudioFormat#ENCODING_INVALID} if not set.
     */
    public int getEncoding() {
        if ((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_ENCODING) == 0) {
            return ENCODING_INVALID;
        }
        return mEncoding;
    }

    /**
     * Return the sample rate.
     * @return one of the values that can be set in {@link Builder#setSampleRate(int)} or
     * {@link #SAMPLE_RATE_UNSPECIFIED} if not set.
     */
    public int getSampleRate() {
        return mSampleRate;
    }

    /**
     * Return the channel mask.
     * See the section on <a href="#channelMask">channel masks</a> for more information about
     * the difference between index-based masks (as returned by {@link #getChannelIndexMask()}) and
     * the position-based mask returned by this function.
     * @return one of the values that can be set in {@link Builder#setChannelMask(int)} or
     * {@link AudioFormat#CHANNEL_INVALID} if not set.
     */
    public int getChannelMask() {
        if ((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) == 0) {
            return CHANNEL_INVALID;
        }
        return mChannelMask;
    }

    /**
     * Return the channel index mask.
     * See the section on <a href="#channelMask">channel masks</a> for more information about
     * the difference between index-based masks, and position-based masks (as returned
     * by {@link #getChannelMask()}).
     * @return one of the values that can be set in {@link Builder#setChannelIndexMask(int)} or
     * {@link AudioFormat#CHANNEL_INVALID} if not set or an invalid mask was used.
     */
    public int getChannelIndexMask() {
        if ((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) == 0) {
            return CHANNEL_INVALID;
        }
        return mChannelIndexMask;
    }

    /**
     * Return the channel count.
     * @return the channel count derived from the channel position mask or the channel index mask.
     * Zero is returned if both the channel position mask and the channel index mask are not set.
     */
    public int getChannelCount() {
        final int channelIndexCount = Integer.bitCount(getChannelIndexMask());
        int channelCount = channelCountFromOutChannelMask(getChannelMask());
        if (channelCount == 0) {
            channelCount = channelIndexCount;
        } else if (channelCount != channelIndexCount && channelIndexCount != 0) {
            channelCount = 0; // position and index channel count mismatch
        }
        return channelCount;
    }

    /** @hide */
    public int getPropertySetMask() {
        return mPropertySetMask;
    }

    /** @hide */
    public String toLogFriendlyString() {
        return String.format("%dch %dHz %s",
                getChannelCount(), mSampleRate, toLogFriendlyEncoding(mEncoding));
    }

    /**
     * Builder class for {@link AudioFormat} objects.
     * Use this class to configure and create an AudioFormat instance. By setting format
     * characteristics such as audio encoding, channel mask or sample rate, you indicate which
     * of those are to vary from the default behavior on this device wherever this audio format
     * is used. See {@link AudioFormat} for a complete description of the different parameters that
     * can be used to configure an <code>AudioFormat</code> instance.
     * <p>{@link AudioFormat} is for instance used in
     * {@link AudioTrack#AudioTrack(AudioAttributes, AudioFormat, int, int, int)}. In this
     * constructor, every format characteristic set on the <code>Builder</code> (e.g. with
     * {@link #setSampleRate(int)}) will alter the default values used by an
     * <code>AudioTrack</code>. In this case for audio playback with <code>AudioTrack</code>, the
     * sample rate set in the <code>Builder</code> would override the platform output sample rate
     * which would otherwise be selected by default.
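     * <p>A short sketch of that intended usage (buffer size, attributes, and the particular
     * values are placeholders chosen only for illustration):
     * <pre>{@code
     * AudioFormat format = new AudioFormat.Builder()
     *         .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
     *         .setSampleRate(44100)
     *         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
     *         .build();
     * AudioTrack track = new AudioTrack(
     *         new AudioAttributes.Builder().build(),
     *         format,
     *         AudioTrack.getMinBufferSize(44100,
     *                 AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT),
     *         AudioTrack.MODE_STREAM,
     *         AudioManager.AUDIO_SESSION_ID_GENERATE);
     * }</pre>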
     */
    public static class Builder {
        private int mEncoding = ENCODING_INVALID;
        private int mSampleRate = SAMPLE_RATE_UNSPECIFIED;
        private int mChannelMask = CHANNEL_INVALID;
        private int mChannelIndexMask = 0;
        private int mPropertySetMask = AUDIO_FORMAT_HAS_PROPERTY_NONE;

        /**
         * Constructs a new Builder with none of the format characteristics set.
         */
        public Builder() {
        }

        /**
         * Constructs a new Builder from a given {@link AudioFormat}.
         * @param af the {@link AudioFormat} object whose data will be reused in the new Builder.
         */
        public Builder(AudioFormat af) {
            mEncoding = af.mEncoding;
            mSampleRate = af.mSampleRate;
            mChannelMask = af.mChannelMask;
            mChannelIndexMask = af.mChannelIndexMask;
            mPropertySetMask = af.mPropertySetMask;
        }

        /**
         * Combines all of the format characteristics that have been set and return a new
         * {@link AudioFormat} object.
         * @return a new {@link AudioFormat} object
         */
        public AudioFormat build() {
            AudioFormat af = new AudioFormat(1980/*ignored*/);
            af.mEncoding = mEncoding;
            // not calling setSampleRate is equivalent to calling
            // setSampleRate(SAMPLE_RATE_UNSPECIFIED)
            af.mSampleRate = mSampleRate;
            af.mChannelMask = mChannelMask;
            af.mChannelIndexMask = mChannelIndexMask;
            af.mPropertySetMask = mPropertySetMask;
            return af;
        }

        /**
         * Sets the data encoding format.
         * @param encoding the specified encoding or default.
         * @return the same Builder instance.
         * @throws java.lang.IllegalArgumentException
         */
        public Builder setEncoding(@Encoding int encoding) throws IllegalArgumentException {
            switch (encoding) {
                case ENCODING_DEFAULT:
                    mEncoding = ENCODING_PCM_16BIT;
                    break;
                case ENCODING_PCM_8BIT:
                case ENCODING_PCM_16BIT:
                case ENCODING_PCM_FLOAT:
                case ENCODING_AC3:
                case ENCODING_E_AC3:
                case ENCODING_E_AC3_JOC:
                case ENCODING_DTS:
                case ENCODING_DTS_HD:
                case ENCODING_IEC61937:
                case ENCODING_MP3:
                case ENCODING_AAC_LC:
                case ENCODING_AAC_HE_V1:
                case ENCODING_AAC_HE_V2:
                case ENCODING_AAC_ELD:
                case ENCODING_AAC_XHE:
                case ENCODING_AC4:
                    mEncoding = encoding;
                    break;
                case ENCODING_INVALID:
                default:
                    throw new IllegalArgumentException("Invalid encoding " + encoding);
            }
            mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_ENCODING;
            return this;
        }

        /**
         * Sets the channel position mask.
         * The channel position mask specifies the association between audio samples in a frame
         * with named endpoint channels. The samples in the frame correspond to the
         * named set bits in the channel position mask, in ascending bit order.
         * See {@link #setChannelIndexMask(int)} to specify channels
         * based on endpoint numbered channels. This <a href="#channelPositionMask">description of
         * channel position masks</a> covers the concept in more detail.
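         * <p>For example, a 5.1 position mask can be passed either as the predefined
         * {@link AudioFormat#CHANNEL_OUT_5POINT1} constant or as the equivalent OR-ed
         * combination, shown here only to illustrate the composition (where {@code builder}
         * is any {@code AudioFormat.Builder}):
         * <pre>{@code
         * int mask = AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT
         *         | AudioFormat.CHANNEL_OUT_FRONT_CENTER | AudioFormat.CHANNEL_OUT_LOW_FREQUENCY
         *         | AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
         * builder.setChannelMask(mask); // same as setChannelMask(AudioFormat.CHANNEL_OUT_5POINT1)
         * }</pre>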
         * @param channelMask describes the configuration of the audio channels.
         *    <p> For output, the channelMask can be an OR-ed combination of
         *    channel position masks, e.g.
         *    {@link AudioFormat#CHANNEL_OUT_FRONT_LEFT},
         *    {@link AudioFormat#CHANNEL_OUT_FRONT_RIGHT},
         *    {@link AudioFormat#CHANNEL_OUT_FRONT_CENTER},
         *    {@link AudioFormat#CHANNEL_OUT_LOW_FREQUENCY}
         *    {@link AudioFormat#CHANNEL_OUT_BACK_LEFT},
         *    {@link AudioFormat#CHANNEL_OUT_BACK_RIGHT},
         *    {@link AudioFormat#CHANNEL_OUT_BACK_CENTER},
         *    {@link AudioFormat#CHANNEL_OUT_SIDE_LEFT},
         *    {@link AudioFormat#CHANNEL_OUT_SIDE_RIGHT}.
         *    <p> For a valid {@link AudioTrack} channel position mask,
         *    the following conditions apply:
         *    <br> (1) at most eight channel positions may be used;
         *    <br> (2) right/left pairs should be matched.
         *    <p> For input or {@link AudioRecord}, the mask should be
         *    {@link AudioFormat#CHANNEL_IN_MONO} or
         *    {@link AudioFormat#CHANNEL_IN_STEREO}.  {@link AudioFormat#CHANNEL_IN_MONO} is
         *    guaranteed to work on all devices.
         * @return the same <code>Builder</code> instance.
         * @throws IllegalArgumentException if the channel mask is invalid or
         *    if both channel index mask and channel position mask
         *    are specified but do not have the same channel count.
         */
        public @NonNull Builder setChannelMask(int channelMask) {
            if (channelMask == CHANNEL_INVALID) {
                throw new IllegalArgumentException("Invalid zero channel mask");
            } else if (/* channelMask != 0 && */ mChannelIndexMask != 0 &&
                    Integer.bitCount(channelMask) != Integer.bitCount(mChannelIndexMask)) {
                throw new IllegalArgumentException("Mismatched channel count for mask " +
                        Integer.toHexString(channelMask).toUpperCase());
            }
            mChannelMask = channelMask;
            mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK;
            return this;
        }

        /**
         * Sets the channel index mask.
         * A channel index mask specifies the association of audio samples in the frame
         * with numbered endpoint channels. The i-th bit in the channel index
         * mask corresponds to the i-th endpoint channel.
         * For example, an endpoint with four channels is represented
         * as index mask bits 0 through 3. This <a href="#channelIndexMask">description of channel
         * index masks</a> covers the concept in more detail.
         * See {@link #setChannelMask(int)} for a positional mask interpretation.
         * <p> Both {@link AudioTrack} and {@link AudioRecord} support
         * a channel index mask.
         * If a channel index mask is specified it is used,
         * otherwise the channel position mask specified
         * by <code>setChannelMask</code> is used.
         * For <code>AudioTrack</code> and <code>AudioRecord</code>,
         * a channel position mask is not required if a channel index mask is specified.
         *
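         * <p>As a brief sketch, the <code>0xa</code> output mapping described below would be
         * configured as follows (where {@code builder} is any {@code AudioFormat.Builder}):
         * <pre>{@code
         * builder.setChannelIndexMask(0xa); // binary 1010: endpoint channels 1 and 3 (zero-based)
         * }</pre>
         *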
         * @param channelIndexMask describes the configuration of the audio channels.
         *    <p> For output, the <code>channelIndexMask</code> is an OR-ed combination of
         *    bits representing the mapping of <code>AudioTrack</code> write samples
         *    to output sink channels.
         *    For example, a mask of <code>0xa</code>, or binary <code>1010</code>,
         *    means the <code>AudioTrack</code> write frame consists of two samples,
         *    which are routed to the second and the fourth channels of the output sink.
         *    Unmatched output sink channels are zero filled and unmatched
         *    <code>AudioTrack</code> write samples are dropped.
         *    <p> For input, the <code>channelIndexMask</code> is an OR-ed combination of
         *    bits representing the mapping of input source channels to
         *    <code>AudioRecord</code> read samples.
         *    For example, a mask of <code>0x5</code>, or binary
         *    <code>101</code>, will read from the first and third channel of the input
         *    source device and store them in the first and second sample of the
         *    <code>AudioRecord</code> read frame.
         *    Unmatched input source channels are dropped and
         *    unmatched <code>AudioRecord</code> read samples are zero filled.
         * @return the same <code>Builder</code> instance.
         * @throws IllegalArgumentException if the channel index mask is invalid or
         *    if both channel index mask and channel position mask
         *    are specified but do not have the same channel count.
         */
        public @NonNull Builder setChannelIndexMask(int channelIndexMask) {
            if (channelIndexMask == 0) {
                throw new IllegalArgumentException("Invalid zero channel index mask");
            } else if (/* channelIndexMask != 0 && */ mChannelMask != 0 &&
                    Integer.bitCount(channelIndexMask) != Integer.bitCount(mChannelMask)) {
                throw new IllegalArgumentException("Mismatched channel count for index mask " +
                        Integer.toHexString(channelIndexMask).toUpperCase());
            }
            mChannelIndexMask = channelIndexMask;
            mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK;
            return this;
        }

        /**
         * Sets the sample rate.
         * @param sampleRate the sample rate expressed in Hz
         * @return the same Builder instance.
         * @throws java.lang.IllegalArgumentException
         */
        public Builder setSampleRate(int sampleRate) throws IllegalArgumentException {
            // TODO Consider whether to keep the MIN and MAX range checks here.
            // It is not necessary and poses the problem of defining the limits independently from
            // native implementation or platform capabilities.
            if (((sampleRate < SAMPLE_RATE_HZ_MIN) || (sampleRate > SAMPLE_RATE_HZ_MAX)) &&
                    sampleRate != SAMPLE_RATE_UNSPECIFIED) {
                throw new IllegalArgumentException("Invalid sample rate " + sampleRate);
            }
            mSampleRate = sampleRate;
            mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE;
            return this;
        }
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        AudioFormat that = (AudioFormat) o;

        if (mPropertySetMask != that.mPropertySetMask) return false;

        // return false if any of the properties is set and the values differ
        return !((((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0)
                            && (mEncoding != that.mEncoding))
                    || (((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0)
                            && (mSampleRate != that.mSampleRate))
                    || (((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0)
                            && (mChannelMask != that.mChannelMask))
                    || (((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0)
                            && (mChannelIndexMask != that.mChannelIndexMask)));
    }

    @Override
    public int hashCode() {
        return Objects.hash(mPropertySetMask, mSampleRate, mEncoding, mChannelMask,
                mChannelIndexMask);
    }

    @Override
    public int describeContents() {
        return 0;
    }

    @Override
    public void writeToParcel(Parcel dest, int flags) {
        dest.writeInt(mPropertySetMask);
        dest.writeInt(mEncoding);
        dest.writeInt(mSampleRate);
        dest.writeInt(mChannelMask);
        dest.writeInt(mChannelIndexMask);
    }

    private AudioFormat(Parcel in) {
        mPropertySetMask = in.readInt();
        mEncoding = in.readInt();
        mSampleRate = in.readInt();
        mChannelMask = in.readInt();
        mChannelIndexMask = in.readInt();
    }

    public static final Parcelable.Creator<AudioFormat> CREATOR =
            new Parcelable.Creator<AudioFormat>() {
        public AudioFormat createFromParcel(Parcel p) {
            return new AudioFormat(p);
        }
        public AudioFormat[] newArray(int size) {
            return new AudioFormat[size];
        }
    };

    @Override
    public String toString () {
        return new String("AudioFormat:"
                + " props=" + mPropertySetMask
                + " enc=" + mEncoding
                + " chan=0x" + Integer.toHexString(mChannelMask).toUpperCase()
                + " chan_index=0x" + Integer.toHexString(mChannelIndexMask).toUpperCase()
                + " rate=" + mSampleRate);
    }

    /** @hide */
    @IntDef(flag = false, prefix = "ENCODING", value = {
        ENCODING_DEFAULT,
        ENCODING_PCM_8BIT,
        ENCODING_PCM_16BIT,
        ENCODING_PCM_FLOAT,
        ENCODING_AC3,
        ENCODING_E_AC3,
        ENCODING_E_AC3_JOC,
        ENCODING_DTS,
        ENCODING_DTS_HD,
        ENCODING_IEC61937,
        ENCODING_AAC_HE_V1,
        ENCODING_AAC_HE_V2,
        ENCODING_AAC_LC,
        ENCODING_AAC_ELD,
        ENCODING_AAC_XHE,
        ENCODING_AC4 }
    )
    @Retention(RetentionPolicy.SOURCE)
    public @interface Encoding {}

    /** @hide */
    public static final int[] SURROUND_SOUND_ENCODING = {
            ENCODING_AC3,
            ENCODING_E_AC3,
            ENCODING_DTS,
            ENCODING_DTS_HD,
            ENCODING_AAC_LC,
            ENCODING_DOLBY_TRUEHD,
            ENCODING_E_AC3_JOC,
    };

    /** @hide */
    @IntDef(flag = false, prefix = "ENCODING", value = {
            ENCODING_AC3,
            ENCODING_E_AC3,
            ENCODING_DTS,
            ENCODING_DTS_HD,
            ENCODING_AAC_LC,
            ENCODING_DOLBY_TRUEHD,
            ENCODING_E_AC3_JOC }
    )
    @Retention(RetentionPolicy.SOURCE)
    public @interface SurroundSoundEncoding {}

    /**
     * @hide
     *
     * Return default name for a surround format. This is not an International name.
     * It is just a default to use if an international name is not available.
     *
     * @param audioFormat a surround format
     * @return short default name for the format, e.g. "AC3" for ENCODING_AC3.
     */
    public static String toDisplayName(@SurroundSoundEncoding int audioFormat) {
        switch (audioFormat) {
            case ENCODING_AC3:
                return "Dolby Digital (AC3)";
            case ENCODING_E_AC3:
                return "Dolby Digital Plus (E_AC3)";
            case ENCODING_DTS:
                return "DTS";
            case ENCODING_DTS_HD:
                return "DTS HD";
            case ENCODING_AAC_LC:
                return "AAC";
            case ENCODING_DOLBY_TRUEHD:
                return "Dolby TrueHD";
            case ENCODING_E_AC3_JOC:
                return "Dolby Atmos";
            default:
                return "Unknown surround sound format";
        }
    }

}