      1 /*
      2  * Copyright (C) 2008 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 package android.media;
     18 
     19 import android.annotation.IntDef;
     20 import android.annotation.NonNull;
     21 import android.os.Parcel;
     22 import android.os.Parcelable;
     23 
     24 import java.lang.annotation.Retention;
     25 import java.lang.annotation.RetentionPolicy;
     26 import java.util.Arrays;
     27 import java.util.Objects;
     28 
     29 /**
     30  * The {@link AudioFormat} class is used to access a number of audio format and
     31  * channel configuration constants. They are for instance used
     32  * in {@link AudioTrack} and {@link AudioRecord}, as valid values in individual parameters of
     33  * constructors like {@link AudioTrack#AudioTrack(int, int, int, int, int, int)}, where the fourth
     34  * parameter is one of the <code>AudioFormat.ENCODING_*</code> constants.
     35  * The <code>AudioFormat</code> constants are also used in {@link MediaFormat} to specify
     36  * audio related values commonly used in media, such as for {@link MediaFormat#KEY_CHANNEL_MASK}.
     37  * <p>The {@link AudioFormat.Builder} class can be used to create instances of
     38  * the <code>AudioFormat</code> format class.
     39  * Refer to
     40  * {@link AudioFormat.Builder} for documentation on the mechanics of the configuration and building
     41  * of such instances. Here we describe the main concepts that the <code>AudioFormat</code> class
     42  * allow you to convey in each instance, they are:
     43  * <ol>
     44  * <li><a href="#sampleRate">sample rate</a>
     45  * <li><a href="#encoding">encoding</a>
     46  * <li><a href="#channelMask">channel masks</a>
     47  * </ol>
     48  * <p>Closely associated with the <code>AudioFormat</code> is the notion of an
     49  * <a href="#audioFrame">audio frame</a>, which is used throughout the documentation
     50  * to represent the minimum size complete unit of audio data.
     51  *
     52  * <h4 id="sampleRate">Sample rate</h4>
     53  * <p>Expressed in Hz, the sample rate in an <code>AudioFormat</code> instance expresses the number
     54  * of audio samples for each channel per second in the content you are playing or recording. It is
     55  * not the sample rate
     56  * at which content is rendered or produced. For instance a sound at a media sample rate of 8000Hz
     57  * can be played on a device operating at a sample rate of 48000Hz; the sample rate conversion is
      58  * automatically handled by the platform; it will not play at 6x speed.
     59  *
     60  * <p>As of API {@link android.os.Build.VERSION_CODES#M},
     61  * sample rates up to 192kHz are supported
     62  * for <code>AudioRecord</code> and <code>AudioTrack</code>, with sample rate conversion
     63  * performed as needed.
     64  * To improve efficiency and avoid lossy conversions, it is recommended to match the sample rate
     65  * for <code>AudioRecord</code> and <code>AudioTrack</code> to the endpoint device
     66  * sample rate, and limit the sample rate to no more than 48kHz unless there are special
     67  * device capabilities that warrant a higher rate.
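 * <p>As a hedged illustration (not part of this class's API), one way to follow that
 * recommendation is to query the device output sample rate and feed it to a format builder;
 * the <code>context</code> variable below is assumed to be available:
 * <pre>
 * // Sketch: match the playback sample rate to the device output rate.
 * AudioManager am = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
 * String rateStr = am.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
 * int deviceRate = (rateStr != null) ? Integer.parseInt(rateStr) : 48000; // fall back to 48 kHz
 * AudioFormat format = new AudioFormat.Builder().setSampleRate(deviceRate).build();
 * </pre>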
     68  *
     69  * <h4 id="encoding">Encoding</h4>
     70  * <p>Audio encoding is used to describe the bit representation of audio data, which can be
     71  * either linear PCM or compressed audio, such as AC3 or DTS.
     72  * <p>For linear PCM, the audio encoding describes the sample size, 8 bits, 16 bits, or 32 bits,
     73  * and the sample representation, integer or float.
     74  * <ul>
      75  * <li> {@link #ENCODING_PCM_8BIT}: The audio sample is an 8 bit unsigned integer in the
     76  * range [0, 255], with a 128 offset for zero. This is typically stored as a Java byte in a
     77  * byte array or ByteBuffer. Since the Java byte is <em>signed</em>,
     78  * be careful with math operations and conversions as the most significant bit is inverted.
     79  * </li>
     80  * <li> {@link #ENCODING_PCM_16BIT}: The audio sample is a 16 bit signed integer
     81  * typically stored as a Java short in a short array, but when the short
     82  * is stored in a ByteBuffer, it is native endian (as compared to the default Java big endian).
     83  * The short has full range from [-32768, 32767],
     84  * and is sometimes interpreted as fixed point Q.15 data.
     85  * </li>
     86  * <li> {@link #ENCODING_PCM_FLOAT}: Introduced in
     87  * API {@link android.os.Build.VERSION_CODES#LOLLIPOP}, this encoding specifies that
     88  * the audio sample is a 32 bit IEEE single precision float. The sample can be
     89  * manipulated as a Java float in a float array, though within a ByteBuffer
     90  * it is stored in native endian byte order.
     91  * The nominal range of <code>ENCODING_PCM_FLOAT</code> audio data is [-1.0, 1.0].
     92  * It is implementation dependent whether the positive maximum of 1.0 is included
     93  * in the interval. Values outside of the nominal range are clamped before
     94  * sending to the endpoint device. Beware that
     95  * the handling of NaN is undefined; subnormals may be treated as zero; and
     96  * infinities are generally clamped just like other values for <code>AudioTrack</code>
     97  * &ndash; try to avoid infinities because they can easily generate a NaN.
     98  * <br>
     99  * To achieve higher audio bit depth than a signed 16 bit integer short,
    100  * it is recommended to use <code>ENCODING_PCM_FLOAT</code> for audio capture, processing,
    101  * and playback.
    102  * Floats are efficiently manipulated by modern CPUs,
    103  * have greater precision than 24 bit signed integers,
    104  * and have greater dynamic range than 32 bit signed integers.
    105  * <code>AudioRecord</code> as of API {@link android.os.Build.VERSION_CODES#M} and
    106  * <code>AudioTrack</code> as of API {@link android.os.Build.VERSION_CODES#LOLLIPOP}
    107  * support <code>ENCODING_PCM_FLOAT</code>.
    108  * </li>
    109  * </ul>
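 * <p>A minimal sketch of handling these representations (the <code>bufferSizeInBytes</code>
 * value is an assumed placeholder):
 * <pre>
 * short pcm16 = (short) 12345;              // an ENCODING_PCM_16BIT sample
 * float pcmFloat = pcm16 / 32768f;          // scaled to the nominal [-1.0, 1.0) float range
 * ByteBuffer buffer = ByteBuffer.allocateDirect(bufferSizeInBytes)
 *         .order(ByteOrder.nativeOrder());  // PCM in a ByteBuffer is native endian
 * </pre>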
    110  * <p>For compressed audio, the encoding specifies the method of compression,
    111  * for example {@link #ENCODING_AC3} and {@link #ENCODING_DTS}. The compressed
    112  * audio data is typically stored as bytes in
    113  * a byte array or ByteBuffer. When a compressed audio encoding is specified
    114  * for an <code>AudioTrack</code>, it creates a direct (non-mixed) track
    115  * for output to an endpoint (such as HDMI) capable of decoding the compressed audio.
    116  * For (most) other endpoints, which are not capable of decoding such compressed audio,
    117  * you will need to decode the data first, typically by creating a {@link MediaCodec}.
    118  * Alternatively, one may use {@link MediaPlayer} for playback of compressed
    119  * audio files or streams.
    120  * <p>When compressed audio is sent out through a direct <code>AudioTrack</code>,
    121  * it need not be written in exact multiples of the audio access unit;
    122  * this differs from <code>MediaCodec</code> input buffers.
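 * <p>A hedged configuration sketch for a direct track (whether the connected endpoint can
 * decode AC-3 is device dependent, and the buffer size here is illustrative only):
 * <pre>
 * AudioTrack track = new AudioTrack.Builder()
 *         .setAudioAttributes(new AudioAttributes.Builder()
 *                 .setUsage(AudioAttributes.USAGE_MEDIA)
 *                 .setContentType(AudioAttributes.CONTENT_TYPE_MOVIE)
 *                 .build())
 *         .setAudioFormat(new AudioFormat.Builder()
 *                 .setEncoding(AudioFormat.ENCODING_AC3)
 *                 .setSampleRate(48000)
 *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
 *                 .build())
 *         .setBufferSizeInBytes(32 * 1024)
 *         .build();
 * </pre>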
    123  *
    124  * <h4 id="channelMask">Channel mask</h4>
    125  * <p>Channel masks are used in <code>AudioTrack</code> and <code>AudioRecord</code> to describe
    126  * the samples and their arrangement in the audio frame. They are also used in the endpoint (e.g.
    127  * a USB audio interface, a DAC connected to headphones) to specify allowable configurations of a
    128  * particular device.
    129  * <br>As of API {@link android.os.Build.VERSION_CODES#M}, there are two types of channel masks:
    130  * channel position masks and channel index masks.
    131  *
    132  * <h5 id="channelPositionMask">Channel position masks</h5>
    133  * Channel position masks are the original Android channel masks, and are used since API
    134  * {@link android.os.Build.VERSION_CODES#BASE}.
    135  * For input and output, they imply a positional nature - the location of a speaker or a microphone
    136  * for recording or playback.
    137  * <br>For a channel position mask, each allowed channel position corresponds to a bit in the
    138  * channel mask. If that channel position is present in the audio frame, that bit is set,
    139  * otherwise it is zero. The order of the bits (from lsb to msb) corresponds to the order of that
    140  * position's sample in the audio frame.
    141  * <br>The canonical channel position masks by channel count are as follows:
    142  * <br><table>
    143  * <tr><td>channel count</td><td>channel position mask</td></tr>
    144  * <tr><td>1</td><td>{@link #CHANNEL_OUT_MONO}</td></tr>
    145  * <tr><td>2</td><td>{@link #CHANNEL_OUT_STEREO}</td></tr>
    146  * <tr><td>3</td><td>{@link #CHANNEL_OUT_STEREO} | {@link #CHANNEL_OUT_FRONT_CENTER}</td></tr>
    147  * <tr><td>4</td><td>{@link #CHANNEL_OUT_QUAD}</td></tr>
    148  * <tr><td>5</td><td>{@link #CHANNEL_OUT_QUAD} | {@link #CHANNEL_OUT_FRONT_CENTER}</td></tr>
    149  * <tr><td>6</td><td>{@link #CHANNEL_OUT_5POINT1}</td></tr>
    150  * <tr><td>7</td><td>{@link #CHANNEL_OUT_5POINT1} | {@link #CHANNEL_OUT_BACK_CENTER}</td></tr>
    151  * <tr><td>8</td><td>{@link #CHANNEL_OUT_7POINT1_SURROUND}</td></tr>
    152  * </table>
    153  * <br>These masks are an ORed composite of individual channel masks. For example
    154  * {@link #CHANNEL_OUT_STEREO} is composed of {@link #CHANNEL_OUT_FRONT_LEFT} and
    155  * {@link #CHANNEL_OUT_FRONT_RIGHT}.
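 * <p>For example (a sketch, not normative), a 5.1 mask can be written either as the predefined
 * constant or as the OR of its individual channel bits, and its channel count is the number of
 * set bits:
 * <pre>
 * int mask = AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT
 *         | AudioFormat.CHANNEL_OUT_FRONT_CENTER | AudioFormat.CHANNEL_OUT_LOW_FREQUENCY
 *         | AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
 * // mask == AudioFormat.CHANNEL_OUT_5POINT1
 * int channelCount = Integer.bitCount(mask); // 6
 * </pre>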
    156  *
    157  * <h5 id="channelIndexMask">Channel index masks</h5>
    158  * Channel index masks are introduced in API {@link android.os.Build.VERSION_CODES#M}. They allow
    159  * the selection of a particular channel from the source or sink endpoint by number, i.e. the first
    160  * channel, the second channel, and so forth. This avoids problems with artificially assigning
     161  * positions to channels of an endpoint, or figuring out what the i<sup>th</sup> position bit is within
     162  * an endpoint's channel position mask, etc.
    163  * <br>Here's an example where channel index masks address this confusion: dealing with a 4 channel
    164  * USB device. Using a position mask, and based on the channel count, this would be a
    165  * {@link #CHANNEL_OUT_QUAD} device, but really one is only interested in channel 0
    166  * through channel 3. The USB device would then have the following individual bit channel masks:
    167  * {@link #CHANNEL_OUT_FRONT_LEFT},
    168  * {@link #CHANNEL_OUT_FRONT_RIGHT}, {@link #CHANNEL_OUT_BACK_LEFT}
    169  * and {@link #CHANNEL_OUT_BACK_RIGHT}. But which is channel 0 and which is
    170  * channel 3?
    171  * <br>For a channel index mask, each channel number is represented as a bit in the mask, from the
     172  * lsb (channel 0) upwards to the msb; numerically this bit value is
    173  * <code>1 << channelNumber</code>.
    174  * A set bit indicates that channel is present in the audio frame, otherwise it is cleared.
     175  * The order of the bits also corresponds to that channel number's sample order in the audio frame.
    176  * <br>For the previous 4 channel USB device example, the device would have a channel index mask
    177  * <code>0xF</code>. Suppose we wanted to select only the first and the third channels; this would
    178  * correspond to a channel index mask <code>0x5</code> (the first and third bits set). If an
    179  * <code>AudioTrack</code> uses this channel index mask, the audio frame would consist of two
    180  * samples, the first sample of each frame routed to channel 0, and the second sample of each frame
    181  * routed to channel 2.
    182  * The canonical channel index masks by channel count are given by the formula
    183  * <code>(1 << channelCount) - 1</code>.
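 * <br>A minimal sketch of the two-channel index selection (<code>0x5</code>) described above,
 * with encoding and sample rate chosen arbitrarily for illustration:
 * <pre>
 * AudioFormat format = new AudioFormat.Builder()
 *         .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
 *         .setSampleRate(48000)
 *         .setChannelIndexMask(0x5) // endpoint channels 0 and 2
 *         .build();
 * // format.getChannelCount() == 2
 * </pre>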
    184  *
    185  * <h5>Use cases</h5>
    186  * <ul>
    187  * <li><i>Channel position mask for an endpoint:</i> <code>CHANNEL_OUT_FRONT_LEFT</code>,
    188  *  <code>CHANNEL_OUT_FRONT_CENTER</code>, etc. for HDMI home theater purposes.
    189  * <li><i>Channel position mask for an audio stream:</i> Creating an <code>AudioTrack</code>
    190  *  to output movie content, where 5.1 multichannel output is to be written.
    191  * <li><i>Channel index mask for an endpoint:</i> USB devices for which input and output do not
    192  *  correspond to left or right speaker or microphone.
    193  * <li><i>Channel index mask for an audio stream:</i> An <code>AudioRecord</code> may only want the
     194  *  third and fourth audio channels of the endpoint (i.e. the second channel pair), and not care
     195  *  about the position it corresponds to, in which case the channel index mask is <code>0xC</code>.
     196  *  Multichannel <code>AudioRecord</code> sessions should use channel index masks, as sketched below.
    197  * </ul>
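 * <p>A sketch of the last use case above (it assumes the <code>RECORD_AUDIO</code> permission
 * is held and that the endpoint really exposes at least four channels):
 * <pre>
 * AudioRecord recorder = new AudioRecord.Builder()
 *         .setAudioFormat(new AudioFormat.Builder()
 *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
 *                 .setSampleRate(48000)
 *                 .setChannelIndexMask(0xC) // third and fourth endpoint channels
 *                 .build())
 *         .build();
 * </pre>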
    198  * <h4 id="audioFrame">Audio Frame</h4>
    199  * <p>For linear PCM, an audio frame consists of a set of samples captured at the same time,
    200  * whose count and
    201  * channel association are given by the <a href="#channelMask">channel mask</a>,
    202  * and whose sample contents are specified by the <a href="#encoding">encoding</a>.
    203  * For example, a stereo 16 bit PCM frame consists of
    204  * two 16 bit linear PCM samples, with a frame size of 4 bytes.
    205  * For compressed audio, an audio frame may alternately
    206  * refer to an access unit of compressed data bytes that is logically grouped together for
    207  * decoding and bitstream access (e.g. {@link MediaCodec}),
    208  * or a single byte of compressed data (e.g. {@link AudioTrack#getBufferSizeInFrames()
    209  * AudioTrack.getBufferSizeInFrames()}),
    210  * or the linear PCM frame result from decoding the compressed data
     211  * (e.g. {@link AudioTrack#getPlaybackHeadPosition()
    212  * AudioTrack.getPlaybackHeadPosition()}),
    213  * depending on the context where audio frame is used.
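 * <p>As an arithmetic sketch for the linear PCM case (the variable names are illustrative only):
 * <pre>
 * // stereo ENCODING_PCM_16BIT: 2 channels * 2 bytes = 4 bytes per frame
 * // stereo ENCODING_PCM_FLOAT: 2 channels * 4 bytes = 8 bytes per frame
 * int bytesPerFrame = channelCount * bytesPerSample;
 * </pre>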
    214  */
    215 public final class AudioFormat implements Parcelable {
    216 
    217     //---------------------------------------------------------
    218     // Constants
    219     //--------------------
    220     /** Invalid audio data format */
    221     public static final int ENCODING_INVALID = 0;
    222     /** Default audio data format */
    223     public static final int ENCODING_DEFAULT = 1;
    224 
    225     // These values must be kept in sync with core/jni/android_media_AudioFormat.h
    226     // Also sync av/services/audiopolicy/managerdefault/ConfigParsingUtils.h
    227     /** Audio data format: PCM 16 bit per sample. Guaranteed to be supported by devices. */
    228     public static final int ENCODING_PCM_16BIT = 2;
    229     /** Audio data format: PCM 8 bit per sample. Not guaranteed to be supported by devices. */
    230     public static final int ENCODING_PCM_8BIT = 3;
    231     /** Audio data format: single-precision floating-point per sample */
    232     public static final int ENCODING_PCM_FLOAT = 4;
    233     /** Audio data format: AC-3 compressed */
    234     public static final int ENCODING_AC3 = 5;
    235     /** Audio data format: E-AC-3 compressed */
    236     public static final int ENCODING_E_AC3 = 6;
    237     /** Audio data format: DTS compressed */
    238     public static final int ENCODING_DTS = 7;
    239     /** Audio data format: DTS HD compressed */
    240     public static final int ENCODING_DTS_HD = 8;
    241     /** Audio data format: MP3 compressed
    242      * @hide
    243      * */
    244     public static final int ENCODING_MP3 = 9;
    245     /** Audio data format: AAC LC compressed
    246      * @hide
    247      * */
    248     public static final int ENCODING_AAC_LC = 10;
    249     /** Audio data format: AAC HE V1 compressed
    250      * @hide
    251      * */
    252     public static final int ENCODING_AAC_HE_V1 = 11;
    253     /** Audio data format: AAC HE V2 compressed
    254      * @hide
    255      * */
    256     public static final int ENCODING_AAC_HE_V2 = 12;
    257     /** Audio data format: compressed audio wrapped in PCM for HDMI
    258      * or S/PDIF passthrough.
    259      * IEC61937 uses a stereo stream of 16-bit samples as the wrapper.
    260      * So the channel mask for the track must be {@link #CHANNEL_OUT_STEREO}.
    261      * Data should be written to the stream in a short[] array.
    262      * If the data is written in a byte[] array then there may be endian problems
    263      * on some platforms when converting to short internally.
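     * <p>A hedged configuration sketch (actual passthrough support depends on the device and
     * route; write the audio as a <code>short[]</code> as noted above):
     * <pre>
     * AudioFormat iecFormat = new AudioFormat.Builder()
     *         .setEncoding(AudioFormat.ENCODING_IEC61937)
     *         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
     *         .setSampleRate(48000)
     *         .build();
     * </pre>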
    264      */
    265     public static final int ENCODING_IEC61937 = 13;
    266     /** Audio data format: DOLBY TRUEHD compressed
    267      **/
    268     public static final int ENCODING_DOLBY_TRUEHD = 14;
    269 
    270     /** Invalid audio channel configuration */
    271     /** @deprecated Use {@link #CHANNEL_INVALID} instead.  */
    272     @Deprecated    public static final int CHANNEL_CONFIGURATION_INVALID   = 0;
    273     /** Default audio channel configuration */
    274     /** @deprecated Use {@link #CHANNEL_OUT_DEFAULT} or {@link #CHANNEL_IN_DEFAULT} instead.  */
    275     @Deprecated    public static final int CHANNEL_CONFIGURATION_DEFAULT   = 1;
    276     /** Mono audio configuration */
    277     /** @deprecated Use {@link #CHANNEL_OUT_MONO} or {@link #CHANNEL_IN_MONO} instead.  */
    278     @Deprecated    public static final int CHANNEL_CONFIGURATION_MONO      = 2;
    279     /** Stereo (2 channel) audio configuration */
    280     /** @deprecated Use {@link #CHANNEL_OUT_STEREO} or {@link #CHANNEL_IN_STEREO} instead.  */
    281     @Deprecated    public static final int CHANNEL_CONFIGURATION_STEREO    = 3;
    282 
    283     /** Invalid audio channel mask */
    284     public static final int CHANNEL_INVALID = 0;
    285     /** Default audio channel mask */
    286     public static final int CHANNEL_OUT_DEFAULT = 1;
    287 
    288     // Output channel mask definitions below are translated to the native values defined in
    289     //  in /system/media/audio/include/system/audio.h in the JNI code of AudioTrack
    290     public static final int CHANNEL_OUT_FRONT_LEFT = 0x4;
    291     public static final int CHANNEL_OUT_FRONT_RIGHT = 0x8;
    292     public static final int CHANNEL_OUT_FRONT_CENTER = 0x10;
    293     public static final int CHANNEL_OUT_LOW_FREQUENCY = 0x20;
    294     public static final int CHANNEL_OUT_BACK_LEFT = 0x40;
    295     public static final int CHANNEL_OUT_BACK_RIGHT = 0x80;
    296     public static final int CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x100;
    297     public static final int CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x200;
    298     public static final int CHANNEL_OUT_BACK_CENTER = 0x400;
    299     public static final int CHANNEL_OUT_SIDE_LEFT =         0x800;
    300     public static final int CHANNEL_OUT_SIDE_RIGHT =       0x1000;
    301     /** @hide */
    302     public static final int CHANNEL_OUT_TOP_CENTER =       0x2000;
    303     /** @hide */
    304     public static final int CHANNEL_OUT_TOP_FRONT_LEFT =   0x4000;
    305     /** @hide */
    306     public static final int CHANNEL_OUT_TOP_FRONT_CENTER = 0x8000;
    307     /** @hide */
    308     public static final int CHANNEL_OUT_TOP_FRONT_RIGHT = 0x10000;
    309     /** @hide */
    310     public static final int CHANNEL_OUT_TOP_BACK_LEFT =   0x20000;
    311     /** @hide */
    312     public static final int CHANNEL_OUT_TOP_BACK_CENTER = 0x40000;
    313     /** @hide */
    314     public static final int CHANNEL_OUT_TOP_BACK_RIGHT =  0x80000;
    315 
    316     public static final int CHANNEL_OUT_MONO = CHANNEL_OUT_FRONT_LEFT;
    317     public static final int CHANNEL_OUT_STEREO = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT);
    318     // aka QUAD_BACK
    319     public static final int CHANNEL_OUT_QUAD = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
    320             CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT);
    321     /** @hide */
    322     public static final int CHANNEL_OUT_QUAD_SIDE = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
    323             CHANNEL_OUT_SIDE_LEFT | CHANNEL_OUT_SIDE_RIGHT);
    324     public static final int CHANNEL_OUT_SURROUND = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
    325             CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_BACK_CENTER);
    326     // aka 5POINT1_BACK
    327     public static final int CHANNEL_OUT_5POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
    328             CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT);
    329     /** @hide */
    330     public static final int CHANNEL_OUT_5POINT1_SIDE = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
    331             CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY |
    332             CHANNEL_OUT_SIDE_LEFT | CHANNEL_OUT_SIDE_RIGHT);
    333     // different from AUDIO_CHANNEL_OUT_7POINT1 used internally, and not accepted by AudioRecord.
    334     /** @deprecated Not the typical 7.1 surround configuration. Use {@link #CHANNEL_OUT_7POINT1_SURROUND} instead. */
    335     @Deprecated    public static final int CHANNEL_OUT_7POINT1 = (CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT |
    336             CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_LOW_FREQUENCY | CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
    337             CHANNEL_OUT_FRONT_LEFT_OF_CENTER | CHANNEL_OUT_FRONT_RIGHT_OF_CENTER);
    338     // matches AUDIO_CHANNEL_OUT_7POINT1
    339     public static final int CHANNEL_OUT_7POINT1_SURROUND = (
    340             CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_CENTER | CHANNEL_OUT_FRONT_RIGHT |
    341             CHANNEL_OUT_SIDE_LEFT | CHANNEL_OUT_SIDE_RIGHT |
    342             CHANNEL_OUT_BACK_LEFT | CHANNEL_OUT_BACK_RIGHT |
    343             CHANNEL_OUT_LOW_FREQUENCY);
    344     // CHANNEL_OUT_ALL is not yet defined; if added then it should match AUDIO_CHANNEL_OUT_ALL
    345 
    346     /** Minimum value for sample rate,
    347      *  assuming AudioTrack and AudioRecord share the same limitations.
    348      * @hide
    349      */
    350     // never unhide
    351     public static final int SAMPLE_RATE_HZ_MIN = 4000;
    352     /** Maximum value for sample rate,
    353      *  assuming AudioTrack and AudioRecord share the same limitations.
    354      * @hide
    355      */
    356     // never unhide
    357     public static final int SAMPLE_RATE_HZ_MAX = 192000;
    358     /** Sample rate will be a route-dependent value.
    359      * For AudioTrack, it is usually the sink sample rate,
    360      * and for AudioRecord it is usually the source sample rate.
    361      */
    362     public static final int SAMPLE_RATE_UNSPECIFIED = 0;
    363 
    364     /**
    365      * @hide
    366      * Return the input channel mask corresponding to an output channel mask.
    367      * This can be used for submix rerouting for the mask of the recorder to map to that of the mix.
    368      * @param outMask a combination of the CHANNEL_OUT_* definitions, but not CHANNEL_OUT_DEFAULT
    369      * @return a combination of CHANNEL_IN_* definitions matching an output channel mask
    370      * @throws IllegalArgumentException
    371      */
    372     public static int inChannelMaskFromOutChannelMask(int outMask) throws IllegalArgumentException {
    373         if (outMask == CHANNEL_OUT_DEFAULT) {
    374             throw new IllegalArgumentException(
    375                     "Illegal CHANNEL_OUT_DEFAULT channel mask for input.");
    376         }
    377         switch (channelCountFromOutChannelMask(outMask)) {
    378             case 1:
    379                 return CHANNEL_IN_MONO;
    380             case 2:
    381                 return CHANNEL_IN_STEREO;
    382             default:
    383                 throw new IllegalArgumentException("Unsupported channel configuration for input.");
    384         }
    385     }
    386 
    387     /**
    388      * @hide
    389      * Return the number of channels from an input channel mask
    390      * @param mask a combination of the CHANNEL_IN_* definitions, even CHANNEL_IN_DEFAULT
    391      * @return number of channels for the mask
    392      */
    393     public static int channelCountFromInChannelMask(int mask) {
    394         return Integer.bitCount(mask);
    395     }
    396     /**
    397      * @hide
    398      * Return the number of channels from an output channel mask
    399      * @param mask a combination of the CHANNEL_OUT_* definitions, but not CHANNEL_OUT_DEFAULT
    400      * @return number of channels for the mask
    401      */
    402     public static int channelCountFromOutChannelMask(int mask) {
    403         return Integer.bitCount(mask);
    404     }
    405     /**
    406      * @hide
    407      * Return a channel mask ready to be used by native code
     408      * @param javaMask a combination of the CHANNEL_OUT_* definitions, but not CHANNEL_OUT_DEFAULT
    409      * @return a native channel mask
    410      */
    411     public static int convertChannelOutMaskToNativeMask(int javaMask) {
    412         return (javaMask >> 2);
    413     }
    414 
    415     /**
    416      * @hide
    417      * Return a java output channel mask
     418      * @param nativeMask a native channel mask
    419      * @return a combination of the CHANNEL_OUT_* definitions
    420      */
    421     public static int convertNativeChannelMaskToOutMask(int nativeMask) {
    422         return (nativeMask << 2);
    423     }
    424 
    425     public static final int CHANNEL_IN_DEFAULT = 1;
    426     // These directly match native
    427     public static final int CHANNEL_IN_LEFT = 0x4;
    428     public static final int CHANNEL_IN_RIGHT = 0x8;
    429     public static final int CHANNEL_IN_FRONT = 0x10;
    430     public static final int CHANNEL_IN_BACK = 0x20;
    431     public static final int CHANNEL_IN_LEFT_PROCESSED = 0x40;
    432     public static final int CHANNEL_IN_RIGHT_PROCESSED = 0x80;
    433     public static final int CHANNEL_IN_FRONT_PROCESSED = 0x100;
    434     public static final int CHANNEL_IN_BACK_PROCESSED = 0x200;
    435     public static final int CHANNEL_IN_PRESSURE = 0x400;
    436     public static final int CHANNEL_IN_X_AXIS = 0x800;
    437     public static final int CHANNEL_IN_Y_AXIS = 0x1000;
    438     public static final int CHANNEL_IN_Z_AXIS = 0x2000;
    439     public static final int CHANNEL_IN_VOICE_UPLINK = 0x4000;
    440     public static final int CHANNEL_IN_VOICE_DNLINK = 0x8000;
    441     public static final int CHANNEL_IN_MONO = CHANNEL_IN_FRONT;
    442     public static final int CHANNEL_IN_STEREO = (CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT);
    443     /** @hide */
    444     public static final int CHANNEL_IN_FRONT_BACK = CHANNEL_IN_FRONT | CHANNEL_IN_BACK;
    445     // CHANNEL_IN_ALL is not yet defined; if added then it should match AUDIO_CHANNEL_IN_ALL
    446 
    447     /** @hide */
    448     public static int getBytesPerSample(int audioFormat)
    449     {
    450         switch (audioFormat) {
    451         case ENCODING_PCM_8BIT:
    452             return 1;
    453         case ENCODING_PCM_16BIT:
    454         case ENCODING_IEC61937:
    455         case ENCODING_DEFAULT:
    456             return 2;
    457         case ENCODING_PCM_FLOAT:
    458             return 4;
    459         case ENCODING_INVALID:
    460         default:
    461             throw new IllegalArgumentException("Bad audio format " + audioFormat);
    462         }
    463     }
    464 
    465     /** @hide */
    466     public static boolean isValidEncoding(int audioFormat)
    467     {
    468         switch (audioFormat) {
    469         case ENCODING_PCM_8BIT:
    470         case ENCODING_PCM_16BIT:
    471         case ENCODING_PCM_FLOAT:
    472         case ENCODING_AC3:
    473         case ENCODING_E_AC3:
    474         case ENCODING_DTS:
    475         case ENCODING_DTS_HD:
    476         case ENCODING_MP3:
    477         case ENCODING_AAC_LC:
    478         case ENCODING_AAC_HE_V1:
    479         case ENCODING_AAC_HE_V2:
    480         case ENCODING_IEC61937:
    481             return true;
    482         default:
    483             return false;
    484         }
    485     }
    486 
    487     /** @hide */
    488     public static boolean isPublicEncoding(int audioFormat)
    489     {
    490         switch (audioFormat) {
    491         case ENCODING_PCM_8BIT:
    492         case ENCODING_PCM_16BIT:
    493         case ENCODING_PCM_FLOAT:
    494         case ENCODING_AC3:
    495         case ENCODING_E_AC3:
    496         case ENCODING_DTS:
    497         case ENCODING_DTS_HD:
    498         case ENCODING_IEC61937:
    499             return true;
    500         default:
    501             return false;
    502         }
    503     }
    504 
    505     /** @hide */
    506     public static boolean isEncodingLinearPcm(int audioFormat)
    507     {
    508         switch (audioFormat) {
    509         case ENCODING_PCM_8BIT:
    510         case ENCODING_PCM_16BIT:
    511         case ENCODING_PCM_FLOAT:
    512         case ENCODING_DEFAULT:
    513             return true;
    514         case ENCODING_AC3:
    515         case ENCODING_E_AC3:
    516         case ENCODING_DTS:
    517         case ENCODING_DTS_HD:
    518         case ENCODING_MP3:
    519         case ENCODING_AAC_LC:
    520         case ENCODING_AAC_HE_V1:
    521         case ENCODING_AAC_HE_V2:
    522         case ENCODING_IEC61937: // wrapped in PCM but compressed
    523             return false;
    524         case ENCODING_INVALID:
    525         default:
    526             throw new IllegalArgumentException("Bad audio format " + audioFormat);
    527         }
    528     }
    529 
    530     /** @hide */
    531     public static boolean isEncodingLinearFrames(int audioFormat)
    532     {
    533         switch (audioFormat) {
    534         case ENCODING_PCM_8BIT:
    535         case ENCODING_PCM_16BIT:
    536         case ENCODING_PCM_FLOAT:
    537         case ENCODING_IEC61937: // same size as stereo PCM
    538         case ENCODING_DEFAULT:
    539             return true;
    540         case ENCODING_AC3:
    541         case ENCODING_E_AC3:
    542         case ENCODING_DTS:
    543         case ENCODING_DTS_HD:
    544         case ENCODING_MP3:
    545         case ENCODING_AAC_LC:
    546         case ENCODING_AAC_HE_V1:
    547         case ENCODING_AAC_HE_V2:
    548             return false;
    549         case ENCODING_INVALID:
    550         default:
    551             throw new IllegalArgumentException("Bad audio format " + audioFormat);
    552         }
    553     }
    554     /**
    555      * Returns an array of public encoding values extracted from an array of
    556      * encoding values.
    557      * @hide
    558      */
    559     public static int[] filterPublicFormats(int[] formats) {
    560         if (formats == null) {
    561             return null;
    562         }
    563         int[] myCopy = Arrays.copyOf(formats, formats.length);
    564         int size = 0;
    565         for (int i = 0; i < myCopy.length; i++) {
    566             if (isPublicEncoding(myCopy[i])) {
    567                 if (size != i) {
    568                     myCopy[size] = myCopy[i];
    569                 }
    570                 size++;
    571             }
    572         }
    573         return Arrays.copyOf(myCopy, size);
    574     }
    575 
    576     /** @removed */
    577     public AudioFormat()
    578     {
    579         throw new UnsupportedOperationException("There is no valid usage of this constructor");
    580     }
    581 
    582     /**
    583      * Private constructor with an ignored argument to differentiate from the removed default ctor
    584      * @param ignoredArgument
    585      */
    586     private AudioFormat(int ignoredArgument) {
    587     }
    588 
    589     /**
    590      * Constructor used by the JNI.  Parameters are not checked for validity.
    591      */
    592     // Update sound trigger JNI in core/jni/android_hardware_SoundTrigger.cpp when modifying this
    593     // constructor
    594     private AudioFormat(int encoding, int sampleRate, int channelMask, int channelIndexMask) {
    595         mEncoding = encoding;
    596         mSampleRate = sampleRate;
    597         mChannelMask = channelMask;
    598         mChannelIndexMask = channelIndexMask;
    599         mPropertySetMask = AUDIO_FORMAT_HAS_PROPERTY_ENCODING |
    600                 AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE |
    601                 AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK |
    602                 AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK;
    603     }
    604 
    605     /** @hide */
    606     public final static int AUDIO_FORMAT_HAS_PROPERTY_NONE = 0x0;
    607     /** @hide */
    608     public final static int AUDIO_FORMAT_HAS_PROPERTY_ENCODING = 0x1 << 0;
    609     /** @hide */
    610     public final static int AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE = 0x1 << 1;
    611     /** @hide */
    612     public final static int AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK = 0x1 << 2;
    613     /** @hide */
    614     public final static int AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK = 0x1 << 3;
    615 
    616     private int mEncoding;
    617     private int mSampleRate;
    618     private int mChannelMask;
    619     private int mChannelIndexMask;
    620     private int mPropertySetMask;
    621 
    622     /**
    623      * Return the encoding.
    624      * See the section on <a href="#encoding">encodings</a> for more information about the different
    625      * types of supported audio encoding.
    626      * @return one of the values that can be set in {@link Builder#setEncoding(int)} or
    627      * {@link AudioFormat#ENCODING_INVALID} if not set.
    628      */
    629     public int getEncoding() {
    630         if ((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_ENCODING) == 0) {
    631             return ENCODING_INVALID;
    632         }
    633         return mEncoding;
    634     }
    635 
    636     /**
    637      * Return the sample rate.
    638      * @return one of the values that can be set in {@link Builder#setSampleRate(int)} or
    639      * {@link #SAMPLE_RATE_UNSPECIFIED} if not set.
    640      */
    641     public int getSampleRate() {
    642         return mSampleRate;
    643     }
    644 
    645     /**
    646      * Return the channel mask.
    647      * See the section on <a href="#channelMask">channel masks</a> for more information about
     648      * the difference between index-based masks (as returned by {@link #getChannelIndexMask()}) and
    649      * the position-based mask returned by this function.
    650      * @return one of the values that can be set in {@link Builder#setChannelMask(int)} or
    651      * {@link AudioFormat#CHANNEL_INVALID} if not set.
    652      */
    653     public int getChannelMask() {
    654         if ((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) == 0) {
    655             return CHANNEL_INVALID;
    656         }
    657         return mChannelMask;
    658     }
    659 
    660     /**
    661      * Return the channel index mask.
    662      * See the section on <a href="#channelMask">channel masks</a> for more information about
    663      * the difference between index-based masks, and position-based masks (as returned
    664      * by {@link #getChannelMask()}).
    665      * @return one of the values that can be set in {@link Builder#setChannelIndexMask(int)} or
    666      * {@link AudioFormat#CHANNEL_INVALID} if not set or an invalid mask was used.
    667      */
    668     public int getChannelIndexMask() {
    669         if ((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) == 0) {
    670             return CHANNEL_INVALID;
    671         }
    672         return mChannelIndexMask;
    673     }
    674 
    675     /**
    676      * Return the channel count.
    677      * @return the channel count derived from the channel position mask or the channel index mask.
    678      * Zero is returned if both the channel position mask and the channel index mask are not set.
    679      */
    680     public int getChannelCount() {
    681         final int channelIndexCount = Integer.bitCount(getChannelIndexMask());
    682         int channelCount = channelCountFromOutChannelMask(getChannelMask());
    683         if (channelCount == 0) {
    684             channelCount = channelIndexCount;
    685         } else if (channelCount != channelIndexCount && channelIndexCount != 0) {
    686             channelCount = 0; // position and index channel count mismatch
    687         }
    688         return channelCount;
    689     }
    690 
    691     /** @hide */
    692     public int getPropertySetMask() {
    693         return mPropertySetMask;
    694     }
    695 
    696     /**
    697      * Builder class for {@link AudioFormat} objects.
    698      * Use this class to configure and create an AudioFormat instance. By setting format
    699      * characteristics such as audio encoding, channel mask or sample rate, you indicate which
    700      * of those are to vary from the default behavior on this device wherever this audio format
    701      * is used. See {@link AudioFormat} for a complete description of the different parameters that
    702      * can be used to configure an <code>AudioFormat</code> instance.
    703      * <p>{@link AudioFormat} is for instance used in
    704      * {@link AudioTrack#AudioTrack(AudioAttributes, AudioFormat, int, int, int)}. In this
    705      * constructor, every format characteristic set on the <code>Builder</code> (e.g. with
    706      * {@link #setSampleRate(int)}) will alter the default values used by an
    707      * <code>AudioTrack</code>. In this case for audio playback with <code>AudioTrack</code>, the
    708      * sample rate set in the <code>Builder</code> would override the platform output sample rate
    709      * which would otherwise be selected by default.
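     * <p>A minimal usage sketch (sample rate, channel mask and buffer sizing chosen for
     * illustration only):
     * <pre>
     * AudioFormat format = new AudioFormat.Builder()
     *         .setEncoding(AudioFormat.ENCODING_PCM_FLOAT)
     *         .setSampleRate(48000)
     *         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
     *         .build();
     * int bufferSize = AudioTrack.getMinBufferSize(48000,
     *         AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
     * AudioTrack track = new AudioTrack(new AudioAttributes.Builder().build(), format,
     *         bufferSize, AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
     * </pre>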
    710      */
    711     public static class Builder {
    712         private int mEncoding = ENCODING_INVALID;
    713         private int mSampleRate = SAMPLE_RATE_UNSPECIFIED;
    714         private int mChannelMask = CHANNEL_INVALID;
    715         private int mChannelIndexMask = 0;
    716         private int mPropertySetMask = AUDIO_FORMAT_HAS_PROPERTY_NONE;
    717 
    718         /**
    719          * Constructs a new Builder with none of the format characteristics set.
    720          */
    721         public Builder() {
    722         }
    723 
    724         /**
    725          * Constructs a new Builder from a given {@link AudioFormat}.
    726          * @param af the {@link AudioFormat} object whose data will be reused in the new Builder.
    727          */
    728         public Builder(AudioFormat af) {
    729             mEncoding = af.mEncoding;
    730             mSampleRate = af.mSampleRate;
    731             mChannelMask = af.mChannelMask;
    732             mChannelIndexMask = af.mChannelIndexMask;
    733             mPropertySetMask = af.mPropertySetMask;
    734         }
    735 
    736         /**
     737          * Combines all of the format characteristics that have been set and returns a new
    738          * {@link AudioFormat} object.
    739          * @return a new {@link AudioFormat} object
    740          */
    741         public AudioFormat build() {
    742             AudioFormat af = new AudioFormat(1980/*ignored*/);
    743             af.mEncoding = mEncoding;
    744             // not calling setSampleRate is equivalent to calling
    745             // setSampleRate(SAMPLE_RATE_UNSPECIFIED)
    746             af.mSampleRate = mSampleRate;
    747             af.mChannelMask = mChannelMask;
    748             af.mChannelIndexMask = mChannelIndexMask;
    749             af.mPropertySetMask = mPropertySetMask;
    750             return af;
    751         }
    752 
    753         /**
    754          * Sets the data encoding format.
    755          * @param encoding one of {@link AudioFormat#ENCODING_DEFAULT},
    756          *     {@link AudioFormat#ENCODING_PCM_8BIT},
    757          *     {@link AudioFormat#ENCODING_PCM_16BIT},
    758          *     {@link AudioFormat#ENCODING_PCM_FLOAT},
    759          *     {@link AudioFormat#ENCODING_AC3},
    760          *     {@link AudioFormat#ENCODING_E_AC3}.
    761          *     {@link AudioFormat#ENCODING_DTS},
    762          *     {@link AudioFormat#ENCODING_DTS_HD}.
    763          * @return the same Builder instance.
    764          * @throws java.lang.IllegalArgumentException
    765          */
    766         public Builder setEncoding(@Encoding int encoding) throws IllegalArgumentException {
    767             switch (encoding) {
    768                 case ENCODING_DEFAULT:
    769                     mEncoding = ENCODING_PCM_16BIT;
    770                     break;
    771                 case ENCODING_PCM_8BIT:
    772                 case ENCODING_PCM_16BIT:
    773                 case ENCODING_PCM_FLOAT:
    774                 case ENCODING_AC3:
    775                 case ENCODING_E_AC3:
    776                 case ENCODING_DTS:
    777                 case ENCODING_DTS_HD:
    778                 case ENCODING_IEC61937:
    779                     mEncoding = encoding;
    780                     break;
    781                 case ENCODING_INVALID:
    782                 default:
    783                     throw new IllegalArgumentException("Invalid encoding " + encoding);
    784             }
    785             mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_ENCODING;
    786             return this;
    787         }
    788 
    789         /**
    790          * Sets the channel position mask.
    791          * The channel position mask specifies the association between audio samples in a frame
    792          * with named endpoint channels. The samples in the frame correspond to the
    793          * named set bits in the channel position mask, in ascending bit order.
    794          * See {@link #setChannelIndexMask(int)} to specify channels
     795          * based on endpoint numbered channels. This <a href="#channelPositionMask">description of
     796          * channel position masks</a> covers the concept in more detail.
    797          * @param channelMask describes the configuration of the audio channels.
    798          *    <p> For output, the channelMask can be an OR-ed combination of
    799          *    channel position masks, e.g.
    800          *    {@link AudioFormat#CHANNEL_OUT_FRONT_LEFT},
    801          *    {@link AudioFormat#CHANNEL_OUT_FRONT_RIGHT},
    802          *    {@link AudioFormat#CHANNEL_OUT_FRONT_CENTER},
     803          *    {@link AudioFormat#CHANNEL_OUT_LOW_FREQUENCY},
    804          *    {@link AudioFormat#CHANNEL_OUT_BACK_LEFT},
    805          *    {@link AudioFormat#CHANNEL_OUT_BACK_RIGHT},
    806          *    {@link AudioFormat#CHANNEL_OUT_BACK_CENTER},
    807          *    {@link AudioFormat#CHANNEL_OUT_SIDE_LEFT},
    808          *    {@link AudioFormat#CHANNEL_OUT_SIDE_RIGHT}.
    809          *    <p> For a valid {@link AudioTrack} channel position mask,
    810          *    the following conditions apply:
    811          *    <br> (1) at most eight channel positions may be used;
    812          *    <br> (2) right/left pairs should be matched.
    813          *    <p> For input or {@link AudioRecord}, the mask should be
    814          *    {@link AudioFormat#CHANNEL_IN_MONO} or
    815          *    {@link AudioFormat#CHANNEL_IN_STEREO}.  {@link AudioFormat#CHANNEL_IN_MONO} is
    816          *    guaranteed to work on all devices.
    817          * @return the same <code>Builder</code> instance.
    818          * @throws IllegalArgumentException if the channel mask is invalid or
    819          *    if both channel index mask and channel position mask
    820          *    are specified but do not have the same channel count.
    821          */
    822         public @NonNull Builder setChannelMask(int channelMask) {
    823             if (channelMask == CHANNEL_INVALID) {
    824                 throw new IllegalArgumentException("Invalid zero channel mask");
    825             } else if (/* channelMask != 0 && */ mChannelIndexMask != 0 &&
    826                     Integer.bitCount(channelMask) != Integer.bitCount(mChannelIndexMask)) {
    827                 throw new IllegalArgumentException("Mismatched channel count for mask " +
    828                         Integer.toHexString(channelMask).toUpperCase());
    829             }
    830             mChannelMask = channelMask;
    831             mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK;
    832             return this;
    833         }
    834 
    835         /**
    836          * Sets the channel index mask.
    837          * A channel index mask specifies the association of audio samples in the frame
    838          * with numbered endpoint channels. The i-th bit in the channel index
    839          * mask corresponds to the i-th endpoint channel.
    840          * For example, an endpoint with four channels is represented
     841          * as index mask bits 0 through 3. This <a href="#channelIndexMask">description of channel
     842          * index masks</a> covers the concept in more detail.
    843          * See {@link #setChannelMask(int)} for a positional mask interpretation.
    844          * <p> Both {@link AudioTrack} and {@link AudioRecord} support
    845          * a channel index mask.
    846          * If a channel index mask is specified it is used,
    847          * otherwise the channel position mask specified
    848          * by <code>setChannelMask</code> is used.
    849          * For <code>AudioTrack</code> and <code>AudioRecord</code>,
    850          * a channel position mask is not required if a channel index mask is specified.
    851          *
    852          * @param channelIndexMask describes the configuration of the audio channels.
    853          *    <p> For output, the <code>channelIndexMask</code> is an OR-ed combination of
    854          *    bits representing the mapping of <code>AudioTrack</code> write samples
    855          *    to output sink channels.
    856          *    For example, a mask of <code>0xa</code>, or binary <code>1010</code>,
    857          *    means the <code>AudioTrack</code> write frame consists of two samples,
    858          *    which are routed to the second and the fourth channels of the output sink.
    859          *    Unmatched output sink channels are zero filled and unmatched
    860          *    <code>AudioTrack</code> write samples are dropped.
    861          *    <p> For input, the <code>channelIndexMask</code> is an OR-ed combination of
    862          *    bits representing the mapping of input source channels to
    863          *    <code>AudioRecord</code> read samples.
    864          *    For example, a mask of <code>0x5</code>, or binary
    865          *    <code>101</code>, will read from the first and third channel of the input
    866          *    source device and store them in the first and second sample of the
    867          *    <code>AudioRecord</code> read frame.
    868          *    Unmatched input source channels are dropped and
    869          *    unmatched <code>AudioRecord</code> read samples are zero filled.
    870          * @return the same <code>Builder</code> instance.
    871          * @throws IllegalArgumentException if the channel index mask is invalid or
    872          *    if both channel index mask and channel position mask
    873          *    are specified but do not have the same channel count.
    874          */
    875         public @NonNull Builder setChannelIndexMask(int channelIndexMask) {
    876             if (channelIndexMask == 0) {
    877                 throw new IllegalArgumentException("Invalid zero channel index mask");
    878             } else if (/* channelIndexMask != 0 && */ mChannelMask != 0 &&
    879                     Integer.bitCount(channelIndexMask) != Integer.bitCount(mChannelMask)) {
    880                 throw new IllegalArgumentException("Mismatched channel count for index mask " +
    881                         Integer.toHexString(channelIndexMask).toUpperCase());
    882             }
    883             mChannelIndexMask = channelIndexMask;
    884             mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK;
    885             return this;
    886         }
    887 
    888         /**
    889          * Sets the sample rate.
    890          * @param sampleRate the sample rate expressed in Hz
    891          * @return the same Builder instance.
    892          * @throws java.lang.IllegalArgumentException
    893          */
    894         public Builder setSampleRate(int sampleRate) throws IllegalArgumentException {
    895             // TODO Consider whether to keep the MIN and MAX range checks here.
    896             // It is not necessary and poses the problem of defining the limits independently from
    897             // native implementation or platform capabilities.
    898             if (((sampleRate < SAMPLE_RATE_HZ_MIN) || (sampleRate > SAMPLE_RATE_HZ_MAX)) &&
    899                     sampleRate != SAMPLE_RATE_UNSPECIFIED) {
    900                 throw new IllegalArgumentException("Invalid sample rate " + sampleRate);
    901             }
    902             mSampleRate = sampleRate;
    903             mPropertySetMask |= AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE;
    904             return this;
    905         }
    906     }
    907 
    908     @Override
    909     public boolean equals(Object o) {
    910         if (this == o) return true;
    911         if (o == null || getClass() != o.getClass()) return false;
    912 
    913         AudioFormat that = (AudioFormat) o;
    914 
    915         if (mPropertySetMask != that.mPropertySetMask) return false;
    916 
    917         // return false if any of the properties is set and the values differ
    918         return !((((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0)
    919                             && (mEncoding != that.mEncoding))
    920                     || (((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0)
    921                             && (mSampleRate != that.mSampleRate))
    922                     || (((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0)
    923                             && (mChannelMask != that.mChannelMask))
    924                     || (((mPropertySetMask & AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0)
    925                             && (mChannelIndexMask != that.mChannelIndexMask)));
    926     }
    927 
    928     @Override
    929     public int hashCode() {
    930         return Objects.hash(mPropertySetMask, mSampleRate, mEncoding, mChannelMask,
    931                 mChannelIndexMask);
    932     }
    933 
    934     @Override
    935     public int describeContents() {
    936         return 0;
    937     }
    938 
    939     @Override
    940     public void writeToParcel(Parcel dest, int flags) {
    941         dest.writeInt(mPropertySetMask);
    942         dest.writeInt(mEncoding);
    943         dest.writeInt(mSampleRate);
    944         dest.writeInt(mChannelMask);
    945         dest.writeInt(mChannelIndexMask);
    946     }
    947 
    948     private AudioFormat(Parcel in) {
    949         mPropertySetMask = in.readInt();
    950         mEncoding = in.readInt();
    951         mSampleRate = in.readInt();
    952         mChannelMask = in.readInt();
    953         mChannelIndexMask = in.readInt();
    954     }
    955 
    956     public static final Parcelable.Creator<AudioFormat> CREATOR =
    957             new Parcelable.Creator<AudioFormat>() {
    958         public AudioFormat createFromParcel(Parcel p) {
    959             return new AudioFormat(p);
    960         }
    961         public AudioFormat[] newArray(int size) {
    962             return new AudioFormat[size];
    963         }
    964     };
    965 
    966     @Override
    967     public String toString () {
     968         return ("AudioFormat:"
    969                 + " props=" + mPropertySetMask
    970                 + " enc=" + mEncoding
    971                 + " chan=0x" + Integer.toHexString(mChannelMask).toUpperCase()
    972                 + " chan_index=0x" + Integer.toHexString(mChannelIndexMask).toUpperCase()
    973                 + " rate=" + mSampleRate);
    974     }
    975 
    976     /** @hide */
    977     @IntDef({
    978         ENCODING_DEFAULT,
    979         ENCODING_PCM_8BIT,
    980         ENCODING_PCM_16BIT,
    981         ENCODING_PCM_FLOAT,
    982         ENCODING_AC3,
    983         ENCODING_E_AC3,
    984         ENCODING_DTS,
    985         ENCODING_DTS_HD,
    986         ENCODING_IEC61937
    987     })
    988     @Retention(RetentionPolicy.SOURCE)
    989     public @interface Encoding {}
    990 
    991 }
    992