// (code-viewer residue, not part of the original source): Home | History | Annotate | Download | only in media
      1 /*
      2  * Copyright (C) 2008 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 package android.media;
     18 
     19 import java.lang.annotation.Retention;
     20 import java.lang.annotation.RetentionPolicy;
     21 import java.lang.ref.WeakReference;
     22 import java.lang.Math;
     23 import java.nio.ByteBuffer;
     24 import java.nio.ByteOrder;
     25 import java.nio.NioUtils;
     26 import java.util.Collection;
     27 import java.util.concurrent.Executor;
     28 
     29 import android.annotation.CallbackExecutor;
     30 import android.annotation.IntDef;
     31 import android.annotation.NonNull;
     32 import android.annotation.Nullable;
     33 import android.app.ActivityThread;
     34 import android.content.Context;
     35 import android.os.Handler;
     36 import android.os.IBinder;
     37 import android.os.Looper;
     38 import android.os.Message;
     39 import android.os.PersistableBundle;
     40 import android.os.Process;
     41 import android.os.RemoteException;
     42 import android.os.ServiceManager;
     43 import android.util.ArrayMap;
     44 import android.util.Log;
     45 
     46 import com.android.internal.annotations.GuardedBy;
     47 
     48 /**
     49  * The AudioTrack class manages and plays a single audio resource for Java applications.
     50  * It allows streaming of PCM audio buffers to the audio sink for playback. This is
     51  * achieved by "pushing" the data to the AudioTrack object using one of the
     52  *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
     53  *  and {@link #write(float[], int, int, int)} methods.
     54  *
     55  * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
     56  * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
     57  * one of the {@code write()} methods. These are blocking and return when the data has been
     58  * transferred from the Java layer to the native layer and queued for playback. The streaming
     59  * mode is most useful when playing blocks of audio data that for instance are:
     60  *
     61  * <ul>
     62  *   <li>too big to fit in memory because of the duration of the sound to play,</li>
     63  *   <li>too big to fit in memory because of the characteristics of the audio data
     64  *         (high sampling rate, bits per sample ...)</li>
     65  *   <li>received or generated while previously queued audio is playing.</li>
     66  * </ul>
     67  *
     68  * The static mode should be chosen when dealing with short sounds that fit in memory and
     69  * that need to be played with the smallest latency possible. The static mode will
     70  * therefore be preferred for UI and game sounds that are played often, and with the
     71  * smallest overhead possible.
     72  *
     73  * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
     74  * The size of this buffer, specified during the construction, determines how long an AudioTrack
     75  * can play before running out of data.<br>
     76  * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
     77  * be played from it.<br>
     78  * For the streaming mode, data will be written to the audio sink in chunks of
     79  * sizes less than or equal to the total buffer size.
     80  *
     81  * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
     82  */
     83 public class AudioTrack extends PlayerBase
     84                         implements AudioRouting
     85                                  , VolumeAutomation
     86 {
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** Maximum value for AudioTrack channel count.
     * Queried from the native layer (FCC_8 — fixed channel count of 8) rather than
     * hard-coded, so Java and native agree on the limit.
     * @hide public for MediaCode only, do not un-hide or change to a numeric literal
     */
    public static final int CHANNEL_COUNT_MAX = native_get_FCC_8();

    // Play-state values intentionally mirror the OpenSL ES SL_PLAYSTATE_* constants.
    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide
     * Restricts a parameter or field to one of the MODE_* transfer-mode values.
     */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}
    130 
    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    // The public status codes below are aliases of the corresponding AudioSystem values
    // so that Java and native layers report identical codes.
    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     */
    public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;
    172 
    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    // These are returned by native_setup() and are private: they are translated into
    // exceptions/log messages rather than surfaced to API clients.
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;
    /**
     * Callback for more data
     * TODO only for offload
     */
    private static final int NATIVE_EVENT_MORE_DATA = 0;
    /**
     * IAudioTrack tear down for offloaded tracks
     * TODO: when received, java AudioTrack must be released
     */
    private static final int NATIVE_EVENT_NEW_IAUDIOTRACK = 6;
    /**
     * Event id denotes when all the buffers queued in AF and HW are played
     * back (after stop is called) for an offloaded track.
     * TODO: not just for offload
     */
    private static final int NATIVE_EVENT_STREAM_END = 7;

    // Log tag used by this class.
    private final static String TAG = "android.media.AudioTrack";
    209 
    210 
    /** @hide
     * Restricts a writeMode parameter to one of the WRITE_* values.
     */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    /** @hide
     * Restricts a performance-mode parameter to one of the PERFORMANCE_MODE_* values.
     */
    @IntDef({
        PERFORMANCE_MODE_NONE,
        PERFORMANCE_MODE_LOW_LATENCY,
        PERFORMANCE_MODE_POWER_SAVING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface PerformanceMode {}
    246 
    /**
     * Default performance mode for an {@link AudioTrack}.
     */
    public static final int PERFORMANCE_MODE_NONE = 0;

    /**
     * Low latency performance mode for an {@link AudioTrack}.
     * If the device supports it, this mode
     * enables a lower latency path through to the audio output sink.
     * Effects may no longer work with such an {@code AudioTrack} and
     * the sample rate must match that of the output sink.
     * <p>
     * Applications should be aware that low latency requires careful
     * buffer management, with smaller chunks of audio data written by each
     * {@code write()} call.
     * <p>
     * If this flag is used without specifying a {@code bufferSizeInBytes} then the
     * {@code AudioTrack}'s actual buffer size may be too small.
     * It is recommended that a fairly
     * large buffer should be specified when the {@code AudioTrack} is created.
     * Then the actual size can be reduced by calling
     * {@link #setBufferSizeInFrames(int)}. The buffer size can be optimized
     * by lowering it after each {@code write()} call until the audio glitches,
     * which is detected by calling
     * {@link #getUnderrunCount()}. Then the buffer size can be increased
     * until there are no glitches.
     * This tuning step should be done while playing silence.
     * This technique provides a compromise between latency and glitch rate.
     */
    public static final int PERFORMANCE_MODE_LOW_LATENCY = 1;

    /**
     * Power saving performance mode for an {@link AudioTrack}.
     * If the device supports it, this
     * mode will enable a lower power path to the audio output sink.
     * In addition, this lower power path typically will have
     * deeper internal buffers and better underrun resistance,
     * with a tradeoff of higher latency.
     * <p>
     * In this mode, applications should attempt to use a larger buffer size
     * and deliver larger chunks of audio data per {@code write()} call.
     * Use {@link #getBufferSizeInFrames()} to determine
     * the actual buffer size of the {@code AudioTrack} as it may have increased
     * to accommodate a deeper buffer.
     */
    public static final int PERFORMANCE_MODE_POWER_SAVING = 2;

    // Native audio_output_flags_t bits corresponding to the performance modes.
    // keep in sync with system/media/audio/include/system/audio-base.h
    private static final int AUDIO_OUTPUT_FLAG_FAST = 0x4;
    private static final int AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8;

    // Size of HW_AV_SYNC track AV header.
    // Float so that per-frame size arithmetic using it is done in floating point.
    private static final float HEADER_V2_SIZE_BYTES = 20.0f;
    300 
    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     * Guarded by {@link #mPlayStateLock}.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     * Falls back to the main looper when the creating thread has none.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     * Never {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED}.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;

    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
    /**
     * Audio session ID
     */
    private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;
    /**
     * Offset of the first sample of the audio in byte from start of HW_AV_SYNC track AV header.
     */
    private int mOffset = 0;
    393 
    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * @hide
     * Accessed by native methods: provides access to C++ AudioTrack object.
     * NOTE(review): opaque handle owned by the native layer — do not read or
     * write it from Java code.
     */
    @SuppressWarnings("unused")
    protected long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     * NOTE(review): opaque handle owned by the native layer — do not read or
     * write it from Java code.
     */
    @SuppressWarnings("unused")
    private long mJniData;
    409 
    410 
    //--------------------------------------------------------------------------
    // Constructor, Finalize
    //--------------------
    /**
     * Class constructor.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     *   {@link #getSampleRate()} can be used to retrieve the actual sample rate chosen.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException if any of the parameters is invalid
     * @deprecated use {@link Builder} or
     *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
     *   {@link AudioAttributes} instead of the stream type which is only for volume control.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        // Delegate to the session-aware constructor, asking the system to
        // generate a fresh audio session ID for this track.
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioManager.AUDIO_SESSION_ID_GENERATE);
    }
    460 
    /**
     * Class constructor with audio session. Use this constructor when the AudioTrack must be
     * attached to a particular audio session. The primary use of the audio session ID is to
     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
     * and media players in the same session and not to the output mix.
     * When an AudioTrack is created without specifying a session, it will create its own session
     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
     * session
     * with all other media players or audio tracks in the same session, otherwise a new session
     * will be created for this track if none is supplied.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} means to use a route-dependent value
     *   which is usually the sample rate of the sink.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   You can write data into this buffer in smaller chunks than this size.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException if any of the parameters is invalid
     * @deprecated use {@link Builder} or
     *   {@link #AudioTrack(AudioAttributes, AudioFormat, int, int, int)} to specify the
     *   {@link AudioAttributes} instead of the stream type which is only for volume control.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        // Translate the legacy (streamType, rate, mask, encoding) arguments into
        // AudioAttributes/AudioFormat and delegate to the attributes-based constructor.
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
        // Log/flag use of the deprecated stream-type entry point.
        deprecateStreamTypeForPlayback(streamType, "AudioTrack", "AudioTrack()");
    }
    528 
    /**
     * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
     * @param attributes a non-null {@link AudioAttributes} instance.
     * @param format a non-null {@link AudioFormat} instance describing the format of the data
     *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
     *     configuring the audio format parameters such as encoding, channel mask and sample rate.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a nonzero multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the buffer created, which
     *   determines the minimum frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the estimated minimum buffer size
     *   for an AudioTrack instance in streaming mode.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
     * @param sessionId ID of audio session the AudioTrack must be attached to, or
     *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
     *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
     *   construction.
     * @throws IllegalArgumentException if any of the parameters is invalid
     */
    public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId)
                    throws IllegalArgumentException {
        // Delegate to the private constructor with offload disabled (the public
        // API does not expose offloaded playback through this entry point).
        this(attributes, format, bufferSizeInBytes, mode, sessionId, false /*offload*/);
    }
    564 
    // Full constructor used by the public constructors and by Builder.build().
    // Validates all parameters, then creates and initializes the native AudioTrack.
    // On native-layer failure it logs and returns, leaving mState at
    // STATE_UNINITIALIZED instead of throwing.
    private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
            int mode, int sessionId, boolean offload)
                    throws IllegalArgumentException {
        super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
        // mState already == STATE_UNINITIALIZED

        if (format == null) {
            throw new IllegalArgumentException("Illegal null AudioFormat");
        }

        // Check if we should enable deep buffer mode
        if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
            // Deep buffer and low latency are mutually exclusive: set the former,
            // clear the latter.
            mAttributes = new AudioAttributes.Builder(mAttributes)
                .replaceFlags((mAttributes.getAllFlags()
                        | AudioAttributes.FLAG_DEEP_BUFFER)
                        & ~AudioAttributes.FLAG_LOW_LATENCY)
                .build();
        }

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }

        // SAMPLE_RATE_UNSPECIFIED is passed down to the native layer as 0.
        int rate = format.getSampleRate();
        if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            rate = 0;
        }

        // Extract only the channel properties that were explicitly set on the format.
        int channelIndexMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
            channelIndexMask = format.getChannelIndexMask();
        }
        int channelMask = 0;
        if ((format.getPropertySetMask()
                & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
            channelMask = format.getChannelMask();
        } else if (channelIndexMask == 0) { // if no masks at all, use stereo
            channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
        }
        int encoding = AudioFormat.ENCODING_DEFAULT;
        if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
            encoding = format.getEncoding();
        }
        // Validates and stores mSampleRate, mChannelMask/-Count/-IndexMask, mAudioFormat
        // and mDataLoadMode; throws IllegalArgumentException on invalid values.
        audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
        mStreamType = AudioSystem.STREAM_DEFAULT;

        // Validates and stores mNativeBufferSizeInBytes / mNativeBufferSizeInFrames.
        audioBuffSizeCheck(bufferSizeInBytes);

        mInitializationLooper = looper;

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        // Single-element arrays act as in/out parameters: the native layer may
        // replace the sample rate and session ID with the effective values.
        int[] sampleRate = new int[] {mSampleRate};
        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
                sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
                offload);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }

        mSampleRate = sampleRate[0];
        mSessionId = session[0];

        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
            // HW A/V sync tracks must skip the header: round HEADER_V2_SIZE_BYTES up
            // to a whole number of frames so subsequent writes stay frame-aligned.
            // NOTE(review): Math.ceil is only effective if HEADER_V2_SIZE_BYTES is a
            // floating-point constant; with an int the division truncates first -- confirm.
            int frameSizeInBytes;
            if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
                frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
            } else {
                frameSizeInBytes = 1;
            }
            mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
        }

        // Static tracks are not fully initialized until data is supplied via write().
        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }

        baseRegisterPlayer();
    }
    657 
    658     /**
    659      * A constructor which explicitly connects a Native (C++) AudioTrack. For use by
    660      * the AudioTrackRoutingProxy subclass.
    661      * @param nativeTrackInJavaObj a C/C++ pointer to a native AudioTrack
    662      * (associated with an OpenSL ES player).
    663      * IMPORTANT: For "N", this method is ONLY called to setup a Java routing proxy,
    664      * i.e. IAndroidConfiguration::AcquireJavaProxy(). If we call with a 0 in nativeTrackInJavaObj
    665      * it means that the OpenSL player interface hasn't been realized, so there is no native
    666      * Audiotrack to connect to. In this case wait to call deferred_connect() until the
    667      * OpenSLES interface is realized.
    668      */
    669     /*package*/ AudioTrack(long nativeTrackInJavaObj) {
    670         super(new AudioAttributes.Builder().build(),
    671                 AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
    672         // "final"s
    673         mNativeTrackInJavaObj = 0;
    674         mJniData = 0;
    675 
    676         // remember which looper is associated with the AudioTrack instantiation
    677         Looper looper;
    678         if ((looper = Looper.myLooper()) == null) {
    679             looper = Looper.getMainLooper();
    680         }
    681         mInitializationLooper = looper;
    682 
    683         // other initialization...
    684         if (nativeTrackInJavaObj != 0) {
    685             baseRegisterPlayer();
    686             deferred_connect(nativeTrackInJavaObj);
    687         } else {
    688             mState = STATE_UNINITIALIZED;
    689         }
    690     }
    691 
    692     /**
    693      * @hide
    694      */
    695     /* package */ void deferred_connect(long nativeTrackInJavaObj) {
    696         if (mState != STATE_INITIALIZED) {
    697             // Note that for this native_setup, we are providing an already created/initialized
    698             // *Native* AudioTrack, so the attributes parameters to native_setup() are ignored.
    699             int[] session = { 0 };
    700             int[] rates = { 0 };
    701             int initResult = native_setup(new WeakReference<AudioTrack>(this),
    702                     null /*mAttributes - NA*/,
    703                     rates /*sampleRate - NA*/,
    704                     0 /*mChannelMask - NA*/,
    705                     0 /*mChannelIndexMask - NA*/,
    706                     0 /*mAudioFormat - NA*/,
    707                     0 /*mNativeBufferSizeInBytes - NA*/,
    708                     0 /*mDataLoadMode - NA*/,
    709                     session,
    710                     nativeTrackInJavaObj,
    711                     false /*offload*/);
    712             if (initResult != SUCCESS) {
    713                 loge("Error code "+initResult+" when initializing AudioTrack.");
    714                 return; // with mState == STATE_UNINITIALIZED
    715             }
    716 
    717             mSessionId = session[0];
    718 
    719             mState = STATE_INITIALIZED;
    720         }
    721     }
    722 
    723     /**
    724      * Builder class for {@link AudioTrack} objects.
    725      * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
    726      * attributes and audio format parameters, you indicate which of those vary from the default
    727      * behavior on the device.
    728      * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
    729      * parameters, to be used by a new <code>AudioTrack</code> instance:
    730      *
    731      * <pre class="prettyprint">
    732      * AudioTrack player = new AudioTrack.Builder()
    733      *         .setAudioAttributes(new AudioAttributes.Builder()
    734      *                  .setUsage(AudioAttributes.USAGE_ALARM)
    735      *                  .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
    736      *                  .build())
    737      *         .setAudioFormat(new AudioFormat.Builder()
    738      *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
    739      *                 .setSampleRate(44100)
    740      *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
    741      *                 .build())
    742      *         .setBufferSizeInBytes(minBuffSize)
    743      *         .build();
    744      * </pre>
    745      * <p>
    746      * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
    747      * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
    748      * <br>If the audio format is not specified or is incomplete, its channel configuration will be
    749      * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
    750      * {@link AudioFormat#ENCODING_PCM_16BIT}.
    751      * The sample rate will depend on the device actually selected for playback and can be queried
    752      * with {@link #getSampleRate()} method.
    753      * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
    754      * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
    755      * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
    756      * <code>MODE_STREAM</code> will be used.
    757      * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
    758      * be generated.
    759      * <br>Offload is false by default.
    760      */
    761     public static class Builder {
    762         private AudioAttributes mAttributes;
    763         private AudioFormat mFormat;
    764         private int mBufferSizeInBytes;
    765         private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
    766         private int mMode = MODE_STREAM;
    767         private int mPerformanceMode = PERFORMANCE_MODE_NONE;
    768         private boolean mOffload = false;
    769 
    770         /**
    771          * Constructs a new Builder with the default values as described above.
    772          */
    773         public Builder() {
    774         }
    775 
    776         /**
    777          * Sets the {@link AudioAttributes}.
    778          * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
    779          *     data to be played.
    780          * @return the same Builder instance.
    781          * @throws IllegalArgumentException
    782          */
    783         public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
    784                 throws IllegalArgumentException {
    785             if (attributes == null) {
    786                 throw new IllegalArgumentException("Illegal null AudioAttributes argument");
    787             }
    788             // keep reference, we only copy the data when building
    789             mAttributes = attributes;
    790             return this;
    791         }
    792 
    793         /**
    794          * Sets the format of the audio data to be played by the {@link AudioTrack}.
    795          * See {@link AudioFormat.Builder} for configuring the audio format parameters such
    796          * as encoding, channel mask and sample rate.
    797          * @param format a non-null {@link AudioFormat} instance.
    798          * @return the same Builder instance.
    799          * @throws IllegalArgumentException
    800          */
    801         public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
    802                 throws IllegalArgumentException {
    803             if (format == null) {
    804                 throw new IllegalArgumentException("Illegal null AudioFormat argument");
    805             }
    806             // keep reference, we only copy the data when building
    807             mFormat = format;
    808             return this;
    809         }
    810 
    811         /**
    812          * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
    813          * If using the {@link AudioTrack} in streaming mode
    814          * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
    815          * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
    816          * the estimated minimum buffer size for the creation of an AudioTrack instance
    817          * in streaming mode.
    818          * <br>If using the <code>AudioTrack</code> in static mode (see
    819          * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
    820          * played by this instance.
    821          * @param bufferSizeInBytes
    822          * @return the same Builder instance.
    823          * @throws IllegalArgumentException
    824          */
    825         public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes)
    826                 throws IllegalArgumentException {
    827             if (bufferSizeInBytes <= 0) {
    828                 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
    829             }
    830             mBufferSizeInBytes = bufferSizeInBytes;
    831             return this;
    832         }
    833 
    834         /**
    835          * Sets the mode under which buffers of audio data are transferred from the
    836          * {@link AudioTrack} to the framework.
    837          * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
    838          * @return the same Builder instance.
    839          * @throws IllegalArgumentException
    840          */
    841         public @NonNull Builder setTransferMode(@TransferMode int mode)
    842                 throws IllegalArgumentException {
    843             switch(mode) {
    844                 case MODE_STREAM:
    845                 case MODE_STATIC:
    846                     mMode = mode;
    847                     break;
    848                 default:
    849                     throw new IllegalArgumentException("Invalid transfer mode " + mode);
    850             }
    851             return this;
    852         }
    853 
    854         /**
    855          * Sets the session ID the {@link AudioTrack} will be attached to.
    856          * @param sessionId a strictly positive ID number retrieved from another
    857          *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
    858          *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
    859          *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
    860          * @return the same Builder instance.
    861          * @throws IllegalArgumentException
    862          */
    863         public @NonNull Builder setSessionId(int sessionId)
    864                 throws IllegalArgumentException {
    865             if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
    866                 throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
    867             }
    868             mSessionId = sessionId;
    869             return this;
    870         }
    871 
    872         /**
    873          * Sets the {@link AudioTrack} performance mode.  This is an advisory request which
    874          * may not be supported by the particular device, and the framework is free
    875          * to ignore such request if it is incompatible with other requests or hardware.
    876          *
    877          * @param performanceMode one of
    878          * {@link AudioTrack#PERFORMANCE_MODE_NONE},
    879          * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
    880          * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
    881          * @return the same Builder instance.
    882          * @throws IllegalArgumentException if {@code performanceMode} is not valid.
    883          */
    884         public @NonNull Builder setPerformanceMode(@PerformanceMode int performanceMode) {
    885             switch (performanceMode) {
    886                 case PERFORMANCE_MODE_NONE:
    887                 case PERFORMANCE_MODE_LOW_LATENCY:
    888                 case PERFORMANCE_MODE_POWER_SAVING:
    889                     mPerformanceMode = performanceMode;
    890                     break;
    891                 default:
    892                     throw new IllegalArgumentException(
    893                             "Invalid performance mode " + performanceMode);
    894             }
    895             return this;
    896         }
    897 
    898         /**
    899          * @hide
    900          * Sets whether this track will play through the offloaded audio path.
    901          * When set to true, at build time, the audio format will be checked against
    902          * {@link AudioManager#isOffloadedPlaybackSupported(AudioFormat)} to verify the audio format
    903          * used by this track is supported on the device's offload path (if any).
    904          * <br>Offload is only supported for media audio streams, and therefore requires that
    905          * the usage be {@link AudioAttributes#USAGE_MEDIA}.
    906          * @param offload true to require the offload path for playback.
    907          * @return the same Builder instance.
    908          */
    909         public @NonNull Builder setOffloadedPlayback(boolean offload) {
    910             mOffload = offload;
    911             return this;
    912         }
    913 
    914         /**
    915          * Builds an {@link AudioTrack} instance initialized with all the parameters set
    916          * on this <code>Builder</code>.
    917          * @return a new successfully initialized {@link AudioTrack} instance.
    918          * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
    919          *     were incompatible, or if they are not supported by the device,
    920          *     or if the device was not available.
    921          */
    922         public @NonNull AudioTrack build() throws UnsupportedOperationException {
    923             if (mAttributes == null) {
    924                 mAttributes = new AudioAttributes.Builder()
    925                         .setUsage(AudioAttributes.USAGE_MEDIA)
    926                         .build();
    927             }
    928             switch (mPerformanceMode) {
    929             case PERFORMANCE_MODE_LOW_LATENCY:
    930                 mAttributes = new AudioAttributes.Builder(mAttributes)
    931                     .replaceFlags((mAttributes.getAllFlags()
    932                             | AudioAttributes.FLAG_LOW_LATENCY)
    933                             & ~AudioAttributes.FLAG_DEEP_BUFFER)
    934                     .build();
    935                 break;
    936             case PERFORMANCE_MODE_NONE:
    937                 if (!shouldEnablePowerSaving(mAttributes, mFormat, mBufferSizeInBytes, mMode)) {
    938                     break; // do not enable deep buffer mode.
    939                 }
    940                 // permitted to fall through to enable deep buffer
    941             case PERFORMANCE_MODE_POWER_SAVING:
    942                 mAttributes = new AudioAttributes.Builder(mAttributes)
    943                 .replaceFlags((mAttributes.getAllFlags()
    944                         | AudioAttributes.FLAG_DEEP_BUFFER)
    945                         & ~AudioAttributes.FLAG_LOW_LATENCY)
    946                 .build();
    947                 break;
    948             }
    949 
    950             if (mFormat == null) {
    951                 mFormat = new AudioFormat.Builder()
    952                         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
    953                         //.setSampleRate(AudioFormat.SAMPLE_RATE_UNSPECIFIED)
    954                         .setEncoding(AudioFormat.ENCODING_DEFAULT)
    955                         .build();
    956             }
    957 
    958             //TODO tie offload to PERFORMANCE_MODE_POWER_SAVING?
    959             if (mOffload) {
    960                 if (mAttributes.getUsage() != AudioAttributes.USAGE_MEDIA) {
    961                     throw new UnsupportedOperationException(
    962                             "Cannot create AudioTrack, offload requires USAGE_MEDIA");
    963                 }
    964                 if (!AudioSystem.isOffloadSupported(mFormat)) {
    965                     throw new UnsupportedOperationException(
    966                             "Cannot create AudioTrack, offload format not supported");
    967                 }
    968             }
    969 
    970             try {
    971                 // If the buffer size is not specified in streaming mode,
    972                 // use a single frame for the buffer size and let the
    973                 // native code figure out the minimum buffer size.
    974                 if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
    975                     mBufferSizeInBytes = mFormat.getChannelCount()
    976                             * mFormat.getBytesPerSample(mFormat.getEncoding());
    977                 }
    978                 final AudioTrack track = new AudioTrack(
    979                         mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId, mOffload);
    980                 if (track.getState() == STATE_UNINITIALIZED) {
    981                     // release is not necessary
    982                     throw new UnsupportedOperationException("Cannot create AudioTrack");
    983                 }
    984                 return track;
    985             } catch (IllegalArgumentException e) {
    986                 throw new UnsupportedOperationException(e.getMessage());
    987             }
    988         }
    989     }
    990 
    // mask of all the positional channels supported, however the allowed combinations
    // are further restricted by the matching left/right rule and CHANNEL_COUNT_MAX
    // (both enforced in isMultichannelConfigSupported())
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
   1003 
   1004     // Returns a boolean whether the attributes, format, bufferSizeInBytes, mode allow
   1005     // power saving to be automatically enabled for an AudioTrack. Returns false if
   1006     // power saving is already enabled in the attributes parameter.
   1007     private static boolean shouldEnablePowerSaving(
   1008             @Nullable AudioAttributes attributes, @Nullable AudioFormat format,
   1009             int bufferSizeInBytes, int mode) {
   1010         // If no attributes, OK
   1011         // otherwise check attributes for USAGE_MEDIA and CONTENT_UNKNOWN, MUSIC, or MOVIE.
   1012         if (attributes != null &&
   1013                 (attributes.getAllFlags() != 0  // cannot have any special flags
   1014                 || attributes.getUsage() != AudioAttributes.USAGE_MEDIA
   1015                 || (attributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN
   1016                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MUSIC
   1017                     && attributes.getContentType() != AudioAttributes.CONTENT_TYPE_MOVIE))) {
   1018             return false;
   1019         }
   1020 
   1021         // Format must be fully specified and be linear pcm
   1022         if (format == null
   1023                 || format.getSampleRate() == AudioFormat.SAMPLE_RATE_UNSPECIFIED
   1024                 || !AudioFormat.isEncodingLinearPcm(format.getEncoding())
   1025                 || !AudioFormat.isValidEncoding(format.getEncoding())
   1026                 || format.getChannelCount() < 1) {
   1027             return false;
   1028         }
   1029 
   1030         // Mode must be streaming
   1031         if (mode != MODE_STREAM) {
   1032             return false;
   1033         }
   1034 
   1035         // A buffer size of 0 is always compatible with deep buffer (when called from the Builder)
   1036         // but for app compatibility we only use deep buffer power saving for large buffer sizes.
   1037         if (bufferSizeInBytes != 0) {
   1038             final long BUFFER_TARGET_MODE_STREAM_MS = 100;
   1039             final int MILLIS_PER_SECOND = 1000;
   1040             final long bufferTargetSize =
   1041                     BUFFER_TARGET_MODE_STREAM_MS
   1042                     * format.getChannelCount()
   1043                     * format.getBytesPerSample(format.getEncoding())
   1044                     * format.getSampleRate()
   1045                     / MILLIS_PER_SECOND;
   1046             if (bufferSizeInBytes < bufferTargetSize) {
   1047                 return false;
   1048             }
   1049         }
   1050 
   1051         return true;
   1052     }
   1053 
    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannelMask is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
                                 int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        // (SAMPLE_RATE_UNSPECIFIED is allowed so the device can pick the rate)
        if ((sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN ||
                sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) &&
                sampleRateInHz != AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        // IEC61937 is based on stereo. We could coerce it to stereo.
        // But the application needs to know the stream is stereo so that
        // it is encoded and played correctly. So better to just reject it.
        if (audioFormat == AudioFormat.ENCODING_IEC61937
                && channelConfig != AudioFormat.CHANNEL_OUT_STEREO) {
            throw new IllegalArgumentException(
                    "ENCODING_IEC61937 must be configured as CHANNEL_OUT_STEREO");
        }

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            // CHANNEL_INVALID + a non-zero index mask means an index-only
            // configuration; the count is derived from the index mask below.
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig)) {
                // input channel configuration features unsupported channels
                throw new IllegalArgumentException("Unsupported channel configuration.");
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2
            final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1;
            if ((channelIndexMask & ~indexMask) != 0) {
                throw new IllegalArgumentException("Unsupported channel index configuration "
                        + channelIndexMask);
            }
            int channelIndexCount = Integer.bitCount(channelIndexMask);
            if (mChannelCount == 0) {
                 // index-only configuration: the index mask determines the count
                 mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                // both a position mask and an index mask were given: they must agree
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isPublicEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // (MODE_STATIC additionally requires a linear PCM encoding)
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }
   1147 
   1148     /**
   1149      * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
   1150      * @param channelConfig the mask to validate
   1151      * @return false if the AudioTrack can't be used with such a mask
   1152      */
   1153     private static boolean isMultichannelConfigSupported(int channelConfig) {
   1154         // check for unsupported channels
   1155         if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
   1156             loge("Channel configuration features unsupported channels");
   1157             return false;
   1158         }
   1159         final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
   1160         if (channelCount > CHANNEL_COUNT_MAX) {
   1161             loge("Channel configuration contains too many channels " +
   1162                     channelCount + ">" + CHANNEL_COUNT_MAX);
   1163             return false;
   1164         }
   1165         // check for unsupported multichannel combinations:
   1166         // - FL/FR must be present
   1167         // - L/R channels must be paired (e.g. no single L channel)
   1168         final int frontPair =
   1169                 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
   1170         if ((channelConfig & frontPair) != frontPair) {
   1171                 loge("Front channels must be present in multichannel configurations");
   1172                 return false;
   1173         }
   1174         final int backPair =
   1175                 AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
   1176         if ((channelConfig & backPair) != 0) {
   1177             if ((channelConfig & backPair) != backPair) {
   1178                 loge("Rear channels can't be used independently");
   1179                 return false;
   1180             }
   1181         }
   1182         final int sidePair =
   1183                 AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
   1184         if ((channelConfig & sidePair) != 0
   1185                 && (channelConfig & sidePair) != sidePair) {
   1186             loge("Side channels can't be used independently");
   1187             return false;
   1188         }
   1189         return true;
   1190     }
   1191 
   1192 
   1193     // Convenience method for the constructor's audio buffer size check.
   1194     // preconditions:
   1195     //    mChannelCount is valid
   1196     //    mAudioFormat is valid
   1197     // postcondition:
   1198     //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
   1199     private void audioBuffSizeCheck(int audioBufferSize) {
   1200         // NB: this section is only valid with PCM or IEC61937 data.
   1201         //     To update when supporting compressed formats
   1202         int frameSizeInBytes;
   1203         if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
   1204             frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
   1205         } else {
   1206             frameSizeInBytes = 1;
   1207         }
   1208         if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
   1209             throw new IllegalArgumentException("Invalid audio buffer size.");
   1210         }
   1211 
   1212         mNativeBufferSizeInBytes = audioBufferSize;
   1213         mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
   1214     }
   1215 
   1216 
   1217     /**
   1218      * Releases the native AudioTrack resources.
   1219      */
   1220     public void release() {
   1221         // even though native_release() stops the native AudioTrack, we need to stop
   1222         // AudioTrack subclasses too.
   1223         try {
   1224             stop();
   1225         } catch(IllegalStateException ise) {
   1226             // don't raise an exception, we're releasing the resources.
   1227         }
   1228         baseRelease();
   1229         native_release();
   1230         mState = STATE_UNINITIALIZED;
   1231     }
   1232 
    @Override
    protected void finalize() {
        // Last-chance cleanup in case release() was never called.
        // NOTE(review): does not chain to super.finalize() — confirm the
        // superclass has no finalizer of its own that would be skipped here.
        baseRelease();
        native_finalize();
    }
   1238 
   1239     //--------------------------------------------------------------------------
   1240     // Getters
   1241     //--------------------
   1242     /**
   1243      * Returns the minimum gain value, which is the constant 0.0.
   1244      * Gain values less than 0.0 will be clamped to 0.0.
   1245      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
   1246      * @return the minimum value, which is the constant 0.0.
   1247      */
   1248     static public float getMinVolume() {
   1249         return GAIN_MIN;
   1250     }
   1251 
   1252     /**
   1253      * Returns the maximum gain value, which is greater than or equal to 1.0.
   1254      * Gain values greater than the maximum will be clamped to the maximum.
   1255      * <p>The word "volume" in the API name is historical; this is actually a gain.
   1256      * expressed as a linear multiplier on sample values, where a maximum value of 1.0
   1257      * corresponds to a gain of 0 dB (sample values left unmodified).
   1258      * @return the maximum value, which is greater than or equal to 1.0.
   1259      */
   1260     static public float getMaxVolume() {
   1261         return GAIN_MAX;
   1262     }
   1263 
    /**
     * Returns the configured audio source sample rate in Hz.
     * The initial source sample rate depends on the constructor parameters,
     * but the source sample rate may change if {@link #setPlaybackRate(int)} is called.
     * If the constructor had a specific sample rate, then the initial sink sample rate is that
     * value.
     * If the constructor had {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED},
     * then the initial sink sample rate is a route-dependent default value based on the source [sic].
     * @return the source sample rate in Hz.
     */
    public int getSampleRate() {
        return mSampleRate;
    }
   1276 
    /**
     * Returns the current playback sample rate rate in Hz.
     * @return the playback sample rate in Hz, as reported by the native layer.
     */
    public int getPlaybackRate() {
        return native_get_playback_rate();
    }
   1283 
    /**
     * Returns the current playback parameters.
     * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
     * @return current {@link PlaybackParams}.
     * @throws IllegalStateException if track is not initialized.
     */
    public @NonNull PlaybackParams getPlaybackParams() {
        // Queried from the native layer each call; not cached in Java.
        return native_get_playback_params();
    }
   1293 
    /**
     * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
     * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @return the encoding constant set at configuration time.
     */
    public int getAudioFormat() {
        return mAudioFormat;
    }
   1301 
    /**
     * Returns the volume stream type of this AudioTrack.
     * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
     * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
     * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
     * {@link AudioManager#STREAM_NOTIFICATION}, {@link AudioManager#STREAM_DTMF} or
     * {@link AudioManager#STREAM_ACCESSIBILITY}.
     * @return the stream type of this track.
     */
    public int getStreamType() {
        return mStreamType;
    }
   1313 
    /**
     * Returns the configured channel position mask.
     * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
     * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
     * This method may return {@link AudioFormat#CHANNEL_INVALID} if
     * a channel index mask was used. Consider
     * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
     * which contains both the channel position mask and the channel index mask.
     * @return the channel position mask set at configuration time.
     */
    public int getChannelConfiguration() {
        return mChannelConfiguration;
    }
   1326 
   1327     /**
   1328      * Returns the configured <code>AudioTrack</code> format.
   1329      * @return an {@link AudioFormat} containing the
   1330      * <code>AudioTrack</code> parameters at the time of configuration.
   1331      */
   1332     public @NonNull AudioFormat getFormat() {
   1333         AudioFormat.Builder builder = new AudioFormat.Builder()
   1334             .setSampleRate(mSampleRate)
   1335             .setEncoding(mAudioFormat);
   1336         if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
   1337             builder.setChannelMask(mChannelConfiguration);
   1338         }
   1339         if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
   1340             builder.setChannelIndexMask(mChannelIndexMask);
   1341         }
   1342         return builder.build();
   1343     }
   1344 
    /**
     * Returns the configured number of channels.
     * @return the channel count derived from the configuration.
     */
    public int getChannelCount() {
        return mChannelCount;
    }
   1351 
    /**
     * Returns the state of the AudioTrack instance. This is useful after the
     * AudioTrack instance has been created to check if it was initialized
     * properly. This ensures that the appropriate resources have been acquired.
     * @see #STATE_UNINITIALIZED
     * @see #STATE_INITIALIZED
     * @see #STATE_NO_STATIC_DATA
     * @return one of the STATE_* constants above.
     */
    public int getState() {
        return mState;
    }
   1363 
   1364     /**
   1365      * Returns the playback state of the AudioTrack instance.
   1366      * @see #PLAYSTATE_STOPPED
   1367      * @see #PLAYSTATE_PAUSED
   1368      * @see #PLAYSTATE_PLAYING
   1369      */
    public int getPlayState() {
        // Read under mPlayStateLock so we observe a consistent value with
        // whatever code mutates mPlayState under the same lock.
        synchronized (mPlayStateLock) {
            return mPlayState;
        }
    }
   1375 
   1376 
   1377     /**
   1378      * Returns the effective size of the <code>AudioTrack</code> buffer
   1379      * that the application writes to.
   1380      * <p> This will be less than or equal to the result of
   1381      * {@link #getBufferCapacityInFrames()}.
   1382      * It will be equal if {@link #setBufferSizeInFrames(int)} has never been called.
   1383      * <p> If the track is subsequently routed to a different output sink, the buffer
   1384      * size and capacity may enlarge to accommodate.
   1385      * <p> If the <code>AudioTrack</code> encoding indicates compressed data,
   1386      * e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
   1387      * the size of the <code>AudioTrack</code> buffer in bytes.
   1388      * <p> See also {@link AudioManager#getProperty(String)} for key
   1389      * {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
   1390      * @return current size in frames of the <code>AudioTrack</code> buffer.
   1391      * @throws IllegalStateException if track is not initialized.
   1392      */
    public int getBufferSizeInFrames() {
        // The effective (possibly limited) size lives in the native layer.
        return native_get_buffer_size_frames();
    }
   1396 
   1397     /**
   1398      * Limits the effective size of the <code>AudioTrack</code> buffer
   1399      * that the application writes to.
   1400      * <p> A write to this AudioTrack will not fill the buffer beyond this limit.
   1401      * If a blocking write is used then the write will block until the data
   1402      * can fit within this limit.
   1403      * <p>Changing this limit modifies the latency associated with
   1404      * the buffer for this track. A smaller size will give lower latency
   1405      * but there may be more glitches due to buffer underruns.
   1406      * <p>The actual size used may not be equal to this requested size.
   1407      * It will be limited to a valid range with a maximum of
   1408      * {@link #getBufferCapacityInFrames()}.
   1409      * It may also be adjusted slightly for internal reasons.
   1410      * If bufferSizeInFrames is less than zero then {@link #ERROR_BAD_VALUE}
   1411      * will be returned.
   1412      * <p>This method is only supported for PCM audio.
   1413      * It is not supported for compressed audio tracks.
   1414      *
   1415      * @param bufferSizeInFrames requested buffer size in frames
   1416      * @return the actual buffer size in frames or an error code,
   1417      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
   1418      * @throws IllegalStateException if track is not initialized.
   1419      */
   1420     public int setBufferSizeInFrames(int bufferSizeInFrames) {
   1421         if (mDataLoadMode == MODE_STATIC || mState == STATE_UNINITIALIZED) {
   1422             return ERROR_INVALID_OPERATION;
   1423         }
   1424         if (bufferSizeInFrames < 0) {
   1425             return ERROR_BAD_VALUE;
   1426         }
   1427         return native_set_buffer_size_frames(bufferSizeInFrames);
   1428     }
   1429 
   1430     /**
   1431      *  Returns the maximum size of the <code>AudioTrack</code> buffer in frames.
   1432      *  <p> If the track's creation mode is {@link #MODE_STATIC},
   1433      *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
   1434      *  A static track's frame count will not change.
   1435      *  <p> If the track's creation mode is {@link #MODE_STREAM},
   1436      *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
   1437      *  For streaming tracks, this value may be rounded up to a larger value if needed by
   1438      *  the target output sink, and
   1439      *  if the track is subsequently routed to a different output sink, the
   1440      *  frame count may enlarge to accommodate.
   1441      *  <p> If the <code>AudioTrack</code> encoding indicates compressed data,
   1442      *  e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
   1443      *  the size of the <code>AudioTrack</code> buffer in bytes.
   1444      *  <p> See also {@link AudioManager#getProperty(String)} for key
   1445      *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
   1446      *  @return maximum size in frames of the <code>AudioTrack</code> buffer.
   1447      *  @throws IllegalStateException if track is not initialized.
   1448      */
    public int getBufferCapacityInFrames() {
        // Maximum (not current) buffer size, as tracked by the native layer.
        return native_get_buffer_capacity_frames();
    }
   1452 
   1453     /**
   1454      *  Returns the frame count of the native <code>AudioTrack</code> buffer.
   1455      *  @return current size in frames of the <code>AudioTrack</code> buffer.
   1456      *  @throws IllegalStateException
   1457      *  @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
   1458      */
    @Deprecated
    protected int getNativeFrameCount() {
        // Kept for backward compatibility; delegates to the capacity query.
        return native_get_buffer_capacity_frames();
    }
   1463 
   1464     /**
   1465      * Returns marker position expressed in frames.
   1466      * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
   1467      * or zero if marker is disabled.
   1468      */
    public int getNotificationMarkerPosition() {
        // Zero means the marker is disabled (see javadoc above).
        return native_get_marker_pos();
    }
   1472 
   1473     /**
   1474      * Returns the notification update period expressed in frames.
   1475      * Zero means that no position update notifications are being delivered.
   1476      */
    public int getPositionNotificationPeriod() {
        // Period in frames; zero means periodic notifications are off.
        return native_get_pos_update_period();
    }
   1480 
   1481     /**
   1482      * Returns the playback head position expressed in frames.
   1483      * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
   1484      * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
   1485      * This is a continuously advancing counter.  It will wrap (overflow) periodically,
   1486      * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
   1487      * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
   1488      * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
   1489      * the total number of frames played since reset,
   1490      * <i>not</i> the current offset within the buffer.
   1491      */
    public int getPlaybackHeadPosition() {
        // Wrapping unsigned-32-bit frame counter; see javadoc above for semantics.
        return native_get_position();
    }
   1495 
   1496     /**
   1497      * Returns this track's estimated latency in milliseconds. This includes the latency due
   1498      * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
   1499      *
   1500      * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
   1501      * a better solution.
   1502      * @hide
   1503      */
    public int getLatency() {
        // Hidden API: estimated total latency in ms (buffer + mixer + driver).
        return native_get_latency();
    }
   1507 
   1508     /**
   1509      * Returns the number of underrun occurrences in the application-level write buffer
   1510      * since the AudioTrack was created.
   1511      * An underrun occurs if the application does not write audio
   1512      * data quickly enough, causing the buffer to underflow
   1513      * and a potential audio glitch or pop.
   1514      * <p>
   1515      * Underruns are less likely when buffer sizes are large.
   1516      * It may be possible to eliminate underruns by recreating the AudioTrack with
   1517      * a larger buffer.
   1518      * Or by using {@link #setBufferSizeInFrames(int)} to dynamically increase the
   1519      * effective size of the buffer.
   1520      */
    public int getUnderrunCount() {
        // Cumulative count since track creation, maintained natively.
        return native_get_underrun_count();
    }
   1524 
   1525     /**
   1526      * Returns the current performance mode of the {@link AudioTrack}.
   1527      *
   1528      * @return one of {@link AudioTrack#PERFORMANCE_MODE_NONE},
   1529      * {@link AudioTrack#PERFORMANCE_MODE_LOW_LATENCY},
   1530      * or {@link AudioTrack#PERFORMANCE_MODE_POWER_SAVING}.
   1531      * Use {@link AudioTrack.Builder#setPerformanceMode}
   1532      * in the {@link AudioTrack.Builder} to enable a performance mode.
   1533      * @throws IllegalStateException if track is not initialized.
   1534      */
   1535     public @PerformanceMode int getPerformanceMode() {
   1536         final int flags = native_get_flags();
   1537         if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
   1538             return PERFORMANCE_MODE_LOW_LATENCY;
   1539         } else if ((flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
   1540             return PERFORMANCE_MODE_POWER_SAVING;
   1541         } else {
   1542             return PERFORMANCE_MODE_NONE;
   1543         }
   1544     }
   1545 
   1546     /**
   1547      *  Returns the output sample rate in Hz for the specified stream type.
   1548      */
   1549     static public int getNativeOutputSampleRate(int streamType) {
   1550         return native_get_output_sample_rate(streamType);
   1551     }
   1552 
   1553     /**
   1554      * Returns the estimated minimum buffer size required for an AudioTrack
   1555      * object to be created in the {@link #MODE_STREAM} mode.
   1556      * The size is an estimate because it does not consider either the route or the sink,
   1557      * since neither is known yet.  Note that this size doesn't
   1558      * guarantee a smooth playback under load, and higher values should be chosen according to
   1559      * the expected frequency at which the buffer will be refilled with additional data to play.
   1560      * For example, if you intend to dynamically set the source sample rate of an AudioTrack
   1561      * to a higher value than the initial source sample rate, be sure to configure the buffer size
   1562      * based on the highest planned sample rate.
   1563      * @param sampleRateInHz the source sample rate expressed in Hz.
   1564      *   {@link AudioFormat#SAMPLE_RATE_UNSPECIFIED} is not permitted.
   1565      * @param channelConfig describes the configuration of the audio channels.
   1566      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
   1567      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
   1568      * @param audioFormat the format in which the audio data is represented.
   1569      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
   1570      *   {@link AudioFormat#ENCODING_PCM_8BIT},
   1571      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
   1572      * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
   1573      *   or {@link #ERROR} if unable to query for output properties,
   1574      *   or the minimum buffer size expressed in bytes.
   1575      */
   1576     static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
   1577         int channelCount = 0;
   1578         switch(channelConfig) {
   1579         case AudioFormat.CHANNEL_OUT_MONO:
   1580         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
   1581             channelCount = 1;
   1582             break;
   1583         case AudioFormat.CHANNEL_OUT_STEREO:
   1584         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
   1585             channelCount = 2;
   1586             break;
   1587         default:
   1588             if (!isMultichannelConfigSupported(channelConfig)) {
   1589                 loge("getMinBufferSize(): Invalid channel configuration.");
   1590                 return ERROR_BAD_VALUE;
   1591             } else {
   1592                 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
   1593             }
   1594         }
   1595 
   1596         if (!AudioFormat.isPublicEncoding(audioFormat)) {
   1597             loge("getMinBufferSize(): Invalid audio format.");
   1598             return ERROR_BAD_VALUE;
   1599         }
   1600 
   1601         // sample rate, note these values are subject to change
   1602         // Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
   1603         if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
   1604                 (sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
   1605             loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
   1606             return ERROR_BAD_VALUE;
   1607         }
   1608 
   1609         int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
   1610         if (size <= 0) {
   1611             loge("getMinBufferSize(): error querying hardware");
   1612             return ERROR;
   1613         }
   1614         else {
   1615             return size;
   1616         }
   1617     }
   1618 
   1619     /**
   1620      * Returns the audio session ID.
   1621      *
   1622      * @return the ID of the audio session this AudioTrack belongs to.
   1623      */
    public int getAudioSessionId() {
        // Session id assigned at construction time.
        return mSessionId;
    }
   1627 
   1628    /**
   1629     * Poll for a timestamp on demand.
   1630     * <p>
   1631     * If you need to track timestamps during initial warmup or after a routing or mode change,
   1632     * you should request a new timestamp periodically until the reported timestamps
   1633     * show that the frame position is advancing, or until it becomes clear that
   1634     * timestamps are unavailable for this route.
   1635     * <p>
   1636     * After the clock is advancing at a stable rate,
   1637     * query for a new timestamp approximately once every 10 seconds to once per minute.
   1638     * Calling this method more often is inefficient.
   1639     * It is also counter-productive to call this method more often than recommended,
   1640     * because the short-term differences between successive timestamp reports are not meaningful.
   1641     * If you need a high-resolution mapping between frame position and presentation time,
   1642     * consider implementing that at application level, based on low-resolution timestamps.
   1643     * <p>
   1644     * The audio data at the returned position may either already have been
   1645     * presented, or may have not yet been presented but is committed to be presented.
   1646     * It is not possible to request the time corresponding to a particular position,
   1647     * or to request the (fractional) position corresponding to a particular time.
   1648     * If you need such features, consider implementing them at application level.
   1649     *
   1650     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
   1651     *        and owned by caller.
   1652     * @return true if a timestamp is available, or false if no timestamp is available.
   1653     *         If a timestamp if available,
   1654     *         the AudioTimestamp instance is filled in with a position in frame units, together
   1655     *         with the estimated time when that frame was presented or is committed to
   1656     *         be presented.
   1657     *         In the case that no timestamp is available, any supplied instance is left unaltered.
   1658     *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
   1659     *         or during and immediately after a route change.
   1660     *         A timestamp is permanently unavailable for a given route if the route does not support
   1661     *         timestamps.  In this case, the approximate frame position can be obtained
   1662     *         using {@link #getPlaybackHeadPosition}.
   1663     *         However, it may be useful to continue to query for
   1664     *         timestamps occasionally, to recover after a route change.
   1665     */
   1666     // Add this text when the "on new timestamp" API is added:
   1667     //   Use if you need to get the most recent timestamp outside of the event callback handler.
   1668     public boolean getTimestamp(AudioTimestamp timestamp)
   1669     {
   1670         if (timestamp == null) {
   1671             throw new IllegalArgumentException();
   1672         }
   1673         // It's unfortunate, but we have to either create garbage every time or use synchronized
   1674         long[] longArray = new long[2];
   1675         int ret = native_get_timestamp(longArray);
   1676         if (ret != SUCCESS) {
   1677             return false;
   1678         }
   1679         timestamp.framePosition = longArray[0];
   1680         timestamp.nanoTime = longArray[1];
   1681         return true;
   1682     }
   1683 
   1684     /**
   1685      * Poll for a timestamp on demand.
   1686      * <p>
   1687      * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code.
   1688      *
   1689      * @param timestamp a reference to a non-null AudioTimestamp instance allocated
   1690      *        and owned by caller.
   1691      * @return {@link #SUCCESS} if a timestamp is available
   1692      *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
   1693      *         immediately after start/ACTIVE, when the number of frames consumed is less than the
   1694      *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
   1695      *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
   1696      *         for the timestamp.
   1697      *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   1698      *         needs to be recreated.
   1699      *         {@link #ERROR_INVALID_OPERATION} if current route does not support
   1700      *         timestamps. In this case, the approximate frame position can be obtained
   1701      *         using {@link #getPlaybackHeadPosition}.
   1702      *
   1703      *         The AudioTimestamp instance is filled in with a position in frame units, together
   1704      *         with the estimated time when that frame was presented or is committed to
   1705      *         be presented.
   1706      * @hide
   1707      */
   1708      // Add this text when the "on new timestamp" API is added:
   1709      //   Use if you need to get the most recent timestamp outside of the event callback handler.
   1710      public int getTimestampWithStatus(AudioTimestamp timestamp)
   1711      {
   1712          if (timestamp == null) {
   1713              throw new IllegalArgumentException();
   1714          }
   1715          // It's unfortunate, but we have to either create garbage every time or use synchronized
   1716          long[] longArray = new long[2];
   1717          int ret = native_get_timestamp(longArray);
   1718          timestamp.framePosition = longArray[0];
   1719          timestamp.nanoTime = longArray[1];
   1720          return ret;
   1721      }
   1722 
   1723     /**
   1724      *  Return Metrics data about the current AudioTrack instance.
   1725      *
   1726      * @return a {@link PersistableBundle} containing the set of attributes and values
   1727      * available for the media being handled by this instance of AudioTrack
   1728      * The attributes are descibed in {@link MetricsConstants}.
   1729      *
   1730      * Additional vendor-specific fields may also be present in
   1731      * the return value.
   1732      */
   1733     public PersistableBundle getMetrics() {
   1734         PersistableBundle bundle = native_getMetrics();
   1735         return bundle;
   1736     }
   1737 
   1738     private native PersistableBundle native_getMetrics();
   1739 
   1740     //--------------------------------------------------------------------------
   1741     // Initialization / configuration
   1742     //--------------------
   1743     /**
   1744      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
   1745      * for each periodic playback head position update.
   1746      * Notifications will be received in the same thread as the one in which the AudioTrack
   1747      * instance was created.
   1748      * @param listener
   1749      */
    public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
        // Null handler: events are delivered on the thread that created this track.
        setPlaybackPositionUpdateListener(listener, null);
    }
   1753 
   1754     /**
   1755      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
   1756      * for each periodic playback head position update.
   1757      * Use this method to receive AudioTrack events in the Handler associated with another
   1758      * thread than the one in which you created the AudioTrack instance.
   1759      * @param listener
   1760      * @param handler the Handler that will receive the event notification messages.
   1761      */
   1762     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
   1763                                                     Handler handler) {
   1764         if (listener != null) {
   1765             mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
   1766         } else {
   1767             mEventHandlerDelegate = null;
   1768         }
   1769     }
   1770 
   1771 
   1772     private static float clampGainOrLevel(float gainOrLevel) {
   1773         if (Float.isNaN(gainOrLevel)) {
   1774             throw new IllegalArgumentException();
   1775         }
   1776         if (gainOrLevel < GAIN_MIN) {
   1777             gainOrLevel = GAIN_MIN;
   1778         } else if (gainOrLevel > GAIN_MAX) {
   1779             gainOrLevel = GAIN_MAX;
   1780         }
   1781         return gainOrLevel;
   1782     }
   1783 
   1784 
   1785      /**
   1786      * Sets the specified left and right output gain values on the AudioTrack.
   1787      * <p>Gain values are clamped to the closed interval [0.0, max] where
   1788      * max is the value of {@link #getMaxVolume}.
   1789      * A value of 0.0 results in zero gain (silence), and
   1790      * a value of 1.0 means unity gain (signal unchanged).
   1791      * The default value is 1.0 meaning unity gain.
   1792      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
   1793      * @param leftGain output gain for the left channel.
   1794      * @param rightGain output gain for the right channel
   1795      * @return error code or success, see {@link #SUCCESS},
   1796      *    {@link #ERROR_INVALID_OPERATION}
   1797      * @deprecated Applications should use {@link #setVolume} instead, as it
   1798      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
   1799      */
   1800     @Deprecated
   1801     public int setStereoVolume(float leftGain, float rightGain) {
   1802         if (mState == STATE_UNINITIALIZED) {
   1803             return ERROR_INVALID_OPERATION;
   1804         }
   1805 
   1806         baseSetVolume(leftGain, rightGain);
   1807         return SUCCESS;
   1808     }
   1809 
   1810     @Override
   1811     void playerSetVolume(boolean muting, float leftVolume, float rightVolume) {
   1812         leftVolume = clampGainOrLevel(muting ? 0.0f : leftVolume);
   1813         rightVolume = clampGainOrLevel(muting ? 0.0f : rightVolume);
   1814 
   1815         native_setVolume(leftVolume, rightVolume);
   1816     }
   1817 
   1818 
   1819     /**
   1820      * Sets the specified output gain value on all channels of this track.
   1821      * <p>Gain values are clamped to the closed interval [0.0, max] where
   1822      * max is the value of {@link #getMaxVolume}.
   1823      * A value of 0.0 results in zero gain (silence), and
   1824      * a value of 1.0 means unity gain (signal unchanged).
   1825      * The default value is 1.0 meaning unity gain.
   1826      * <p>This API is preferred over {@link #setStereoVolume}, as it
   1827      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
   1828      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
   1829      * @param gain output gain for all channels.
   1830      * @return error code or success, see {@link #SUCCESS},
   1831      *    {@link #ERROR_INVALID_OPERATION}
   1832      */
   1833     public int setVolume(float gain) {
   1834         return setStereoVolume(gain, gain);
   1835     }
   1836 
    // PlayerBase hook: forwards a volume shaper configuration/operation pair
    // directly to the native AudioTrack; returns the native status/id.
    @Override
    /* package */ int playerApplyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation) {
        return native_applyVolumeShaper(configuration, operation);
    }
   1843 
    // PlayerBase hook: queries the native layer for the state of the volume
    // shaper with the given id; may return null if no such shaper exists.
    @Override
    /* package */ @Nullable VolumeShaper.State playerGetVolumeShaperState(int id) {
        return native_getVolumeShaperState(id);
    }
   1848 
    // Creates a VolumeShaper bound to this track; the VolumeShaper calls back
    // into playerApplyVolumeShaper()/playerGetVolumeShaperState() above.
    @Override
    public @NonNull VolumeShaper createVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration) {
        return new VolumeShaper(configuration, this);
    }
   1854 
   1855     /**
   1856      * Sets the playback sample rate for this track. This sets the sampling rate at which
   1857      * the audio data will be consumed and played back
   1858      * (as set by the sampleRateInHz parameter in the
   1859      * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
   1860      * not the original sampling rate of the
   1861      * content. For example, setting it to half the sample rate of the content will cause the
   1862      * playback to last twice as long, but will also result in a pitch shift down by one octave.
   1863      * The valid sample rate range is from 1 Hz to twice the value returned by
   1864      * {@link #getNativeOutputSampleRate(int)}.
   1865      * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
   1866      * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
   1867      * for playback of content of differing sample rate,
   1868      * but with identical encoding and channel mask.
   1869      * @param sampleRateInHz the sample rate expressed in Hz
   1870      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
   1871      *    {@link #ERROR_INVALID_OPERATION}
   1872      */
   1873     public int setPlaybackRate(int sampleRateInHz) {
   1874         if (mState != STATE_INITIALIZED) {
   1875             return ERROR_INVALID_OPERATION;
   1876         }
   1877         if (sampleRateInHz <= 0) {
   1878             return ERROR_BAD_VALUE;
   1879         }
   1880         return native_set_playback_rate(sampleRateInHz);
   1881     }
   1882 
   1883 
   1884     /**
   1885      * Sets the playback parameters.
   1886      * This method returns failure if it cannot apply the playback parameters.
   1887      * One possible cause is that the parameters for speed or pitch are out of range.
   1888      * Another possible cause is that the <code>AudioTrack</code> is streaming
   1889      * (see {@link #MODE_STREAM}) and the
   1890      * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
   1891      * on configuration must be larger than the speed multiplied by the minimum size
   1892      * {@link #getMinBufferSize(int, int, int)}) to allow proper playback.
   1893      * @param params see {@link PlaybackParams}. In particular,
   1894      * speed, pitch, and audio mode should be set.
   1895      * @throws IllegalArgumentException if the parameters are invalid or not accepted.
   1896      * @throws IllegalStateException if track is not initialized.
   1897      */
   1898     public void setPlaybackParams(@NonNull PlaybackParams params) {
   1899         if (params == null) {
   1900             throw new IllegalArgumentException("params is null");
   1901         }
   1902         native_set_playback_params(params);
   1903     }
   1904 
   1905 
   1906     /**
   1907      * Sets the position of the notification marker.  At most one marker can be active.
   1908      * @param markerInFrames marker position in wrapping frame units similar to
   1909      * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
   1910      * To set a marker at a position which would appear as zero due to wraparound,
   1911      * a workaround is to use a non-zero position near zero, such as -1 or 1.
   1912      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
   1913      *  {@link #ERROR_INVALID_OPERATION}
   1914      */
   1915     public int setNotificationMarkerPosition(int markerInFrames) {
   1916         if (mState == STATE_UNINITIALIZED) {
   1917             return ERROR_INVALID_OPERATION;
   1918         }
   1919         return native_set_marker_pos(markerInFrames);
   1920     }
   1921 
   1922 
   1923     /**
   1924      * Sets the period for the periodic notification event.
   1925      * @param periodInFrames update period expressed in frames.
   1926      * Zero period means no position updates.  A negative period is not allowed.
   1927      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
   1928      */
   1929     public int setPositionNotificationPeriod(int periodInFrames) {
   1930         if (mState == STATE_UNINITIALIZED) {
   1931             return ERROR_INVALID_OPERATION;
   1932         }
   1933         return native_set_pos_update_period(periodInFrames);
   1934     }
   1935 
   1936 
   1937     /**
   1938      * Sets the playback head position within the static buffer.
   1939      * The track must be stopped or paused for the position to be changed,
   1940      * and must use the {@link #MODE_STATIC} mode.
   1941      * @param positionInFrames playback head position within buffer, expressed in frames.
   1942      * Zero corresponds to start of buffer.
   1943      * The position must not be greater than the buffer size in frames, or negative.
   1944      * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
   1945      * the position values have different meanings.
   1946      * <br>
   1947      * If looping is currently enabled and the new position is greater than or equal to the
   1948      * loop end marker, the behavior varies by API level:
   1949      * as of {@link android.os.Build.VERSION_CODES#M},
   1950      * the looping is first disabled and then the position is set.
   1951      * For earlier API levels, the behavior is unspecified.
   1952      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
   1953      *    {@link #ERROR_INVALID_OPERATION}
   1954      */
   1955     public int setPlaybackHeadPosition(int positionInFrames) {
   1956         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
   1957                 getPlayState() == PLAYSTATE_PLAYING) {
   1958             return ERROR_INVALID_OPERATION;
   1959         }
   1960         if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
   1961             return ERROR_BAD_VALUE;
   1962         }
   1963         return native_set_position(positionInFrames);
   1964     }
   1965 
   1966     /**
   1967      * Sets the loop points and the loop count. The loop can be infinite.
   1968      * Similarly to setPlaybackHeadPosition,
   1969      * the track must be stopped or paused for the loop points to be changed,
   1970      * and must use the {@link #MODE_STATIC} mode.
   1971      * @param startInFrames loop start marker expressed in frames.
   1972      * Zero corresponds to start of buffer.
   1973      * The start marker must not be greater than or equal to the buffer size in frames, or negative.
   1974      * @param endInFrames loop end marker expressed in frames.
   1975      * The total buffer size in frames corresponds to end of buffer.
   1976      * The end marker must not be greater than the buffer size in frames.
   1977      * For looping, the end marker must not be less than or equal to the start marker,
   1978      * but to disable looping
   1979      * it is permitted for start marker, end marker, and loop count to all be 0.
   1980      * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
   1981      * If the loop period (endInFrames - startInFrames) is too small for the implementation to
   1982      * support,
   1983      * {@link #ERROR_BAD_VALUE} is returned.
   1984      * The loop range is the interval [startInFrames, endInFrames).
   1985      * <br>
   1986      * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged,
   1987      * unless it is greater than or equal to the loop end marker, in which case
   1988      * it is forced to the loop start marker.
   1989      * For earlier API levels, the effect on position is unspecified.
   1990      * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
   1991      *    A value of -1 means infinite looping, and 0 disables looping.
   1992      *    A value of positive N means to "loop" (go back) N times.  For example,
   1993      *    a value of one means to play the region two times in total.
   1994      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
   1995      *    {@link #ERROR_INVALID_OPERATION}
   1996      */
   1997     public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
   1998         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
   1999                 getPlayState() == PLAYSTATE_PLAYING) {
   2000             return ERROR_INVALID_OPERATION;
   2001         }
   2002         if (loopCount == 0) {
   2003             ;   // explicitly allowed as an exception to the loop region range check
   2004         } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
   2005                 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
   2006             return ERROR_BAD_VALUE;
   2007         }
   2008         return native_set_loop(startInFrames, endInFrames, loopCount);
   2009     }
   2010 
   2011     /**
   2012      * Sets the audio presentation.
   2013      * If the audio presentation is invalid then {@link #ERROR_BAD_VALUE} will be returned.
   2014      * If a multi-stream decoder (MSD) is not present, or the format does not support
   2015      * multiple presentations, then {@link #ERROR_INVALID_OPERATION} will be returned.
   2016      * {@link #ERROR} is returned in case of any other error.
   2017      * @param presentation see {@link AudioPresentation}. In particular, id should be set.
   2018      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR},
   2019      *    {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}
   2020      * @throws IllegalArgumentException if the audio presentation is null.
   2021      * @throws IllegalStateException if track is not initialized.
   2022      */
   2023     public int setPresentation(@NonNull AudioPresentation presentation) {
   2024         if (presentation == null) {
   2025             throw new IllegalArgumentException("audio presentation is null");
   2026         }
   2027         return native_setPresentation(presentation.getPresentationId(),
   2028                 presentation.getProgramId());
   2029     }
   2030 
   2031     /**
   2032      * Sets the initialization state of the instance. This method was originally intended to be used
   2033      * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
   2034      * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
   2035      * @param state the state of the AudioTrack instance
   2036      * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
   2037      */
   2038     @Deprecated
   2039     protected void setState(int state) {
   2040         mState = state;
   2041     }
   2042 
   2043 
   2044     //---------------------------------------------------------
   2045     // Transport control methods
   2046     //--------------------
   2047     /**
   2048      * Starts playing an AudioTrack.
   2049      * <p>
   2050      * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
   2051      * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
   2052      * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
   2053      * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
   2054      * play().
   2055      * <p>
   2056      * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
   2057      * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
   2058      * If you don't call write() first, or if you call write() but with an insufficient amount of
   2059      * data, then the track will be in underrun state at play().  In this case,
   2060      * playback will not actually start playing until the data path is filled to a
   2061      * device-specific minimum level.  This requirement for the path to be filled
   2062      * to a minimum level is also true when resuming audio playback after calling stop().
   2063      * Similarly the buffer will need to be filled up again after
   2064      * the track underruns due to failure to call write() in a timely manner with sufficient data.
   2065      * For portability, an application should prime the data path to the maximum allowed
   2066      * by writing data until the write() method returns a short transfer count.
   2067      * This allows play() to start immediately, and reduces the chance of underrun.
   2068      *
   2069      * @throws IllegalStateException if the track isn't properly initialized
   2070      */
   2071     public void play()
   2072     throws IllegalStateException {
   2073         if (mState != STATE_INITIALIZED) {
   2074             throw new IllegalStateException("play() called on uninitialized AudioTrack.");
   2075         }
   2076         //FIXME use lambda to pass startImpl to superclass
   2077         final int delay = getStartDelayMs();
   2078         if (delay == 0) {
   2079             startImpl();
   2080         } else {
   2081             new Thread() {
   2082                 public void run() {
   2083                     try {
   2084                         Thread.sleep(delay);
   2085                     } catch (InterruptedException e) {
   2086                         e.printStackTrace();
   2087                     }
   2088                     baseSetStartDelayMs(0);
   2089                     try {
   2090                         startImpl();
   2091                     } catch (IllegalStateException e) {
   2092                         // fail silently for a state exception when it is happening after
   2093                         // a delayed start, as the player state could have changed between the
   2094                         // call to start() and the execution of startImpl()
   2095                     }
   2096                 }
   2097             }.start();
   2098         }
   2099     }
   2100 
    // Performs the actual start under the play-state lock: notifies the
    // PlayerBase superclass first, then starts the native track, then
    // publishes the PLAYING state. Statement order is significant.
    private void startImpl() {
        synchronized(mPlayStateLock) {
            baseStart();
            native_start();
            mPlayState = PLAYSTATE_PLAYING;
        }
    }
   2108 
   2109     /**
   2110      * Stops playing the audio data.
   2111      * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
   2112      * after the last buffer that was written has been played. For an immediate stop, use
   2113      * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
   2114      * back yet.
   2115      * @throws IllegalStateException
   2116      */
   2117     public void stop()
   2118     throws IllegalStateException {
   2119         if (mState != STATE_INITIALIZED) {
   2120             throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
   2121         }
   2122 
   2123         // stop playing
   2124         synchronized(mPlayStateLock) {
   2125             native_stop();
   2126             baseStop();
   2127             mPlayState = PLAYSTATE_STOPPED;
   2128             mAvSyncHeader = null;
   2129             mAvSyncBytesRemaining = 0;
   2130         }
   2131     }
   2132 
   2133     /**
   2134      * Pauses the playback of the audio data. Data that has not been played
   2135      * back will not be discarded. Subsequent calls to {@link #play} will play
   2136      * this data back. See {@link #flush()} to discard this data.
   2137      *
   2138      * @throws IllegalStateException
   2139      */
   2140     public void pause()
   2141     throws IllegalStateException {
   2142         if (mState != STATE_INITIALIZED) {
   2143             throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
   2144         }
   2145 
   2146         // pause playback
   2147         synchronized(mPlayStateLock) {
   2148             native_pause();
   2149             basePause();
   2150             mPlayState = PLAYSTATE_PAUSED;
   2151         }
   2152     }
   2153 
   2154 
   2155     //---------------------------------------------------------
   2156     // Audio data supply
   2157     //--------------------
   2158 
   2159     /**
   2160      * Flushes the audio data currently queued for playback. Any data that has
   2161      * been written but not yet presented will be discarded.  No-op if not stopped or paused,
   2162      * or if the track's creation mode is not {@link #MODE_STREAM}.
   2163      * <BR> Note that although data written but not yet presented is discarded, there is no
   2164      * guarantee that all of the buffer space formerly used by that data
   2165      * is available for a subsequent write.
   2166      * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
   2167      * less than or equal to the total buffer size
   2168      * may return a short actual transfer count.
   2169      */
   2170     public void flush() {
   2171         if (mState == STATE_INITIALIZED) {
   2172             // flush the data in native layer
   2173             native_flush();
   2174             mAvSyncHeader = null;
   2175             mAvSyncBytesRemaining = 0;
   2176         }
   2177 
   2178     }
   2179 
   2180     /**
   2181      * Writes the audio data to the audio sink for playback (streaming mode),
   2182      * or copies audio data for later playback (static buffer mode).
   2183      * The format specified in the AudioTrack constructor should be
   2184      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
   2185      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
   2186      * <p>
   2187      * In streaming mode, the write will normally block until all the data has been enqueued for
   2188      * playback, and will return a full transfer count.  However, if the track is stopped or paused
   2189      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
   2190      * occurs during the write, then the write may return a short transfer count.
   2191      * <p>
   2192      * In static buffer mode, copies the data to the buffer starting at offset 0.
   2193      * Note that the actual playback of this data might occur after this function returns.
   2194      *
   2195      * @param audioData the array that holds the data to play.
   2196      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
   2197      *    starts.
   2198      *    Must not be negative, or cause the data access to go out of bounds of the array.
   2199      * @param sizeInBytes the number of bytes to write in audioData after the offset.
   2200      *    Must not be negative, or cause the data access to go out of bounds of the array.
   2201      * @return zero or the positive number of bytes that were written, or one of the following
   2202      *    error codes. The number of bytes will be a multiple of the frame size in bytes
   2203      *    not to exceed sizeInBytes.
   2204      * <ul>
   2205      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
   2206      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
   2207      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   2208      *    needs to be recreated. The dead object error code is not returned if some data was
   2209      *    successfully transferred. In this case, the error is returned at the next write()</li>
   2210      * <li>{@link #ERROR} in case of other error</li>
   2211      * </ul>
   2212      * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code>
   2213      * set to  {@link #WRITE_BLOCKING}.
   2214      */
   2215     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
   2216         return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
   2217     }
   2218 
   2219     /**
   2220      * Writes the audio data to the audio sink for playback (streaming mode),
   2221      * or copies audio data for later playback (static buffer mode).
   2222      * The format specified in the AudioTrack constructor should be
   2223      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
   2224      * The format can be {@link AudioFormat#ENCODING_PCM_16BIT}, but this is deprecated.
   2225      * <p>
   2226      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
   2227      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
   2228      * for playback, and will return a full transfer count.  However, if the write mode is
   2229      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
   2230      * interrupts the write by calling stop or pause, or an I/O error
   2231      * occurs during the write, then the write may return a short transfer count.
   2232      * <p>
   2233      * In static buffer mode, copies the data to the buffer starting at offset 0,
   2234      * and the write mode is ignored.
   2235      * Note that the actual playback of this data might occur after this function returns.
   2236      *
   2237      * @param audioData the array that holds the data to play.
   2238      * @param offsetInBytes the offset expressed in bytes in audioData where the data to write
   2239      *    starts.
   2240      *    Must not be negative, or cause the data access to go out of bounds of the array.
   2241      * @param sizeInBytes the number of bytes to write in audioData after the offset.
   2242      *    Must not be negative, or cause the data access to go out of bounds of the array.
   2243      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
   2244      *     effect in static mode.
   2245      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
   2246      *         to the audio sink.
   2247      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
   2248      *     queuing as much audio data for playback as possible without blocking.
   2249      * @return zero or the positive number of bytes that were written, or one of the following
   2250      *    error codes. The number of bytes will be a multiple of the frame size in bytes
   2251      *    not to exceed sizeInBytes.
   2252      * <ul>
   2253      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
   2254      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
   2255      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   2256      *    needs to be recreated. The dead object error code is not returned if some data was
   2257      *    successfully transferred. In this case, the error is returned at the next write()</li>
   2258      * <li>{@link #ERROR} in case of other error</li>
   2259      * </ul>
   2260      */
   2261     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
   2262             @WriteMode int writeMode) {
   2263 
   2264         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
   2265             return ERROR_INVALID_OPERATION;
   2266         }
   2267 
   2268         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
   2269             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
   2270             return ERROR_BAD_VALUE;
   2271         }
   2272 
   2273         if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
   2274                 || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
   2275                 || (offsetInBytes + sizeInBytes > audioData.length)) {
   2276             return ERROR_BAD_VALUE;
   2277         }
   2278 
   2279         int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
   2280                 writeMode == WRITE_BLOCKING);
   2281 
   2282         if ((mDataLoadMode == MODE_STATIC)
   2283                 && (mState == STATE_NO_STATIC_DATA)
   2284                 && (ret > 0)) {
   2285             // benign race with respect to other APIs that read mState
   2286             mState = STATE_INITIALIZED;
   2287         }
   2288 
   2289         return ret;
   2290     }
   2291 
   2292     /**
   2293      * Writes the audio data to the audio sink for playback (streaming mode),
   2294      * or copies audio data for later playback (static buffer mode).
   2295      * The format specified in the AudioTrack constructor should be
   2296      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
   2297      * <p>
   2298      * In streaming mode, the write will normally block until all the data has been enqueued for
   2299      * playback, and will return a full transfer count.  However, if the track is stopped or paused
   2300      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
   2301      * occurs during the write, then the write may return a short transfer count.
   2302      * <p>
   2303      * In static buffer mode, copies the data to the buffer starting at offset 0.
   2304      * Note that the actual playback of this data might occur after this function returns.
   2305      *
   2306      * @param audioData the array that holds the data to play.
   2307      * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
   2308      *     starts.
   2309      *    Must not be negative, or cause the data access to go out of bounds of the array.
   2310      * @param sizeInShorts the number of shorts to read in audioData after the offset.
   2311      *    Must not be negative, or cause the data access to go out of bounds of the array.
   2312      * @return zero or the positive number of shorts that were written, or one of the following
   2313      *    error codes. The number of shorts will be a multiple of the channel count not to
   2314      *    exceed sizeInShorts.
   2315      * <ul>
   2316      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
   2317      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
   2318      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   2319      *    needs to be recreated. The dead object error code is not returned if some data was
   2320      *    successfully transferred. In this case, the error is returned at the next write()</li>
   2321      * <li>{@link #ERROR} in case of other error</li>
   2322      * </ul>
   2323      * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
   2324      * set to  {@link #WRITE_BLOCKING}.
   2325      */
   2326     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
   2327         return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
   2328     }
   2329 
   2330     /**
   2331      * Writes the audio data to the audio sink for playback (streaming mode),
   2332      * or copies audio data for later playback (static buffer mode).
   2333      * The format specified in the AudioTrack constructor should be
   2334      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
   2335      * <p>
   2336      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
   2337      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
   2338      * for playback, and will return a full transfer count.  However, if the write mode is
   2339      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
   2340      * interrupts the write by calling stop or pause, or an I/O error
   2341      * occurs during the write, then the write may return a short transfer count.
   2342      * <p>
   2343      * In static buffer mode, copies the data to the buffer starting at offset 0.
   2344      * Note that the actual playback of this data might occur after this function returns.
   2345      *
   2346      * @param audioData the array that holds the data to write.
   2347      * @param offsetInShorts the offset expressed in shorts in audioData where the data to write
   2348      *     starts.
   2349      *    Must not be negative, or cause the data access to go out of bounds of the array.
   2350      * @param sizeInShorts the number of shorts to read in audioData after the offset.
   2351      *    Must not be negative, or cause the data access to go out of bounds of the array.
   2352      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
   2353      *     effect in static mode.
   2354      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
   2355      *         to the audio sink.
   2356      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
   2357      *     queuing as much audio data for playback as possible without blocking.
   2358      * @return zero or the positive number of shorts that were written, or one of the following
   2359      *    error codes. The number of shorts will be a multiple of the channel count not to
   2360      *    exceed sizeInShorts.
   2361      * <ul>
   2362      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
   2363      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
   2364      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   2365      *    needs to be recreated. The dead object error code is not returned if some data was
   2366      *    successfully transferred. In this case, the error is returned at the next write()</li>
   2367      * <li>{@link #ERROR} in case of other error</li>
   2368      * </ul>
   2369      */
   2370     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
   2371             @WriteMode int writeMode) {
   2372 
   2373         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
   2374             return ERROR_INVALID_OPERATION;
   2375         }
   2376 
   2377         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
   2378             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
   2379             return ERROR_BAD_VALUE;
   2380         }
   2381 
   2382         if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
   2383                 || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
   2384                 || (offsetInShorts + sizeInShorts > audioData.length)) {
   2385             return ERROR_BAD_VALUE;
   2386         }
   2387 
   2388         int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
   2389                 writeMode == WRITE_BLOCKING);
   2390 
   2391         if ((mDataLoadMode == MODE_STATIC)
   2392                 && (mState == STATE_NO_STATIC_DATA)
   2393                 && (ret > 0)) {
   2394             // benign race with respect to other APIs that read mState
   2395             mState = STATE_INITIALIZED;
   2396         }
   2397 
   2398         return ret;
   2399     }
   2400 
   2401     /**
   2402      * Writes the audio data to the audio sink for playback (streaming mode),
   2403      * or copies audio data for later playback (static buffer mode).
   2404      * The format specified in the AudioTrack constructor should be
   2405      * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
   2406      * <p>
   2407      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
   2408      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
   2409      * for playback, and will return a full transfer count.  However, if the write mode is
   2410      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
   2411      * interrupts the write by calling stop or pause, or an I/O error
   2412      * occurs during the write, then the write may return a short transfer count.
   2413      * <p>
   2414      * In static buffer mode, copies the data to the buffer starting at offset 0,
   2415      * and the write mode is ignored.
   2416      * Note that the actual playback of this data might occur after this function returns.
   2417      *
   2418      * @param audioData the array that holds the data to write.
   2419      *     The implementation does not clip for sample values within the nominal range
   2420      *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
   2421      *     less than or equal to unity (1.0f), and in the absence of post-processing effects
   2422      *     that could add energy, such as reverb.  For the convenience of applications
   2423      *     that compute samples using filters with non-unity gain,
   2424      *     sample values +3 dB beyond the nominal range are permitted.
   2425      *     However such values may eventually be limited or clipped, depending on various gains
   2426      *     and later processing in the audio path.  Therefore applications are encouraged
   2427      *     to provide samples values within the nominal range.
   2428      * @param offsetInFloats the offset, expressed as a number of floats,
   2429      *     in audioData where the data to write starts.
   2430      *    Must not be negative, or cause the data access to go out of bounds of the array.
   2431      * @param sizeInFloats the number of floats to write in audioData after the offset.
   2432      *    Must not be negative, or cause the data access to go out of bounds of the array.
   2433      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
   2434      *     effect in static mode.
   2435      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
   2436      *         to the audio sink.
   2437      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
   2438      *     queuing as much audio data for playback as possible without blocking.
   2439      * @return zero or the positive number of floats that were written, or one of the following
   2440      *    error codes. The number of floats will be a multiple of the channel count not to
   2441      *    exceed sizeInFloats.
   2442      * <ul>
   2443      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
   2444      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
   2445      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   2446      *    needs to be recreated. The dead object error code is not returned if some data was
   2447      *    successfully transferred. In this case, the error is returned at the next write()</li>
   2448      * <li>{@link #ERROR} in case of other error</li>
   2449      * </ul>
   2450      */
   2451     public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
   2452             @WriteMode int writeMode) {
   2453 
   2454         if (mState == STATE_UNINITIALIZED) {
   2455             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
   2456             return ERROR_INVALID_OPERATION;
   2457         }
   2458 
   2459         if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
   2460             Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
   2461             return ERROR_INVALID_OPERATION;
   2462         }
   2463 
   2464         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
   2465             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
   2466             return ERROR_BAD_VALUE;
   2467         }
   2468 
   2469         if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
   2470                 || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
   2471                 || (offsetInFloats + sizeInFloats > audioData.length)) {
   2472             Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
   2473             return ERROR_BAD_VALUE;
   2474         }
   2475 
   2476         int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
   2477                 writeMode == WRITE_BLOCKING);
   2478 
   2479         if ((mDataLoadMode == MODE_STATIC)
   2480                 && (mState == STATE_NO_STATIC_DATA)
   2481                 && (ret > 0)) {
   2482             // benign race with respect to other APIs that read mState
   2483             mState = STATE_INITIALIZED;
   2484         }
   2485 
   2486         return ret;
   2487     }
   2488 
   2489 
   2490     /**
   2491      * Writes the audio data to the audio sink for playback (streaming mode),
   2492      * or copies audio data for later playback (static buffer mode).
   2493      * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor.
   2494      * <p>
   2495      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
   2496      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
   2497      * for playback, and will return a full transfer count.  However, if the write mode is
   2498      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
   2499      * interrupts the write by calling stop or pause, or an I/O error
   2500      * occurs during the write, then the write may return a short transfer count.
   2501      * <p>
   2502      * In static buffer mode, copies the data to the buffer starting at offset 0,
   2503      * and the write mode is ignored.
   2504      * Note that the actual playback of this data might occur after this function returns.
   2505      *
   2506      * @param audioData the buffer that holds the data to write, starting at the position reported
   2507      *     by <code>audioData.position()</code>.
   2508      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
   2509      *     have been advanced to reflect the amount of data that was successfully written to
   2510      *     the AudioTrack.
   2511      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
   2512      *     that the number of bytes requested be a multiple of the frame size (sample size in
   2513      *     bytes multiplied by the channel count).
   2514      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
   2515      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
   2516      *     effect in static mode.
   2517      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
   2518      *         to the audio sink.
   2519      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
   2520      *     queuing as much audio data for playback as possible without blocking.
   2521      * @return zero or the positive number of bytes that were written, or one of the following
   2522      *    error codes.
   2523      * <ul>
   2524      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
   2525      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
   2526      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   2527      *    needs to be recreated. The dead object error code is not returned if some data was
   2528      *    successfully transferred. In this case, the error is returned at the next write()</li>
   2529      * <li>{@link #ERROR} in case of other error</li>
   2530      * </ul>
   2531      */
   2532     public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
   2533             @WriteMode int writeMode) {
   2534 
   2535         if (mState == STATE_UNINITIALIZED) {
   2536             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
   2537             return ERROR_INVALID_OPERATION;
   2538         }
   2539 
   2540         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
   2541             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
   2542             return ERROR_BAD_VALUE;
   2543         }
   2544 
   2545         if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
   2546             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
   2547             return ERROR_BAD_VALUE;
   2548         }
   2549 
   2550         int ret = 0;
   2551         if (audioData.isDirect()) {
   2552             ret = native_write_native_bytes(audioData,
   2553                     audioData.position(), sizeInBytes, mAudioFormat,
   2554                     writeMode == WRITE_BLOCKING);
   2555         } else {
   2556             ret = native_write_byte(NioUtils.unsafeArray(audioData),
   2557                     NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
   2558                     sizeInBytes, mAudioFormat,
   2559                     writeMode == WRITE_BLOCKING);
   2560         }
   2561 
   2562         if ((mDataLoadMode == MODE_STATIC)
   2563                 && (mState == STATE_NO_STATIC_DATA)
   2564                 && (ret > 0)) {
   2565             // benign race with respect to other APIs that read mState
   2566             mState = STATE_INITIALIZED;
   2567         }
   2568 
   2569         if (ret > 0) {
   2570             audioData.position(audioData.position() + ret);
   2571         }
   2572 
   2573         return ret;
   2574     }
   2575 
   2576     /**
   2577      * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
   2578      * The blocking behavior will depend on the write mode.
   2579      * @param audioData the buffer that holds the data to write, starting at the position reported
   2580      *     by <code>audioData.position()</code>.
   2581      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
   2582      *     have been advanced to reflect the amount of data that was successfully written to
   2583      *     the AudioTrack.
   2584      * @param sizeInBytes number of bytes to write.  It is recommended but not enforced
   2585      *     that the number of bytes requested be a multiple of the frame size (sample size in
   2586      *     bytes multiplied by the channel count).
   2587      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
   2588      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
   2589      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
   2590      *         to the audio sink.
   2591      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
   2592      *     queuing as much audio data for playback as possible without blocking.
   2593      * @param timestamp The timestamp of the first decodable audio frame in the provided audioData.
   2594      * @return zero or the positive number of bytes that were written, or one of the following
   2595      *    error codes.
   2596      * <ul>
   2597      * <li>{@link #ERROR_INVALID_OPERATION} if the track isn't properly initialized</li>
   2598      * <li>{@link #ERROR_BAD_VALUE} if the parameters don't resolve to valid data and indexes</li>
   2599      * <li>{@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   2600      *    needs to be recreated. The dead object error code is not returned if some data was
   2601      *    successfully transferred. In this case, the error is returned at the next write()</li>
   2602      * <li>{@link #ERROR} in case of other error</li>
   2603      * </ul>
   2604      */
    public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
            @WriteMode int writeMode, long timestamp) {

        if (mState == STATE_UNINITIALIZED) {
            Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
            return ERROR_INVALID_OPERATION;
        }

        if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
            Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
            return ERROR_BAD_VALUE;
        }

        // Timestamps are only meaningful for streaming data.
        if (mDataLoadMode != MODE_STREAM) {
            Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
            return ERROR_INVALID_OPERATION;
        }

        // Without HW A/V sync the timestamp cannot be honored; fall back to the plain write.
        if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
            Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
            return write(audioData, sizeInBytes, writeMode);
        }

        if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
            Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
            return ERROR_BAD_VALUE;
        }

        // create timestamp header if none exists
        // Layout appears to be: marker(4) | size(4) | timestamp(8) | offset(4), big-endian,
        // in a buffer of mOffset bytes -- TODO confirm against the native-side parser.
        if (mAvSyncHeader == null) {
            mAvSyncHeader = ByteBuffer.allocate(mOffset);
            mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
            mAvSyncHeader.putInt(0x55550002);  // sync marker / header version
        }

        // Starting a new access unit: record its size and timestamp, and rewind the
        // header so it is written ahead of the payload.
        if (mAvSyncBytesRemaining == 0) {
            mAvSyncHeader.putInt(4, sizeInBytes);
            mAvSyncHeader.putLong(8, timestamp);
            mAvSyncHeader.putInt(16, mOffset);
            mAvSyncHeader.position(0);
            mAvSyncBytesRemaining = sizeInBytes;
        }

        // write timestamp header if not completely written already
        int ret = 0;
        if (mAvSyncHeader.remaining() != 0) {
            ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
            if (ret < 0) {
                Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
                // Reset sync state so the next call starts a fresh header.
                mAvSyncHeader = null;
                mAvSyncBytesRemaining = 0;
                return ret;
            }
            if (mAvSyncHeader.remaining() > 0) {
                // Header only partially written (e.g. non-blocking mode);
                // report zero payload bytes consumed so the caller retries.
                Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
                return 0;
            }
        }

        // write audio data
        // Never write more payload than the current access unit declared in its header.
        int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
        ret = write(audioData, sizeToWrite, writeMode);
        if (ret < 0) {
            Log.e(TAG, "AudioTrack.write() could not write audio data!");
            // Reset sync state so the next call starts a fresh header.
            mAvSyncHeader = null;
            mAvSyncBytesRemaining = 0;
            return ret;
        }

        mAvSyncBytesRemaining -= ret;

        return ret;
    }
   2678 
   2679 
   2680     /**
   2681      * Sets the playback head position within the static buffer to zero,
   2682      * that is it rewinds to start of static buffer.
   2683      * The track must be stopped or paused, and
   2684      * the track's creation mode must be {@link #MODE_STATIC}.
   2685      * <p>
   2686      * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
   2687      * {@link #getPlaybackHeadPosition()} to zero.
   2688      * For earlier API levels, the reset behavior is unspecified.
   2689      * <p>
   2690      * Use {@link #setPlaybackHeadPosition(int)} with a zero position
   2691      * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
   2692      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
   2693      *  {@link #ERROR_INVALID_OPERATION}
   2694      */
   2695     public int reloadStaticData() {
   2696         if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
   2697             return ERROR_INVALID_OPERATION;
   2698         }
   2699         return native_reload_static();
   2700     }
   2701 
   2702     //--------------------------------------------------------------------------
   2703     // Audio effects management
   2704     //--------------------
   2705 
   2706     /**
   2707      * Attaches an auxiliary effect to the audio track. A typical auxiliary
   2708      * effect is a reverberation effect which can be applied on any sound source
   2709      * that directs a certain amount of its energy to this effect. This amount
   2710      * is defined by setAuxEffectSendLevel().
   2711      * {@see #setAuxEffectSendLevel(float)}.
   2712      * <p>After creating an auxiliary effect (e.g.
   2713      * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
   2714      * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
   2715      * this method to attach the audio track to the effect.
   2716      * <p>To detach the effect from the audio track, call this method with a
   2717      * null effect id.
   2718      *
   2719      * @param effectId system wide unique id of the effect to attach
   2720      * @return error code or success, see {@link #SUCCESS},
   2721      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
   2722      */
   2723     public int attachAuxEffect(int effectId) {
   2724         if (mState == STATE_UNINITIALIZED) {
   2725             return ERROR_INVALID_OPERATION;
   2726         }
   2727         return native_attachAuxEffect(effectId);
   2728     }
   2729 
   2730     /**
   2731      * Sets the send level of the audio track to the attached auxiliary effect
   2732      * {@link #attachAuxEffect(int)}.  Effect levels
   2733      * are clamped to the closed interval [0.0, max] where
   2734      * max is the value of {@link #getMaxVolume}.
   2735      * A value of 0.0 results in no effect, and a value of 1.0 is full send.
   2736      * <p>By default the send level is 0.0f, so even if an effect is attached to the player
   2737      * this method must be called for the effect to be applied.
   2738      * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
   2739      * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
   2740      * so an appropriate conversion from linear UI input x to level is:
   2741      * x == 0 -&gt; level = 0
   2742      * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
   2743      *
   2744      * @param level linear send level
   2745      * @return error code or success, see {@link #SUCCESS},
   2746      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
   2747      */
   2748     public int setAuxEffectSendLevel(float level) {
   2749         if (mState == STATE_UNINITIALIZED) {
   2750             return ERROR_INVALID_OPERATION;
   2751         }
   2752         return baseSetAuxEffectSendLevel(level);
   2753     }
   2754 
   2755     @Override
   2756     int playerSetAuxEffectSendLevel(boolean muting, float level) {
   2757         level = clampGainOrLevel(muting ? 0.0f : level);
   2758         int err = native_setAuxEffectSendLevel(level);
   2759         return err == 0 ? SUCCESS : ERROR;
   2760     }
   2761 
   2762     //--------------------------------------------------------------------------
   2763     // Explicit Routing
   2764     //--------------------
   2765     private AudioDeviceInfo mPreferredDevice = null;
   2766 
   2767     /**
   2768      * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
   2769      * the output from this AudioTrack.
   2770      * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
   2771      *  If deviceInfo is null, default routing is restored.
     * @return true if successful, false if the specified {@link AudioDeviceInfo} is non-null and
   2773      * does not correspond to a valid audio output device.
   2774      */
   2775     @Override
   2776     public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
   2777         // Do some validation....
   2778         if (deviceInfo != null && !deviceInfo.isSink()) {
   2779             return false;
   2780         }
   2781         int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
   2782         boolean status = native_setOutputDevice(preferredDeviceId);
   2783         if (status == true) {
   2784             synchronized (this) {
   2785                 mPreferredDevice = deviceInfo;
   2786             }
   2787         }
   2788         return status;
   2789     }
   2790 
   2791     /**
   2792      * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
   2793      * is not guaranteed to correspond to the actual device being used for playback.
   2794      */
   2795     @Override
   2796     public AudioDeviceInfo getPreferredDevice() {
   2797         synchronized (this) {
   2798             return mPreferredDevice;
   2799         }
   2800     }
   2801 
   2802     /**
   2803      * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
   2804      * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
   2805      * <code>getRoutedDevice()</code> will return null.
   2806      */
   2807     @Override
   2808     public AudioDeviceInfo getRoutedDevice() {
   2809         int deviceId = native_getRoutedDeviceId();
   2810         if (deviceId == 0) {
   2811             return null;
   2812         }
   2813         AudioDeviceInfo[] devices =
   2814                 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS);
   2815         for (int i = 0; i < devices.length; i++) {
   2816             if (devices[i].getId() == deviceId) {
   2817                 return devices[i];
   2818             }
   2819         }
   2820         return null;
   2821     }
   2822 
   2823     /*
   2824      * Call BEFORE adding a routing callback handler.
   2825      */
   2826     @GuardedBy("mRoutingChangeListeners")
   2827     private void testEnableNativeRoutingCallbacksLocked() {
   2828         if (mRoutingChangeListeners.size() == 0) {
   2829             native_enableDeviceCallback();
   2830         }
   2831     }
   2832 
   2833     /*
   2834      * Call AFTER removing a routing callback handler.
   2835      */
   2836     @GuardedBy("mRoutingChangeListeners")
   2837     private void testDisableNativeRoutingCallbacksLocked() {
   2838         if (mRoutingChangeListeners.size() == 0) {
   2839             native_disableDeviceCallback();
   2840         }
   2841     }
   2842 
   2843     //--------------------------------------------------------------------------
   2844     // (Re)Routing Info
   2845     //--------------------
   2846     /**
   2847      * The list of AudioRouting.OnRoutingChangedListener interfaces added (with
   2848      * {@link #addOnRoutingChangedListener(android.media.AudioRouting.OnRoutingChangedListener, Handler)}
   2849      * by an app to receive (re)routing notifications.
   2850      */
    // Each listener maps to the delegate that posts its callbacks on the chosen Handler.
    // The map also serves as the lock for the native callback enable/disable helpers.
    @GuardedBy("mRoutingChangeListeners")
    private ArrayMap<AudioRouting.OnRoutingChangedListener,
            NativeRoutingEventHandlerDelegate> mRoutingChangeListeners = new ArrayMap<>();
   2854 
   2855    /**
   2856     * Adds an {@link AudioRouting.OnRoutingChangedListener} to receive notifications of routing
   2857     * changes on this AudioTrack.
   2858     * @param listener The {@link AudioRouting.OnRoutingChangedListener} interface to receive
   2859     * notifications of rerouting events.
   2860     * @param handler  Specifies the {@link Handler} object for the thread on which to execute
   2861     * the callback. If <code>null</code>, the {@link Handler} associated with the main
   2862     * {@link Looper} will be used.
   2863     */
   2864     @Override
   2865     public void addOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener,
   2866             Handler handler) {
   2867         synchronized (mRoutingChangeListeners) {
   2868             if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
   2869                 testEnableNativeRoutingCallbacksLocked();
   2870                 mRoutingChangeListeners.put(
   2871                         listener, new NativeRoutingEventHandlerDelegate(this, listener,
   2872                                 handler != null ? handler : new Handler(mInitializationLooper)));
   2873             }
   2874         }
   2875     }
   2876 
   2877     /**
   2878      * Removes an {@link AudioRouting.OnRoutingChangedListener} which has been previously added
   2879      * to receive rerouting notifications.
   2880      * @param listener The previously added {@link AudioRouting.OnRoutingChangedListener} interface
   2881      * to remove.
   2882      */
   2883     @Override
   2884     public void removeOnRoutingChangedListener(AudioRouting.OnRoutingChangedListener listener) {
   2885         synchronized (mRoutingChangeListeners) {
   2886             if (mRoutingChangeListeners.containsKey(listener)) {
   2887                 mRoutingChangeListeners.remove(listener);
   2888             }
   2889             testDisableNativeRoutingCallbacksLocked();
   2890         }
   2891     }
   2892 
   2893     //--------------------------------------------------------------------------
   2894     // (Re)Routing Info
   2895     //--------------------
   2896     /**
   2897      * Defines the interface by which applications can receive notifications of
   2898      * routing changes for the associated {@link AudioTrack}.
   2899      *
   2900      * @deprecated users should switch to the general purpose
   2901      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
   2902      */
    @Deprecated
    public interface OnRoutingChangedListener extends AudioRouting.OnRoutingChangedListener {
        /**
         * Called when the routing of an AudioTrack changes from either an
         * explicit or policy rerouting. Use {@link #getRoutedDevice()} to
         * retrieve the newly routed-to device.
         * @param audioTrack the AudioTrack whose routing changed
         */
        public void onRoutingChanged(AudioTrack audioTrack);

        /**
         * Adapts the generic {@link AudioRouting} notification to the
         * AudioTrack-specific callback above.
         */
        @Override
        default public void onRoutingChanged(AudioRouting router) {
            if (router instanceof AudioTrack) {
                onRoutingChanged((AudioTrack) router);
            }
        }
    }
   2919 
   2920     /**
   2921      * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
   2922      * on this AudioTrack.
   2923      * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
   2924      * of rerouting events.
   2925      * @param handler  Specifies the {@link Handler} object for the thread on which to execute
   2926      * the callback. If <code>null</code>, the {@link Handler} associated with the main
   2927      * {@link Looper} will be used.
   2928      * @deprecated users should switch to the general purpose
   2929      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
   2930      */
    @Deprecated
    public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
            android.os.Handler handler) {
        // The cast forces overload resolution to the non-deprecated
        // AudioRouting.OnRoutingChangedListener variant; without it this
        // method would be the most specific match and call itself.
        addOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener, handler);
    }
   2936 
   2937     /**
   2938      * Removes an {@link OnRoutingChangedListener} which has been previously added
   2939      * to receive rerouting notifications.
   2940      * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
   2941      * @deprecated users should switch to the general purpose
   2942      *             {@link AudioRouting.OnRoutingChangedListener} class instead.
   2943      */
    @Deprecated
    public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
        // The cast forces overload resolution to the non-deprecated
        // AudioRouting.OnRoutingChangedListener variant; without it this
        // method would be the most specific match and call itself.
        removeOnRoutingChangedListener((AudioRouting.OnRoutingChangedListener) listener);
    }
   2948 
   2949     /**
   2950      * Sends device list change notification to all listeners.
   2951      */
   2952     private void broadcastRoutingChange() {
   2953         AudioManager.resetAudioPortGeneration();
   2954         synchronized (mRoutingChangeListeners) {
   2955             for (NativeRoutingEventHandlerDelegate delegate : mRoutingChangeListeners.values()) {
   2956                 delegate.notifyClient();
   2957             }
   2958         }
   2959     }
   2960 
   2961     //---------------------------------------------------------
   2962     // Interface definitions
   2963     //--------------------
    /**
     * Interface definition for a callback to be invoked when the playback head position of
     * an AudioTrack has reached a notification marker or has increased by a certain period.
     */
    public interface OnPlaybackPositionUpdateListener  {
        /**
         * Called on the listener to notify it that the previously set marker has been reached
         * by the playback head.
         * @param track the {@link AudioTrack} whose marker has been reached
         */
        void onMarkerReached(AudioTrack track);

        /**
         * Called on the listener to periodically notify it that the playback head has reached
         * a multiple of the notification period.
         * @param track the {@link AudioTrack} whose notification period has elapsed
         */
        void onPeriodicNotification(AudioTrack track);
    }
   2981 
    /**
     * @hide
     * Abstract class to receive event notification about the stream playback.
     * All base implementations are no-ops; subclasses override only the events
     * they are interested in.
     * See {@link AudioTrack#setStreamEventCallback(Executor, StreamEventCallback)} to register
     * the callback on the given {@link AudioTrack} instance.
     */
    public abstract static class StreamEventCallback {
        /** @hide */ // add hidden empty constructor so it doesn't show in SDK
        public StreamEventCallback() { }
        /**
         * Called when an offloaded track is no longer valid and has been discarded by the system.
         * An example of this happening is when an offloaded track has been paused too long, and
         * gets invalidated by the system to prevent any other offload.
         * @param track the {@link AudioTrack} on which the event happened
         */
        public void onTearDown(AudioTrack track) { }
        /**
         * Called when all the buffers of an offloaded track that were queued in the audio system
         * (e.g. the combination of the Android audio framework and the device's audio hardware)
         * have been played after {@link AudioTrack#stop()} has been called.
         * @param track the {@link AudioTrack} on which the event happened
         */
        public void onStreamPresentationEnd(AudioTrack track) { }
        /**
         * Called when more audio data can be written without blocking on an offloaded track.
         * @param track the {@link AudioTrack} on which the event happened
         */
        public void onStreamDataRequest(AudioTrack track) { }
    }
   3011 
    // Executor/callback pair for offloaded stream events. Both fields are always read
    // and written together under mStreamEventCbLock (see setStreamEventCallback,
    // removeStreamEventCallback and postEventFromNative).
    private Executor mStreamEventExec;
    private StreamEventCallback mStreamEventCb;
    private final Object mStreamEventCbLock = new Object();
   3015 
    /**
     * @hide
     * Sets the callback for the notification of stream events.
     * Replaces any previously set callback/executor pair.
     * @param executor {@link Executor} to handle the callbacks
     * @param eventCallback the callback to receive the stream event notifications
     * @throws IllegalArgumentException if either argument is null
     */
    public void setStreamEventCallback(@NonNull @CallbackExecutor Executor executor,
            @NonNull StreamEventCallback eventCallback) {
        if (eventCallback == null) {
            throw new IllegalArgumentException("Illegal null StreamEventCallback");
        }
        if (executor == null) {
            throw new IllegalArgumentException("Illegal null Executor for the StreamEventCallback");
        }
        // Publish both fields atomically so postEventFromNative never observes a
        // mismatched executor/callback pair.
        synchronized (mStreamEventCbLock) {
            mStreamEventExec = executor;
            mStreamEventCb = eventCallback;
        }
    }
   3035 
    /**
     * @hide
     * Unregisters the callback for notification of stream events, previously set
     * by {@link #setStreamEventCallback(Executor, StreamEventCallback)}.
     */
    public void removeStreamEventCallback() {
        // Clear both fields under the lock; subsequent native stream events are then
        // dropped by the null check in postEventFromNative.
        synchronized (mStreamEventCbLock) {
            mStreamEventExec = null;
            mStreamEventCb = null;
        }
    }
   3047 
   3048     //---------------------------------------------------------
   3049     // Inner classes
   3050     //--------------------
   3051     /**
   3052      * Helper class to handle the forwarding of native events to the appropriate listener
   3053      * (potentially) handled in a different thread
   3054      */
   3055     private class NativePositionEventHandlerDelegate {
   3056         private final Handler mHandler;
   3057 
   3058         NativePositionEventHandlerDelegate(final AudioTrack track,
   3059                                    final OnPlaybackPositionUpdateListener listener,
   3060                                    Handler handler) {
   3061             // find the looper for our new event handler
   3062             Looper looper;
   3063             if (handler != null) {
   3064                 looper = handler.getLooper();
   3065             } else {
   3066                 // no given handler, use the looper the AudioTrack was created in
   3067                 looper = mInitializationLooper;
   3068             }
   3069 
   3070             // construct the event handler with this looper
   3071             if (looper != null) {
   3072                 // implement the event handler delegate
   3073                 mHandler = new Handler(looper) {
   3074                     @Override
   3075                     public void handleMessage(Message msg) {
   3076                         if (track == null) {
   3077                             return;
   3078                         }
   3079                         switch(msg.what) {
   3080                         case NATIVE_EVENT_MARKER:
   3081                             if (listener != null) {
   3082                                 listener.onMarkerReached(track);
   3083                             }
   3084                             break;
   3085                         case NATIVE_EVENT_NEW_POS:
   3086                             if (listener != null) {
   3087                                 listener.onPeriodicNotification(track);
   3088                             }
   3089                             break;
   3090                         default:
   3091                             loge("Unknown native event type: " + msg.what);
   3092                             break;
   3093                         }
   3094                     }
   3095                 };
   3096             } else {
   3097                 mHandler = null;
   3098             }
   3099         }
   3100 
   3101         Handler getHandler() {
   3102             return mHandler;
   3103         }
   3104     }
   3105 
   3106     //---------------------------------------------------------
   3107     // Methods for IPlayer interface
   3108     //--------------------
    // IPlayer interface method: routes the framework's start request to play().
    @Override
    void playerStart() {
        play();
    }
   3113 
    // IPlayer interface method: routes the framework's pause request to pause().
    @Override
    void playerPause() {
        pause();
    }
   3118 
    // IPlayer interface method: routes the framework's stop request to stop().
    @Override
    void playerStop() {
        stop();
    }
   3123 
   3124     //---------------------------------------------------------
   3125     // Java methods called from the native side
   3126     //--------------------
   3127     @SuppressWarnings("unused")
   3128     private static void postEventFromNative(Object audiotrack_ref,
   3129             int what, int arg1, int arg2, Object obj) {
   3130         //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
   3131         final AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
   3132         if (track == null) {
   3133             return;
   3134         }
   3135 
   3136         if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
   3137             track.broadcastRoutingChange();
   3138             return;
   3139         }
   3140 
   3141         if (what == NATIVE_EVENT_MORE_DATA || what == NATIVE_EVENT_NEW_IAUDIOTRACK
   3142                 || what == NATIVE_EVENT_STREAM_END) {
   3143             final Executor exec;
   3144             final StreamEventCallback cb;
   3145             synchronized (track.mStreamEventCbLock) {
   3146                 exec = track.mStreamEventExec;
   3147                 cb = track.mStreamEventCb;
   3148             }
   3149             if ((exec == null) || (cb == null)) {
   3150                 return;
   3151             }
   3152             switch (what) {
   3153                 case NATIVE_EVENT_MORE_DATA:
   3154                     exec.execute(() -> cb.onStreamDataRequest(track));
   3155                     return;
   3156                 case NATIVE_EVENT_NEW_IAUDIOTRACK:
   3157                     // TODO also release track as it's not longer usable
   3158                     exec.execute(() -> cb.onTearDown(track));
   3159                     return;
   3160                 case NATIVE_EVENT_STREAM_END:
   3161                     exec.execute(() -> cb.onStreamPresentationEnd(track));
   3162                     return;
   3163             }
   3164         }
   3165 
   3166         NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
   3167         if (delegate != null) {
   3168             Handler handler = delegate.getHandler();
   3169             if (handler != null) {
   3170                 Message m = handler.obtainMessage(what, arg1, arg2, obj);
   3171                 handler.sendMessage(m);
   3172             }
   3173         }
   3174     }
   3175 
   3176 
   3177     //---------------------------------------------------------
   3178     // Native methods called from the Java side
   3179     //--------------------
   3180 
    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int[] sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId, long nativeAudioTrack,
            boolean offload);

    // Lifecycle: release of the native counterpart.
    private native final void native_finalize();

    /**
     * @hide
     */
    public native final void native_release();

    // Transport control, mirroring the public play()/stop()/pause()/flush() methods.
    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // Write entry points, one per sample container type; the isBlocking flag selects
    // blocking vs. non-blocking behavior.
    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    // Presumably rewinds a MODE_STATIC track's data pointer — confirm against native impl.
    private native final int native_reload_static();

    // Buffer sizing, expressed in frames.
    private native final int native_get_buffer_size_frames();
    private native final int native_set_buffer_size_frames(int bufferSizeInFrames);
    private native final int native_get_buffer_capacity_frames();

    // Per-channel volume.
    private native final void native_setVolume(float leftVolume, float rightVolume);

    // Playback rate (Hz) and extended playback parameters.
    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // Marker and periodic position-notification configuration (see
    // OnPlaybackPositionUpdateListener).
    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    private native final int native_get_underrun_count();

    private native final int native_get_flags();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // Static queries that do not require a constructed track.
    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // Auxiliary effect attachment and send level.
    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // Explicit routing and device-callback plumbing.
    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();
    static private native int native_get_FCC_8();

    // VolumeShaper application and state query.
    private native int native_applyVolumeShaper(
            @NonNull VolumeShaper.Configuration configuration,
            @NonNull VolumeShaper.Operation operation);

    private native @Nullable VolumeShaper.State native_getVolumeShaperState(int id);
    private native final int native_setPresentation(int presentationId, int programId);
   3275 
   3276     //---------------------------------------------------------
   3277     // Utility methods
   3278     //------------------
   3279 
    /** Convenience wrapper for debug logging with this class's TAG. */
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
   3283 
    /** Convenience wrapper for error logging with this class's TAG. */
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
   3287 
    public final static class MetricsConstants
    {
        private MetricsConstants() {}

        /**
         * Key to extract the Stream Type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a String.
         */
        public static final String STREAMTYPE = "android.media.audiotrack.streamtype";

        /**
         * Key to extract the Content Type for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a String.
         */
        public static final String CONTENTTYPE = "android.media.audiotrack.type";

        /**
         * Key to extract the Usage for this track
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is a String.
         */
        public static final String USAGE = "android.media.audiotrack.usage";

        /**
         * Key to extract the sample rate for this track in Hz
         * from the {@link AudioTrack#getMetrics} return value.
         * The value is an integer.
         * NOTE(review): the key string carries an "audiorecord" prefix, which looks like
         * a copy-paste from AudioRecord's metrics keys; the value is public API surface
         * and must not be changed without a deprecation path.
         */
        public static final String SAMPLERATE = "android.media.audiorecord.samplerate";

        /**
         * Key to extract the channel mask information for this track
         * from the {@link AudioTrack#getMetrics} return value.
         *
         * The value is a Long integer.
         * NOTE(review): same "audiorecord" prefix anomaly as {@link #SAMPLERATE}.
         */
        public static final String CHANNELMASK = "android.media.audiorecord.channelmask";

    }
   3329 }
   3330