Home | History | Annotate | Download | only in media
      1 /*
      2  * Copyright (C) 2008 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 package android.media;
     18 
     19 import java.lang.annotation.Retention;
     20 import java.lang.annotation.RetentionPolicy;
     21 import java.lang.ref.WeakReference;
     22 import java.lang.Math;
     23 import java.nio.ByteBuffer;
     24 import java.nio.ByteOrder;
     25 import java.nio.NioUtils;
     26 import java.util.Collection;
     27 
     28 import android.annotation.IntDef;
     29 import android.annotation.NonNull;
     30 import android.app.ActivityThread;
     31 import android.app.AppOpsManager;
     32 import android.content.Context;
     33 import android.os.Handler;
     34 import android.os.IBinder;
     35 import android.os.Looper;
     36 import android.os.Message;
     37 import android.os.Process;
     38 import android.os.RemoteException;
     39 import android.os.ServiceManager;
     40 import android.util.ArrayMap;
     41 import android.util.Log;
     42 
     43 import com.android.internal.app.IAppOpsService;
     44 
     45 
/**
 * The AudioTrack class manages and plays a single audio resource for Java applications.
 * It allows streaming of PCM audio buffers to the audio sink for playback. This is
 * achieved by "pushing" the data to the AudioTrack object using one of the
 *  {@link #write(byte[], int, int)}, {@link #write(short[], int, int)},
 *  and {@link #write(float[], int, int, int)} methods.
 *
 * <p>An AudioTrack instance can operate under two modes: static or streaming.<br>
 * In Streaming mode, the application writes a continuous stream of data to the AudioTrack, using
 * one of the {@code write()} methods. These are blocking and return when the data has been
 * transferred from the Java layer to the native layer and queued for playback. The streaming
 * mode is most useful when playing blocks of audio data that for instance are:
 *
 * <ul>
 *   <li>too big to fit in memory because of the duration of the sound to play,</li>
 *   <li>too big to fit in memory because of the characteristics of the audio data
 *         (high sampling rate, bits per sample ...)</li>
 *   <li>received or generated while previously queued audio is playing.</li>
 * </ul>
 *
 * The static mode should be chosen when dealing with short sounds that fit in memory and
 * that need to be played with the smallest latency possible. The static mode will
 * therefore be preferred for UI and game sounds that are played often, and with the
 * smallest overhead possible.
 *
 * <p>Upon creation, an AudioTrack object initializes its associated audio buffer.
 * The size of this buffer, specified during the construction, determines how long an AudioTrack
 * can play before running out of data.<br>
 * For an AudioTrack using the static mode, this size is the maximum size of the sound that can
 * be played from it.<br>
 * For the streaming mode, data will be written to the audio sink in chunks of
 * sizes less than or equal to the total buffer size.
 *
 * AudioTrack is not final and thus permits subclasses, but such use is not recommended.
 */
public class AudioTrack
{
    //---------------------------------------------------------
    // Constants
    //--------------------
    /** Minimum value for a linear gain or auxiliary effect level.
     *  This value must be exactly equal to 0.0f; do not change it.
     */
    private static final float GAIN_MIN = 0.0f;
    /** Maximum value for a linear gain or auxiliary effect level.
     *  This value must be greater than or equal to 1.0f.
     */
    private static final float GAIN_MAX = 1.0f;

    /** Minimum value for sample rate, in Hz. */
    private static final int SAMPLE_RATE_HZ_MIN = 4000;
    /** Maximum value for sample rate, in Hz. */
    private static final int SAMPLE_RATE_HZ_MAX = 192000;

    // FCC_8 (fixed channel count of 8 in the native layer)
    /** Maximum value for AudioTrack channel count */
    private static final int CHANNEL_COUNT_MAX = 8;

    /** indicates AudioTrack state is stopped */
    public static final int PLAYSTATE_STOPPED = 1;  // matches SL_PLAYSTATE_STOPPED
    /** indicates AudioTrack state is paused */
    public static final int PLAYSTATE_PAUSED  = 2;  // matches SL_PLAYSTATE_PAUSED
    /** indicates AudioTrack state is playing */
    public static final int PLAYSTATE_PLAYING = 3;  // matches SL_PLAYSTATE_PLAYING

    // keep these values in sync with android_media_AudioTrack.cpp
    /**
     * Creation mode where audio data is transferred from Java to the native layer
     * only once before the audio starts playing.
     */
    public static final int MODE_STATIC = 0;
    /**
     * Creation mode where audio data is streamed from Java to the native layer
     * as the audio is playing.
     */
    public static final int MODE_STREAM = 1;

    /** @hide */
    @IntDef({
        MODE_STATIC,
        MODE_STREAM
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface TransferMode {}

    /**
     * State of an AudioTrack that was not successfully initialized upon creation.
     */
    public static final int STATE_UNINITIALIZED = 0;
    /**
     * State of an AudioTrack that is ready to be used.
     */
    public static final int STATE_INITIALIZED   = 1;
    /**
     * State of a successfully initialized AudioTrack that uses static data,
     * but that hasn't received that data yet.
     */
    public static final int STATE_NO_STATIC_DATA = 2;

    /**
     * Denotes a successful operation.
     */
    public  static final int SUCCESS                               = AudioSystem.SUCCESS;
    /**
     * Denotes a generic operation failure.
     */
    public  static final int ERROR                                 = AudioSystem.ERROR;
    /**
     * Denotes a failure due to the use of an invalid value.
     */
    public  static final int ERROR_BAD_VALUE                       = AudioSystem.BAD_VALUE;
    /**
     * Denotes a failure due to the improper use of a method.
     */
    public  static final int ERROR_INVALID_OPERATION               = AudioSystem.INVALID_OPERATION;
    /**
     * An error code indicating that the object reporting it is no longer valid and needs to
     * be recreated.
     * @hide
     */
    public  static final int ERROR_DEAD_OBJECT                     = AudioSystem.DEAD_OBJECT;
    /**
     * {@link #getTimestampWithStatus(AudioTimestamp)} is called in STOPPED or FLUSHED state,
     * or immediately after start/ACTIVE.
     * @hide
     */
    public  static final int ERROR_WOULD_BLOCK                     = AudioSystem.WOULD_BLOCK;

    // Error codes:
    // to keep in sync with frameworks/base/core/jni/android_media_AudioTrack.cpp
    private static final int ERROR_NATIVESETUP_AUDIOSYSTEM         = -16;
    private static final int ERROR_NATIVESETUP_INVALIDCHANNELMASK  = -17;
    private static final int ERROR_NATIVESETUP_INVALIDFORMAT       = -18;
    private static final int ERROR_NATIVESETUP_INVALIDSTREAMTYPE   = -19;
    private static final int ERROR_NATIVESETUP_NATIVEINITFAILED    = -20;

    // Events:
    // to keep in sync with frameworks/av/include/media/AudioTrack.h
    /**
     * Event id denotes when playback head has reached a previously set marker.
     */
    private static final int NATIVE_EVENT_MARKER  = 3;
    /**
     * Event id denotes when previously set update period has elapsed during playback.
     */
    private static final int NATIVE_EVENT_NEW_POS = 4;

    private final static String TAG = "android.media.AudioTrack";


    /** @hide */
    @IntDef({
        WRITE_BLOCKING,
        WRITE_NON_BLOCKING
    })
    @Retention(RetentionPolicy.SOURCE)
    public @interface WriteMode {}

    /**
     * The write mode indicating the write operation will block until all data has been written,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_BLOCKING = 0;

    /**
     * The write mode indicating the write operation will return immediately after
     * queuing as much audio data for playback as possible without blocking,
     * to be used as the actual value of the writeMode parameter in
     * {@link #write(byte[], int, int, int)}, {@link #write(short[], int, int, int)},
     * {@link #write(float[], int, int, int)}, {@link #write(ByteBuffer, int, int)}, and
     * {@link #write(ByteBuffer, int, int, long)}.
     */
    public final static int WRITE_NON_BLOCKING = 1;

    //--------------------------------------------------------------------------
    // Member variables
    //--------------------
    /**
     * Indicates the state of the AudioTrack instance.
     * One of STATE_UNINITIALIZED, STATE_INITIALIZED, or STATE_NO_STATIC_DATA.
     */
    private int mState = STATE_UNINITIALIZED;
    /**
     * Indicates the play state of the AudioTrack instance.
     * One of PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, or PLAYSTATE_PLAYING.
     */
    private int mPlayState = PLAYSTATE_STOPPED;
    /**
     * Lock to ensure mPlayState updates reflect the actual state of the object.
     */
    private final Object mPlayStateLock = new Object();
    /**
     * Sizes of the native audio buffer.
     * These values are set during construction and can be stale.
     * To obtain the current native audio buffer frame count use {@link #getBufferSizeInFrames()}.
     */
    private int mNativeBufferSizeInBytes = 0;
    private int mNativeBufferSizeInFrames = 0;
    /**
     * Handler for events coming from the native code.
     */
    private NativePositionEventHandlerDelegate mEventHandlerDelegate;
    /**
     * Looper associated with the thread that creates the AudioTrack instance.
     * Falls back to the main looper when the creating thread has none.
     */
    private final Looper mInitializationLooper;
    /**
     * The audio data source sampling rate in Hz.
     */
    private int mSampleRate; // initialized by all constructors via audioParamCheck()
    /**
     * The number of audio output channels (1 is mono, 2 is stereo, etc.).
     */
    private int mChannelCount = 1;
    /**
     * The audio channel mask used for calling native AudioTrack
     */
    private int mChannelMask = AudioFormat.CHANNEL_OUT_MONO;

    /**
     * The type of the audio stream to play. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, {@link AudioManager#STREAM_NOTIFICATION}, and
     *   {@link AudioManager#STREAM_DTMF}.
     */
    private int mStreamType = AudioManager.STREAM_MUSIC;

    // Immutable attributes describing this track's usage; copied at construction.
    private final AudioAttributes mAttributes;
    /**
     * The way audio is consumed by the audio sink, one of MODE_STATIC or MODE_STREAM.
     */
    private int mDataLoadMode = MODE_STREAM;
    /**
     * The current channel position mask, as specified on AudioTrack creation.
     * Can be set simultaneously with channel index mask {@link #mChannelIndexMask}.
     * May be set to {@link AudioFormat#CHANNEL_INVALID} if a channel index mask is specified.
     */
    private int mChannelConfiguration = AudioFormat.CHANNEL_OUT_MONO;
    /**
     * The channel index mask if specified, otherwise 0.
     */
    private int mChannelIndexMask = 0;
    /**
     * The encoding of the audio samples.
     * @see AudioFormat#ENCODING_PCM_8BIT
     * @see AudioFormat#ENCODING_PCM_16BIT
     * @see AudioFormat#ENCODING_PCM_FLOAT
     */
    private int mAudioFormat;   // initialized by all constructors via audioParamCheck()
    /**
     * Audio session ID
     */
    private int mSessionId = AudioSystem.AUDIO_SESSION_ALLOCATE;
    /**
     * Reference to the app-ops service.
     */
    private final IAppOpsService mAppOps;
    /**
     * HW_AV_SYNC track AV Sync Header
     */
    private ByteBuffer mAvSyncHeader = null;
    /**
     * HW_AV_SYNC track audio data bytes remaining to write after current AV sync header
     */
    private int mAvSyncBytesRemaining = 0;

    //--------------------------------
    // Used exclusively by native code
    //--------------------
    /**
     * Accessed by native methods: provides access to C++ AudioTrack object.
     */
    @SuppressWarnings("unused")
    private long mNativeTrackInJavaObj;
    /**
     * Accessed by native methods: provides access to the JNI data (i.e. resources used by
     * the native AudioTrack object, but not stored in it).
     */
    @SuppressWarnings("unused")
    private long mJniData;
    330 
    331 
    //--------------------------------------------------------------------------
    // Constructor, Finalize
    //--------------------
    /**
     * Class constructor.
     * <p>Delegates to the session-aware constructor with
     * {@link AudioSystem#AUDIO_SESSION_ALLOCATE}, so a new audio session is
     * allocated for this track.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the internal buffer where audio data is
     *   read from for playback. This should be a multiple of the frame size in bytes.
     *   <p> If the track's creation mode is {@link #MODE_STATIC},
     *   this is the maximum length sample, or audio clip, that can be played by this instance.
     *   <p> If the track's creation mode is {@link #MODE_STREAM},
     *   this should be the desired buffer size
     *   for the <code>AudioTrack</code> to satisfy the application's
     *   natural latency requirements.
     *   If <code>bufferSizeInBytes</code> is less than the
     *   minimum buffer size for the output sink, it is automatically increased to the minimum
     *   buffer size.
     *   The method {@link #getBufferSizeInFrames()} returns the
     *   actual size in frames of the native buffer created, which
     *   determines the frequency to write
     *   to the streaming <code>AudioTrack</code> to avoid underrun.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @throws java.lang.IllegalArgumentException if any parameter fails validation in the
     *   delegated constructor.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, AudioSystem.AUDIO_SESSION_ALLOCATE);
    }
    373 
    /**
     * Class constructor with audio session. Use this constructor when the AudioTrack must be
     * attached to a particular audio session. The primary use of the audio session ID is to
     * associate audio effects to a particular instance of AudioTrack: if an audio session ID
     * is provided when creating an AudioEffect, this effect will be applied only to audio tracks
     * and media players in the same session and not to the output mix.
     * When an AudioTrack is created without specifying a session, it will create its own session
     * which can be retrieved by calling the {@link #getAudioSessionId()} method.
     * If a non-zero session ID is provided, this AudioTrack will share effects attached to this
     * session
     * with all other media players or audio tracks in the same session, otherwise a new session
     * will be created for this track if none is supplied.
     * <p>Delegates to the {@link AudioAttributes}/{@link AudioFormat} constructor after
     * converting the legacy stream type and raw format parameters with the respective builders.
     * @param streamType the type of the audio stream. See
     *   {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
     *   {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC},
     *   {@link AudioManager#STREAM_ALARM}, and {@link AudioManager#STREAM_NOTIFICATION}.
     * @param sampleRateInHz the initial source sample rate expressed in Hz.
     * @param channelConfig describes the configuration of the audio channels.
     *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
     *   {@link AudioFormat#CHANNEL_OUT_STEREO}
     * @param audioFormat the format in which the audio data is represented.
     *   See {@link AudioFormat#ENCODING_PCM_16BIT},
     *   {@link AudioFormat#ENCODING_PCM_8BIT},
     *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
     * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
     *   from for playback. If using the AudioTrack in streaming mode, you can write data into
     *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
     *   this is the maximum size of the sound that will be played for this instance.
     *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
     *   for the successful creation of an AudioTrack instance in streaming mode. Using values
     *   smaller than getMinBufferSize() will result in an initialization failure.
     * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}
     * @param sessionId Id of audio session the AudioTrack must be attached to
     * @throws java.lang.IllegalArgumentException if any parameter fails validation in the
     *   delegated constructor.
     */
    public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED
        this((new AudioAttributes.Builder())
                    .setLegacyStreamType(streamType)
                    .build(),
                (new AudioFormat.Builder())
                    .setChannelMask(channelConfig)
                    .setEncoding(audioFormat)
                    .setSampleRate(sampleRateInHz)
                    .build(),
                bufferSizeInBytes,
                mode, sessionId);
    }
    424 
    425     /**
    426      * Class constructor with {@link AudioAttributes} and {@link AudioFormat}.
    427      * @param attributes a non-null {@link AudioAttributes} instance.
    428      * @param format a non-null {@link AudioFormat} instance describing the format of the data
    429      *     that will be played through this AudioTrack. See {@link AudioFormat.Builder} for
    430      *     configuring the audio format parameters such as encoding, channel mask and sample rate.
    431      * @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
    432      *   from for playback. If using the AudioTrack in streaming mode, you can write data into
    433      *   this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
    434      *   this is the maximum size of the sound that will be played for this instance.
    435      *   See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
    436      *   for the successful creation of an AudioTrack instance in streaming mode. Using values
    437      *   smaller than getMinBufferSize() will result in an initialization failure.
    438      * @param mode streaming or static buffer. See {@link #MODE_STATIC} and {@link #MODE_STREAM}.
    439      * @param sessionId ID of audio session the AudioTrack must be attached to, or
    440      *   {@link AudioManager#AUDIO_SESSION_ID_GENERATE} if the session isn't known at construction
    441      *   time. See also {@link AudioManager#generateAudioSessionId()} to obtain a session ID before
    442      *   construction.
    443      * @throws IllegalArgumentException
    444      */
    445     public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
    446             int mode, int sessionId)
    447                     throws IllegalArgumentException {
    448         // mState already == STATE_UNINITIALIZED
    449 
    450         if (attributes == null) {
    451             throw new IllegalArgumentException("Illegal null AudioAttributes");
    452         }
    453         if (format == null) {
    454             throw new IllegalArgumentException("Illegal null AudioFormat");
    455         }
    456 
    457         // remember which looper is associated with the AudioTrack instantiation
    458         Looper looper;
    459         if ((looper = Looper.myLooper()) == null) {
    460             looper = Looper.getMainLooper();
    461         }
    462 
    463         int rate = 0;
    464         if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_SAMPLE_RATE) != 0)
    465         {
    466             rate = format.getSampleRate();
    467         } else {
    468             rate = AudioSystem.getPrimaryOutputSamplingRate();
    469             if (rate <= 0) {
    470                 rate = 44100;
    471             }
    472         }
    473         int channelIndexMask = 0;
    474         if ((format.getPropertySetMask()
    475                 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
    476             channelIndexMask = format.getChannelIndexMask();
    477         }
    478         int channelMask = 0;
    479         if ((format.getPropertySetMask()
    480                 & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
    481             channelMask = format.getChannelMask();
    482         } else if (channelIndexMask == 0) { // if no masks at all, use stereo
    483             channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
    484                     | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
    485         }
    486         int encoding = AudioFormat.ENCODING_DEFAULT;
    487         if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
    488             encoding = format.getEncoding();
    489         }
    490         audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
    491         mStreamType = AudioSystem.STREAM_DEFAULT;
    492 
    493         audioBuffSizeCheck(bufferSizeInBytes);
    494 
    495         mInitializationLooper = looper;
    496         IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
    497         mAppOps = IAppOpsService.Stub.asInterface(b);
    498 
    499         mAttributes = new AudioAttributes.Builder(attributes).build();
    500 
    501         if (sessionId < 0) {
    502             throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
    503         }
    504 
    505         int[] session = new int[1];
    506         session[0] = sessionId;
    507         // native initialization
    508         int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
    509                 mSampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
    510                 mNativeBufferSizeInBytes, mDataLoadMode, session);
    511         if (initResult != SUCCESS) {
    512             loge("Error code "+initResult+" when initializing AudioTrack.");
    513             return; // with mState == STATE_UNINITIALIZED
    514         }
    515 
    516         mSessionId = session[0];
    517 
    518         if (mDataLoadMode == MODE_STATIC) {
    519             mState = STATE_NO_STATIC_DATA;
    520         } else {
    521             mState = STATE_INITIALIZED;
    522         }
    523     }
    524 
    525     /**
    526      * Builder class for {@link AudioTrack} objects.
    527      * Use this class to configure and create an <code>AudioTrack</code> instance. By setting audio
    528      * attributes and audio format parameters, you indicate which of those vary from the default
    529      * behavior on the device.
    530      * <p> Here is an example where <code>Builder</code> is used to specify all {@link AudioFormat}
    531      * parameters, to be used by a new <code>AudioTrack</code> instance:
    532      *
    533      * <pre class="prettyprint">
    534      * AudioTrack player = new AudioTrack.Builder()
    535      *         .setAudioAttributes(new AudioAttributes.Builder()
    536      *                  .setUsage(AudioAttributes.USAGE_ALARM)
    537      *                  .setContentType(CONTENT_TYPE_MUSIC)
    538      *                  .build())
    539      *         .setAudioFormat(new AudioFormat.Builder()
    540      *                 .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
     541      *                 .setSampleRate(44100)
    542      *                 .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
    543      *                 .build())
    544      *         .setBufferSize(minBuffSize)
    545      *         .build();
    546      * </pre>
    547      * <p>
    548      * If the audio attributes are not set with {@link #setAudioAttributes(AudioAttributes)},
    549      * attributes comprising {@link AudioAttributes#USAGE_MEDIA} will be used.
    550      * <br>If the audio format is not specified or is incomplete, its sample rate will be the
    551      * default output sample rate of the device (see
    552      * {@link AudioManager#PROPERTY_OUTPUT_SAMPLE_RATE}), its channel configuration will be
    553      * {@link AudioFormat#CHANNEL_OUT_STEREO} and the encoding will be
    554      * {@link AudioFormat#ENCODING_PCM_16BIT}.
    555      * <br>If the buffer size is not specified with {@link #setBufferSizeInBytes(int)},
    556      * and the mode is {@link AudioTrack#MODE_STREAM}, the minimum buffer size is used.
    557      * <br>If the transfer mode is not specified with {@link #setTransferMode(int)},
    558      * <code>MODE_STREAM</code> will be used.
    559      * <br>If the session ID is not specified with {@link #setSessionId(int)}, a new one will
    560      * be generated.
    561      */
    public static class Builder {
        // Configuration captured by the setters; copied/validated at build() time.
        private AudioAttributes mAttributes;
        private AudioFormat mFormat;
        private int mBufferSizeInBytes;
        private int mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
        private int mMode = MODE_STREAM;

        /**
         * Constructs a new Builder with the default values as described above.
         */
        public Builder() {
        }
    574 
    575         /**
    576          * Sets the {@link AudioAttributes}.
    577          * @param attributes a non-null {@link AudioAttributes} instance that describes the audio
    578          *     data to be played.
    579          * @return the same Builder instance.
    580          * @throws IllegalArgumentException
    581          */
    582         public @NonNull Builder setAudioAttributes(@NonNull AudioAttributes attributes)
    583                 throws IllegalArgumentException {
    584             if (attributes == null) {
    585                 throw new IllegalArgumentException("Illegal null AudioAttributes argument");
    586             }
    587             // keep reference, we only copy the data when building
    588             mAttributes = attributes;
    589             return this;
    590         }
    591 
    592         /**
    593          * Sets the format of the audio data to be played by the {@link AudioTrack}.
    594          * See {@link AudioFormat.Builder} for configuring the audio format parameters such
    595          * as encoding, channel mask and sample rate.
    596          * @param format a non-null {@link AudioFormat} instance.
    597          * @return the same Builder instance.
    598          * @throws IllegalArgumentException
    599          */
    600         public @NonNull Builder setAudioFormat(@NonNull AudioFormat format)
    601                 throws IllegalArgumentException {
    602             if (format == null) {
    603                 throw new IllegalArgumentException("Illegal null AudioFormat argument");
    604             }
    605             // keep reference, we only copy the data when building
    606             mFormat = format;
    607             return this;
    608         }
    609 
    610         /**
    611          * Sets the total size (in bytes) of the buffer where audio data is read from for playback.
    612          * If using the {@link AudioTrack} in streaming mode
    613          * (see {@link AudioTrack#MODE_STREAM}, you can write data into this buffer in smaller
    614          * chunks than this size. See {@link #getMinBufferSize(int, int, int)} to determine
    615          * the minimum required buffer size for the successful creation of an AudioTrack instance
    616          * in streaming mode. Using values smaller than <code>getMinBufferSize()</code> will result
    617          * in an exception when trying to build the <code>AudioTrack</code>.
    618          * <br>If using the <code>AudioTrack</code> in static mode (see
    619          * {@link AudioTrack#MODE_STATIC}), this is the maximum size of the sound that will be
    620          * played by this instance.
    621          * @param bufferSizeInBytes
    622          * @return the same Builder instance.
    623          * @throws IllegalArgumentException
    624          */
    625         public @NonNull Builder setBufferSizeInBytes(int bufferSizeInBytes)
    626                 throws IllegalArgumentException {
    627             if (bufferSizeInBytes <= 0) {
    628                 throw new IllegalArgumentException("Invalid buffer size " + bufferSizeInBytes);
    629             }
    630             mBufferSizeInBytes = bufferSizeInBytes;
    631             return this;
    632         }
    633 
    634         /**
    635          * Sets the mode under which buffers of audio data are transferred from the
    636          * {@link AudioTrack} to the framework.
    637          * @param mode one of {@link AudioTrack#MODE_STREAM}, {@link AudioTrack#MODE_STATIC}.
    638          * @return the same Builder instance.
    639          * @throws IllegalArgumentException
    640          */
    641         public @NonNull Builder setTransferMode(@TransferMode int mode)
    642                 throws IllegalArgumentException {
    643             switch(mode) {
    644                 case MODE_STREAM:
    645                 case MODE_STATIC:
    646                     mMode = mode;
    647                     break;
    648                 default:
    649                     throw new IllegalArgumentException("Invalid transfer mode " + mode);
    650             }
    651             return this;
    652         }
    653 
    654         /**
    655          * Sets the session ID the {@link AudioTrack} will be attached to.
    656          * @param sessionId a strictly positive ID number retrieved from another
    657          *     <code>AudioTrack</code> via {@link AudioTrack#getAudioSessionId()} or allocated by
    658          *     {@link AudioManager} via {@link AudioManager#generateAudioSessionId()}, or
    659          *     {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
    660          * @return the same Builder instance.
    661          * @throws IllegalArgumentException
    662          */
    663         public @NonNull Builder setSessionId(int sessionId)
    664                 throws IllegalArgumentException {
    665             if ((sessionId != AudioManager.AUDIO_SESSION_ID_GENERATE) && (sessionId < 1)) {
    666                 throw new IllegalArgumentException("Invalid audio session ID " + sessionId);
    667             }
    668             mSessionId = sessionId;
    669             return this;
    670         }
    671 
    672         /**
    673          * Builds an {@link AudioTrack} instance initialized with all the parameters set
    674          * on this <code>Builder</code>.
    675          * @return a new successfully initialized {@link AudioTrack} instance.
    676          * @throws UnsupportedOperationException if the parameters set on the <code>Builder</code>
    677          *     were incompatible, or if they are not supported by the device,
    678          *     or if the device was not available.
    679          */
    680         public @NonNull AudioTrack build() throws UnsupportedOperationException {
    681             if (mAttributes == null) {
    682                 mAttributes = new AudioAttributes.Builder()
    683                         .setUsage(AudioAttributes.USAGE_MEDIA)
    684                         .build();
    685             }
    686             if (mFormat == null) {
    687                 mFormat = new AudioFormat.Builder()
    688                         .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
    689                         .setSampleRate(AudioSystem.getPrimaryOutputSamplingRate())
    690                         .setEncoding(AudioFormat.ENCODING_DEFAULT)
    691                         .build();
    692             }
    693             try {
    694                 // If the buffer size is not specified in streaming mode,
    695                 // use a single frame for the buffer size and let the
    696                 // native code figure out the minimum buffer size.
    697                 if (mMode == MODE_STREAM && mBufferSizeInBytes == 0) {
    698                     mBufferSizeInBytes = mFormat.getChannelCount()
    699                             * mFormat.getBytesPerSample(mFormat.getEncoding());
    700                 }
    701                 final AudioTrack track = new AudioTrack(
    702                         mAttributes, mFormat, mBufferSizeInBytes, mMode, mSessionId);
    703                 if (track.getState() == STATE_UNINITIALIZED) {
    704                     // release is not necessary
    705                     throw new UnsupportedOperationException("Cannot create AudioTrack");
    706                 }
    707                 return track;
    708             } catch (IllegalArgumentException e) {
    709                 throw new UnsupportedOperationException(e.getMessage());
    710             }
    711         }
    712     }
    713 
    // Mask of all the positional channels supported. The allowed combinations are
    // further restricted by the matching left/right pairing rule and by
    // CHANNEL_COUNT_MAX, both enforced in isMultichannelConfigSupported().
    private static final int SUPPORTED_OUT_CHANNELS =
            AudioFormat.CHANNEL_OUT_FRONT_LEFT |
            AudioFormat.CHANNEL_OUT_FRONT_RIGHT |
            AudioFormat.CHANNEL_OUT_FRONT_CENTER |
            AudioFormat.CHANNEL_OUT_LOW_FREQUENCY |
            AudioFormat.CHANNEL_OUT_BACK_LEFT |
            AudioFormat.CHANNEL_OUT_BACK_RIGHT |
            AudioFormat.CHANNEL_OUT_BACK_CENTER |
            AudioFormat.CHANNEL_OUT_SIDE_LEFT |
            AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
    726 
    // Convenience method for the constructor's parameter checks.
    // This is where constructor IllegalArgumentException-s are thrown
    // postconditions:
    //    mChannelCount is valid
    //    mChannelMask is valid
    //    mAudioFormat is valid
    //    mSampleRate is valid
    //    mDataLoadMode is valid
    private void audioParamCheck(int sampleRateInHz, int channelConfig, int channelIndexMask,
                                 int audioFormat, int mode) {
        //--------------
        // sample rate, note these values are subject to change
        if (sampleRateInHz < SAMPLE_RATE_HZ_MIN || sampleRateInHz > SAMPLE_RATE_HZ_MAX) {
            throw new IllegalArgumentException(sampleRateInHz
                    + "Hz is not a supported sample rate.");
        }
        mSampleRate = sampleRateInHz;

        //--------------
        // channel config
        mChannelConfiguration = channelConfig;

        switch (channelConfig) {
        case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            mChannelCount = 1;
            mChannelMask = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            mChannelCount = 2;
            mChannelMask = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        default:
            // CHANNEL_INVALID plus a non-zero index mask means the caller is using
            // a channel *index* configuration only; the count is derived below.
            if (channelConfig == AudioFormat.CHANNEL_INVALID && channelIndexMask != 0) {
                mChannelCount = 0;
                break; // channel index configuration only
            }
            if (!isMultichannelConfigSupported(channelConfig)) {
                // input channel configuration features unsupported channels
                throw new IllegalArgumentException("Unsupported channel configuration.");
            }
            mChannelMask = channelConfig;
            mChannelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
        }
        // check the channel index configuration (if present)
        mChannelIndexMask = channelIndexMask;
        if (mChannelIndexMask != 0) {
            // restrictive: indexMask could allow up to AUDIO_CHANNEL_BITS_LOG2
            final int indexMask = (1 << CHANNEL_COUNT_MAX) - 1;
            if ((channelIndexMask & ~indexMask) != 0) {
                throw new IllegalArgumentException("Unsupported channel index configuration "
                        + channelIndexMask);
            }
            int channelIndexCount = Integer.bitCount(channelIndexMask);
            if (mChannelCount == 0) {
                 mChannelCount = channelIndexCount;
            } else if (mChannelCount != channelIndexCount) {
                // both a position mask and an index mask were given; they must agree
                throw new IllegalArgumentException("Channel count must match");
            }
        }

        //--------------
        // audio format
        if (audioFormat == AudioFormat.ENCODING_DEFAULT) {
            audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        }

        if (!AudioFormat.isPublicEncoding(audioFormat)) {
            throw new IllegalArgumentException("Unsupported audio encoding.");
        }
        mAudioFormat = audioFormat;

        //--------------
        // audio load mode
        // The second clause rejects MODE_STATIC combined with a non-linear-PCM
        // (compressed) encoding: compressed data is only accepted in MODE_STREAM.
        if (((mode != MODE_STREAM) && (mode != MODE_STATIC)) ||
                ((mode != MODE_STREAM) && !AudioFormat.isEncodingLinearPcm(mAudioFormat))) {
            throw new IllegalArgumentException("Invalid mode.");
        }
        mDataLoadMode = mode;
    }
    809 
    810     /**
    811      * Convenience method to check that the channel configuration (a.k.a channel mask) is supported
    812      * @param channelConfig the mask to validate
    813      * @return false if the AudioTrack can't be used with such a mask
    814      */
    815     private static boolean isMultichannelConfigSupported(int channelConfig) {
    816         // check for unsupported channels
    817         if ((channelConfig & SUPPORTED_OUT_CHANNELS) != channelConfig) {
    818             loge("Channel configuration features unsupported channels");
    819             return false;
    820         }
    821         final int channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
    822         if (channelCount > CHANNEL_COUNT_MAX) {
    823             loge("Channel configuration contains too many channels " +
    824                     channelCount + ">" + CHANNEL_COUNT_MAX);
    825             return false;
    826         }
    827         // check for unsupported multichannel combinations:
    828         // - FL/FR must be present
    829         // - L/R channels must be paired (e.g. no single L channel)
    830         final int frontPair =
    831                 AudioFormat.CHANNEL_OUT_FRONT_LEFT | AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
    832         if ((channelConfig & frontPair) != frontPair) {
    833                 loge("Front channels must be present in multichannel configurations");
    834                 return false;
    835         }
    836         final int backPair =
    837                 AudioFormat.CHANNEL_OUT_BACK_LEFT | AudioFormat.CHANNEL_OUT_BACK_RIGHT;
    838         if ((channelConfig & backPair) != 0) {
    839             if ((channelConfig & backPair) != backPair) {
    840                 loge("Rear channels can't be used independently");
    841                 return false;
    842             }
    843         }
    844         final int sidePair =
    845                 AudioFormat.CHANNEL_OUT_SIDE_LEFT | AudioFormat.CHANNEL_OUT_SIDE_RIGHT;
    846         if ((channelConfig & sidePair) != 0
    847                 && (channelConfig & sidePair) != sidePair) {
    848             loge("Side channels can't be used independently");
    849             return false;
    850         }
    851         return true;
    852     }
    853 
    854 
    855     // Convenience method for the constructor's audio buffer size check.
    856     // preconditions:
    857     //    mChannelCount is valid
    858     //    mAudioFormat is valid
    859     // postcondition:
    860     //    mNativeBufferSizeInBytes is valid (multiple of frame size, positive)
    861     private void audioBuffSizeCheck(int audioBufferSize) {
    862         // NB: this section is only valid with PCM data.
    863         //     To update when supporting compressed formats
    864         int frameSizeInBytes;
    865         if (AudioFormat.isEncodingLinearPcm(mAudioFormat)) {
    866             frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
    867         } else {
    868             frameSizeInBytes = 1;
    869         }
    870         if ((audioBufferSize % frameSizeInBytes != 0) || (audioBufferSize < 1)) {
    871             throw new IllegalArgumentException("Invalid audio buffer size.");
    872         }
    873 
    874         mNativeBufferSizeInBytes = audioBufferSize;
    875         mNativeBufferSizeInFrames = audioBufferSize / frameSizeInBytes;
    876     }
    877 
    878 
    879     /**
    880      * Releases the native AudioTrack resources.
    881      */
    882     public void release() {
    883         // even though native_release() stops the native AudioTrack, we need to stop
    884         // AudioTrack subclasses too.
    885         try {
    886             stop();
    887         } catch(IllegalStateException ise) {
    888             // don't raise an exception, we're releasing the resources.
    889         }
    890         native_release();
    891         mState = STATE_UNINITIALIZED;
    892     }
    893 
    @Override
    protected void finalize() {
        // Last-resort cleanup of native resources; release() is the explicit path.
        // Note this does not update mState the way release() does.
        native_finalize();
    }
    898 
    899     //--------------------------------------------------------------------------
    900     // Getters
    901     //--------------------
    902     /**
    903      * Returns the minimum gain value, which is the constant 0.0.
    904      * Gain values less than 0.0 will be clamped to 0.0.
    905      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
    906      * @return the minimum value, which is the constant 0.0.
    907      */
    908     static public float getMinVolume() {
    909         return GAIN_MIN;
    910     }
    911 
    912     /**
    913      * Returns the maximum gain value, which is greater than or equal to 1.0.
    914      * Gain values greater than the maximum will be clamped to the maximum.
    915      * <p>The word "volume" in the API name is historical; this is actually a gain.
    916      * expressed as a linear multiplier on sample values, where a maximum value of 1.0
    917      * corresponds to a gain of 0 dB (sample values left unmodified).
    918      * @return the maximum value, which is greater than or equal to 1.0.
    919      */
    920     static public float getMaxVolume() {
    921         return GAIN_MAX;
    922     }
    923 
    924     /**
    925      * Returns the configured audio data sample rate in Hz
    926      */
    927     public int getSampleRate() {
    928         return mSampleRate;
    929     }
    930 
    931     /**
    932      * Returns the current playback sample rate rate in Hz.
    933      */
    934     public int getPlaybackRate() {
    935         return native_get_playback_rate();
    936     }
    937 
    938     /**
    939      * Returns the current playback parameters.
    940      * See {@link #setPlaybackParams(PlaybackParams)} to set playback parameters
    941      * @return current {@link PlaybackParams}.
    942      * @throws IllegalStateException if track is not initialized.
    943      */
    944     public @NonNull PlaybackParams getPlaybackParams() {
    945         return native_get_playback_params();
    946     }
    947 
    948     /**
    949      * Returns the configured audio data encoding. See {@link AudioFormat#ENCODING_PCM_8BIT},
    950      * {@link AudioFormat#ENCODING_PCM_16BIT}, and {@link AudioFormat#ENCODING_PCM_FLOAT}.
    951      */
    952     public int getAudioFormat() {
    953         return mAudioFormat;
    954     }
    955 
    956     /**
    957      * Returns the type of audio stream this AudioTrack is configured for.
    958      * Compare the result against {@link AudioManager#STREAM_VOICE_CALL},
    959      * {@link AudioManager#STREAM_SYSTEM}, {@link AudioManager#STREAM_RING},
    960      * {@link AudioManager#STREAM_MUSIC}, {@link AudioManager#STREAM_ALARM},
    961      * {@link AudioManager#STREAM_NOTIFICATION}, or {@link AudioManager#STREAM_DTMF}.
    962      */
    963     public int getStreamType() {
    964         return mStreamType;
    965     }
    966 
    967     /**
    968      * Returns the configured channel position mask.
    969      * <p> For example, refer to {@link AudioFormat#CHANNEL_OUT_MONO},
    970      * {@link AudioFormat#CHANNEL_OUT_STEREO}, {@link AudioFormat#CHANNEL_OUT_5POINT1}.
    971      * This method may return {@link AudioFormat#CHANNEL_INVALID} if
    972      * a channel index mask was used. Consider
    973      * {@link #getFormat()} instead, to obtain an {@link AudioFormat},
    974      * which contains both the channel position mask and the channel index mask.
    975      */
    976     public int getChannelConfiguration() {
    977         return mChannelConfiguration;
    978     }
    979 
    980     /**
    981      * Returns the configured <code>AudioTrack</code> format.
    982      * @return an {@link AudioFormat} containing the
    983      * <code>AudioTrack</code> parameters at the time of configuration.
    984      */
    985     public @NonNull AudioFormat getFormat() {
    986         AudioFormat.Builder builder = new AudioFormat.Builder()
    987             .setSampleRate(mSampleRate)
    988             .setEncoding(mAudioFormat);
    989         if (mChannelConfiguration != AudioFormat.CHANNEL_INVALID) {
    990             builder.setChannelMask(mChannelConfiguration);
    991         }
    992         if (mChannelIndexMask != AudioFormat.CHANNEL_INVALID /* 0 */) {
    993             builder.setChannelIndexMask(mChannelIndexMask);
    994         }
    995         return builder.build();
    996     }
    997 
    998     /**
    999      * Returns the configured number of channels.
   1000      */
   1001     public int getChannelCount() {
   1002         return mChannelCount;
   1003     }
   1004 
   1005     /**
   1006      * Returns the state of the AudioTrack instance. This is useful after the
   1007      * AudioTrack instance has been created to check if it was initialized
   1008      * properly. This ensures that the appropriate resources have been acquired.
   1009      * @see #STATE_UNINITIALIZED
   1010      * @see #STATE_INITIALIZED
   1011      * @see #STATE_NO_STATIC_DATA
   1012      */
   1013     public int getState() {
   1014         return mState;
   1015     }
   1016 
   1017     /**
   1018      * Returns the playback state of the AudioTrack instance.
   1019      * @see #PLAYSTATE_STOPPED
   1020      * @see #PLAYSTATE_PAUSED
   1021      * @see #PLAYSTATE_PLAYING
   1022      */
   1023     public int getPlayState() {
   1024         synchronized (mPlayStateLock) {
   1025             return mPlayState;
   1026         }
   1027     }
   1028 
   1029     /**
   1030      *  Returns the frame count of the native <code>AudioTrack</code> buffer.
   1031      *  <p> If the track's creation mode is {@link #MODE_STATIC},
   1032      *  it is equal to the specified bufferSizeInBytes on construction, converted to frame units.
   1033      *  A static track's native frame count will not change.
   1034      *  <p> If the track's creation mode is {@link #MODE_STREAM},
   1035      *  it is greater than or equal to the specified bufferSizeInBytes converted to frame units.
   1036      *  For streaming tracks, this value may be rounded up to a larger value if needed by
   1037      *  the target output sink, and
   1038      *  if the track is subsequently routed to a different output sink, the native
   1039      *  frame count may enlarge to accommodate.
   1040      *  <p> If the <code>AudioTrack</code> encoding indicates compressed data,
   1041      *  e.g. {@link AudioFormat#ENCODING_AC3}, then the frame count returned is
   1042      *  the size of the native <code>AudioTrack</code> buffer in bytes.
   1043      *  <p> See also {@link AudioManager#getProperty(String)} for key
   1044      *  {@link AudioManager#PROPERTY_OUTPUT_FRAMES_PER_BUFFER}.
   1045      *  @return current size in frames of the <code>AudioTrack</code> buffer.
   1046      *  @throws IllegalStateException
   1047      */
   1048     public int getBufferSizeInFrames() {
   1049         return native_get_native_frame_count();
   1050     }
   1051 
   1052     /**
   1053      *  Returns the frame count of the native <code>AudioTrack</code> buffer.
   1054      *  @return current size in frames of the <code>AudioTrack</code> buffer.
   1055      *  @throws IllegalStateException
   1056      *  @deprecated Use the identical public method {@link #getBufferSizeInFrames()} instead.
   1057      */
   1058     @Deprecated
   1059     protected int getNativeFrameCount() {
   1060         return native_get_native_frame_count();
   1061     }
   1062 
   1063     /**
   1064      * Returns marker position expressed in frames.
   1065      * @return marker position in wrapping frame units similar to {@link #getPlaybackHeadPosition},
   1066      * or zero if marker is disabled.
   1067      */
   1068     public int getNotificationMarkerPosition() {
   1069         return native_get_marker_pos();
   1070     }
   1071 
   1072     /**
   1073      * Returns the notification update period expressed in frames.
   1074      * Zero means that no position update notifications are being delivered.
   1075      */
   1076     public int getPositionNotificationPeriod() {
   1077         return native_get_pos_update_period();
   1078     }
   1079 
   1080     /**
   1081      * Returns the playback head position expressed in frames.
   1082      * Though the "int" type is signed 32-bits, the value should be reinterpreted as if it is
   1083      * unsigned 32-bits.  That is, the next position after 0x7FFFFFFF is (int) 0x80000000.
   1084      * This is a continuously advancing counter.  It will wrap (overflow) periodically,
   1085      * for example approximately once every 27:03:11 hours:minutes:seconds at 44.1 kHz.
   1086      * It is reset to zero by {@link #flush()}, {@link #reloadStaticData()}, and {@link #stop()}.
   1087      * If the track's creation mode is {@link #MODE_STATIC}, the return value indicates
   1088      * the total number of frames played since reset,
   1089      * <i>not</i> the current offset within the buffer.
   1090      */
   1091     public int getPlaybackHeadPosition() {
   1092         return native_get_position();
   1093     }
   1094 
   1095     /**
   1096      * Returns this track's estimated latency in milliseconds. This includes the latency due
   1097      * to AudioTrack buffer size, AudioMixer (if any) and audio hardware driver.
   1098      *
   1099      * DO NOT UNHIDE. The existing approach for doing A/V sync has too many problems. We need
   1100      * a better solution.
   1101      * @hide
   1102      */
   1103     public int getLatency() {
   1104         return native_get_latency();
   1105     }
   1106 
   1107     /**
   1108      *  Returns the output sample rate in Hz for the specified stream type.
   1109      */
   1110     static public int getNativeOutputSampleRate(int streamType) {
   1111         return native_get_output_sample_rate(streamType);
   1112     }
   1113 
   1114     /**
   1115      * Returns the minimum buffer size required for the successful creation of an AudioTrack
   1116      * object to be created in the {@link #MODE_STREAM} mode. Note that this size doesn't
   1117      * guarantee a smooth playback under load, and higher values should be chosen according to
   1118      * the expected frequency at which the buffer will be refilled with additional data to play.
   1119      * For example, if you intend to dynamically set the source sample rate of an AudioTrack
   1120      * to a higher value than the initial source sample rate, be sure to configure the buffer size
   1121      * based on the highest planned sample rate.
   1122      * @param sampleRateInHz the source sample rate expressed in Hz.
   1123      * @param channelConfig describes the configuration of the audio channels.
   1124      *   See {@link AudioFormat#CHANNEL_OUT_MONO} and
   1125      *   {@link AudioFormat#CHANNEL_OUT_STEREO}
   1126      * @param audioFormat the format in which the audio data is represented.
   1127      *   See {@link AudioFormat#ENCODING_PCM_16BIT} and
   1128      *   {@link AudioFormat#ENCODING_PCM_8BIT},
   1129      *   and {@link AudioFormat#ENCODING_PCM_FLOAT}.
   1130      * @return {@link #ERROR_BAD_VALUE} if an invalid parameter was passed,
   1131      *   or {@link #ERROR} if unable to query for output properties,
   1132      *   or the minimum buffer size expressed in bytes.
   1133      */
   1134     static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
   1135         int channelCount = 0;
   1136         switch(channelConfig) {
   1137         case AudioFormat.CHANNEL_OUT_MONO:
   1138         case AudioFormat.CHANNEL_CONFIGURATION_MONO:
   1139             channelCount = 1;
   1140             break;
   1141         case AudioFormat.CHANNEL_OUT_STEREO:
   1142         case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
   1143             channelCount = 2;
   1144             break;
   1145         default:
   1146             if (!isMultichannelConfigSupported(channelConfig)) {
   1147                 loge("getMinBufferSize(): Invalid channel configuration.");
   1148                 return ERROR_BAD_VALUE;
   1149             } else {
   1150                 channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
   1151             }
   1152         }
   1153 
   1154         if (!AudioFormat.isPublicEncoding(audioFormat)) {
   1155             loge("getMinBufferSize(): Invalid audio format.");
   1156             return ERROR_BAD_VALUE;
   1157         }
   1158 
   1159         // sample rate, note these values are subject to change
   1160         if ( (sampleRateInHz < SAMPLE_RATE_HZ_MIN) || (sampleRateInHz > SAMPLE_RATE_HZ_MAX) ) {
   1161             loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
   1162             return ERROR_BAD_VALUE;
   1163         }
   1164 
   1165         int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
   1166         if (size <= 0) {
   1167             loge("getMinBufferSize(): error querying hardware");
   1168             return ERROR;
   1169         }
   1170         else {
   1171             return size;
   1172         }
   1173     }
   1174 
   1175     /**
   1176      * Returns the audio session ID.
   1177      *
   1178      * @return the ID of the audio session this AudioTrack belongs to.
   1179      */
   1180     public int getAudioSessionId() {
   1181         return mSessionId;
   1182     }
   1183 
   1184    /**
   1185     * Poll for a timestamp on demand.
   1186     * <p>
   1187     * If you need to track timestamps during initial warmup or after a routing or mode change,
   1188     * you should request a new timestamp periodically until the reported timestamps
   1189     * show that the frame position is advancing, or until it becomes clear that
   1190     * timestamps are unavailable for this route.
   1191     * <p>
   1192     * After the clock is advancing at a stable rate,
   1193     * query for a new timestamp approximately once every 10 seconds to once per minute.
   1194     * Calling this method more often is inefficient.
   1195     * It is also counter-productive to call this method more often than recommended,
   1196     * because the short-term differences between successive timestamp reports are not meaningful.
   1197     * If you need a high-resolution mapping between frame position and presentation time,
   1198     * consider implementing that at application level, based on low-resolution timestamps.
   1199     * <p>
   1200     * The audio data at the returned position may either already have been
   1201     * presented, or may have not yet been presented but is committed to be presented.
   1202     * It is not possible to request the time corresponding to a particular position,
   1203     * or to request the (fractional) position corresponding to a particular time.
   1204     * If you need such features, consider implementing them at application level.
   1205     *
   1206     * @param timestamp a reference to a non-null AudioTimestamp instance allocated
   1207     *        and owned by caller.
   1208     * @return true if a timestamp is available, or false if no timestamp is available.
   1209     *         If a timestamp if available,
   1210     *         the AudioTimestamp instance is filled in with a position in frame units, together
   1211     *         with the estimated time when that frame was presented or is committed to
   1212     *         be presented.
   1213     *         In the case that no timestamp is available, any supplied instance is left unaltered.
   1214     *         A timestamp may be temporarily unavailable while the audio clock is stabilizing,
   1215     *         or during and immediately after a route change.
   1216     *         A timestamp is permanently unavailable for a given route if the route does not support
   1217     *         timestamps.  In this case, the approximate frame position can be obtained
   1218     *         using {@link #getPlaybackHeadPosition}.
   1219     *         However, it may be useful to continue to query for
   1220     *         timestamps occasionally, to recover after a route change.
   1221     */
   1222     // Add this text when the "on new timestamp" API is added:
   1223     //   Use if you need to get the most recent timestamp outside of the event callback handler.
   1224     public boolean getTimestamp(AudioTimestamp timestamp)
   1225     {
   1226         if (timestamp == null) {
   1227             throw new IllegalArgumentException();
   1228         }
   1229         // It's unfortunate, but we have to either create garbage every time or use synchronized
   1230         long[] longArray = new long[2];
   1231         int ret = native_get_timestamp(longArray);
   1232         if (ret != SUCCESS) {
   1233             return false;
   1234         }
   1235         timestamp.framePosition = longArray[0];
   1236         timestamp.nanoTime = longArray[1];
   1237         return true;
   1238     }
   1239 
   1240     /**
   1241      * Poll for a timestamp on demand.
   1242      * <p>
   1243      * Same as {@link #getTimestamp(AudioTimestamp)} but with a more useful return code.
   1244      *
   1245      * @param timestamp a reference to a non-null AudioTimestamp instance allocated
   1246      *        and owned by caller.
   1247      * @return {@link #SUCCESS} if a timestamp is available
   1248      *         {@link #ERROR_WOULD_BLOCK} if called in STOPPED or FLUSHED state, or if called
   1249      *         immediately after start/ACTIVE, when the number of frames consumed is less than the
   1250      *         overall hardware latency to physical output. In WOULD_BLOCK cases, one might poll
   1251      *         again, or use {@link #getPlaybackHeadPosition}, or use 0 position and current time
   1252      *         for the timestamp.
   1253      *         {@link #ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   1254      *         needs to be recreated.
   1255      *         {@link #ERROR_INVALID_OPERATION} if current route does not support
   1256      *         timestamps. In this case, the approximate frame position can be obtained
   1257      *         using {@link #getPlaybackHeadPosition}.
   1258      *
   1259      *         The AudioTimestamp instance is filled in with a position in frame units, together
   1260      *         with the estimated time when that frame was presented or is committed to
   1261      *         be presented.
   1262      * @hide
   1263      */
   1264      // Add this text when the "on new timestamp" API is added:
   1265      //   Use if you need to get the most recent timestamp outside of the event callback handler.
   1266      public int getTimestampWithStatus(AudioTimestamp timestamp)
   1267      {
   1268          if (timestamp == null) {
   1269              throw new IllegalArgumentException();
   1270          }
   1271          // It's unfortunate, but we have to either create garbage every time or use synchronized
   1272          long[] longArray = new long[2];
   1273          int ret = native_get_timestamp(longArray);
   1274          timestamp.framePosition = longArray[0];
   1275          timestamp.nanoTime = longArray[1];
   1276          return ret;
   1277      }
   1278 
   1279     //--------------------------------------------------------------------------
   1280     // Initialization / configuration
   1281     //--------------------
   1282     /**
   1283      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
   1284      * for each periodic playback head position update.
   1285      * Notifications will be received in the same thread as the one in which the AudioTrack
   1286      * instance was created.
   1287      * @param listener
   1288      */
   1289     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener) {
   1290         setPlaybackPositionUpdateListener(listener, null);
   1291     }
   1292 
   1293     /**
   1294      * Sets the listener the AudioTrack notifies when a previously set marker is reached or
   1295      * for each periodic playback head position update.
   1296      * Use this method to receive AudioTrack events in the Handler associated with another
   1297      * thread than the one in which you created the AudioTrack instance.
   1298      * @param listener
   1299      * @param handler the Handler that will receive the event notification messages.
   1300      */
   1301     public void setPlaybackPositionUpdateListener(OnPlaybackPositionUpdateListener listener,
   1302                                                     Handler handler) {
   1303         if (listener != null) {
   1304             mEventHandlerDelegate = new NativePositionEventHandlerDelegate(this, listener, handler);
   1305         } else {
   1306             mEventHandlerDelegate = null;
   1307         }
   1308     }
   1309 
   1310 
   1311     private static float clampGainOrLevel(float gainOrLevel) {
   1312         if (Float.isNaN(gainOrLevel)) {
   1313             throw new IllegalArgumentException();
   1314         }
   1315         if (gainOrLevel < GAIN_MIN) {
   1316             gainOrLevel = GAIN_MIN;
   1317         } else if (gainOrLevel > GAIN_MAX) {
   1318             gainOrLevel = GAIN_MAX;
   1319         }
   1320         return gainOrLevel;
   1321     }
   1322 
   1323 
   1324      /**
   1325      * Sets the specified left and right output gain values on the AudioTrack.
   1326      * <p>Gain values are clamped to the closed interval [0.0, max] where
   1327      * max is the value of {@link #getMaxVolume}.
   1328      * A value of 0.0 results in zero gain (silence), and
   1329      * a value of 1.0 means unity gain (signal unchanged).
   1330      * The default value is 1.0 meaning unity gain.
   1331      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
   1332      * @param leftGain output gain for the left channel.
   1333      * @param rightGain output gain for the right channel
   1334      * @return error code or success, see {@link #SUCCESS},
   1335      *    {@link #ERROR_INVALID_OPERATION}
   1336      * @deprecated Applications should use {@link #setVolume} instead, as it
   1337      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
   1338      */
   1339     public int setStereoVolume(float leftGain, float rightGain) {
   1340         if (isRestricted()) {
   1341             return SUCCESS;
   1342         }
   1343         if (mState == STATE_UNINITIALIZED) {
   1344             return ERROR_INVALID_OPERATION;
   1345         }
   1346 
   1347         leftGain = clampGainOrLevel(leftGain);
   1348         rightGain = clampGainOrLevel(rightGain);
   1349 
   1350         native_setVolume(leftGain, rightGain);
   1351 
   1352         return SUCCESS;
   1353     }
   1354 
   1355 
   1356     /**
   1357      * Sets the specified output gain value on all channels of this track.
   1358      * <p>Gain values are clamped to the closed interval [0.0, max] where
   1359      * max is the value of {@link #getMaxVolume}.
   1360      * A value of 0.0 results in zero gain (silence), and
   1361      * a value of 1.0 means unity gain (signal unchanged).
   1362      * The default value is 1.0 meaning unity gain.
   1363      * <p>This API is preferred over {@link #setStereoVolume}, as it
   1364      * more gracefully scales down to mono, and up to multi-channel content beyond stereo.
   1365      * <p>The word "volume" in the API name is historical; this is actually a linear gain.
   1366      * @param gain output gain for all channels.
   1367      * @return error code or success, see {@link #SUCCESS},
   1368      *    {@link #ERROR_INVALID_OPERATION}
   1369      */
   1370     public int setVolume(float gain) {
   1371         return setStereoVolume(gain, gain);
   1372     }
   1373 
   1374 
   1375     /**
   1376      * Sets the playback sample rate for this track. This sets the sampling rate at which
   1377      * the audio data will be consumed and played back
   1378      * (as set by the sampleRateInHz parameter in the
   1379      * {@link #AudioTrack(int, int, int, int, int, int)} constructor),
   1380      * not the original sampling rate of the
   1381      * content. For example, setting it to half the sample rate of the content will cause the
   1382      * playback to last twice as long, but will also result in a pitch shift down by one octave.
   1383      * The valid sample rate range is from 1 Hz to twice the value returned by
   1384      * {@link #getNativeOutputSampleRate(int)}.
   1385      * Use {@link #setPlaybackParams(PlaybackParams)} for speed control.
   1386      * <p> This method may also be used to repurpose an existing <code>AudioTrack</code>
   1387      * for playback of content of differing sample rate,
   1388      * but with identical encoding and channel mask.
   1389      * @param sampleRateInHz the sample rate expressed in Hz
   1390      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
   1391      *    {@link #ERROR_INVALID_OPERATION}
   1392      */
   1393     public int setPlaybackRate(int sampleRateInHz) {
   1394         if (mState != STATE_INITIALIZED) {
   1395             return ERROR_INVALID_OPERATION;
   1396         }
   1397         if (sampleRateInHz <= 0) {
   1398             return ERROR_BAD_VALUE;
   1399         }
   1400         return native_set_playback_rate(sampleRateInHz);
   1401     }
   1402 
   1403 
   1404     /**
   1405      * Sets the playback parameters.
   1406      * This method returns failure if it cannot apply the playback parameters.
   1407      * One possible cause is that the parameters for speed or pitch are out of range.
   1408      * Another possible cause is that the <code>AudioTrack</code> is streaming
   1409      * (see {@link #MODE_STREAM}) and the
   1410      * buffer size is too small. For speeds greater than 1.0f, the <code>AudioTrack</code> buffer
   1411      * on configuration must be larger than the speed multiplied by the minimum size
     * {@link #getMinBufferSize(int, int, int)} to allow proper playback.
   1413      * @param params see {@link PlaybackParams}. In particular,
   1414      * speed, pitch, and audio mode should be set.
   1415      * @throws IllegalArgumentException if the parameters are invalid or not accepted.
   1416      * @throws IllegalStateException if track is not initialized.
   1417      */
   1418     public void setPlaybackParams(@NonNull PlaybackParams params) {
   1419         if (params == null) {
   1420             throw new IllegalArgumentException("params is null");
   1421         }
   1422         native_set_playback_params(params);
   1423     }
   1424 
   1425 
   1426     /**
   1427      * Sets the position of the notification marker.  At most one marker can be active.
   1428      * @param markerInFrames marker position in wrapping frame units similar to
   1429      * {@link #getPlaybackHeadPosition}, or zero to disable the marker.
   1430      * To set a marker at a position which would appear as zero due to wraparound,
   1431      * a workaround is to use a non-zero position near zero, such as -1 or 1.
   1432      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
   1433      *  {@link #ERROR_INVALID_OPERATION}
   1434      */
   1435     public int setNotificationMarkerPosition(int markerInFrames) {
   1436         if (mState == STATE_UNINITIALIZED) {
   1437             return ERROR_INVALID_OPERATION;
   1438         }
   1439         return native_set_marker_pos(markerInFrames);
   1440     }
   1441 
   1442 
   1443     /**
   1444      * Sets the period for the periodic notification event.
   1445      * @param periodInFrames update period expressed in frames.
   1446      * Zero period means no position updates.  A negative period is not allowed.
   1447      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_INVALID_OPERATION}
   1448      */
   1449     public int setPositionNotificationPeriod(int periodInFrames) {
   1450         if (mState == STATE_UNINITIALIZED) {
   1451             return ERROR_INVALID_OPERATION;
   1452         }
   1453         return native_set_pos_update_period(periodInFrames);
   1454     }
   1455 
   1456 
   1457     /**
   1458      * Sets the playback head position within the static buffer.
   1459      * The track must be stopped or paused for the position to be changed,
   1460      * and must use the {@link #MODE_STATIC} mode.
   1461      * @param positionInFrames playback head position within buffer, expressed in frames.
   1462      * Zero corresponds to start of buffer.
   1463      * The position must not be greater than the buffer size in frames, or negative.
   1464      * Though this method and {@link #getPlaybackHeadPosition()} have similar names,
   1465      * the position values have different meanings.
   1466      * <br>
   1467      * If looping is currently enabled and the new position is greater than or equal to the
   1468      * loop end marker, the behavior varies by API level:
   1469      * as of {@link android.os.Build.VERSION_CODES#M},
   1470      * the looping is first disabled and then the position is set.
   1471      * For earlier API levels, the behavior is unspecified.
   1472      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
   1473      *    {@link #ERROR_INVALID_OPERATION}
   1474      */
   1475     public int setPlaybackHeadPosition(int positionInFrames) {
   1476         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
   1477                 getPlayState() == PLAYSTATE_PLAYING) {
   1478             return ERROR_INVALID_OPERATION;
   1479         }
   1480         if (!(0 <= positionInFrames && positionInFrames <= mNativeBufferSizeInFrames)) {
   1481             return ERROR_BAD_VALUE;
   1482         }
   1483         return native_set_position(positionInFrames);
   1484     }
   1485 
   1486     /**
   1487      * Sets the loop points and the loop count. The loop can be infinite.
   1488      * Similarly to setPlaybackHeadPosition,
   1489      * the track must be stopped or paused for the loop points to be changed,
   1490      * and must use the {@link #MODE_STATIC} mode.
   1491      * @param startInFrames loop start marker expressed in frames.
   1492      * Zero corresponds to start of buffer.
   1493      * The start marker must not be greater than or equal to the buffer size in frames, or negative.
   1494      * @param endInFrames loop end marker expressed in frames.
   1495      * The total buffer size in frames corresponds to end of buffer.
   1496      * The end marker must not be greater than the buffer size in frames.
   1497      * For looping, the end marker must not be less than or equal to the start marker,
   1498      * but to disable looping
   1499      * it is permitted for start marker, end marker, and loop count to all be 0.
   1500      * If any input parameters are out of range, this method returns {@link #ERROR_BAD_VALUE}.
   1501      * If the loop period (endInFrames - startInFrames) is too small for the implementation to
   1502      * support,
   1503      * {@link #ERROR_BAD_VALUE} is returned.
   1504      * The loop range is the interval [startInFrames, endInFrames).
   1505      * <br>
   1506      * As of {@link android.os.Build.VERSION_CODES#M}, the position is left unchanged,
   1507      * unless it is greater than or equal to the loop end marker, in which case
   1508      * it is forced to the loop start marker.
   1509      * For earlier API levels, the effect on position is unspecified.
   1510      * @param loopCount the number of times the loop is looped; must be greater than or equal to -1.
   1511      *    A value of -1 means infinite looping, and 0 disables looping.
   1512      *    A value of positive N means to "loop" (go back) N times.  For example,
   1513      *    a value of one means to play the region two times in total.
   1514      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
   1515      *    {@link #ERROR_INVALID_OPERATION}
   1516      */
   1517     public int setLoopPoints(int startInFrames, int endInFrames, int loopCount) {
   1518         if (mDataLoadMode == MODE_STREAM || mState == STATE_UNINITIALIZED ||
   1519                 getPlayState() == PLAYSTATE_PLAYING) {
   1520             return ERROR_INVALID_OPERATION;
   1521         }
   1522         if (loopCount == 0) {
   1523             ;   // explicitly allowed as an exception to the loop region range check
   1524         } else if (!(0 <= startInFrames && startInFrames < mNativeBufferSizeInFrames &&
   1525                 startInFrames < endInFrames && endInFrames <= mNativeBufferSizeInFrames)) {
   1526             return ERROR_BAD_VALUE;
   1527         }
   1528         return native_set_loop(startInFrames, endInFrames, loopCount);
   1529     }
   1530 
   1531     /**
   1532      * Sets the initialization state of the instance. This method was originally intended to be used
   1533      * in an AudioTrack subclass constructor to set a subclass-specific post-initialization state.
   1534      * However, subclasses of AudioTrack are no longer recommended, so this method is obsolete.
   1535      * @param state the state of the AudioTrack instance
   1536      * @deprecated Only accessible by subclasses, which are not recommended for AudioTrack.
   1537      */
    @Deprecated
    protected void setState(int state) {
        // Direct overwrite of the track state with no validation; retained only for
        // legacy subclasses (subclassing AudioTrack is discouraged).
        mState = state;
    }
   1542 
   1543 
   1544     //---------------------------------------------------------
   1545     // Transport control methods
   1546     //--------------------
   1547     /**
   1548      * Starts playing an AudioTrack.
   1549      * <p>
   1550      * If track's creation mode is {@link #MODE_STATIC}, you must have called one of
   1551      * the write methods ({@link #write(byte[], int, int)}, {@link #write(byte[], int, int, int)},
   1552      * {@link #write(short[], int, int)}, {@link #write(short[], int, int, int)},
   1553      * {@link #write(float[], int, int, int)}, or {@link #write(ByteBuffer, int, int)}) prior to
   1554      * play().
   1555      * <p>
   1556      * If the mode is {@link #MODE_STREAM}, you can optionally prime the data path prior to
   1557      * calling play(), by writing up to <code>bufferSizeInBytes</code> (from constructor).
   1558      * If you don't call write() first, or if you call write() but with an insufficient amount of
   1559      * data, then the track will be in underrun state at play().  In this case,
   1560      * playback will not actually start playing until the data path is filled to a
   1561      * device-specific minimum level.  This requirement for the path to be filled
   1562      * to a minimum level is also true when resuming audio playback after calling stop().
   1563      * Similarly the buffer will need to be filled up again after
   1564      * the track underruns due to failure to call write() in a timely manner with sufficient data.
   1565      * For portability, an application should prime the data path to the maximum allowed
   1566      * by writing data until the write() method returns a short transfer count.
   1567      * This allows play() to start immediately, and reduces the chance of underrun.
   1568      *
   1569      * @throws IllegalStateException if the track isn't properly initialized
   1570      */
   1571     public void play()
   1572     throws IllegalStateException {
   1573         if (mState != STATE_INITIALIZED) {
   1574             throw new IllegalStateException("play() called on uninitialized AudioTrack.");
   1575         }
   1576         if (isRestricted()) {
   1577             setVolume(0);
   1578         }
   1579         synchronized(mPlayStateLock) {
   1580             native_start();
   1581             mPlayState = PLAYSTATE_PLAYING;
   1582         }
   1583     }
   1584 
    // Returns true when app-ops policy forbids audible playback for this track's usage.
    private boolean isRestricted() {
        // Tracks flagged to bypass interruption policy are never restricted.
        if ((mAttributes.getAllFlags() & AudioAttributes.FLAG_BYPASS_INTERRUPTION_POLICY) != 0) {
            return false;
        }
        try {
            // Map the legacy stream type to a usage and ask the app-ops service whether
            // OP_PLAY_AUDIO is allowed for this uid/package.
            final int usage = AudioAttributes.usageForLegacyStreamType(mStreamType);
            final int mode = mAppOps.checkAudioOperation(AppOpsManager.OP_PLAY_AUDIO, usage,
                    Process.myUid(), ActivityThread.currentPackageName());
            return mode != AppOpsManager.MODE_ALLOWED;
        } catch (RemoteException e) {
            // If the app-ops service is unreachable, err on the side of allowing playback.
            return false;
        }
    }
   1598 
   1599     /**
   1600      * Stops playing the audio data.
   1601      * When used on an instance created in {@link #MODE_STREAM} mode, audio will stop playing
   1602      * after the last buffer that was written has been played. For an immediate stop, use
   1603      * {@link #pause()}, followed by {@link #flush()} to discard audio data that hasn't been played
   1604      * back yet.
   1605      * @throws IllegalStateException
   1606      */
   1607     public void stop()
   1608     throws IllegalStateException {
   1609         if (mState != STATE_INITIALIZED) {
   1610             throw new IllegalStateException("stop() called on uninitialized AudioTrack.");
   1611         }
   1612 
   1613         // stop playing
   1614         synchronized(mPlayStateLock) {
   1615             native_stop();
   1616             mPlayState = PLAYSTATE_STOPPED;
   1617             mAvSyncHeader = null;
   1618             mAvSyncBytesRemaining = 0;
   1619         }
   1620     }
   1621 
   1622     /**
   1623      * Pauses the playback of the audio data. Data that has not been played
   1624      * back will not be discarded. Subsequent calls to {@link #play} will play
   1625      * this data back. See {@link #flush()} to discard this data.
   1626      *
   1627      * @throws IllegalStateException
   1628      */
   1629     public void pause()
   1630     throws IllegalStateException {
   1631         if (mState != STATE_INITIALIZED) {
   1632             throw new IllegalStateException("pause() called on uninitialized AudioTrack.");
   1633         }
   1634         //logd("pause()");
   1635 
   1636         // pause playback
   1637         synchronized(mPlayStateLock) {
   1638             native_pause();
   1639             mPlayState = PLAYSTATE_PAUSED;
   1640         }
   1641     }
   1642 
   1643 
   1644     //---------------------------------------------------------
   1645     // Audio data supply
   1646     //--------------------
   1647 
   1648     /**
   1649      * Flushes the audio data currently queued for playback. Any data that has
   1650      * been written but not yet presented will be discarded.  No-op if not stopped or paused,
   1651      * or if the track's creation mode is not {@link #MODE_STREAM}.
   1652      * <BR> Note that although data written but not yet presented is discarded, there is no
   1653      * guarantee that all of the buffer space formerly used by that data
   1654      * is available for a subsequent write.
   1655      * For example, a call to {@link #write(byte[], int, int)} with <code>sizeInBytes</code>
   1656      * less than or equal to the total buffer size
   1657      * may return a short actual transfer count.
   1658      */
   1659     public void flush() {
   1660         if (mState == STATE_INITIALIZED) {
   1661             // flush the data in native layer
   1662             native_flush();
   1663             mAvSyncHeader = null;
   1664             mAvSyncBytesRemaining = 0;
   1665         }
   1666 
   1667     }
   1668 
   1669     /**
   1670      * Writes the audio data to the audio sink for playback (streaming mode),
   1671      * or copies audio data for later playback (static buffer mode).
   1672      * The format specified in the AudioTrack constructor should be
   1673      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
   1674      * <p>
   1675      * In streaming mode, the write will normally block until all the data has been enqueued for
   1676      * playback, and will return a full transfer count.  However, if the track is stopped or paused
   1677      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
   1678      * occurs during the write, then the write may return a short transfer count.
   1679      * <p>
   1680      * In static buffer mode, copies the data to the buffer starting at offset 0.
   1681      * Note that the actual playback of this data might occur after this function returns.
   1682      *
   1683      * @param audioData the array that holds the data to play.
   1684      * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
   1685      *    starts.
   1686      * @param sizeInBytes the number of bytes to read in audioData after the offset.
   1687      * @return zero or the positive number of bytes that were written, or
   1688      *    {@link #ERROR_INVALID_OPERATION}
   1689      *    if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if
   1690      *    the parameters don't resolve to valid data and indexes, or
   1691      *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   1692      *    needs to be recreated.
   1693      *    The dead object error code is not returned if some data was successfully transferred.
   1694      *    In this case, the error is returned at the next write().
   1695      *
   1696      * This is equivalent to {@link #write(byte[], int, int, int)} with <code>writeMode</code>
   1697      * set to  {@link #WRITE_BLOCKING}.
   1698      */
   1699     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes) {
   1700         return write(audioData, offsetInBytes, sizeInBytes, WRITE_BLOCKING);
   1701     }
   1702 
   1703     /**
   1704      * Writes the audio data to the audio sink for playback (streaming mode),
   1705      * or copies audio data for later playback (static buffer mode).
   1706      * The format specified in the AudioTrack constructor should be
   1707      * {@link AudioFormat#ENCODING_PCM_8BIT} to correspond to the data in the array.
   1708      * <p>
   1709      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
   1710      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
   1711      * for playback, and will return a full transfer count.  However, if the write mode is
   1712      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
   1713      * interrupts the write by calling stop or pause, or an I/O error
   1714      * occurs during the write, then the write may return a short transfer count.
   1715      * <p>
   1716      * In static buffer mode, copies the data to the buffer starting at offset 0,
   1717      * and the write mode is ignored.
   1718      * Note that the actual playback of this data might occur after this function returns.
   1719      *
   1720      * @param audioData the array that holds the data to play.
   1721      * @param offsetInBytes the offset expressed in bytes in audioData where the data to play
   1722      *    starts.
   1723      * @param sizeInBytes the number of bytes to read in audioData after the offset.
   1724      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
   1725      *     effect in static mode.
   1726      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
   1727      *         to the audio sink.
   1728      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
   1729      *     queuing as much audio data for playback as possible without blocking.
   1730      * @return zero or the positive number of bytes that were written, or
   1731      *    {@link #ERROR_INVALID_OPERATION}
   1732      *    if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if
   1733      *    the parameters don't resolve to valid data and indexes, or
   1734      *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   1735      *    needs to be recreated.
   1736      *    The dead object error code is not returned if some data was successfully transferred.
   1737      *    In this case, the error is returned at the next write().
   1738      */
   1739     public int write(@NonNull byte[] audioData, int offsetInBytes, int sizeInBytes,
   1740             @WriteMode int writeMode) {
   1741 
   1742         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
   1743             return ERROR_INVALID_OPERATION;
   1744         }
   1745 
   1746         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
   1747             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
   1748             return ERROR_BAD_VALUE;
   1749         }
   1750 
   1751         if ( (audioData == null) || (offsetInBytes < 0 ) || (sizeInBytes < 0)
   1752                 || (offsetInBytes + sizeInBytes < 0)    // detect integer overflow
   1753                 || (offsetInBytes + sizeInBytes > audioData.length)) {
   1754             return ERROR_BAD_VALUE;
   1755         }
   1756 
   1757         int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
   1758                 writeMode == WRITE_BLOCKING);
   1759 
   1760         if ((mDataLoadMode == MODE_STATIC)
   1761                 && (mState == STATE_NO_STATIC_DATA)
   1762                 && (ret > 0)) {
   1763             // benign race with respect to other APIs that read mState
   1764             mState = STATE_INITIALIZED;
   1765         }
   1766 
   1767         return ret;
   1768     }
   1769 
   1770     /**
   1771      * Writes the audio data to the audio sink for playback (streaming mode),
   1772      * or copies audio data for later playback (static buffer mode).
   1773      * The format specified in the AudioTrack constructor should be
   1774      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
   1775      * <p>
   1776      * In streaming mode, the write will normally block until all the data has been enqueued for
   1777      * playback, and will return a full transfer count.  However, if the track is stopped or paused
   1778      * on entry, or another thread interrupts the write by calling stop or pause, or an I/O error
   1779      * occurs during the write, then the write may return a short transfer count.
   1780      * <p>
   1781      * In static buffer mode, copies the data to the buffer starting at offset 0.
   1782      * Note that the actual playback of this data might occur after this function returns.
   1783      *
   1784      * @param audioData the array that holds the data to play.
   1785      * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
   1786      *     starts.
   1787      * @param sizeInShorts the number of shorts to read in audioData after the offset.
   1788      * @return zero or the positive number of shorts that were written, or
   1789      *    {@link #ERROR_INVALID_OPERATION}
   1790      *    if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if
   1791      *    the parameters don't resolve to valid data and indexes, or
   1792      *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   1793      *    needs to be recreated.
   1794      *    The dead object error code is not returned if some data was successfully transferred.
   1795      *    In this case, the error is returned at the next write().
   1796      *
   1797      * This is equivalent to {@link #write(short[], int, int, int)} with <code>writeMode</code>
   1798      * set to  {@link #WRITE_BLOCKING}.
   1799      */
   1800     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts) {
   1801         return write(audioData, offsetInShorts, sizeInShorts, WRITE_BLOCKING);
   1802     }
   1803 
   1804     /**
   1805      * Writes the audio data to the audio sink for playback (streaming mode),
   1806      * or copies audio data for later playback (static buffer mode).
   1807      * The format specified in the AudioTrack constructor should be
   1808      * {@link AudioFormat#ENCODING_PCM_16BIT} to correspond to the data in the array.
   1809      * <p>
   1810      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
   1811      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
   1812      * for playback, and will return a full transfer count.  However, if the write mode is
   1813      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
   1814      * interrupts the write by calling stop or pause, or an I/O error
   1815      * occurs during the write, then the write may return a short transfer count.
   1816      * <p>
   1817      * In static buffer mode, copies the data to the buffer starting at offset 0.
   1818      * Note that the actual playback of this data might occur after this function returns.
   1819      *
   1820      * @param audioData the array that holds the data to play.
   1821      * @param offsetInShorts the offset expressed in shorts in audioData where the data to play
   1822      *     starts.
   1823      * @param sizeInShorts the number of shorts to read in audioData after the offset.
   1824      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
   1825      *     effect in static mode.
   1826      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
   1827      *         to the audio sink.
   1828      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
   1829      *     queuing as much audio data for playback as possible without blocking.
   1830      * @return zero or the positive number of shorts that were written, or
   1831      *    {@link #ERROR_INVALID_OPERATION}
   1832      *    if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if
   1833      *    the parameters don't resolve to valid data and indexes, or
   1834      *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   1835      *    needs to be recreated.
   1836      *    The dead object error code is not returned if some data was successfully transferred.
   1837      *    In this case, the error is returned at the next write().
   1838      */
   1839     public int write(@NonNull short[] audioData, int offsetInShorts, int sizeInShorts,
   1840             @WriteMode int writeMode) {
   1841 
   1842         if (mState == STATE_UNINITIALIZED || mAudioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
   1843             return ERROR_INVALID_OPERATION;
   1844         }
   1845 
   1846         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
   1847             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
   1848             return ERROR_BAD_VALUE;
   1849         }
   1850 
   1851         if ( (audioData == null) || (offsetInShorts < 0 ) || (sizeInShorts < 0)
   1852                 || (offsetInShorts + sizeInShorts < 0)  // detect integer overflow
   1853                 || (offsetInShorts + sizeInShorts > audioData.length)) {
   1854             return ERROR_BAD_VALUE;
   1855         }
   1856 
   1857         int ret = native_write_short(audioData, offsetInShorts, sizeInShorts, mAudioFormat,
   1858                 writeMode == WRITE_BLOCKING);
   1859 
   1860         if ((mDataLoadMode == MODE_STATIC)
   1861                 && (mState == STATE_NO_STATIC_DATA)
   1862                 && (ret > 0)) {
   1863             // benign race with respect to other APIs that read mState
   1864             mState = STATE_INITIALIZED;
   1865         }
   1866 
   1867         return ret;
   1868     }
   1869 
   1870     /**
   1871      * Writes the audio data to the audio sink for playback (streaming mode),
   1872      * or copies audio data for later playback (static buffer mode).
   1873      * The format specified in the AudioTrack constructor should be
   1874      * {@link AudioFormat#ENCODING_PCM_FLOAT} to correspond to the data in the array.
   1875      * <p>
   1876      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
   1877      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
   1878      * for playback, and will return a full transfer count.  However, if the write mode is
   1879      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
   1880      * interrupts the write by calling stop or pause, or an I/O error
   1881      * occurs during the write, then the write may return a short transfer count.
   1882      * <p>
   1883      * In static buffer mode, copies the data to the buffer starting at offset 0,
   1884      * and the write mode is ignored.
   1885      * Note that the actual playback of this data might occur after this function returns.
   1886      *
   1887      * @param audioData the array that holds the data to play.
   1888      *     The implementation does not clip for sample values within the nominal range
   1889      *     [-1.0f, 1.0f], provided that all gains in the audio pipeline are
   1890      *     less than or equal to unity (1.0f), and in the absence of post-processing effects
   1891      *     that could add energy, such as reverb.  For the convenience of applications
   1892      *     that compute samples using filters with non-unity gain,
   1893      *     sample values +3 dB beyond the nominal range are permitted.
   1894      *     However such values may eventually be limited or clipped, depending on various gains
   1895      *     and later processing in the audio path.  Therefore applications are encouraged
   1896      *     to provide samples values within the nominal range.
   1897      * @param offsetInFloats the offset, expressed as a number of floats,
   1898      *     in audioData where the data to play starts.
   1899      * @param sizeInFloats the number of floats to read in audioData after the offset.
   1900      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
   1901      *     effect in static mode.
   1902      *     <br>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
   1903      *         to the audio sink.
   1904      *     <br>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
   1905      *     queuing as much audio data for playback as possible without blocking.
   1906      * @return zero or the positive number of floats that were written, or
   1907      *    {@link #ERROR_INVALID_OPERATION}
   1908      *    if the track isn't properly initialized, or {@link #ERROR_BAD_VALUE} if
   1909      *    the parameters don't resolve to valid data and indexes, or
   1910      *    {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   1911      *    needs to be recreated.
   1912      *    The dead object error code is not returned if some data was successfully transferred.
   1913      *    In this case, the error is returned at the next write().
   1914      */
   1915     public int write(@NonNull float[] audioData, int offsetInFloats, int sizeInFloats,
   1916             @WriteMode int writeMode) {
   1917 
   1918         if (mState == STATE_UNINITIALIZED) {
   1919             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
   1920             return ERROR_INVALID_OPERATION;
   1921         }
   1922 
   1923         if (mAudioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
   1924             Log.e(TAG, "AudioTrack.write(float[] ...) requires format ENCODING_PCM_FLOAT");
   1925             return ERROR_INVALID_OPERATION;
   1926         }
   1927 
   1928         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
   1929             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
   1930             return ERROR_BAD_VALUE;
   1931         }
   1932 
   1933         if ( (audioData == null) || (offsetInFloats < 0 ) || (sizeInFloats < 0)
   1934                 || (offsetInFloats + sizeInFloats < 0)  // detect integer overflow
   1935                 || (offsetInFloats + sizeInFloats > audioData.length)) {
   1936             Log.e(TAG, "AudioTrack.write() called with invalid array, offset, or size");
   1937             return ERROR_BAD_VALUE;
   1938         }
   1939 
   1940         int ret = native_write_float(audioData, offsetInFloats, sizeInFloats, mAudioFormat,
   1941                 writeMode == WRITE_BLOCKING);
   1942 
   1943         if ((mDataLoadMode == MODE_STATIC)
   1944                 && (mState == STATE_NO_STATIC_DATA)
   1945                 && (ret > 0)) {
   1946             // benign race with respect to other APIs that read mState
   1947             mState = STATE_INITIALIZED;
   1948         }
   1949 
   1950         return ret;
   1951     }
   1952 
   1953 
   1954     /**
   1955      * Writes the audio data to the audio sink for playback (streaming mode),
   1956      * or copies audio data for later playback (static buffer mode).
   1957      * The audioData in ByteBuffer should match the format specified in the AudioTrack constructor.
   1958      * <p>
   1959      * In streaming mode, the blocking behavior depends on the write mode.  If the write mode is
   1960      * {@link #WRITE_BLOCKING}, the write will normally block until all the data has been enqueued
   1961      * for playback, and will return a full transfer count.  However, if the write mode is
   1962      * {@link #WRITE_NON_BLOCKING}, or the track is stopped or paused on entry, or another thread
   1963      * interrupts the write by calling stop or pause, or an I/O error
   1964      * occurs during the write, then the write may return a short transfer count.
   1965      * <p>
   1966      * In static buffer mode, copies the data to the buffer starting at offset 0,
   1967      * and the write mode is ignored.
   1968      * Note that the actual playback of this data might occur after this function returns.
   1969      *
   1970      * @param audioData the buffer that holds the data to play, starting at the position reported
   1971      *     by <code>audioData.position()</code>.
   1972      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
   1973      *     have been advanced to reflect the amount of data that was successfully written to
   1974      *     the AudioTrack.
   1975      * @param sizeInBytes number of bytes to write.
   1976      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
   1977      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}. It has no
   1978      *     effect in static mode.
   1979      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
   1980      *         to the audio sink.
   1981      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
   1982      *     queuing as much audio data for playback as possible without blocking.
   1983      * @return zero or the positive number of bytes that were written, or
   1984      *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or
   1985      *     {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   1986      *     needs to be recreated.
   1987      *     The dead object error code is not returned if some data was successfully transferred.
   1988      *     In this case, the error is returned at the next write().
   1989      */
   1990     public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
   1991             @WriteMode int writeMode) {
   1992 
   1993         if (mState == STATE_UNINITIALIZED) {
   1994             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
   1995             return ERROR_INVALID_OPERATION;
   1996         }
   1997 
   1998         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
   1999             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
   2000             return ERROR_BAD_VALUE;
   2001         }
   2002 
   2003         if ( (audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
   2004             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
   2005             return ERROR_BAD_VALUE;
   2006         }
   2007 
   2008         int ret = 0;
   2009         if (audioData.isDirect()) {
   2010             ret = native_write_native_bytes(audioData,
   2011                     audioData.position(), sizeInBytes, mAudioFormat,
   2012                     writeMode == WRITE_BLOCKING);
   2013         } else {
   2014             ret = native_write_byte(NioUtils.unsafeArray(audioData),
   2015                     NioUtils.unsafeArrayOffset(audioData) + audioData.position(),
   2016                     sizeInBytes, mAudioFormat,
   2017                     writeMode == WRITE_BLOCKING);
   2018         }
   2019 
   2020         if ((mDataLoadMode == MODE_STATIC)
   2021                 && (mState == STATE_NO_STATIC_DATA)
   2022                 && (ret > 0)) {
   2023             // benign race with respect to other APIs that read mState
   2024             mState = STATE_INITIALIZED;
   2025         }
   2026 
   2027         if (ret > 0) {
   2028             audioData.position(audioData.position() + ret);
   2029         }
   2030 
   2031         return ret;
   2032     }
   2033 
   2034     /**
   2035      * Writes the audio data to the audio sink for playback in streaming mode on a HW_AV_SYNC track.
   2036      * The blocking behavior will depend on the write mode.
   2037      * @param audioData the buffer that holds the data to play, starting at the position reported
   2038      *     by <code>audioData.position()</code>.
   2039      *     <BR>Note that upon return, the buffer position (<code>audioData.position()</code>) will
   2040      *     have been advanced to reflect the amount of data that was successfully written to
   2041      *     the AudioTrack.
   2042      * @param sizeInBytes number of bytes to write.
   2043      *     <BR>Note this may differ from <code>audioData.remaining()</code>, but cannot exceed it.
   2044      * @param writeMode one of {@link #WRITE_BLOCKING}, {@link #WRITE_NON_BLOCKING}.
   2045      *     <BR>With {@link #WRITE_BLOCKING}, the write will block until all data has been written
   2046      *         to the audio sink.
   2047      *     <BR>With {@link #WRITE_NON_BLOCKING}, the write will return immediately after
   2048      *     queuing as much audio data for playback as possible without blocking.
   2049      * @param timestamp The timestamp of the first decodable audio frame in the provided audioData.
   2050      * @return zero or a positive number of bytes that were written, or
   2051      *     {@link #ERROR_BAD_VALUE}, {@link #ERROR_INVALID_OPERATION}, or
   2052      *     {@link AudioManager#ERROR_DEAD_OBJECT} if the AudioTrack is not valid anymore and
   2053      *     needs to be recreated.
   2054      *     The dead object error code is not returned if some data was successfully transferred.
   2055      *     In this case, the error is returned at the next write().
   2056      */
   2057     public int write(@NonNull ByteBuffer audioData, int sizeInBytes,
   2058             @WriteMode int writeMode, long timestamp) {
   2059 
   2060         if (mState == STATE_UNINITIALIZED) {
   2061             Log.e(TAG, "AudioTrack.write() called in invalid state STATE_UNINITIALIZED");
   2062             return ERROR_INVALID_OPERATION;
   2063         }
   2064 
   2065         if ((writeMode != WRITE_BLOCKING) && (writeMode != WRITE_NON_BLOCKING)) {
   2066             Log.e(TAG, "AudioTrack.write() called with invalid blocking mode");
   2067             return ERROR_BAD_VALUE;
   2068         }
   2069 
   2070         if (mDataLoadMode != MODE_STREAM) {
   2071             Log.e(TAG, "AudioTrack.write() with timestamp called for non-streaming mode track");
   2072             return ERROR_INVALID_OPERATION;
   2073         }
   2074 
   2075         if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) == 0) {
   2076             Log.d(TAG, "AudioTrack.write() called on a regular AudioTrack. Ignoring pts...");
   2077             return write(audioData, sizeInBytes, writeMode);
   2078         }
   2079 
   2080         if ((audioData == null) || (sizeInBytes < 0) || (sizeInBytes > audioData.remaining())) {
   2081             Log.e(TAG, "AudioTrack.write() called with invalid size (" + sizeInBytes + ") value");
   2082             return ERROR_BAD_VALUE;
   2083         }
   2084 
   2085         // create timestamp header if none exists
   2086         if (mAvSyncHeader == null) {
   2087             mAvSyncHeader = ByteBuffer.allocate(16);
   2088             mAvSyncHeader.order(ByteOrder.BIG_ENDIAN);
   2089             mAvSyncHeader.putInt(0x55550001);
   2090             mAvSyncHeader.putInt(sizeInBytes);
   2091             mAvSyncHeader.putLong(timestamp);
   2092             mAvSyncHeader.position(0);
   2093             mAvSyncBytesRemaining = sizeInBytes;
   2094         }
   2095 
   2096         // write timestamp header if not completely written already
   2097         int ret = 0;
   2098         if (mAvSyncHeader.remaining() != 0) {
   2099             ret = write(mAvSyncHeader, mAvSyncHeader.remaining(), writeMode);
   2100             if (ret < 0) {
   2101                 Log.e(TAG, "AudioTrack.write() could not write timestamp header!");
   2102                 mAvSyncHeader = null;
   2103                 mAvSyncBytesRemaining = 0;
   2104                 return ret;
   2105             }
   2106             if (mAvSyncHeader.remaining() > 0) {
   2107                 Log.v(TAG, "AudioTrack.write() partial timestamp header written.");
   2108                 return 0;
   2109             }
   2110         }
   2111 
   2112         // write audio data
   2113         int sizeToWrite = Math.min(mAvSyncBytesRemaining, sizeInBytes);
   2114         ret = write(audioData, sizeToWrite, writeMode);
   2115         if (ret < 0) {
   2116             Log.e(TAG, "AudioTrack.write() could not write audio data!");
   2117             mAvSyncHeader = null;
   2118             mAvSyncBytesRemaining = 0;
   2119             return ret;
   2120         }
   2121 
   2122         mAvSyncBytesRemaining -= ret;
   2123         if (mAvSyncBytesRemaining == 0) {
   2124             mAvSyncHeader = null;
   2125         }
   2126 
   2127         return ret;
   2128     }
   2129 
   2130 
   2131     /**
   2132      * Sets the playback head position within the static buffer to zero,
   2133      * that is it rewinds to start of static buffer.
   2134      * The track must be stopped or paused, and
   2135      * the track's creation mode must be {@link #MODE_STATIC}.
   2136      * <p>
   2137      * As of {@link android.os.Build.VERSION_CODES#M}, also resets the value returned by
   2138      * {@link #getPlaybackHeadPosition()} to zero.
   2139      * For earlier API levels, the reset behavior is unspecified.
   2140      * <p>
   2141      * Use {@link #setPlaybackHeadPosition(int)} with a zero position
   2142      * if the reset of <code>getPlaybackHeadPosition()</code> is not needed.
   2143      * @return error code or success, see {@link #SUCCESS}, {@link #ERROR_BAD_VALUE},
   2144      *  {@link #ERROR_INVALID_OPERATION}
   2145      */
   2146     public int reloadStaticData() {
   2147         if (mDataLoadMode == MODE_STREAM || mState != STATE_INITIALIZED) {
   2148             return ERROR_INVALID_OPERATION;
   2149         }
   2150         return native_reload_static();
   2151     }
   2152 
   2153     //--------------------------------------------------------------------------
   2154     // Audio effects management
   2155     //--------------------
   2156 
   2157     /**
   2158      * Attaches an auxiliary effect to the audio track. A typical auxiliary
   2159      * effect is a reverberation effect which can be applied on any sound source
   2160      * that directs a certain amount of its energy to this effect. This amount
   2161      * is defined by setAuxEffectSendLevel().
   2162      * {@see #setAuxEffectSendLevel(float)}.
   2163      * <p>After creating an auxiliary effect (e.g.
   2164      * {@link android.media.audiofx.EnvironmentalReverb}), retrieve its ID with
   2165      * {@link android.media.audiofx.AudioEffect#getId()} and use it when calling
   2166      * this method to attach the audio track to the effect.
   2167      * <p>To detach the effect from the audio track, call this method with a
   2168      * null effect id.
   2169      *
   2170      * @param effectId system wide unique id of the effect to attach
   2171      * @return error code or success, see {@link #SUCCESS},
   2172      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR_BAD_VALUE}
   2173      */
   2174     public int attachAuxEffect(int effectId) {
   2175         if (mState == STATE_UNINITIALIZED) {
   2176             return ERROR_INVALID_OPERATION;
   2177         }
   2178         return native_attachAuxEffect(effectId);
   2179     }
   2180 
   2181     /**
   2182      * Sets the send level of the audio track to the attached auxiliary effect
   2183      * {@link #attachAuxEffect(int)}.  Effect levels
   2184      * are clamped to the closed interval [0.0, max] where
   2185      * max is the value of {@link #getMaxVolume}.
   2186      * A value of 0.0 results in no effect, and a value of 1.0 is full send.
   2187      * <p>By default the send level is 0.0f, so even if an effect is attached to the player
   2188      * this method must be called for the effect to be applied.
   2189      * <p>Note that the passed level value is a linear scalar. UI controls should be scaled
   2190      * logarithmically: the gain applied by audio framework ranges from -72dB to at least 0dB,
   2191      * so an appropriate conversion from linear UI input x to level is:
   2192      * x == 0 -&gt; level = 0
   2193      * 0 &lt; x &lt;= R -&gt; level = 10^(72*(x-R)/20/R)
   2194      *
   2195      * @param level linear send level
   2196      * @return error code or success, see {@link #SUCCESS},
   2197      *    {@link #ERROR_INVALID_OPERATION}, {@link #ERROR}
   2198      */
   2199     public int setAuxEffectSendLevel(float level) {
   2200         if (isRestricted()) {
   2201             return SUCCESS;
   2202         }
   2203         if (mState == STATE_UNINITIALIZED) {
   2204             return ERROR_INVALID_OPERATION;
   2205         }
   2206         level = clampGainOrLevel(level);
   2207         int err = native_setAuxEffectSendLevel(level);
   2208         return err == 0 ? SUCCESS : ERROR;
   2209     }
   2210 
   2211     //--------------------------------------------------------------------------
   2212     // Explicit Routing
   2213     //--------------------
   2214     private AudioDeviceInfo mPreferredDevice = null;
   2215 
   2216     /**
   2217      * Specifies an audio device (via an {@link AudioDeviceInfo} object) to route
   2218      * the output from this AudioTrack.
   2219      * @param deviceInfo The {@link AudioDeviceInfo} specifying the audio sink.
   2220      *  If deviceInfo is null, default routing is restored.
   2221      * @return true if succesful, false if the specified {@link AudioDeviceInfo} is non-null and
   2222      * does not correspond to a valid audio output device.
   2223      */
   2224     public boolean setPreferredDevice(AudioDeviceInfo deviceInfo) {
   2225         // Do some validation....
   2226         if (deviceInfo != null && !deviceInfo.isSink()) {
   2227             return false;
   2228         }
   2229         int preferredDeviceId = deviceInfo != null ? deviceInfo.getId() : 0;
   2230         boolean status = native_setOutputDevice(preferredDeviceId);
   2231         if (status == true) {
   2232             synchronized (this) {
   2233                 mPreferredDevice = deviceInfo;
   2234             }
   2235         }
   2236         return status;
   2237     }
   2238 
   2239     /**
   2240      * Returns the selected output specified by {@link #setPreferredDevice}. Note that this
   2241      * is not guaranteed to correspond to the actual device being used for playback.
   2242      */
   2243     public AudioDeviceInfo getPreferredDevice() {
   2244         synchronized (this) {
   2245             return mPreferredDevice;
   2246         }
   2247     }
   2248 
   2249     //--------------------------------------------------------------------------
   2250     // (Re)Routing Info
   2251     //--------------------
   2252     /**
   2253      * Defines the interface by which applications can receive notifications of routing
   2254      * changes for the associated {@link AudioTrack}.
   2255      */
   2256     public interface OnRoutingChangedListener {
   2257         /**
   2258          * Called when the routing of an AudioTrack changes from either and explicit or
   2259          * policy rerouting.  Use {@link #getRoutedDevice()} to retrieve the newly routed-to
   2260          * device.
   2261          */
   2262         public void onRoutingChanged(AudioTrack audioTrack);
   2263     }
   2264 
   2265     /**
   2266      * Returns an {@link AudioDeviceInfo} identifying the current routing of this AudioTrack.
   2267      * Note: The query is only valid if the AudioTrack is currently playing. If it is not,
   2268      * <code>getRoutedDevice()</code> will return null.
   2269      */
   2270     public AudioDeviceInfo getRoutedDevice() {
   2271         int deviceId = native_getRoutedDeviceId();
   2272         if (deviceId == 0) {
   2273             return null;
   2274         }
   2275         AudioDeviceInfo[] devices =
   2276                 AudioManager.getDevicesStatic(AudioManager.GET_DEVICES_OUTPUTS);
   2277         for (int i = 0; i < devices.length; i++) {
   2278             if (devices[i].getId() == deviceId) {
   2279                 return devices[i];
   2280             }
   2281         }
   2282         return null;
   2283     }
   2284 
   2285     /**
   2286      * The list of AudioTrack.OnRoutingChangedListener interfaces added (with
   2287      * {@link AudioTrack#addOnRoutingChangedListener(OnRoutingChangedListener, android.os.Handler)}
   2288      * by an app to receive (re)routing notifications.
   2289      */
   2290     private ArrayMap<OnRoutingChangedListener, NativeRoutingEventHandlerDelegate>
   2291         mRoutingChangeListeners =
   2292             new ArrayMap<OnRoutingChangedListener, NativeRoutingEventHandlerDelegate>();
   2293 
   2294     /**
   2295      * Adds an {@link OnRoutingChangedListener} to receive notifications of routing changes
   2296      * on this AudioTrack.
   2297      * @param listener The {@link OnRoutingChangedListener} interface to receive notifications
   2298      * of rerouting events.
   2299      * @param handler  Specifies the {@link Handler} object for the thread on which to execute
   2300      * the callback. If <code>null</code>, the {@link Handler} associated with the main
   2301      * {@link Looper} will be used.
   2302      */
   2303     public void addOnRoutingChangedListener(OnRoutingChangedListener listener,
   2304             android.os.Handler handler) {
   2305         if (listener != null && !mRoutingChangeListeners.containsKey(listener)) {
   2306             synchronized (mRoutingChangeListeners) {
   2307                 if (mRoutingChangeListeners.size() == 0) {
   2308                     native_enableDeviceCallback();
   2309                 }
   2310                 mRoutingChangeListeners.put(
   2311                     listener, new NativeRoutingEventHandlerDelegate(this, listener,
   2312                             handler != null ? handler : new Handler(mInitializationLooper)));
   2313             }
   2314         }
   2315     }
   2316 
   2317     /**
   2318      * Removes an {@link OnRoutingChangedListener} which has been previously added
   2319      * to receive rerouting notifications.
   2320      * @param listener The previously added {@link OnRoutingChangedListener} interface to remove.
   2321      */
   2322     public void removeOnRoutingChangedListener(OnRoutingChangedListener listener) {
   2323         synchronized (mRoutingChangeListeners) {
   2324             if (mRoutingChangeListeners.containsKey(listener)) {
   2325                 mRoutingChangeListeners.remove(listener);
   2326             }
   2327             if (mRoutingChangeListeners.size() == 0) {
   2328                 native_disableDeviceCallback();
   2329             }
   2330         }
   2331     }
   2332 
   2333     /**
   2334      * Sends device list change notification to all listeners.
   2335      */
   2336     private void broadcastRoutingChange() {
   2337         Collection<NativeRoutingEventHandlerDelegate> values;
   2338         synchronized (mRoutingChangeListeners) {
   2339             values = mRoutingChangeListeners.values();
   2340         }
   2341         AudioManager.resetAudioPortGeneration();
   2342         for(NativeRoutingEventHandlerDelegate delegate : values) {
   2343             Handler handler = delegate.getHandler();
   2344             if (handler != null) {
   2345                 handler.sendEmptyMessage(AudioSystem.NATIVE_EVENT_ROUTING_CHANGE);
   2346             }
   2347         }
   2348     }
   2349 
   2350     //---------------------------------------------------------
   2351     // Interface definitions
   2352     //--------------------
   2353     /**
   2354      * Interface definition for a callback to be invoked when the playback head position of
   2355      * an AudioTrack has reached a notification marker or has increased by a certain period.
   2356      */
   2357     public interface OnPlaybackPositionUpdateListener  {
   2358         /**
   2359          * Called on the listener to notify it that the previously set marker has been reached
   2360          * by the playback head.
   2361          */
   2362         void onMarkerReached(AudioTrack track);
   2363 
   2364         /**
   2365          * Called on the listener to periodically notify it that the playback head has reached
   2366          * a multiple of the notification period.
   2367          */
   2368         void onPeriodicNotification(AudioTrack track);
   2369     }
   2370 
   2371     //---------------------------------------------------------
   2372     // Inner classes
   2373     //--------------------
   2374     /**
   2375      * Helper class to handle the forwarding of native events to the appropriate listener
   2376      * (potentially) handled in a different thread
   2377      */
   2378     private class NativePositionEventHandlerDelegate {
   2379         private final Handler mHandler;
   2380 
        /**
         * Builds a Handler on an appropriate Looper that forwards native position
         * events (marker reached / periodic notification) to the given listener.
         * If neither the supplied handler nor the track's initialization looper
         * provides a Looper, no Handler is created and events are dropped.
         *
         * @param track the AudioTrack passed back to the listener callbacks.
         * @param listener the callback target; events are silently dropped if null.
         * @param handler optional Handler whose Looper hosts the callbacks;
         *                if null, the AudioTrack's initialization Looper is used.
         */
        NativePositionEventHandlerDelegate(final AudioTrack track,
                                   final OnPlaybackPositionUpdateListener listener,
                                   Handler handler) {
            // find the looper for our new event handler
            Looper looper;
            if (handler != null) {
                looper = handler.getLooper();
            } else {
                // no given handler, use the looper the AudioTrack was created in
                looper = mInitializationLooper;
            }

            // construct the event handler with this looper
            if (looper != null) {
                // implement the event handler delegate
                mHandler = new Handler(looper) {
                    @Override
                    public void handleMessage(Message msg) {
                        // Nothing to notify about without a track reference.
                        if (track == null) {
                            return;
                        }
                        switch(msg.what) {
                        case NATIVE_EVENT_MARKER:
                            if (listener != null) {
                                listener.onMarkerReached(track);
                            }
                            break;
                        case NATIVE_EVENT_NEW_POS:
                            if (listener != null) {
                                listener.onPeriodicNotification(track);
                            }
                            break;
                        default:
                            loge("Unknown native event type: " + msg.what);
                            break;
                        }
                    }
                };
            } else {
                // No usable Looper: leave the handler null; getHandler() callers
                // are expected to check for null before posting.
                mHandler = null;
            }
        }
   2423 
   2424         Handler getHandler() {
   2425             return mHandler;
   2426         }
   2427     }
   2428 
   2429     /**
   2430      * Helper class to handle the forwarding of native events to the appropriate listener
   2431      * (potentially) handled in a different thread
   2432      */
   2433     private class NativeRoutingEventHandlerDelegate {
   2434         private final Handler mHandler;
   2435 
   2436         NativeRoutingEventHandlerDelegate(final AudioTrack track,
   2437                                    final OnRoutingChangedListener listener,
   2438                                    Handler handler) {
   2439             // find the looper for our new event handler
   2440             Looper looper;
   2441             if (handler != null) {
   2442                 looper = handler.getLooper();
   2443             } else {
   2444                 // no given handler, use the looper the AudioTrack was created in
   2445                 looper = mInitializationLooper;
   2446             }
   2447 
   2448             // construct the event handler with this looper
   2449             if (looper != null) {
   2450                 // implement the event handler delegate
   2451                 mHandler = new Handler(looper) {
   2452                     @Override
   2453                     public void handleMessage(Message msg) {
   2454                         if (track == null) {
   2455                             return;
   2456                         }
   2457                         switch(msg.what) {
   2458                         case AudioSystem.NATIVE_EVENT_ROUTING_CHANGE:
   2459                             if (listener != null) {
   2460                                 listener.onRoutingChanged(track);
   2461                             }
   2462                             break;
   2463                         default:
   2464                             loge("Unknown native event type: " + msg.what);
   2465                             break;
   2466                         }
   2467                     }
   2468                 };
   2469             } else {
   2470                 mHandler = null;
   2471             }
   2472         }
   2473 
   2474         Handler getHandler() {
   2475             return mHandler;
   2476         }
   2477     }
   2478 
   2479     //---------------------------------------------------------
   2480     // Java methods called from the native side
   2481     //--------------------
   2482     @SuppressWarnings("unused")
   2483     private static void postEventFromNative(Object audiotrack_ref,
   2484             int what, int arg1, int arg2, Object obj) {
   2485         //logd("Event posted from the native side: event="+ what + " args="+ arg1+" "+arg2);
   2486         AudioTrack track = (AudioTrack)((WeakReference)audiotrack_ref).get();
   2487         if (track == null) {
   2488             return;
   2489         }
   2490 
   2491         if (what == AudioSystem.NATIVE_EVENT_ROUTING_CHANGE) {
   2492             track.broadcastRoutingChange();
   2493             return;
   2494         }
   2495         NativePositionEventHandlerDelegate delegate = track.mEventHandlerDelegate;
   2496         if (delegate != null) {
   2497             Handler handler = delegate.getHandler();
   2498             if (handler != null) {
   2499                 Message m = handler.obtainMessage(what, arg1, arg2, obj);
   2500                 handler.sendMessage(m);
   2501             }
   2502         }
   2503     }
   2504 
   2505 
   2506     //---------------------------------------------------------
   2507     // Native methods called from the Java side
   2508     //--------------------
   2509 
    // --- Setup and teardown of the native peer ---

    // post-condition: mStreamType is overwritten with a value
    //     that reflects the audio attributes (e.g. an AudioAttributes object with a usage of
    //     AudioAttributes.USAGE_MEDIA will map to AudioManager.STREAM_MUSIC
    private native final int native_setup(Object /*WeakReference<AudioTrack>*/ audiotrack_this,
            Object /*AudioAttributes*/ attributes,
            int sampleRate, int channelMask, int channelIndexMask, int audioFormat,
            int buffSizeInBytes, int mode, int[] sessionId);

    private native final void native_finalize();

    private native final void native_release();

    // --- Transport control ---

    private native final void native_start();

    private native final void native_stop();

    private native final void native_pause();

    private native final void native_flush();

    // --- Audio data writes (one variant per sample container type) ---

    private native final int native_write_byte(byte[] audioData,
                                               int offsetInBytes, int sizeInBytes, int format,
                                               boolean isBlocking);

    private native final int native_write_short(short[] audioData,
                                                int offsetInShorts, int sizeInShorts, int format,
                                                boolean isBlocking);

    private native final int native_write_float(float[] audioData,
                                                int offsetInFloats, int sizeInFloats, int format,
                                                boolean isBlocking);

    private native final int native_write_native_bytes(Object audioData,
            int positionInBytes, int sizeInBytes, int format, boolean blocking);

    private native final int native_reload_static();

    private native final int native_get_native_frame_count();

    // --- Volume, playback rate and playback parameters ---

    private native final void native_setVolume(float leftVolume, float rightVolume);

    private native final int native_set_playback_rate(int sampleRateInHz);
    private native final int native_get_playback_rate();

    private native final void native_set_playback_params(@NonNull PlaybackParams params);
    private native final @NonNull PlaybackParams native_get_playback_params();

    // --- Playback position, markers and notification periods ---

    private native final int native_set_marker_pos(int marker);
    private native final int native_get_marker_pos();

    private native final int native_set_pos_update_period(int updatePeriod);
    private native final int native_get_pos_update_period();

    private native final int native_set_position(int position);
    private native final int native_get_position();

    private native final int native_get_latency();

    // longArray must be a non-null array of length >= 2
    // [0] is assigned the frame position
    // [1] is assigned the time in CLOCK_MONOTONIC nanoseconds
    private native final int native_get_timestamp(long[] longArray);

    private native final int native_set_loop(int start, int end, int loopCount);

    // --- Static queries (no native track instance required) ---

    static private native final int native_get_output_sample_rate(int streamType);
    static private native final int native_get_min_buff_size(
            int sampleRateInHz, int channelConfig, int audioFormat);

    // --- Auxiliary effects ---

    private native final int native_attachAuxEffect(int effectId);
    private native final int native_setAuxEffectSendLevel(float level);

    // --- Output device routing and routing-change callbacks ---

    private native final boolean native_setOutputDevice(int deviceId);
    private native final int native_getRoutedDeviceId();
    private native final void native_enableDeviceCallback();
    private native final void native_disableDeviceCallback();
   2586 
   2587     //---------------------------------------------------------
   2588     // Utility methods
   2589     //------------------
   2590 
    /** Logs a debug-level message under this class's tag. */
    private static void logd(String msg) {
        Log.d(TAG, msg);
    }
   2594 
    /** Logs an error-level message under this class's tag. */
    private static void loge(String msg) {
        Log.e(TAG, msg);
    }
   2598 }
   2599